[columns: python_code | repo_name | file_path]
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, Dict, Optional
from omegaconf import MISSING
from nemo.core import config
from nemo.core.classes.dataset import DatasetConfig
from nemo.utils import exp_manager
@dataclass
class SchedConfig:
name: str = MISSING
min_lr: float = 0.0
last_epoch: int = -1
@dataclass
class OptimConfig:
name: str = MISSING
sched: Optional[SchedConfig] = None
@dataclass
class ModelConfig:
"""
Model component inside ModelPT
"""
# ...
train_ds: Optional[DatasetConfig] = None
validation_ds: Optional[DatasetConfig] = None
test_ds: Optional[DatasetConfig] = None
optim: Optional[OptimConfig] = None
@dataclass
class HydraConfig:
run: Dict[str, Any] = field(default_factory=lambda: {"dir": "."})
job_logging: Dict[str, Any] = field(default_factory=lambda: {"root": {"handlers": None}})
@dataclass
class NemoConfig:
name: str = MISSING
model: ModelConfig = MISSING
trainer: config.TrainerConfig = config.TrainerConfig(
strategy="ddp", enable_checkpointing=False, logger=False, log_every_n_steps=1, accelerator='gpu'
)
exp_manager: Optional[Any] = exp_manager.ExpManagerConfig()
hydra: HydraConfig = HydraConfig()
class ModelConfigBuilder:
def __init__(self, model_cfg: ModelConfig):
"""
Base class for any Model Config Builder.
A Model Config Builder is a utility class that accepts a ModelConfig dataclass,
and via a set of utility methods (that are implemented by the subclassed ModelConfigBuilder),
builds a finalized ModelConfig that can be supplied to a NemoModel dataclass as
the `model` component.
Subclasses *must* implement the private method `_finalize_cfg`.
Inside this method, they must update `self.model_cfg` with all interdependent config
options that need to be set (either updated by user explicitly or with their default value).
The updated model config must then be preserved in `self.model_cfg`.
Example:
# Create the config builder
config_builder = <subclass>ModelConfigBuilder()
# Update the components of the config that are modifiable
config_builder.set_X(X)
config_builder.set_Y(Y)
# Create a "finalized" config dataclass that will contain all the updates
# that were specified by the builder
model_config = config_builder.build()
# Use model config as is (or further update values), then create a new Model
model = nemo.<domain>.models.<ModelName>Model(cfg=model_config, trainer=Trainer())
Supported build methods:
- set_train_ds: All model configs can accept a subclass of `DatasetConfig` as their
training config. Subclasses can override this method to enable auto-complete
by replacing `Optional[DatasetConfig]` with `Optional[<subclass of DatasetConfig>]`.
- set_validation_ds: All model configs can accept a subclass of `DatasetConfig` as their
validation config. Subclasses can override this method to enable auto-complete
by replacing `Optional[DatasetConfig]` with `Optional[<subclass of DatasetConfig>]`.
- set_test_ds: All model configs can accept a subclass of `DatasetConfig` as their
test config. Subclasses can override this method to enable auto-complete
by replacing `Optional[DatasetConfig]` with `Optional[<subclass of DatasetConfig>]`.
- set_optim: A build method that supports changes to the Optimizer (and optionally,
the Scheduler) used for training the model. The function accepts two inputs -
`cfg`: A subclass of `OptimizerParams` - any OptimizerParams subclass can be used,
in order to select an appropriate Optimizer. Examples: AdamParams.
`sched_cfg`: A subclass of `SchedulerParams` - any SchedulerParams subclass can be used,
in order to select an appropriate Scheduler. Examples: CosineAnnealingParams.
Note that this argument is optional.
- build(): The method which should return a "finalized" ModelConfig dataclass.
Subclasses *should* always override this method, and update the signature
of this method with the return type of the Dataclass, so that it enables
autocomplete for the user.
Example:
def build(self) -> EncDecCTCConfig:
return super().build()
Any additional build methods must be added by subclasses of ModelConfigBuilder.
Args:
model_cfg: The ModelConfig dataclass (or subclass instance) that this builder will update and finalize.
"""
self.model_cfg = model_cfg
self.train_ds_cfg = None
self.validation_ds_cfg = None
self.test_ds_cfg = None
self.optim_cfg = None
def set_train_ds(self, cfg: Optional[DatasetConfig] = None):
self.model_cfg.train_ds = cfg
def set_validation_ds(self, cfg: Optional[DatasetConfig] = None):
self.model_cfg.validation_ds = cfg
def set_test_ds(self, cfg: Optional[DatasetConfig] = None):
self.model_cfg.test_ds = cfg
def set_optim(self, cfg: config.OptimizerParams, sched_cfg: Optional[config.SchedulerParams] = None):
@dataclass
class WrappedOptimConfig(OptimConfig, cfg.__class__):
pass
# Setup optim
optim_name = cfg.__class__.__name__.replace("Params", "").lower()
wrapped_cfg = WrappedOptimConfig(name=optim_name, sched=None, **vars(cfg))
if sched_cfg is not None:
@dataclass
class WrappedSchedConfig(SchedConfig, sched_cfg.__class__):
pass
# Setup scheduler
sched_name = sched_cfg.__class__.__name__.replace("Params", "")
wrapped_sched_cfg = WrappedSchedConfig(name=sched_name, **vars(sched_cfg))
wrapped_cfg.sched = wrapped_sched_cfg
self.model_cfg.optim = wrapped_cfg
def _finalize_cfg(self):
raise NotImplementedError()
def build(self) -> ModelConfig:
# validate config
self._finalize_cfg()
return self.model_cfg
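# A minimal sketch of a builder subclass. The names `MyModelConfig` and `MyModelConfigBuilder`
# are hypothetical, used only to illustrate the pattern described in the docstring above:
# override `_finalize_cfg` and narrow the return type of `build()` for autocomplete.
@dataclass
class MyModelConfig(ModelConfig):
    hidden_size: int = 256


class MyModelConfigBuilder(ModelConfigBuilder):
    def __init__(self, model_cfg: Optional[MyModelConfig] = None):
        super().__init__(model_cfg if model_cfg is not None else MyModelConfig())

    def _finalize_cfg(self):
        # Propagate any interdependent config values here; this toy config has none.
        pass

    def build(self) -> MyModelConfig:
        return super().build()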
[repo: NeMo-main | file: nemo/core/config/modelPT.py]
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import functools
import os
import sys
from typing import Any, Callable, Optional
from hydra._internal.utils import _run_hydra, get_args_parser
from hydra.core.config_store import ConfigStore
from hydra.types import TaskFunction
from omegaconf import DictConfig, OmegaConf
def _get_gpu_name():
try:
import pynvml
except (ImportError, ModuleNotFoundError):
return None
pynvml.nvmlInit()
handle = pynvml.nvmlDeviceGetHandleByIndex(0)
cuda_capability, _ = pynvml.nvmlDeviceGetCudaComputeCapability(handle)
pynvml.nvmlShutdown()
if cuda_capability == 8:
return "a100"
elif cuda_capability == 9:
return "h100"
else:
return None
OmegaConf.register_new_resolver("gpu_name", _get_gpu_name)
# multiple interpolated values in the config
OmegaConf.register_new_resolver("multiply", lambda x, y: x * y)
def hydra_runner(
config_path: Optional[str] = ".", config_name: Optional[str] = None, schema: Optional[Any] = None
) -> Callable[[TaskFunction], Any]:
"""
Decorator used for passing the Config paths to main function.
Optionally registers a schema used for validation/providing default values.
Args:
config_path: Optional path that will be added to config search directory.
NOTE: The default value of `config_path` has changed between Hydra 1.0 and Hydra 1.1+.
Please refer to https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_hydra_main_config_path/
for details.
config_name: Pathname of the config file.
schema: Structured config type representing the schema used for validation/providing default values.
"""
def decorator(task_function: TaskFunction) -> Callable[[], None]:
@functools.wraps(task_function)
def wrapper(cfg_passthrough: Optional[DictConfig] = None) -> Any:
# Check if a config was passed.
if cfg_passthrough is not None:
return task_function(cfg_passthrough)
else:
args = get_args_parser()
# Parse arguments in order to retrieve overrides
parsed_args = args.parse_args() # type: argparse.Namespace
# Get overriding args in dot string format
overrides = parsed_args.overrides # type: list
# Disable the creation of .hydra subdir
# https://hydra.cc/docs/tutorials/basic/running_your_app/working_directory
overrides.append("hydra.output_subdir=null")
# Hydra logging outputs only to stdout (no log file).
# https://hydra.cc/docs/configure_hydra/logging
overrides.append("hydra/job_logging=stdout")
# Set run.dir ONLY for ExpManager "compatibility" - to be removed.
overrides.append("hydra.run.dir=.")
# Check if user set the schema.
if schema is not None:
# Create config store.
cs = ConfigStore.instance()
# Get the correct ConfigStore "path name" to "inject" the schema.
if parsed_args.config_name is not None:
path, name = os.path.split(parsed_args.config_name)
# Make sure the path is not set, as this would disable schema validation.
if path != '':
sys.stderr.write(
f"ERROR Cannot set config file path using `--config-name` when "
"using schema. Please set path using `--config-path` and file name using "
"`--config-name` separately.\n"
)
sys.exit(1)
else:
name = config_name
# Register the configuration as a node under the name in the group.
cs.store(name=name, node=schema) # group=group,
# Wrap a callable object with name `parse_args`
# This is to mimic the ArgParser.parse_args() API.
def parse_args(self, args=None, namespace=None):
return parsed_args
parsed_args.parse_args = parse_args
# no return value from run_hydra() as it may sometimes actually run the task_function
# multiple times (--multirun)
# argparse_wrapper = _argparse_wrapper(args)
argparse_wrapper = parsed_args
_run_hydra(
args=argparse_wrapper,
args_parser=args,
task_function=task_function,
config_path=config_path,
config_name=config_name,
)
return wrapper
return decorator
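# A minimal end-to-end sketch of using the decorator. The `ExampleConfig` dataclass and the
# "example" config name are hypothetical; because the schema is registered in the ConfigStore,
# this particular sketch assumes no external YAML file is needed.
if __name__ == "__main__":
    from dataclasses import dataclass

    @dataclass
    class ExampleConfig:
        lr: float = 1e-3
        batch_size: int = 32

    @hydra_runner(config_name="example", schema=ExampleConfig)
    def _example_main(cfg: DictConfig) -> None:
        print(OmegaConf.to_yaml(cfg))

    _example_main()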
[repo: NeMo-main | file: nemo/core/config/hydra_runner.py]
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from functools import partial
from typing import Any, Dict, Optional
@dataclass
class SchedulerParams:
"""
Base configuration for all schedulers.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
last_epoch: int = -1
@dataclass
class SquareRootConstantSchedulerParams(SchedulerParams):
"""
Square Root Constant scheduler parameter config.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
constant_steps: Optional[float] = None
constant_ratio: Optional[float] = None
@dataclass
class WarmupSchedulerParams(SchedulerParams):
"""
Base configuration for schedulers with a warmup phase.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
max_steps: int = 0
warmup_steps: Optional[float] = None
warmup_ratio: Optional[float] = None
@dataclass
class WarmupHoldSchedulerParams(WarmupSchedulerParams):
"""
Base configuration for schedulers with warmup and hold phases.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
hold_steps: Optional[float] = None
hold_ratio: Optional[float] = None
min_lr: float = 0.0
@dataclass
class WarmupAnnealingHoldSchedulerParams(WarmupSchedulerParams):
"""
Base configuration for schedulers with warmup, constant-hold, and annealing phases.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
constant_steps: Optional[float] = None
constant_ratio: Optional[float] = None
min_lr: float = 0.0
@dataclass
class SquareAnnealingParams(WarmupSchedulerParams):
"""
Square Annealing parameter config
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
min_lr: float = 1e-5
@dataclass
class SquareRootAnnealingParams(WarmupSchedulerParams):
"""
Square Root Annealing parameter config
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
min_lr: float = 0.0
@dataclass
class CosineAnnealingParams(WarmupAnnealingHoldSchedulerParams):
"""
Cosine Annealing parameter config
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
min_lr: float = 0.0
@dataclass
class NoamAnnealingParams(WarmupSchedulerParams):
"""
Noam Annealing parameter config
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
min_lr: float = 0.0
@dataclass
class NoamHoldAnnealingParams(WarmupHoldSchedulerParams):
"""
Noam Hold Annealing parameter config
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
decay_rate: float = 0.5
@dataclass
class WarmupAnnealingParams(WarmupSchedulerParams):
"""
Warmup Annealing parameter config
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
warmup_ratio: Optional[float] = None
@dataclass
class InverseSquareRootAnnealingParams(WarmupSchedulerParams):
"""
Inverse Square Root Annealing parameter config
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
@dataclass
class PolynomialDecayAnnealingParams(WarmupSchedulerParams):
"""
Polynomial Decay Annealing parameter config
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
power: float = 1.0
cycle: bool = False
@dataclass
class PolynomialHoldDecayAnnealingParams(WarmupSchedulerParams):
"""
Polynomial Hold Decay Annealing parameter config
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
power: float = 1.0
cycle: bool = False
"""
PyTorch Schedulers
"""
@dataclass
class StepLRParams(SchedulerParams):
"""
Config for StepLR.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
step_size: float = 0.1
gamma: float = 0.1
@dataclass
class ExponentialLRParams(SchedulerParams):
"""
Config for ExponentialLR.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
gamma: float = 0.9
@dataclass
class ReduceLROnPlateauParams:
"""
Config for ReduceLROnPlateau.
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
mode: str = 'min'
factor: float = 0.1
patience: int = 10
verbose: bool = False
threshold: float = 1e-4
threshold_mode: str = 'rel'
cooldown: int = 0
min_lr: float = 0
eps: float = 1e-8
@dataclass
class CyclicLRParams(SchedulerParams):
"""
Config for CyclicLR.
NOTE:
# `scale_fn` is not supported
It is not derived from Config as it is not a NeMo object (and in particular it doesn't need a name).
"""
base_lr: float = 0.001
max_lr: float = 0.1
step_size_up: int = 2000
step_size_down: Optional[int] = None
mode: str = 'triangular'
gamma: float = 1.0
scale_mode: str = 'cycle'
# scale_fn is not supported
cycle_momentum: bool = True
base_momentum: float = 0.8
max_momentum: float = 0.9
def register_scheduler_params(name: str, scheduler_params: SchedulerParams):
"""
Checks if the scheduler config name exists in the registry, and if it doesn't, adds it.
This allows custom schedulers to be added and called by name during instantiation.
Args:
name: Name of the scheduler. Will be used as the key to retrieve the scheduler params.
scheduler_params: SchedulerParams class
"""
if name in AVAILABLE_SCHEDULER_PARAMS:
raise ValueError(f"Cannot override pre-existing optimizers. Conflicting optimizer name = {name}")
AVAILABLE_SCHEDULER_PARAMS[name] = scheduler_params
def get_scheduler_config(name: str, **kwargs: Optional[Dict[str, Any]]) -> SchedulerParams:
"""
Convenience method to obtain a SchedulerParams class and partially instantiate it with scheduler kwargs.
Args:
name: Name of the SchedulerParams in the registry.
kwargs: Optional kwargs of the scheduler params used during instantiation.
Returns:
a partially instantiated SchedulerParams
"""
if name not in AVAILABLE_SCHEDULER_PARAMS:
raise ValueError(
f"Cannot resolve scheduler parameters '{name}'. Available scheduler parameters are : "
f"{AVAILABLE_SCHEDULER_PARAMS.keys()}"
)
scheduler_params = AVAILABLE_SCHEDULER_PARAMS[name]
scheduler_params = partial(scheduler_params, **kwargs)
return scheduler_params
AVAILABLE_SCHEDULER_PARAMS = {
'SchedulerParams': SchedulerParams,
'WarmupPolicyParams': WarmupSchedulerParams,
'WarmupHoldPolicyParams': WarmupHoldSchedulerParams,
'WarmupAnnealingHoldSchedulerParams': WarmupAnnealingHoldSchedulerParams,
'SquareAnnealingParams': SquareAnnealingParams,
'SquareRootAnnealingParams': SquareRootAnnealingParams,
'InverseSquareRootAnnealingParams': InverseSquareRootAnnealingParams,
'SquareRootConstantSchedulerParams': SquareRootConstantSchedulerParams,
'CosineAnnealingParams': CosineAnnealingParams,
'NoamAnnealingParams': NoamAnnealingParams,
'NoamHoldAnnealingParams': NoamHoldAnnealingParams,
'WarmupAnnealingParams': WarmupAnnealingParams,
'PolynomialDecayAnnealingParams': PolynomialDecayAnnealingParams,
'PolynomialHoldDecayAnnealingParams': PolynomialHoldDecayAnnealingParams,
'ReduceLROnPlateauParams': ReduceLROnPlateauParams,
}
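# A small usage sketch. The `MyDecayParams` dataclass below is hypothetical and only
# demonstrates registration; `get_scheduler_config` returns a partial that can be finalized
# with any remaining fields.
if __name__ == "__main__":
    @dataclass
    class MyDecayParams(SchedulerParams):
        decay: float = 0.01

    register_scheduler_params('MyDecayParams', MyDecayParams)

    cosine_partial = get_scheduler_config('CosineAnnealingParams', warmup_steps=1000, min_lr=1e-5)
    print(cosine_partial(max_steps=100000))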
[repo: NeMo-main | file: nemo/core/config/schedulers.py]
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from typing import List, Union
import torch
from pytorch_lightning.core.module import _jit_is_scripting
from nemo.core.classes import typecheck
from nemo.core.utils.neural_type_utils import get_dynamic_axes, get_io_names
from nemo.utils import logging
from nemo.utils.export_utils import (
ExportFormat,
augment_filename,
get_export_format,
parse_input_example,
replace_for_export,
verify_runtime,
verify_torchscript,
wrap_forward_method,
)
__all__ = ['ExportFormat', 'Exportable']
class Exportable(ABC):
"""
This Interface should be implemented by particular classes derived from nemo.core.NeuralModule or nemo.core.ModelPT.
It gives these entities the ability to be exported for deployment to formats such as ONNX.
Usage:
# exporting pre-trained model to ONNX file for deployment.
model.eval()
model.to('cuda') # or to('cpu') if you don't have GPU
model.export('mymodel.onnx', [options]) # all arguments apart from `output` are optional.
"""
@property
def input_module(self):
return self
@property
def output_module(self):
return self
def export(
self,
output: str,
input_example=None,
verbose=False,
do_constant_folding=True,
onnx_opset_version=None,
check_trace: Union[bool, List[torch.Tensor]] = False,
dynamic_axes=None,
check_tolerance=0.01,
export_modules_as_functions=False,
keep_initializers_as_inputs=None,
):
"""
Exports the model to the specified format. The format is inferred from the file extension of the output file.
Args:
output (str): Output file name. The file extension must be .onnx, .pt, or .ts; it is used to select the
export format of the model.
input_example (list or dict): Example input to the model's forward function. This is used to
trace the model and export it to ONNX/TorchScript. If the model takes multiple inputs, then input_example
should be a list of input examples. If the model takes named inputs, then input_example
should be a dictionary of input examples.
verbose (bool): If True, will print out a detailed description of the model's export steps, along with
the internal trace logs of the export process.
do_constant_folding (bool): If True, will execute constant folding optimization on the model's graph
before exporting. This is ONNX specific.
onnx_opset_version (int): The ONNX opset version to export the model to. If None, will use a reasonable
default version.
check_trace (bool): If True, will verify that the model's output matches the output of the traced
model, up to some tolerance.
dynamic_axes (dict): A dictionary mapping input and output names to their dynamic axes. This is
used to specify the dynamic axes of the model's inputs and outputs. If the model takes multiple inputs,
then dynamic_axes should be a list of dictionaries. If the model takes named inputs, then dynamic_axes
should be a dictionary of dictionaries. If None, will use the dynamic axes of the input_example
derived from the NeuralType of the input and output of the model.
check_tolerance (float): The tolerance to use when checking the model's output against the traced
model's output. This is only used if check_trace is True. Note the high tolerance is used because
the traced model is not guaranteed to be 100% accurate.
export_modules_as_functions (bool): If True, will export the model's submodules as functions. This is
ONNX specific.
keep_initializers_as_inputs (bool): If True, will keep the model's initializers as inputs in the onnx graph.
This is ONNX specific.
Returns:
A tuple of two outputs.
Item 0 in the output is a list of outputs, the outputs of each subnet exported.
Item 1 in the output is a list of string descriptions. The description of each subnet exported can be
used for logging purposes.
"""
all_out = []
all_descr = []
for subnet_name in self.list_export_subnets():
model = self.get_export_subnet(subnet_name)
out_name = augment_filename(output, subnet_name)
out, descr, out_example = model._export(
out_name,
input_example=input_example,
verbose=verbose,
do_constant_folding=do_constant_folding,
onnx_opset_version=onnx_opset_version,
check_trace=check_trace,
dynamic_axes=dynamic_axes,
check_tolerance=check_tolerance,
export_modules_as_functions=export_modules_as_functions,
keep_initializers_as_inputs=keep_initializers_as_inputs,
)
# Propagate input example (default scenario, may need to be overridden)
if input_example is not None:
input_example = out_example
all_out.append(out)
all_descr.append(descr)
logging.info("Successfully exported {} to {}".format(model.__class__.__name__, out_name))
return (all_out, all_descr)
def _export(
self,
output: str,
input_example=None,
verbose=False,
do_constant_folding=True,
onnx_opset_version=None,
check_trace: Union[bool, List[torch.Tensor]] = False,
dynamic_axes=None,
check_tolerance=0.01,
export_modules_as_functions=False,
keep_initializers_as_inputs=None,
):
my_args = locals().copy()
my_args.pop('self')
self.eval()
for param in self.parameters():
param.requires_grad = False
exportables = []
for m in self.modules():
if isinstance(m, Exportable):
exportables.append(m)
qual_name = self.__module__ + '.' + self.__class__.__qualname__
format = get_export_format(output)
output_descr = f"{qual_name} exported to {format}"
# PyTorch's default opset version is too low; use a reasonably recent one
if onnx_opset_version is None:
onnx_opset_version = 16
try:
# Disable typechecks
typecheck.set_typecheck_enabled(enabled=False)
# Allow user to completely override forward method to export
forward_method, old_forward_method = wrap_forward_method(self)
# Set module mode
with torch.inference_mode(), torch.no_grad(), torch.jit.optimized_execution(True), _jit_is_scripting():
if input_example is None:
input_example = self.input_module.input_example()
# Remove i/o examples from args we propagate to enclosed Exportables
my_args.pop('output')
my_args.pop('input_example')
# Run (possibly overridden) prepare methods before calling forward()
for ex in exportables:
ex._prepare_for_export(**my_args, noreplace=True)
self._prepare_for_export(output=output, input_example=input_example, **my_args)
input_list, input_dict = parse_input_example(input_example)
input_names = self.input_names
output_names = self.output_names
output_example = tuple(self.forward(*input_list, **input_dict))
if check_trace:
if isinstance(check_trace, bool):
check_trace_input = [input_example]
else:
check_trace_input = check_trace
jitted_model = self
if format == ExportFormat.TORCHSCRIPT:
jitted_model = torch.jit.trace_module(
self,
{"forward": tuple(input_list) + tuple(input_dict.values())},
strict=True,
check_trace=check_trace,
check_tolerance=check_tolerance,
)
jitted_model = torch.jit.freeze(jitted_model)
if verbose:
logging.info(f"JIT code:\n{jitted_model.code}")
jitted_model.save(output)
jitted_model = torch.jit.load(output)
if check_trace:
verify_torchscript(jitted_model, output, check_trace_input, check_tolerance)
elif format == ExportFormat.ONNX:
# dynamic axis is a mapping from input/output_name => list of "dynamic" indices
if dynamic_axes is None:
dynamic_axes = get_dynamic_axes(self.input_module.input_types_for_export, input_names)
dynamic_axes.update(get_dynamic_axes(self.output_module.output_types_for_export, output_names))
torch.onnx.export(
jitted_model,
input_example,
output,
input_names=input_names,
output_names=output_names,
verbose=verbose,
do_constant_folding=do_constant_folding,
dynamic_axes=dynamic_axes,
opset_version=onnx_opset_version,
keep_initializers_as_inputs=keep_initializers_as_inputs,
export_modules_as_functions=export_modules_as_functions,
)
if check_trace:
verify_runtime(self, output, check_trace_input, input_names, check_tolerance=check_tolerance)
else:
raise ValueError(f'Encountered unknown export format {format}.')
finally:
typecheck.set_typecheck_enabled(enabled=True)
if forward_method:
type(self).forward = old_forward_method
self._export_teardown()
return (output, output_descr, output_example)
@property
def disabled_deployment_input_names(self):
"""Implement this method to return a set of input names disabled for export"""
return set()
@property
def disabled_deployment_output_names(self):
"""Implement this method to return a set of output names disabled for export"""
return set()
@property
def supported_export_formats(self):
"""Implement this method to return a set of export formats supported. Default is all types."""
return set([ExportFormat.ONNX, ExportFormat.TORCHSCRIPT])
def _prepare_for_export(self, **kwargs):
"""
Override this method to prepare module for export. This is in-place operation.
Base version does common necessary module replacements (Apex etc)
"""
if 'noreplace' not in kwargs:
replace_for_export(self)
def _export_teardown(self):
"""
Override this method for any teardown code after export.
"""
pass
@property
def input_names(self):
return get_io_names(self.input_module.input_types_for_export, self.disabled_deployment_input_names)
@property
def output_names(self):
return get_io_names(self.output_module.output_types_for_export, self.disabled_deployment_output_names)
@property
def input_types_for_export(self):
return self.input_types
@property
def output_types_for_export(self):
return self.output_types
def get_export_subnet(self, subnet=None):
"""
Returns Exportable subnet model/module to export
"""
if subnet is None or subnet == 'self':
return self
else:
return getattr(self, subnet)
def list_export_subnets(self):
"""
Returns default set of subnet names exported for this model
First goes the one receiving input (input_example)
"""
return ['self']
def get_export_config(self):
"""
Returns export_config dictionary
"""
return getattr(self, 'export_config', {})
def set_export_config(self, args):
"""
Sets/updates export_config dictionary
"""
ex_config = self.get_export_config()
ex_config.update(args)
self.export_config = ex_config
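# A minimal runnable sketch of the config helpers. The `_ToyExportable` class and the
# 'runtime_check' key are hypothetical and exist only for illustration; real models that mix in
# Exportable are used the same way before calling `export()`.
if __name__ == "__main__":
    class _ToyExportable(torch.nn.Module, Exportable):
        def forward(self, x):
            return x * 2

    m = _ToyExportable()
    m.set_export_config({'runtime_check': True})
    print(m.get_export_config())    # {'runtime_check': True}
    print(m.list_export_subnets())  # ['self']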
[repo: NeMo-main | file: nemo/core/classes/exportable.py]
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hydra
import omegaconf
import pytorch_lightning
from nemo.core.classes.common import (
FileIO,
Model,
PretrainedModelInfo,
Serialization,
Typing,
is_typecheck_enabled,
typecheck,
)
from nemo.core.classes.dataset import Dataset, IterableDataset
from nemo.core.classes.exportable import Exportable, ExportFormat
from nemo.core.classes.loss import Loss
from nemo.core.classes.mixins import access_mixins, adapter_mixins
from nemo.core.classes.modelPT import ModelPT
from nemo.core.classes.module import NeuralModule
from nemo.utils import exceptions
[repo: NeMo-main | file: nemo/core/classes/__init__.py]
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional
from torch.utils import data
from nemo.core.classes import Serialization, Typing, typecheck
__all__ = ['Dataset', 'IterableDataset']
class Dataset(data.Dataset, Typing, Serialization):
"""Dataset with output ports
Please Note: Subclasses of Dataset should *not* implement input_types.
"""
def _collate_fn(self, batch):
"""
A default implementation of a collation function.
Users should override this method to define custom data loaders.
"""
return data.dataloader.default_collate(batch)
@typecheck()
def collate_fn(self, batch):
"""
This is the method that users pass as the `collate_fn` functor to DataLoader.
The method optionally performs neural type checking and adds types to the outputs.
Please note, subclasses of Dataset should not implement `input_types`.
# Usage:
dataloader = torch.utils.data.DataLoader(
....,
collate_fn=dataset.collate_fn,
....
)
Returns:
Collated batch, with or without types.
"""
if self.input_types is not None:
raise TypeError("Datasets should not implement `input_types` as they are not checked")
# Simply forward the inner `_collate_fn`
return self._collate_fn(batch)
class IterableDataset(data.IterableDataset, Typing, Serialization):
"""Iterable Dataset with output ports
Please Note: Subclasses of IterableDataset should *not* implement input_types.
"""
def _collate_fn(self, batch):
"""
A default implementation of a collation function.
Users should override this method to define custom data loaders.
"""
return data.dataloader.default_collate(batch)
@typecheck()
def collate_fn(self, batch):
"""
This is the method that users pass as the `collate_fn` functor to DataLoader.
The method optionally performs neural type checking and adds types to the outputs.
# Usage:
dataloader = torch.utils.data.DataLoader(
....,
collate_fn=dataset.collate_fn,
....
)
Returns:
Collated batch, with or without types.
"""
if self.input_types is not None:
raise TypeError("Datasets should not implement `input_types` as they are not checked")
# Simply forward the inner `_collate_fn`
return self._collate_fn(batch)
@dataclass
class DatasetConfig:
"""
"""
# ...
batch_size: int = 32
drop_last: bool = False
shuffle: bool = False
num_workers: Optional[int] = 0
pin_memory: bool = True
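# A minimal runnable sketch. The `RangeDataset` class below is hypothetical: a map-style
# Dataset that declares no neural types simply forwards batches through the default collation.
if __name__ == "__main__":
    class RangeDataset(Dataset):
        def __init__(self, n: int = 8):
            self.items = list(range(n))

        def __len__(self):
            return len(self.items)

        def __getitem__(self, idx):
            return self.items[idx]

    ds = RangeDataset()
    cfg = DatasetConfig(batch_size=4, shuffle=False)
    loader = data.DataLoader(ds, batch_size=cfg.batch_size, shuffle=cfg.shuffle, collate_fn=ds.collate_fn)
    for batch in loader:
        print(batch)  # e.g. tensor([0, 1, 2, 3])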
[repo: NeMo-main | file: nemo/core/classes/dataset.py]
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.core.classes.common import Serialization, Typing
__all__ = ['Loss']
class Loss(torch.nn.modules.loss._Loss, Typing, Serialization):
"""Inherit this class to implement custom loss."""
def __init__(self, **kwargs):
super(Loss, self).__init__(**kwargs)
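# A minimal sketch of a custom loss. `ToyMSELoss` is hypothetical and only illustrates the
# pattern suggested by the docstring above: inherit from Loss and implement `forward`.
if __name__ == "__main__":
    class ToyMSELoss(Loss):
        def forward(self, preds, targets):
            return torch.mean((preds - targets) ** 2)

    loss_fn = ToyMSELoss()
    print(loss_fn(torch.ones(4), torch.zeros(4)))  # tensor(1.)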
[repo: NeMo-main | file: nemo/core/classes/loss.py]
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interfaces common to all Neural Modules and Models."""
import hashlib
import inspect
import traceback
from abc import ABC, abstractmethod
from contextlib import contextmanager
from dataclasses import dataclass, field
from enum import Enum
from functools import total_ordering
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Tuple, Union
import hydra
import torch
import wrapt
from huggingface_hub import HfApi, HfFolder, ModelFilter, hf_hub_download
from huggingface_hub.hf_api import ModelInfo
from omegaconf import DictConfig, OmegaConf
import nemo
from nemo.core.connectors.save_restore_connector import SaveRestoreConnector
from nemo.core.neural_types import NeuralType, NeuralTypeComparisonResult
from nemo.utils import logging
from nemo.utils.cloud import maybe_download_from_cloud
from nemo.utils.data_utils import resolve_cache_dir
from nemo.utils.model_utils import import_class_by_path, maybe_update_config_version
__all__ = ['Typing', 'FileIO', 'Model', 'Serialization', 'typecheck', 'PretrainedModelInfo']
_TYPECHECK_ENABLED = True
# TODO @blisc: Remove _HAS_HYDRA
_HAS_HYDRA = True
def is_typecheck_enabled():
"""
Getter method for typechecking state.
"""
return _TYPECHECK_ENABLED
@dataclass
class TypecheckMetadata:
"""
Metadata class for input/output neural types.
# Primary attributes
original_types: Preserve the dictionary of type information provided.
ignore_collections: For backward compatibility, container support can be disabled explicitly
using this flag. When set to True, all nesting is ignored and nest-depth checks are skipped.
# Derived attributes
mandatory_types: Sub-dictionary of `original_types` which contains only those types which
are mandatory to include when calling the function.
base_types: Dictionary of flattened `str: NeuralType` definitions, with the container nesting
stripped away from each argument.
container_depth: Dictionary mapping `str: int` - such that the valid depth of the nest of this
neural type is recorded.
has_container_types: Bool flag declaring if any of the neural types declares a container nest
in its signature.
is_singular_container_type: Bool flag declaring if this is a single Neural Type with a container
nest in its signature. Required for supporting python list expansion in return statement.
"""
original_types: Dict[str, NeuralType]
ignore_collections: bool
mandatory_types: Dict[str, NeuralType] = field(init=False)
base_types: Dict[str, NeuralType] = field(init=False)
container_depth: Dict[str, int] = field(init=False)
has_container_types: bool = field(init=False)
is_singular_container_type: bool = field(init=False)
def __post_init__(self):
# If even one NeuralType declares a container nest, set to True
has_container_types = False
for type_val in self.original_types.values():
if isinstance(type_val, (list, tuple)):
has_container_types = True
break
self.has_container_types = has_container_types
# If only one NeuralType is declared, and it declares a container nest, set to True
if self.has_container_types and len(self.original_types) == 1:
self.is_singular_container_type = True
else:
self.is_singular_container_type = False
# If container nests are declared, flatten the nest into `base_types`
# Also compute the nest depth for each of the NeuralTypes
if self.has_container_types:
self.base_types = {}
self.container_depth = {}
for type_key, type_val in self.original_types.items():
depth = 0
while isinstance(type_val, (list, tuple)):
if len(type_val) > 1:
raise TypeError(
f"Neural Type `{type_key}`: {type_val} definition contains more than one element when"
"declaring the nested container structure.\n"
"Please ensure that you have only 1 NeuralType inside of the entire nested structure "
"definition."
)
type_val = type_val[0]
depth += 1
self.base_types[type_key] = type_val
self.container_depth[type_key] = depth
else:
# Otherwise, simply preserve the original_types and set depth of nest to 0.
self.base_types = self.original_types
self.container_depth = {type_key: 0 for type_key in self.base_types.keys()}
# Compute subset of original_types which are mandatory in the call argspec
self.mandatory_types = {
type_key: type_val for type_key, type_val in self.base_types.items() if not type_val.optional
}
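# A small runnable sketch of how container nesting is flattened. It assumes `ChannelType`
# can be imported from nemo.core.neural_types (not shown in this file); the type and axes
# used here are illustrative only.
if __name__ == "__main__":
    from nemo.core.neural_types import ChannelType

    _meta = TypecheckMetadata(
        original_types={'x': [NeuralType(('B', 'T'), ChannelType())]}, ignore_collections=False
    )
    print(_meta.base_types['x'])             # flattened NeuralType (container removed)
    print(_meta.container_depth['x'])        # 1
    print(_meta.is_singular_container_type)  # True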
class Typing(ABC):
"""
An interface which endows module with neural types
"""
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
"""Define these to enable input neural type checks"""
return None
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Define these to enable output neural type checks"""
return None
def _validate_input_types(self, input_types=None, ignore_collections=False, **kwargs):
"""
This function does a few things.
1) It ensures that len(self.input_types <non-optional>) <= len(kwargs) <= len(self.input_types).
2) For each (keyword name, keyword value) passed as input to the wrapped function:
- Check if the keyword name exists in the list of valid self.input_types names.
- Check if keyword value has the `neural_type` property.
- If it does, then perform a comparative check and assert that neural types
are compatible (SAME or GREATER).
- Check if keyword value is a container type (list or tuple). If yes,
then perform the elementwise test of neural type above on each element
of the nested structure, recursively.
Args:
input_types: Either the `input_types` defined at class level, or the local function
overridden type definition.
ignore_collections: For backward compatibility, container support can be disabled explicitly
using this flag. When set to True, all nesting is ignored and nest-depth checks are skipped.
kwargs: Dictionary of argument_name:argument_value pairs passed to the wrapped
function upon call.
"""
# TODO: Properly implement this
if input_types is not None:
# Precompute metadata
metadata = TypecheckMetadata(original_types=input_types, ignore_collections=ignore_collections)
total_input_types = len(input_types)
mandatory_input_types = len(metadata.mandatory_types)
# Allow number of input arguments to be <= total input neural types.
if len(kwargs) < mandatory_input_types or len(kwargs) > total_input_types:
raise TypeError(
f"Number of input arguments provided ({len(kwargs)}) is not as expected. Function has "
f"{total_input_types} total inputs with {mandatory_input_types} mandatory inputs."
)
for key, value in kwargs.items():
# Check if keys exists in the defined input types
if key not in input_types:
raise TypeError(
f"Input argument {key} has no corresponding input_type match. "
f"Existing input_types = {input_types.keys()}"
)
# Perform neural type check
if hasattr(value, 'neural_type') and metadata.base_types[key].compare(value.neural_type) not in (
NeuralTypeComparisonResult.SAME,
NeuralTypeComparisonResult.GREATER,
):
error_msg = [
f"{input_types[key].compare(value.neural_type)} :",
f"Input type expected : {input_types[key]}",
f"Input type found : {value.neural_type}",
f"Argument: {key}",
]
for i, dict_tuple in enumerate(metadata.base_types[key].elements_type.type_parameters.items()):
error_msg.insert(i + 2, f' input param_{i} : {dict_tuple[0]}: {dict_tuple[1]}')
for i, dict_tuple in enumerate(value.neural_type.elements_type.type_parameters.items()):
error_msg.append(f' input param_{i} : {dict_tuple[0]}: {dict_tuple[1]}')
raise TypeError("\n".join(error_msg))
# Perform input ndim check
if hasattr(value, 'shape'):
value_shape = value.shape
type_shape = metadata.base_types[key].axes
name = key
if type_shape is not None and len(value_shape) != len(type_shape):
raise TypeError(
f"Input shape mismatch occured for {name} in module {self.__class__.__name__} : \n"
f"Input shape expected = {metadata.base_types[key].axes} | \n"
f"Input shape found : {value_shape}"
)
# Perform recursive neural type check for homogeneous elements
elif isinstance(value, list) or isinstance(value, tuple):
for ind, val in enumerate(value):
"""
This initiates a DFS, tracking the depth count as it goes along the nested structure.
Initial depth is 1 as we consider the current loop to be the 1st step inside the nest.
"""
self.__check_neural_type(val, metadata, depth=1, name=key)
def _attach_and_validate_output_types(self, out_objects, ignore_collections=False, output_types=None):
"""
This function does a few things.
1) It ensures that len(out_object) == len(self.output_types).
2) If the output is a tensor (or list/tuple of list/tuple ... of tensors), it
attaches a neural_type to it. For objects without the neural_type attribute,
such as python objects (dictionaries and lists, primitive data types, structs),
no neural_type is attached.
Note: tensor.neural_type is only checked during _validate_input_types which is
called prior to forward().
Args:
output_types: Either the `output_types` defined at class level, or the local function
overridden type definition.
ignore_collections: For backward compatibility, container support can be disabled explicitly
using this flag. When set to True, all nesting is ignored and nest-depth checks are skipped.
out_objects: The outputs of the wrapped function.
"""
# TODO: Properly implement this
if output_types is not None:
# Precompute metadata
metadata = TypecheckMetadata(original_types=output_types, ignore_collections=ignore_collections)
out_types_list = list(metadata.base_types.items())
mandatory_out_types_list = list(metadata.mandatory_types.items())
# First convert all outputs to list/tuple format to check correct number of outputs
if isinstance(out_objects, (list, tuple)):
out_container = out_objects # can be any rank nested structure
else:
out_container = [out_objects]
# If this neural type has a *single output*, with *support for nested outputs*,
# then *do not* perform any check on the number of output items against the number
# of neural types (in this case, 1).
# This is done as python will *not* wrap a single returned list into a tuple of length 1,
# instead opting to keep the list intact. Therefore len(out_container) in such a case
# is the length of all the elements of that list - each of which has the same corresponding
# neural type (defined as the singular container type).
if metadata.is_singular_container_type:
pass
# In all other cases, python will wrap multiple outputs into an outer tuple.
# Allow number of output arguments to be <= total output neural types and >= mandatory outputs.
elif len(out_container) > len(out_types_list) or len(out_container) < len(mandatory_out_types_list):
raise TypeError(
"Number of output arguments provided ({}) is not as expected. "
"It should be larger or equal than {} and less or equal than {}.\n"
"This can be either because insufficient/extra number of output NeuralTypes were provided,"
"or the provided NeuralTypes {} should enable container support "
"(add '[]' to the NeuralType definition)".format(
len(out_container), len(out_types_list), len(mandatory_out_types_list), output_types
)
)
# Attach types recursively, if possible
if not isinstance(out_objects, tuple) and not isinstance(out_objects, list):
# Here, out_objects is a single object which can potentially be attached with a NeuralType
try:
out_objects.neural_type = out_types_list[0][1]
except Exception:
pass
# Perform output ndim check
if hasattr(out_objects, 'shape'):
value_shape = out_objects.shape
type_shape = out_types_list[0][1].axes
name = out_types_list[0][0]
if type_shape is not None and len(value_shape) != len(type_shape):
raise TypeError(
f"Output shape mismatch occured for {name} in module {self.__class__.__name__} : \n"
f"Output shape expected = {type_shape} | \n"
f"Output shape found : {value_shape}"
)
elif metadata.is_singular_container_type:
# If only a single neural type is provided, and it defines a container nest,
# then all elements of the returned list/tuple are assumed to belong to that
# singular neural type.
# As such, the "current" depth inside the DFS loop is counted as 1,
# and subsequent nesting will increase this count.
# NOTE:
# As the flag `is_singular_container_type` will activate only for
# the case where there is 1 output type defined with container nesting,
# this is a safe assumption to make.
depth = 1
# NOTE:
# A user may choose to explicitly wrap the single output list within an explicit tuple
# In such a case we reduce the "current" depth to 0 - to acknowledge the fact that
# the actual nest exists within a wrapper tuple.
if len(out_objects) == 1 and type(out_objects) == tuple:
depth = 0
for ind, res in enumerate(out_objects):
self.__attach_neural_type(res, metadata, depth=depth, name=out_types_list[0][0])
else:
# If more than one item is returned in a return statement, python will wrap
# the output with an outer tuple. Therefore there must be a 1:1 correspondence
# of the output_neural type (with or without nested structure) to the actual output
# (whether it is a single object or a nested structure of objects).
# Therefore in such a case, we "start" the DFS at depth 0 - since the recursion is
# being applied on 1 neural type : 1 output struct (single or nested output).
# Since we are guaranteed that the outer tuple will be built by python,
# assuming initial depth of 0 is appropriate.
for ind, res in enumerate(out_objects):
self.__attach_neural_type(res, metadata, depth=0, name=out_types_list[ind][0])
def __check_neural_type(self, obj, metadata: TypecheckMetadata, depth: int, name: str = None):
"""
Recursively tests whether the obj satisfies the semantic neural type assertion.
Can include shape checks if shape information is provided.
Args:
obj: Any python object that can be assigned a value.
metadata: TypecheckMetadata object.
depth: Current depth of recursion.
name: Optional name of the source obj, used when an error occurs.
"""
if isinstance(obj, tuple) or isinstance(obj, list):
for elem in obj:
self.__check_neural_type(elem, metadata, depth + 1, name=name)
return # after processing nest, return to avoid testing nest itself
type_val = metadata.base_types[name]
# If nest depth doesn't match neural type structure depth, raise an error
if not metadata.ignore_collections and depth != metadata.container_depth[name]:
raise TypeError(
"While checking input neural types,\n"
"Nested depth of value did not match container specification:\n"
f"Current nested depth of NeuralType '{name}' ({type_val}): {depth}\n"
f"Expected nested depth : {metadata.container_depth[name]}"
)
if hasattr(obj, 'neural_type') and type_val.compare(obj.neural_type) not in (
NeuralTypeComparisonResult.SAME,
NeuralTypeComparisonResult.GREATER,
):
raise TypeError(
f"{type_val.compare(obj.neural_type)} : \n"
f"Input type expected = {type_val} | \n"
f"Input type found : {obj.neural_type}"
)
# Perform input ndim check
if hasattr(obj, 'shape'):
value_shape = obj.shape
type_shape = type_val.axes
if type_shape is not None and len(value_shape) != len(type_shape):
raise TypeError(
f"Input shape mismatch occured for {name} in module {self.__class__.__name__} : \n"
f"Input shape expected = {type_shape} | \n"
f"Input shape found : {value_shape}"
)
def __attach_neural_type(self, obj, metadata: TypecheckMetadata, depth: int, name: str = None):
"""
Recursively attach neural types to a given object - as long as it can be assigned some value.
Args:
obj: Any python object that can be assigned a value.
metadata: TypecheckMetadata object.
depth: Current depth of recursion.
name: Optional name of the source obj, used when an error occurs.
"""
if isinstance(obj, tuple) or isinstance(obj, list):
for elem in obj:
self.__attach_neural_type(elem, metadata, depth=depth + 1, name=name)
return # after processing nest, return to avoid argument insertion into nest itself
type_val = metadata.base_types[name]
# If nest depth doesn't match neural type structure depth, raise an error
if not metadata.ignore_collections and depth != metadata.container_depth[name]:
raise TypeError(
"While attaching output neural types,\n"
"Nested depth of value did not match container specification:\n"
f"Current nested depth of NeuralType '{name}' ({type_val}): {depth}\n"
f"Expected nested depth : {metadata.container_depth[name]}"
)
try:
obj.neural_type = type_val
except Exception:
pass
# Perform output ndim check
if hasattr(obj, 'shape'):
value_shape = obj.shape
type_shape = type_val.axes
if type_shape is not None and len(value_shape) != len(type_shape):
raise TypeError(
f"Output shape mismatch occured for {name} in module {self.__class__.__name__} : \n"
f"Output shape expected = {type_shape} | \n"
f"Output shape found : {value_shape}"
)
class Serialization(ABC):
@classmethod
def from_config_dict(cls, config: 'DictConfig', trainer: Optional['Trainer'] = None):
"""Instantiates object using DictConfig-based configuration"""
# Resolve the config dict
if _HAS_HYDRA:
if isinstance(config, DictConfig):
config = OmegaConf.to_container(config, resolve=True)
config = OmegaConf.create(config)
OmegaConf.set_struct(config, True)
config = maybe_update_config_version(config)
# Hydra 0.x API
if ('cls' in config or 'target' in config) and 'params' in config and _HAS_HYDRA:
# regular hydra-based instantiation
instance = hydra.utils.instantiate(config=config)
# Hydra 1.x API
elif '_target_' in config and _HAS_HYDRA:
# regular hydra-based instantiation
instance = hydra.utils.instantiate(config=config)
else:
instance = None
prev_error = ""
# Attempt class path resolution from config `target` class (if it exists)
if 'target' in config:
target_cls = config["target"] # No guarantee that this is a omegaconf class
imported_cls = None
try:
# try to import the target class
imported_cls = import_class_by_path(target_cls)
# if calling class (cls) is subclass of imported class,
# use subclass instead
if issubclass(cls, imported_cls):
imported_cls = cls
accepts_trainer = Serialization._inspect_signature_for_trainer(imported_cls)
if accepts_trainer:
instance = imported_cls(cfg=config, trainer=trainer)
else:
instance = imported_cls(cfg=config)
except Exception as e:
# record previous error
tb = traceback.format_exc()
prev_error = f"Model instantiation failed!\nTarget class:\t{target_cls}" f"\nError(s):\t{e}\n{tb}"
logging.debug(prev_error + "\nFalling back to `cls`.")
# target class resolution was unsuccessful, fall back to current `cls`
if instance is None:
try:
accepts_trainer = Serialization._inspect_signature_for_trainer(cls)
if accepts_trainer:
instance = cls(cfg=config, trainer=trainer)
else:
instance = cls(cfg=config)
except Exception as e:
# report saved errors, if any, and raise
if prev_error:
logging.error(prev_error)
raise e
if not hasattr(instance, '_cfg'):
instance._cfg = config
return instance
def to_config_dict(self) -> 'DictConfig':
"""Returns object's configuration to config dictionary"""
if hasattr(self, '_cfg') and self._cfg is not None:
# Resolve the config dict
if _HAS_HYDRA and isinstance(self._cfg, DictConfig):
config = OmegaConf.to_container(self._cfg, resolve=True)
config = OmegaConf.create(config)
OmegaConf.set_struct(config, True)
config = maybe_update_config_version(config)
self._cfg = config
return self._cfg
else:
raise NotImplementedError(
'to_config_dict() can currently only return object._cfg but current object does not have it.'
)
@classmethod
def _inspect_signature_for_trainer(cls, check_cls):
if hasattr(check_cls, '__init__'):
signature = inspect.signature(check_cls.__init__)
if 'trainer' in signature.parameters:
return True
else:
return False
else:
return False
class FileIO(ABC):
def save_to(self, save_path: str):
"""
Standardized method to save a tarfile containing the checkpoint, config, and any additional artifacts.
Implemented via :meth:`nemo.core.connectors.save_restore_connector.SaveRestoreConnector.save_to`.
Args:
save_path: str, path to where the file should be saved.
"""
raise NotImplementedError()
@classmethod
def restore_from(
cls,
restore_path: str,
override_config_path: Optional[str] = None,
map_location: Optional['torch.device'] = None,
strict: bool = True,
return_config: bool = False,
trainer: Optional['Trainer'] = None,
save_restore_connector: SaveRestoreConnector = None,
):
"""
Restores model instance (weights and configuration) from a .nemo file
Args:
restore_path: path to .nemo file from which model should be instantiated
override_config_path: path to a yaml config that will override the internal
config file or an OmegaConf / DictConfig object representing the model config.
map_location: Optional torch.device() to map the instantiated model to a device.
By default (None), it will select a GPU if available, falling back to CPU otherwise.
strict: Passed to load_state_dict. By default True
return_config: If set to true, will return just the underlying config of the restored
model as an OmegaConf DictConfig object without instantiating the model.
trainer: An optional Trainer object, passed to the model constructor.
save_restore_connector: An optional SaveRestoreConnector object that defines the implementation
of the restore_from() method.
"""
raise NotImplementedError()
@classmethod
def from_config_file(cls, path2yaml_file: str):
"""
Instantiates an instance of NeMo Model from YAML config file.
Weights will be initialized randomly.
Args:
path2yaml_file: path to yaml file with model configuration
Returns:
"""
if issubclass(cls, Serialization):
conf = OmegaConf.load(path2yaml_file)
return cls.from_config_dict(config=conf)
else:
raise NotImplementedError()
def to_config_file(self, path2yaml_file: str):
"""
Saves current instance's configuration to YAML config file. Weights will not be saved.
Args:
path2yaml_file: path to yaml file where the model configuration will be saved
Returns:
"""
if hasattr(self, '_cfg'):
self._cfg = maybe_update_config_version(self._cfg)
with open(path2yaml_file, 'w', encoding='utf-8') as fout:
OmegaConf.save(config=self._cfg, f=fout, resolve=True)
else:
raise NotImplementedError()
@total_ordering
@dataclass
class PretrainedModelInfo:
pretrained_model_name: str
description: str
location: str
class_: 'Model' = None
aliases: List[str] = None
def __repr__(self):
base = self.__class__.__name__
extras = (
"pretrained_model_name={pretrained_model_name},\n\t"
"description={description},\n\t"
"location={location}".format(**self.__dict__)
)
if self.class_ is not None:
extras = "{extras},\n\t" "class_={class_}".format(extras=extras, **self.__dict__)
representation = f"{base}(\n\t{extras}\n)"
return representation
def __hash__(self):
# assumes that locations are unique urls, and therefore their hashes
# should ideally also be unique
location_hash = hash(self.location)
return location_hash
def __eq__(self, other):
# another object is equal to self iff
# its hash is equal to hash(self)
return hash(self) == hash(other) or self.pretrained_model_name == other.pretrained_model_name
def __lt__(self, other):
return self.pretrained_model_name < other.pretrained_model_name
class Model(Typing, Serialization, FileIO):
"""
Abstract class offering interface which should be implemented by all NeMo models.
"""
@classmethod
@abstractmethod
def list_available_models(cls) -> Optional[List[PretrainedModelInfo]]:
"""
Should list all pre-trained models available via NVIDIA NGC cloud.
Note: There is no check that requires model names and aliases to be unique. In the case of a collision, whatever
        model (or alias) is listed first in this returned list will be instantiated.
Returns:
A list of PretrainedModelInfo entries
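        Example:
            A hedged sketch of what a subclass might return (the name, description and
            location below are placeholders, not real checkpoints):
            .. code-block:: python
                @classmethod
                def list_available_models(cls):
                    return [
                        PretrainedModelInfo(
                            pretrained_model_name="my_model_v1",
                            description="Illustrative entry only.",
                            location="https://example.com/my_model_v1.nemo",
                        )
                    ]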
"""
pass
@classmethod
def search_huggingface_models(
cls, model_filter: Optional[Union[ModelFilter, List[ModelFilter]]] = None
) -> List[ModelInfo]:
"""
Should list all pre-trained models available via Hugging Face Hub.
The following metadata can be passed via the `model_filter` for additional results.
Metadata:
resolve_card_info: Bool flag, if set, returns the model card metadata. Default: False.
limit_results: Optional int, limits the number of results returned.
.. code-block:: python
# You can replace <DomainSubclass> with any subclass of ModelPT.
from nemo.core import ModelPT
# Get default ModelFilter
filt = <DomainSubclass>.get_hf_model_filter()
# Make any modifications to the filter as necessary
filt.language = [...]
filt.task = ...
filt.tags = [...]
# Add any metadata to the filter as needed
filt.limit_results = 5
# Obtain model info
model_infos = <DomainSubclass>.search_huggingface_models(model_filter=filt)
# Browse through cards and select an appropriate one
card = model_infos[0]
# Restore model using `modelId` of the card.
model = ModelPT.from_pretrained(card.modelId)
Args:
model_filter: Optional ModelFilter or List[ModelFilter] (from Hugging Face Hub)
that filters the returned list of compatible model cards, and selects all results from each filter.
Users can then use `model_card.modelId` in `from_pretrained()` to restore a NeMo Model.
                If no ModelFilter is provided, uses the class's default filter as defined by `get_hf_model_filter()`.
Returns:
A list of ModelInfo entries.
"""
# Resolve model filter if not provided as argument
if model_filter is None:
model_filter = cls.get_hf_model_filter()
# If single model filter, wrap into list
if not isinstance(model_filter, Iterable):
model_filter = [model_filter]
# Inject `nemo` library filter
for mfilter in model_filter:
if isinstance(mfilter.library, str) and mfilter.library != 'nemo':
logging.warning(f"Model filter's `library` tag updated be `nemo`. Original value: {mfilter.library}")
mfilter.library = "nemo"
elif isinstance(mfilter, Iterable) and 'nemo' not in mfilter.library:
logging.warning(
f"Model filter's `library` list updated to include `nemo`. Original value: {mfilter.library}"
)
mfilter.library = list(mfilter)
mfilter.library.append('nemo')
# Check if api token exists, use if it does
is_token_available = HfFolder.get_token() is not None
# Search for all valid models after filtering
api = HfApi()
# Setup extra arguments for model filtering
all_results = [] # type: List[ModelInfo]
for mfilter in model_filter:
cardData = None
limit = None
if hasattr(mfilter, 'resolve_card_info') and mfilter.resolve_card_info is True:
cardData = True
if hasattr(mfilter, 'limit_results') and mfilter.limit_results is not None:
limit = mfilter.limit_results
results = api.list_models(
filter=mfilter,
use_auth_token=is_token_available,
sort="lastModified",
direction=-1,
cardData=cardData,
limit=limit,
) # type: List[ModelInfo]
all_results.extend(results)
return all_results
@classmethod
def get_available_model_names(cls) -> List[str]:
"""
        Returns the list of model names available via NVIDIA NGC cloud.
        To get the complete model description, use list_available_models().
Returns:
A list of model names
"""
model_names = []
if cls.list_available_models() is not None:
model_names = [model.pretrained_model_name for model in cls.list_available_models()]
return model_names
@classmethod
def get_hf_model_filter(cls) -> ModelFilter:
"""
Generates a filter for HuggingFace models.
Additionally includes default values of some metadata about results returned by the Hub.
Metadata:
resolve_card_info: Bool flag, if set, returns the model card metadata. Default: False.
limit_results: Optional int, limits the number of results returned.
Returns:
A Hugging Face Hub ModelFilter object.
"""
model_filter = ModelFilter(library='nemo')
# Attach some additional info
model_filter.resolve_card_info = False
model_filter.limit_results = None
return model_filter
@classmethod
def from_pretrained(
cls,
model_name: str,
refresh_cache: bool = False,
override_config_path: Optional[str] = None,
map_location: Optional['torch.device'] = None,
strict: bool = True,
return_config: bool = False,
trainer: Optional['Trainer'] = None,
save_restore_connector: SaveRestoreConnector = None,
):
"""
        Instantiates an instance of a NeMo model from the NVIDIA NGC cloud.
Use restore_from() to instantiate from a local .nemo file.
Args:
model_name: string key which will be used to find the module.
refresh_cache: If set to True, then when fetching from cloud, this will re-fetch the file
from cloud even if it is already found in a cache locally.
override_config_path: path to a yaml config that will override the internal
config file
map_location: Optional torch.device() to map the instantiated model to a device.
By default (None), it will select a GPU if available, falling back to CPU otherwise.
            strict: Passed to load_state_dict. By default True.
return_config: If set to true, will return just the underlying config of the restored
model as an OmegaConf DictConfig object without instantiating the model.
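        Example:
            A sketch of typical usage (any Model subclass can be used; EncDecCTCModel is only
            one example, and the model name is a placeholder; use ``cls.list_available_models()``
            to discover valid names):
            .. code-block:: python
                model = nemo.collections.asr.models.EncDecCTCModel.from_pretrained(
                    model_name="<pretrained_model_name>"
                )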
Returns:
A model instance of a particular model class or its underlying config (if return_config is set).
"""
if save_restore_connector is None:
save_restore_connector = SaveRestoreConnector()
# Resolve if the pretrained model name is from NGC or other sources
# HF Hub source
if '/' in model_name:
class_, nemo_model_file_in_cache = cls._get_hf_hub_pretrained_model_info(
model_name=model_name, refresh_cache=refresh_cache
)
else:
# NGC source
class_, nemo_model_file_in_cache = cls._get_ngc_pretrained_model_info(
model_name=model_name, refresh_cache=refresh_cache
)
instance = class_.restore_from(
restore_path=nemo_model_file_in_cache,
override_config_path=override_config_path,
map_location=map_location,
strict=strict,
return_config=return_config,
trainer=trainer,
save_restore_connector=save_restore_connector,
)
return instance
@classmethod
def _get_ngc_pretrained_model_info(cls, model_name: str, refresh_cache: bool = False) -> Tuple[type, str]:
"""
Resolve the NGC model pretrained information given a model name.
Assumes the model subclass implements the `list_available_models()` inherited method.
Args:
model_name: Str name of the model. Must be the original name or an alias of the model, without any '/'.
refresh_cache: Bool, determines whether cache must be refreshed (model is re-downloaded).
Returns:
A tuple of details describing :
- The resolved class of the model. This requires subclass to implement PretrainedModelInfo.class_.
If the class cannot be resolved, default to the class that called this method.
- The path to the NeMo model (.nemo file) in some cached directory.
"""
location_in_the_cloud = None
description = None
class_ = None
models = cls.list_available_models()
if models is not None:
for pretrained_model_info in cls.list_available_models():
found = False
if pretrained_model_info.pretrained_model_name == model_name:
found = True
elif pretrained_model_info.aliases is not None:
for alias in pretrained_model_info.aliases:
if alias == model_name:
found = True
break
if found:
location_in_the_cloud = pretrained_model_info.location
description = pretrained_model_info.description
class_ = pretrained_model_info.class_
break
if location_in_the_cloud is None:
raise FileNotFoundError(
f"Model {model_name} was not found. Check cls.list_available_models() for the list of all available models."
)
filename = location_in_the_cloud.split("/")[-1]
url = location_in_the_cloud.replace(filename, "")
cache_dir = Path.joinpath(resolve_cache_dir(), f'{filename[:-5]}')
        # If either description or location in the cloud changes, this will force a re-download
cache_subfolder = hashlib.md5((location_in_the_cloud + description).encode('utf-8')).hexdigest()
# if file exists on cache_folder/subfolder, it will be re-used, unless refresh_cache is True
nemo_model_file_in_cache = maybe_download_from_cloud(
url=url, filename=filename, cache_dir=cache_dir, subfolder=cache_subfolder, refresh_cache=refresh_cache
)
logging.info("Instantiating model from pre-trained checkpoint")
if class_ is None:
class_ = cls
return class_, nemo_model_file_in_cache
@classmethod
def _get_hf_hub_pretrained_model_info(cls, model_name: str, refresh_cache: bool = False) -> Tuple[type, str]:
"""
Resolve the HuggingFace Hub model pretrained information given a model name.
The model name must be of general syntax ``{source_repo}/{model_name}``.
Note:
The ``{source_repo}`` need not be ``nvidia``, it can be any public repository, even external to Nvidia.
This allows public, externally contributed models to be run freely using Nvidia NeMo.
Args:
            model_name: Str name of the model, of the general form ``{source_repo}/{model_name}``.
refresh_cache: Bool, determines whether cache must be refreshed (model is re-downloaded).
Returns:
A tuple of details describing :
- The resolved class of the model. Since the source is external to NeMo, always default to using
                the calling class. Relies on target class resolution by restore_from() to call the correct class.
- The path to the NeMo model (.nemo file) in some cached directory (managed by HF Hub).
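        Example:
            A sketch of how such a name is typically consumed via ``from_pretrained()``
            (the repo id below is a placeholder):
            .. code-block:: python
                model = ModelPT.from_pretrained("<source_repo>/<model_name>")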
"""
# Resolve the model name without origin for filename
resolved_model_filename = model_name.split("/")[-1] + '.nemo'
# Check if api token exists, use if it does
is_token_available = HfFolder.get_token() is not None
# Try to load the model from the Huggingface Hub
path = hf_hub_download(
repo_id=model_name,
filename=resolved_model_filename,
library_name="nemo",
library_version=nemo.__version__,
force_download=refresh_cache,
use_auth_token=is_token_available,
)
# Cannot pre-resolve the specific class without double instantiation (first for config, second for model params)
# Default to current class, and perform basic class path resolution (handled via restore_from() + target class)
class_ = cls
return class_, path
class typecheck:
"""
A decorator which performs input-output neural type checks, and attaches
neural types to the output of the function that it wraps.
Requires that the class inherit from :class:`~nemo.core.Typing` in order to perform
type checking, and will raise an error if that is not the case.
# Usage (Class level type support)
.. code-block:: python
@typecheck()
def fn(self, arg1, arg2, ...):
...
# Usage (Function level type support)
.. code-block:: python
@typecheck(input_types=..., output_types=...)
def fn(self, arg1, arg2, ...):
...
Points to be noted:
1) The brackets () in `@typecheck()` are necessary.
            Without them, you will encounter `TypeError: __init__() takes 1 positional argument but X were given`.
2) The function can take any number of positional arguments during definition.
When you call this function, all arguments must be passed using kwargs only.
"""
class TypeState(Enum):
"""
Placeholder to denote the default value of type information provided.
If the constructor of this decorator is used to override the class level type definition,
        this enum value indicates that types will be overridden.
"""
UNINITIALIZED = 0
def __init__(
self,
input_types: Union[TypeState, Dict[str, NeuralType]] = TypeState.UNINITIALIZED,
output_types: Union[TypeState, Dict[str, NeuralType]] = TypeState.UNINITIALIZED,
ignore_collections: bool = False,
):
self.input_types = input_types
self.output_types = output_types
if input_types == self.TypeState.UNINITIALIZED:
self.input_override = False
else:
self.input_override = True
if output_types == self.TypeState.UNINITIALIZED:
self.output_override = False
else:
self.output_override = True
self.ignore_collections = ignore_collections
@wrapt.decorator(enabled=is_typecheck_enabled)
def __call__(self, wrapped, instance: Typing, args, kwargs):
"""
Wrapper method that can be used on any function of a class that implements :class:`~nemo.core.Typing`.
By default, it will utilize the `input_types` and `output_types` properties of the class inheriting Typing.
Local function level overrides can be provided by supplying dictionaries as arguments to the decorator.
Args:
input_types: Union[TypeState, Dict[str, NeuralType]]. By default, uses the global `input_types`.
output_types: Union[TypeState, Dict[str, NeuralType]]. By default, uses the global `output_types`.
ignore_collections: Bool. Determines if container types should be asserted for depth checks, or
if depth checks are skipped entirely.
"""
if instance is None:
raise RuntimeError("Only classes which inherit nemo.core.Typing can use this decorator !")
if not isinstance(instance, Typing):
raise RuntimeError("Only classes which inherit nemo.core.Typing can use this decorator !")
if hasattr(instance, 'input_ports') or hasattr(instance, 'output_ports'):
raise RuntimeError(
"Typing requires override of `input_types()` and `output_types()`, "
"not `input_ports() and `output_ports()`"
)
# Preserve type information
if self.input_types is typecheck.TypeState.UNINITIALIZED:
self.input_types = instance.input_types
if self.output_types is typecheck.TypeState.UNINITIALIZED:
self.output_types = instance.output_types
# Resolve global type or local overridden type
if self.input_override:
input_types = self.input_types
else:
input_types = instance.input_types
if self.output_override:
output_types = self.output_types
else:
output_types = instance.output_types
# If types are not defined, skip type checks and just call the wrapped method
if input_types is None and output_types is None:
return wrapped(*args, **kwargs)
# Check that all arguments are kwargs
if input_types is not None and len(args) > 0:
raise TypeError("All arguments must be passed by kwargs only for typed methods")
# Perform rudimentary input checks here
instance._validate_input_types(input_types=input_types, ignore_collections=self.ignore_collections, **kwargs)
# Call the method - this can be forward, or any other callable method
outputs = wrapped(*args, **kwargs)
instance._attach_and_validate_output_types(
output_types=output_types, ignore_collections=self.ignore_collections, out_objects=outputs
)
return outputs
@staticmethod
def set_typecheck_enabled(enabled: bool = True):
"""
Global method to enable/disable typechecking.
Args:
enabled: bool, when True will enable typechecking.
"""
global _TYPECHECK_ENABLED
_TYPECHECK_ENABLED = enabled
@staticmethod
@contextmanager
def disable_checks():
"""
Context manager that temporarily disables type checking within its context.
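        Example:
            A minimal sketch (``module`` is assumed to be an instance of a Typing subclass
            whose method is wrapped with ``@typecheck()``, and ``tokens`` is a placeholder input):
            .. code-block:: python
                with typecheck.disable_checks():
                    outputs = module(tokens=tokens)  # no neural type checks performed here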
"""
typecheck.set_typecheck_enabled(enabled=False)
try:
yield
finally:
typecheck.set_typecheck_enabled(enabled=True)
|
NeMo-main
|
nemo/core/classes/common.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
import inspect
import os
import uuid
from abc import abstractmethod
from os import path
from pathlib import Path
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
import hydra
import torch
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.utilities import model_summary, rank_zero_only
from nemo import package_info
from nemo.core import optim
from nemo.core.classes.common import Model
from nemo.core.connectors.save_restore_connector import SaveRestoreConnector
from nemo.core.optim import prepare_lr_scheduler
from nemo.utils import logging, model_utils
from nemo.utils.app_state import AppState
from nemo.utils.debug_hook import register_debug_hooks
from nemo.utils.exceptions import NeMoBaseException
from nemo.utils.get_rank import get_rank, is_global_rank_zero
__all__ = ['ModelPT']
class ModelPT(LightningModule, Model):
"""
Interface for Pytorch-lightning based NeMo models
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
"""
Base class from which all NeMo models should inherit
Args:
cfg (DictConfig): configuration object.
The cfg object should have (optionally) the following sub-configs:
* train_ds - to instantiate training dataset
* validation_ds - to instantiate validation dataset
* test_ds - to instantiate testing dataset
* optim - to instantiate optimizer with learning rate scheduler
trainer (Optional): Pytorch Lightning Trainer instance
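        Example:
            A hedged sketch of constructing a concrete subclass (``MyModel`` is a hypothetical
            ModelPT subclass; the config keys shown are illustrative, not a complete schema):
            .. code-block:: python
                from omegaconf import OmegaConf
                cfg = OmegaConf.create({"optim": {"name": "adam", "lr": 1e-3}})
                model = MyModel(cfg=cfg, trainer=None)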
"""
if trainer is not None and not isinstance(trainer, Trainer):
raise ValueError(
f"trainer constructor argument must be either None or pytorch_lightning.Trainer. But got {type(trainer)} instead."
)
super().__init__()
"""
Internal global flags that determine core functionality of ModelPT.
_MODEL_IS_RESTORED:
This flag determines the context of the model - whether the model is currently being
restored or not.
            - When set, it can be assumed that the model will disable all automatic methods -
            setup_training_data(), setup_validation/test_data() and their multi equivalents.
            - If a model is being restored from an archive file (tarfile), it can be assumed that
            under this context, the cwd is *inside* the tarfile itself.
        _MODEL_RESTORE_PATH:
            A string path to a file from which the model is being restored.
            This file can either be a PyTorch Lightning checkpoint, or an archive (tarfile) that contains
artifact objects.
If it is an archive file, during restoration, the cwd will be temporarily moved to inside the
archive itself.
"""
# set global vars in AppState
app_state = AppState()
# Convert config to a DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
# Convert config to support Hydra 1.0+ instantiation
cfg = model_utils.maybe_update_config_version(cfg)
if 'model' in cfg:
raise ValueError(
"Creating model config node is forbidden due to collision problem when loading from checkpoint."
)
if 'target' not in cfg:
# This is for Jarvis service.
OmegaConf.set_struct(cfg, False)
cfg.target = "{0}.{1}".format(self.__class__.__module__, self.__class__.__name__)
OmegaConf.set_struct(cfg, True)
if 'nemo_version' not in cfg:
with open_dict(cfg):
cfg.nemo_version = package_info.__version__
self._cfg = cfg
# init mapping submodule attribute -> config_field for nested NeMo models
self._nemo_submodule_name_to_config_field = dict()
self.save_hyperparameters("cfg")
self._train_dl = None
self._validation_dl = None
self._test_dl = None
self._optimizer_param_groups = None
self._optimizer = None
self._scheduler = None
self.set_trainer(trainer)
self._save_restore_connector = SaveRestoreConnector()
self._set_model_guid()
# Set device_id in AppState
if torch.cuda.is_available() and torch.cuda.current_device() is not None:
app_state.device_id = torch.cuda.current_device()
if self._cfg is not None and not self._is_model_being_restored():
# Setup data loaders now (default) or defer setup to `self.setup()`
# if `defer_setup` is set in the config of the corresponding dataloader.
if (
'train_ds' in self._cfg
and self._cfg.train_ds is not None
and not self._cfg.train_ds.get('defer_setup', False)
):
self.setup_training_data(self._cfg.train_ds)
if (
'validation_ds' in self._cfg
and self._cfg.validation_ds is not None
and not self._cfg.validation_ds.get('defer_setup', False)
):
self.setup_multiple_validation_data(val_data_config=cfg.validation_ds)
if (
'test_ds' in self._cfg
and self._cfg.test_ds is not None
and not self._cfg.test_ds.get('defer_setup', False)
):
self.setup_multiple_test_data(test_data_config=cfg.test_ds)
else:
if 'train_ds' in self._cfg and self._cfg.train_ds is not None:
logging.warning(
f"If you intend to do training or fine-tuning, please call the ModelPT.setup_training_data() method "
f"and provide a valid configuration file to setup the train data loader.\n"
f"Train config : \n{OmegaConf.to_yaml(self._cfg.train_ds)}"
)
if 'validation_ds' in self._cfg and self._cfg.validation_ds is not None:
logging.warning(
f"If you intend to do validation, please call the ModelPT.setup_validation_data() or ModelPT.setup_multiple_validation_data() method "
f"and provide a valid configuration file to setup the validation data loader(s). \n"
f"Validation config : \n{OmegaConf.to_yaml(self._cfg.validation_ds)}"
)
if 'test_ds' in self._cfg and self._cfg.test_ds is not None:
logging.warning(
f"Please call the ModelPT.setup_test_data() or ModelPT.setup_multiple_test_data() method "
f"and provide a valid configuration file to setup the test data loader(s).\n"
f"Test config : \n{OmegaConf.to_yaml(self._cfg.test_ds)}"
)
# Create list of lists for val and test outputs to support multiple dataloaders
# Initialize an empty list as sometimes self._validation_dl can be None at this stage
self.validation_step_outputs = []
# Check len(self._validation_dl) > 1 as sometimes single dataloader can be in a list: [<Dataloader obj>] when ds_item in
# config has 1 item passed in a list
if self._validation_dl and type(self._validation_dl) == list and len(self._validation_dl) > 1:
for _ in range(len(self._validation_dl)):
self.validation_step_outputs.append([])
# Initialize an empty list as sometimes self._test_dl can be None at this stage
self.test_step_outputs = []
if self._test_dl and type(self._test_dl) == list and len(self._test_dl) > 1:
for _ in range(len(self._test_dl)):
self.test_step_outputs.append([])
# ModelPT wrappers over subclass implementations
self.training_step = model_utils.wrap_training_step(self.training_step)
# Setup nsys profiling if it has been enabled in the model config
self._setup_nsys_profiling()
def __init_subclass__(cls) -> None:
cls._save_restore_connector = SaveRestoreConnector()
def on_fit_start(self) -> None:
if self.cfg.get("dump_debug_info", False):
register_debug_hooks(self.model, self.trainer, self.log, self.cfg.get("dump_debug_info_to_file", False))
return super().on_fit_start()
def register_artifact(
self, config_path: str, src: str, verify_src_exists: bool = True,
):
""" Register model artifacts with this function. These artifacts (files) will be included inside .nemo file
when model.save_to("mymodel.nemo") is called.
How it works:
        1. It always returns an existing absolute path which can be used during the Model constructor call
EXCEPTION: src is None or "" in which case nothing will be done and src will be returned
2. It will add (config_path, model_utils.ArtifactItem()) pair to self.artifacts
.. code-block::
If "src" is local existing path:
then it will be returned in absolute path form.
elif "src" starts with "nemo_file:unique_artifact_name":
.nemo will be untarred to a temporary folder location and an actual existing path will be returned
else:
an error will be raised.
WARNING: use .register_artifact calls in your models' constructors.
The returned path is not guaranteed to exist after you have exited your model's constructor.
Args:
config_path (str): Artifact key. Usually corresponds to the model config.
src (str): Path to artifact.
verify_src_exists (bool): If set to False, then the artifact is optional and register_artifact will return None even if
src is not found. Defaults to True.
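        Example:
            A hedged sketch inside a model constructor (the config key, attribute and source
            file below are placeholders, and ``MyModel`` is a hypothetical subclass):
            .. code-block:: python
                class MyModel(ModelPT):
                    def __init__(self, cfg, trainer=None):
                        super().__init__(cfg=cfg, trainer=trainer)
                        vocab_path = self.register_artifact("tokenizer.vocab_file", cfg.vocab_file)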
Returns:
            str: If src is not None or empty, an absolute path that is guaranteed to exist for the lifetime of the model instance
"""
if src is None or src == "":
return src
if Path(src).suffix == ".nemo":
raise NeMoBaseException(
"Registering .nemo files as artifacts not supported. "
"If you are trying to make a nested model, use `register_nemo_submodule`."
)
if not hasattr(self, 'artifacts'):
self.artifacts = {}
if self.artifacts is None:
self.artifacts = {}
if config_path in self.artifacts.keys():
logging.warning(
f"You tried to register an artifact under config key={config_path} but an artifact for "
f"it has already been registered."
)
return self._save_restore_connector.register_artifact(self, config_path, src, verify_src_exists)
def has_artifacts(self) -> bool:
"""Returns True if model has artifacts registered"""
return hasattr(self, 'artifacts') and self.artifacts is not None and len(self.artifacts) > 0
def has_native_or_submodules_artifacts(self) -> bool:
"""Returns True if it has artifacts or any of the submodules have artifacts"""
for module in self.modules():
if (
isinstance(module, ModelPT)
and hasattr(module, 'artifacts')
and module.artifacts is not None
and len(module.artifacts) > 0
):
return True
return False
def has_nemo_submodules(self) -> bool:
"""Returns True if it has any registered NeMo submodules"""
return len(self._nemo_submodule_name_to_config_field) > 0
def register_nemo_submodule(self, name: str, config_field: str, model: "ModelPT") -> None:
"""
        Adds a NeMo model as a submodule. The submodule can be accessed via the `name` attribute on the parent NeMo model it was registered on (`self`).
        During saving, the whole parent model (self) is saved as a single model together with the artifacts
        of the child submodule, and the submodule config is saved to the `config_field` of the parent model.
This method is necessary to create a nested model, e.g.
.. code-block:: python
class ParentModel(ModelPT):
def __init__(self, cfg, trainer=None):
super().__init__(cfg=cfg, trainer=trainer)
# annotate type for autocompletion and type checking (optional)
self.child_model: Optional[ChildModel] = None
if cfg.get("child_model") is not None:
self.register_nemo_submodule(
name="child_model",
config_field="child_model",
model=ChildModel(self.cfg.child_model, trainer=trainer),
)
# ... other code
Args:
name: name of the attribute for the submodule
config_field: field in config, where submodule config should be saved
model: NeMo model, instance of ModelPT
"""
# check it is a real NeMo model
if not isinstance(model, ModelPT):
raise NeMoBaseException(
f"Model is not and instance of ModelPT, so can't be registered. Got {type(model).__name__}"
)
# check if it is called after __init__
if not hasattr(self, "_nemo_submodule_name_to_config_field"):
raise NeMoBaseException(
"You are trying to register a submodule before the model is initialized. This is not allowed. "
"Did you forget to call `super().__init__`?"
)
# assign attribute to self
setattr(self, name, model)
# add to the submodules mapping
self._nemo_submodule_name_to_config_field[name] = config_field
def named_nemo_modules(
self, prefix_name: str = "", prefix_config: str = ""
) -> Iterator[Tuple[str, str, "ModelPT"]]:
"""
        Returns an iterator over all NeMo submodules recursively, yielding
        tuples of (attribute path, path in config, submodule), starting from the current module (`self`)
Args:
prefix_name: prefix for the name path
prefix_config: prefix for the path in config
Returns:
Iterator over (attribute path, path in config, submodule), starting from (prefix, self)
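        Example:
            A minimal sketch of iterating over registered submodules:
            .. code-block:: python
                for attr_path, cfg_path, submodule in model.named_nemo_modules():
                    print(attr_path, cfg_path, type(submodule).__name__)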
"""
if not hasattr(self, "_nemo_submodule_name_to_config_field"):
raise NeMoBaseException(
"Model is not fully initialized. Calling `named_nemo_modules` before __init__ not allowed. "
"Did you forget to call `super().__init__`?"
)
yield prefix_name, prefix_config, self
# recursive iteration over all NeMo submodules
for name, config_field in self._nemo_submodule_name_to_config_field.items():
attribute_path = f"{prefix_name}.{name}" if prefix_name else name
config_path = f"{prefix_config}.{config_field}" if prefix_config else config_field
module: ModelPT = getattr(self, name)
for submodule_name, subconfig_path, submodule in module.named_nemo_modules(
prefix_name=attribute_path, prefix_config=config_path
):
yield submodule_name, subconfig_path, submodule
def save_to(self, save_path: str):
"""
Saves model instance (weights and configuration) into .nemo file
You can use "restore_from" method to fully restore instance from .nemo file.
.nemo file is an archive (tar.gz) with the following:
            model_config.yaml - model configuration in .yaml format. You can deserialize this into the cfg argument for the model's constructor
            model_weights.ckpt - model checkpoint
Args:
save_path: Path to .nemo file where model instance should be saved
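        Example:
            A minimal sketch (the path is a placeholder):
            .. code-block:: python
                model.save_to("my_model.nemo")  # creates the .nemo archive described above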
"""
def maybe_make_save_dir(path: 'pathlib.Path'):
if not path.parent.exists():
path.parent.mkdir(parents=True)
save_path = Path(save_path).expanduser().resolve()
app_state = AppState()
if app_state.model_parallel_size is not None:
if app_state.model_parallel_size > 1:
if type(self._save_restore_connector) == SaveRestoreConnector:
raise ValueError(
'Default NeMo SaveRestoreConnector will not work in model parallel mode. You should use a '
'connector which supports model parallel mode, such as NLPSaveRestoreConnector in NLP. You '
'can also use a custom one.'
)
if is_global_rank_zero():
maybe_make_save_dir(save_path)
if torch.distributed.is_initialized():
torch.distributed.barrier()
# connector checks for ranks properly, no need to check here
self._save_restore_connector.save_to(self, str(save_path)) # downstream tasks expect str, not Path
elif is_global_rank_zero():
maybe_make_save_dir(save_path)
self._save_restore_connector.save_to(self, str(save_path)) # downstream tasks expect str, not Path
@classmethod
def restore_from(
cls,
restore_path: str,
override_config_path: Optional[Union[OmegaConf, str]] = None,
map_location: Optional[torch.device] = None,
strict: bool = True,
return_config: bool = False,
save_restore_connector: SaveRestoreConnector = None,
trainer: Optional[Trainer] = None,
):
"""
Restores model instance (weights and configuration) from .nemo file.
Args:
restore_path: path to .nemo file from which model should be instantiated
override_config_path: path to a yaml config that will override the internal
config file or an OmegaConf / DictConfig object representing the model config.
map_location: Optional torch.device() to map the instantiated model to a device.
By default (None), it will select a GPU if available, falling back to CPU otherwise.
strict: Passed to load_state_dict. By default True.
return_config: If set to true, will return just the underlying config of the restored
model as an OmegaConf DictConfig object without instantiating the model.
trainer: Optional, a pytorch lightning Trainer object that will be forwarded to the
instantiated model's constructor.
save_restore_connector (SaveRestoreConnector): Can be overridden to add custom save and restore logic.
Example:
```
model = nemo.collections.asr.models.EncDecCTCModel.restore_from('asr.nemo')
assert isinstance(model, nemo.collections.asr.models.EncDecCTCModel)
```
Returns:
An instance of type cls or its underlying config (if return_config is set).
"""
if save_restore_connector is None:
save_restore_connector = SaveRestoreConnector()
if save_restore_connector.model_extracted_dir is None:
restore_path = os.path.abspath(os.path.expanduser(restore_path))
else:
restore_path = os.path.abspath(os.path.expanduser(save_restore_connector.model_extracted_dir))
if not path.exists(restore_path):
raise FileNotFoundError(f"Can't find {restore_path}")
app_state = AppState()
app_state.model_restore_path = restore_path
cls.update_save_restore_connector(save_restore_connector)
instance = cls._save_restore_connector.restore_from(
cls, restore_path, override_config_path, map_location, strict, return_config, trainer
)
if isinstance(instance, ModelPT):
instance._save_restore_connector = save_restore_connector
return instance
@classmethod
def load_from_checkpoint(
cls,
checkpoint_path: str,
*args,
map_location: Optional[Union[Dict[str, str], str, torch.device, int, Callable]] = None,
hparams_file: Optional[str] = None,
strict: bool = True,
**kwargs,
):
"""
Loads ModelPT from checkpoint, with some maintenance of restoration.
For documentation, please refer to LightningModule.load_from_checkpoint() documentation.
"""
checkpoint = None
try:
cls._set_model_restore_state(is_being_restored=True)
checkpoint = super().load_from_checkpoint(
checkpoint_path=checkpoint_path,
*args,
map_location=map_location,
hparams_file=hparams_file,
strict=strict,
**kwargs,
)
finally:
cls._set_model_restore_state(is_being_restored=False)
return checkpoint
@abstractmethod
def setup_training_data(self, train_data_config: Union[DictConfig, Dict]):
"""
        Sets up the data loader to be used in training.
        Args:
            train_data_config: training data layer parameters.
Returns:
"""
pass
@abstractmethod
def setup_validation_data(self, val_data_config: Union[DictConfig, Dict]):
"""
        Sets up the data loader to be used in validation.
        Args:
            val_data_config: validation data layer parameters.
Returns:
"""
pass
def setup_test_data(self, test_data_config: Union[DictConfig, Dict]):
"""
        (Optionally) Sets up the data loader to be used in test.
        Args:
            test_data_config: test data layer parameters.
Returns:
"""
raise NotImplementedError()
def setup_multiple_validation_data(self, val_data_config: Union[DictConfig, Dict]):
"""
        (Optionally) Sets up the data loader(s) to be used in validation, with support for multiple data loaders.
        Args:
            val_data_config: validation data layer parameters.
"""
        # Set some placeholders overridden by the helper method
self._val_dl_idx = 0
self._validation_names = None
self._validation_dl = None # type: torch.utils.data.DataLoader
# preserve config
self._update_dataset_config(dataset_name='validation', config=val_data_config)
try:
self._multi_dataset_mode = True
model_utils.resolve_validation_dataloaders(model=self)
finally:
self._multi_dataset_mode = False
if self._validation_names is None:
if self._validation_dl is not None and type(self._validation_dl) in [list, tuple]:
self._validation_names = ['val_{}_'.format(idx) for idx in range(len(self._validation_dl))]
def setup_multiple_test_data(self, test_data_config: Union[DictConfig, Dict]):
"""
        (Optionally) Sets up the data loader(s) to be used in test, with support for multiple data loaders.
        Args:
            test_data_config: test data layer parameters.
"""
        # Set some placeholders overridden by the helper method
self._test_dl_idx = 0
self._test_names = None
self._test_dl = None # type: torch.utils.data.DataLoader
# preserve config
self._update_dataset_config(dataset_name='test', config=test_data_config)
try:
self._multi_dataset_mode = True
model_utils.resolve_test_dataloaders(model=self)
finally:
self._multi_dataset_mode = False
if self._test_names is None:
if self._test_dl is not None and type(self._test_dl) in [list, tuple]:
self._test_names = ['test_{}_'.format(idx) for idx in range(len(self._test_dl))]
def setup_optimization(
self, optim_config: Optional[Union[DictConfig, Dict]] = None, optim_kwargs: Optional[Dict[str, Any]] = None,
):
"""Prepares an optimizer from a string name and its optional config parameters.
Args:
optim_config: A dictionary containing the following keys:
* "lr": mandatory key for learning rate. Will raise ValueError if not provided.
* "optimizer": string name pointing to one of the available optimizers in the registry. \
If not provided, defaults to "adam".
* "opt_args": Optional list of strings, in the format "arg_name=arg_value". \
The list of "arg_value" will be parsed and a dictionary of optimizer kwargs \
will be built and supplied to instantiate the optimizer.
optim_kwargs: A dictionary with additional kwargs for the
optimizer. Used for non-primitive types that are not
compatible with OmegaConf.
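        Example:
            A hedged sketch of a config accepted by this method (values are illustrative
            only; the "sched" entry is optional):
            .. code-block:: python
                optim_config = {
                    "name": "adam",
                    "lr": 1e-3,
                    "sched": {"name": "CosineAnnealing", "min_lr": 1e-5},
                }
                model.setup_optimization(optim_config=optim_config)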
"""
# Setup the optimizer parameter groups (by default use all parameters that are trainable)
self.setup_optimizer_param_groups()
# If config was not explicitly passed to us
if optim_config is None:
# See if internal config has `optim` namespace
if self._cfg is not None and hasattr(self._cfg, 'optim'):
optim_config = self._cfg.optim
# If config is still None, or internal config has no Optim, return without instantiation
if optim_config is None:
logging.info('No optimizer config provided, therefore no optimizer was created')
return
else:
# Preserve the configuration
if not isinstance(optim_config, DictConfig):
optim_config = OmegaConf.create(optim_config)
# See if internal config has `optim` namespace before preservation
if self._cfg is not None and hasattr(self._cfg, 'optim'):
if self._cfg.optim is None:
self._cfg.optim = copy.deepcopy(optim_config)
else:
with open_dict(self._cfg.optim):
self._cfg.optim = copy.deepcopy(optim_config)
# Setup optimizer and scheduler
if optim_config is not None and isinstance(optim_config, DictConfig):
optim_config = OmegaConf.to_container(optim_config, resolve=True)
if self._trainer is None:
logging.warning(f"Trainer wasn't specified in model constructor. Make sure that you really wanted it.")
if 'sched' in optim_config and self._trainer is not None:
if not isinstance(self._trainer.accumulate_grad_batches, int):
raise ValueError("We do not currently support gradient acculumation that is not an integer.")
if self.trainer.max_steps < 0:
# Store information needed to calculate max_steps
optim_config['sched']['t_max_epochs'] = self._trainer.max_epochs
optim_config['sched']['t_accumulate_grad_batches'] = self._trainer.accumulate_grad_batches
optim_config['sched']['t_limit_train_batches'] = self._trainer.limit_train_batches
app_state = AppState()
if app_state.data_parallel_size is not None:
optim_config['sched']['t_num_workers'] = app_state.data_parallel_size
elif app_state.model_parallel_size is None:
optim_config['sched']['t_num_workers'] = self._trainer.num_devices * self._trainer.num_nodes
else:
optim_config['sched']['t_num_workers'] = (
self._trainer.num_devices * self._trainer.num_nodes
) / app_state.model_parallel_size
else:
optim_config['sched']['max_steps'] = self._trainer.max_steps
# Force into DictConfig from nested structure
optim_config = OmegaConf.create(optim_config)
        # Get back a nested dict so it is mutable
optim_config = OmegaConf.to_container(optim_config, resolve=True)
# Extract scheduler config if inside optimizer config
if 'sched' in optim_config:
scheduler_config = optim_config.pop('sched')
else:
scheduler_config = None
# Check if caller provided optimizer name, default to Adam otherwise
optimizer_cls = optim_config.get('_target_', None)
if optimizer_cls is None:
# Try to get optimizer name for dynamic resolution, defaulting to Adam
optimizer_name = optim_config.get('name', 'adam')
else:
if inspect.isclass(optimizer_cls):
optimizer_name = optimizer_cls.__name__.lower()
else:
# resolve the class name (lowercase) from the class path if not provided
optimizer_name = optimizer_cls.split(".")[-1].lower()
        # We are guaranteed to have lr since it is required by the argparser
        # But maybe the user forgot to pass it to this function
lr = optim_config.get('lr', None)
# Check if caller has optimizer kwargs, default to empty dictionary
if 'args' in optim_config:
optimizer_args = optim_config.pop('args')
optimizer_args = optim.parse_optimizer_args(optimizer_name, optimizer_args)
else:
optimizer_args = copy.deepcopy(optim_config)
# Remove extra parameters from optimizer_args nest
# Assume all other parameters are to be passed into optimizer constructor
optimizer_args.pop('name', None)
optimizer_args.pop('cls', None)
optimizer_args.pop('lr', None)
# Include user-provided kwargs
if optim_kwargs is not None:
optimizer_args.update(optim_kwargs)
# Adaptive schedulers don't need `lr`
if lr is not None:
optimizer_args['lr'] = lr
# Actually instantiate the optimizer
if optimizer_cls is not None:
if inspect.isclass(optimizer_cls):
optimizer = optimizer_cls(self._optimizer_param_groups, **optimizer_args)
logging.info("Optimizer config = %s", str(optimizer))
self._optimizer = optimizer
else:
# Attempt class path resolution
try:
optimizer_cls = OmegaConf.create({'_target_': optimizer_cls})
if lr is not None:
optimizer_config = {'lr': lr}
else:
optimizer_config = {}
optimizer_config.update(optimizer_args)
optimizer_instance = hydra.utils.instantiate(
optimizer_cls, self._optimizer_param_groups, **optimizer_config
) # type: DictConfig
logging.info("Optimizer config = %s", str(optimizer_instance))
self._optimizer = optimizer_instance
except Exception as e:
logging.error(
"Could not instantiate class path - {} with kwargs {}".format(
optimizer_cls, str(optimizer_config)
)
)
raise e
else:
optimizer = optim.get_optimizer(optimizer_name)
optimizer = optimizer(self._optimizer_param_groups, **optimizer_args)
logging.info("Optimizer config = %s", str(optimizer))
self._optimizer = optimizer
# Try to instantiate scheduler for optimizer
self._scheduler = prepare_lr_scheduler(
optimizer=self._optimizer, scheduler_config=scheduler_config, train_dataloader=self._train_dl
)
# Return the optimizer with/without scheduler
# This return allows multiple optimizers or schedulers to be created
return self._optimizer, self._scheduler
def setup_optimizer_param_groups(self):
"""
Used to create param groups for the optimizer.
As an example, this can be used to specify per-layer learning rates:
optim.SGD([
{'params': model.base.parameters()},
{'params': model.classifier.parameters(), 'lr': 1e-3}
], lr=1e-2, momentum=0.9)
See https://pytorch.org/docs/stable/optim.html for more information.
By default, ModelPT will use self.parameters().
Override this method to add custom param groups.
In the config file, add 'optim_param_groups' to support different LRs
for different components (unspecified params will use the default LR):
model:
optim_param_groups:
encoder:
lr: 1e-4
momentum: 0.8
decoder:
lr: 1e-3
optim:
lr: 3e-3
momentum: 0.9
"""
if not hasattr(self, "parameters"):
self._optimizer_param_groups = None
return
known_groups = []
param_groups = []
if "optim_param_groups" in self.cfg:
param_groups_cfg = self.cfg.optim_param_groups
for group, group_cfg in param_groups_cfg.items():
module = getattr(self, group, None)
if module is None:
raise ValueError(f"{group} not found in model.")
elif hasattr(module, "parameters"):
known_groups.append(group)
new_group = {"params": module.parameters()}
for k, v in group_cfg.items():
new_group[k] = v
param_groups.append(new_group)
else:
raise ValueError(f"{group} does not have parameters.")
other_params = []
for n, p in self.named_parameters():
is_unknown = True
for group in known_groups:
if n.startswith(group):
is_unknown = False
if is_unknown:
other_params.append(p)
if len(other_params):
param_groups = [{"params": other_params}] + param_groups
else:
param_groups = [{"params": self.parameters()}]
self._optimizer_param_groups = param_groups
def configure_optimizers(self):
self.setup_optimization()
if self._scheduler is None:
return self._optimizer
else:
return [self._optimizer], [self._scheduler]
def setup(self, stage: Optional[str] = None):
"""Called at the beginning of fit, validate, test, or predict.
This is called on every process when using DDP.
Args:
stage: fit, validate, test or predict
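        Example:
            Construction of a dataloader can be deferred to this hook by setting the
            ``defer_setup`` flag in the corresponding dataloader config (a sketch; the
            surrounding config structure is assumed):
            .. code-block:: python
                cfg.train_ds.defer_setup = True  # with this, setup('fit') builds the training data loader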
"""
if stage == 'fit':
train_deferred_setup = (
'train_ds' in self._cfg
and self._cfg.train_ds is not None
and self._cfg.train_ds.get('defer_setup', False)
)
if self.train_dataloader() is None and train_deferred_setup:
self.setup_training_data(self._cfg.train_ds)
if stage in ('fit', 'validate'):
val_deferred_setup = (
'validation_ds' in self._cfg
and self._cfg.validation_ds is not None
and self._cfg.validation_ds.get('defer_setup', False)
)
if self.val_dataloader() is None and val_deferred_setup:
self.setup_multiple_validation_data(val_data_config=self._cfg.validation_ds)
if stage == 'test':
test_deferred_setup = (
'test_ds' in self._cfg
and self._cfg.test_ds is not None
and self._cfg.test_ds.get('defer_setup', False)
)
if self.test_dataloader() is None and test_deferred_setup:
self.setup_multiple_test_data(test_data_config=self._cfg.test_ds)
def train_dataloader(self):
if self._train_dl is not None:
return self._train_dl
def val_dataloader(self):
if self._validation_dl is not None:
return self._validation_dl
def test_dataloader(self):
if self._test_dl is not None:
return self._test_dl
def on_validation_epoch_end(self) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
"""
        Default hook at the end of a validation epoch, which automatically supports multiple data loaders
        via `multi_validation_epoch_end`.
If multi dataset support is not required, override this method entirely in base class.
In such a case, there is no need to implement `multi_validation_epoch_end` either.
.. note::
If more than one data loader exists, and they all provide `val_loss`,
only the `val_loss` of the first data loader will be used by default.
This default can be changed by passing the special key `val_dl_idx: int`
inside the `validation_ds` config.
Args:
outputs: Single or nested list of tensor outputs from one or more data loaders.
Returns:
A dictionary containing the union of all items from individual data_loaders,
along with merged logs from all data loaders.
"""
        # Case where we don't provide data loaders
if self.validation_step_outputs is not None and len(self.validation_step_outputs) == 0:
return {}
# Case where we provide exactly 1 data loader
if isinstance(self.validation_step_outputs[0], dict):
output_dict = self.multi_validation_epoch_end(self.validation_step_outputs, dataloader_idx=0)
if output_dict is not None and 'log' in output_dict:
self.log_dict(output_dict.pop('log'), on_epoch=True)
self.validation_step_outputs.clear() # free memory
return output_dict
else: # Case where we provide more than 1 data loader
output_dict = {'log': {}}
            # The output is a list of lists of dicts; the outer list corresponds to the dataloader idx
for dataloader_idx, val_outputs in enumerate(self.validation_step_outputs):
# Get prefix and dispatch call to multi epoch end
dataloader_prefix = self.get_validation_dataloader_prefix(dataloader_idx)
dataloader_logs = self.multi_validation_epoch_end(val_outputs, dataloader_idx=dataloader_idx)
# If result was not provided, generate empty dict
dataloader_logs = dataloader_logs or {}
# Perform `val_loss` resolution first (if provided outside logs)
if 'val_loss' in dataloader_logs:
if 'val_loss' not in output_dict and dataloader_idx == self._val_dl_idx:
output_dict['val_loss'] = dataloader_logs['val_loss']
# For every item in the result dictionary
for k, v in dataloader_logs.items():
# If the key is `log`
if k == 'log':
# Parse every element of the log, and attach the prefix name of the data loader
log_dict = {}
for k_log, v_log in v.items():
                            # If we are logging the metric, but don't provide it at result level,
# store it twice - once in log and once in result level.
# Also mark log with prefix name to avoid log level clash with other data loaders
if k_log not in output_dict['log'] and dataloader_idx == self._val_dl_idx:
new_k_log = k_log
# Also insert duplicate key with prefix for ease of comparison / avoid name clash
log_dict[dataloader_prefix + k_log] = v_log
else:
# Simply prepend prefix to key and save
new_k_log = dataloader_prefix + k_log
# Store log value
log_dict[new_k_log] = v_log
# Update log storage of individual data loader
output_logs = output_dict['log']
output_logs.update(log_dict)
# Update global log storage
output_dict['log'] = output_logs
else:
# If any values are stored outside 'log', simply prefix name and store
new_k = dataloader_prefix + k
output_dict[new_k] = v
self.validation_step_outputs[dataloader_idx].clear() # free memory
if 'log' in output_dict:
self.log_dict(output_dict.pop('log'), on_epoch=True)
# return everything else
return output_dict
def on_test_epoch_end(self) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
"""
        Default hook at the end of a test epoch, which automatically supports multiple data loaders
        via `multi_test_epoch_end`.
If multi dataset support is not required, override this method entirely in base class.
In such a case, there is no need to implement `multi_test_epoch_end` either.
.. note::
If more than one data loader exists, and they all provide `test_loss`,
only the `test_loss` of the first data loader will be used by default.
This default can be changed by passing the special key `test_dl_idx: int`
inside the `test_ds` config.
Args:
outputs: Single or nested list of tensor outputs from one or more data loaders.
Returns:
A dictionary containing the union of all items from individual data_loaders,
along with merged logs from all data loaders.
"""
        # Case where we don't provide data loaders
if self.test_step_outputs is not None and len(self.test_step_outputs) == 0:
return {}
# Case where we provide exactly 1 data loader
if isinstance(self.test_step_outputs[0], dict):
output_dict = self.multi_test_epoch_end(self.test_step_outputs, dataloader_idx=0)
if output_dict is not None and 'log' in output_dict:
self.log_dict(output_dict.pop('log'), on_epoch=True)
self.test_step_outputs.clear() # free memory
return output_dict
else: # Case where we provide more than 1 data loader
output_dict = {'log': {}}
            # The output is a list of lists of dicts; the outer list corresponds to the dataloader idx
for dataloader_idx, test_outputs in enumerate(self.test_step_outputs):
# Get prefix and dispatch call to multi epoch end
dataloader_prefix = self.get_test_dataloader_prefix(dataloader_idx)
dataloader_logs = self.multi_test_epoch_end(test_outputs, dataloader_idx=dataloader_idx)
# If result was not provided, generate empty dict
dataloader_logs = dataloader_logs or {}
# Perform `test_loss` resolution first (if provided outside logs)
if 'test_loss' in dataloader_logs:
if 'test_loss' not in output_dict and dataloader_idx == self._test_dl_idx:
output_dict['test_loss'] = dataloader_logs['test_loss']
# For every item in the result dictionary
for k, v in dataloader_logs.items():
# If the key is `log`
if k == 'log':
# Parse every element of the log, and attach the prefix name of the data loader
log_dict = {}
for k_log, v_log in v.items():
                            # If we are logging the loss, but don't provide it at result level,
# store it twice - once in log and once in result level.
# Also mark log with prefix name to avoid log level clash with other data loaders
if k_log not in output_dict['log'] and dataloader_idx == self._test_dl_idx:
new_k_log = k_log
# Also insert duplicate key with prefix for ease of comparison / avoid name clash
log_dict[dataloader_prefix + k_log] = v_log
else:
# Simply prepend prefix to key and save
new_k_log = dataloader_prefix + k_log
log_dict[new_k_log] = v_log
# Update log storage of individual data loader
output_logs = output_dict.get('log', {})
output_logs.update(log_dict)
# Update global log storage
output_dict['log'] = output_logs
else:
# If any values are stored outside 'log', simply prefix name and store
new_k = dataloader_prefix + k
output_dict[new_k] = v
self.test_step_outputs[dataloader_idx].clear() # free memory
if 'log' in output_dict:
self.log_dict(output_dict.pop('log'), on_epoch=True)
# return everything else
return output_dict
def multi_validation_epoch_end(
self, outputs: List[Dict[str, torch.Tensor]], dataloader_idx: int = 0
) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
"""
        Adds support for multiple validation datasets. Should be overridden by the subclass,
so as to obtain appropriate logs for each of the dataloaders.
Args:
outputs: Same as that provided by LightningModule.on_validation_epoch_end()
for a single dataloader.
dataloader_idx: int representing the index of the dataloader.
Returns:
A dictionary of values, optionally containing a sub-dict `log`,
such that the values in the log will be pre-pended by the dataloader prefix.
"""
logging.warning(
"Multi data loader support has been enabled, but "
"`multi_validation_epoch_end(outputs, dataloader_idx) has not been implemented.\n"
"If you require multi data loader support for validation sets, please override this method.\n"
"If you do not require multi data loader support, please instead override "
"`on_validation_epoch_end(outputs)."
)
def multi_test_epoch_end(
self, outputs: List[Dict[str, torch.Tensor]], dataloader_idx: int = 0
) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
"""
        Adds support for multiple test datasets. Should be overridden by the subclass,
        so as to obtain appropriate logs for each of the dataloaders.
        Args:
            outputs: Same as that provided by LightningModule.on_test_epoch_end()
for a single dataloader.
dataloader_idx: int representing the index of the dataloader.
Returns:
A dictionary of values, optionally containing a sub-dict `log`,
such that the values in the log will be pre-pended by the dataloader prefix.
"""
logging.warning(
"Multi data loader support has been enabled, but "
"`multi_test_epoch_end(outputs, dataloader_idx) has not been implemented.\n"
"If you require multi data loader support for validation sets, please override this method.\n"
"If you do not require multi data loader support, please instead override "
"`on_test_epoch_end(outputs)."
)
def get_validation_dataloader_prefix(self, dataloader_idx: int = 0) -> str:
"""
Get the name of one or more data loaders, which will be prepended to all logs.
Args:
dataloader_idx: Index of the data loader.
Returns:
str name of the data loader at index provided.
"""
return self._validation_names[dataloader_idx]
def get_test_dataloader_prefix(self, dataloader_idx: int = 0) -> str:
"""
Get the name of one or more data loaders, which will be prepended to all logs.
Args:
dataloader_idx: Index of the data loader.
Returns:
str name of the data loader at index provided.
"""
return self._test_names[dataloader_idx]
def load_part_of_state_dict(self, state_dict, include, exclude, load_from_string=None):
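        """
        Loads only the subset of `state_dict` whose keys contain at least one of the
        strings in `include` and none of the strings in `exclude`, using `strict=False`.
        `load_from_string` is only used to make the resulting log messages more informative.
        """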
excluded_param_names = []
# create dict
dict_to_load = {}
for k, v in state_dict.items():
should_add = False
# if any string in include is present, should add
for p in include:
if p in k:
should_add = True
break
            # except if any string from exclude is present
for e in exclude:
if e in k:
excluded_param_names.append(k)
should_add = False
break
if should_add:
dict_to_load[k] = v
# Restore checkpoint part into current model
self.load_state_dict(dict_to_load, strict=False)
if load_from_string is not None:
logging.info(f'Model checkpoint partially restored from {load_from_string}')
if len(excluded_param_names) > 0:
logging.info(
f'The following parameters were excluded when loading from {load_from_string} : {excluded_param_names}'
)
logging.info(f'Make sure that this is what you wanted!')
else:
if len(excluded_param_names) > 0:
logging.info(
f'The following parameters were excluded when loading checkpoint : {excluded_param_names}'
)
@rank_zero_only
def maybe_init_from_pretrained_checkpoint(self, cfg: OmegaConf, map_location: str = 'cpu'):
"""
Initializes a given model with the parameters obtained via specific config arguments.
        The state dict of the provided model will be updated with `strict=False`, so an exact match
        of model parameter names is not required.
Initializations:
init_from_nemo_model: Str path to a .nemo model in order to load state_dict from single nemo file;
if loading from multiple files, pass in a dict where the values have the following fields:
path: Str path to .nemo model
include: Optional list of strings, at least one of which needs to be contained in parameter name
to be loaded from this .nemo file. Default: everything is included.
exclude: Optional list of strings, which can be used to exclude any parameter containing one of
these strings from being loaded from this .nemo file. Default: nothing is excluded.
hydra usage example:
init_from_nemo_model:
model0:
                        path: <path/to/model1>
                        include: ["encoder"]
                    model1:
                        path: <path/to/model2>
                        include: ["decoder"]
                        exclude: ["embed"]
init_from_pretrained_model: Str name of a pretrained model checkpoint (obtained via cloud).
The model will be downloaded (or a cached copy will be used), instantiated and then
its state dict will be extracted. If loading from multiple models, you can pass in a dict
with the same format as for init_from_nemo_model, except with "name" instead of "path"
init_from_ptl_ckpt: Str name of a Pytorch Lightning checkpoint file. It will be loaded and
                the state dict will be extracted. If loading from multiple files, you can pass in a dict
with the same format as for init_from_nemo_model.
Args:
cfg: The config used to instantiate the model. It need only contain one of the above keys.
map_location: str or torch.device() which represents where the intermediate state dict
(from the pretrained model or checkpoint) will be loaded.
"""
args = [
'init_from_nemo_model',
'init_from_pretrained_model',
'init_from_ptl_ckpt',
]
        arg_matches = [(1 if arg in cfg and cfg[arg] is not None else 0) for arg in args]
if sum(arg_matches) == 0:
# model weights do not need to be restored
return
if sum(arg_matches) > 1:
raise ValueError(
f"Cannot pass more than one model initialization arguments to config!\n"
f"Found : {[args[idx] for idx, arg_present in enumerate(arg_matches) if arg_present]}"
)
if 'init_from_nemo_model' in cfg and cfg.init_from_nemo_model is not None:
with open_dict(cfg):
if isinstance(cfg.init_from_nemo_model, str):
model_path = cfg.init_from_nemo_model
# Restore model
restored_model = self.restore_from(
model_path, map_location=map_location, strict=cfg.get("init_strict", True)
)
# Restore checkpoint into current model
self.load_state_dict(restored_model.state_dict(), strict=False)
logging.info(f'Model checkpoint restored from nemo file with path : `{model_path}`')
del restored_model
elif isinstance(cfg.init_from_nemo_model, (DictConfig, dict)):
model_load_dict = cfg.init_from_nemo_model
for model_load_cfg in model_load_dict.values():
model_path = model_load_cfg.path
# Restore model
restored_model = self.restore_from(
model_path, map_location=map_location, strict=cfg.get("init_strict", True)
)
include = model_load_cfg.pop('include', [""])
exclude = model_load_cfg.pop('exclude', [])
self.load_part_of_state_dict(
restored_model.state_dict(), include, exclude, f'nemo file with path `{model_path}`'
)
del restored_model
else:
raise TypeError("Invalid type: init_from_nemo_model is not a string or a dict!")
if 'init_from_pretrained_model' in cfg and cfg.init_from_pretrained_model is not None:
with open_dict(cfg):
# Restore model
if isinstance(cfg.init_from_pretrained_model, str):
model_name = cfg.pop('init_from_pretrained_model')
# Check if model is being resumed or not - only works if `Trainer` is attached to model
if hasattr(self, 'trainer') and self.trainer is not None:
trainer = self.trainer
if (
hasattr(trainer, 'resume_from_checkpoint')
and trainer._checkpoint_connector.resume_checkpoint_path is not None
):
logging.info(
"Model training is being resumed via Pytorch Lightning.\n"
"Initialization from pretrained model (via cloud) will be skipped."
)
return
restored_model = self.from_pretrained(
model_name, map_location=map_location, strict=cfg.get("init_strict", True)
)
# Restore checkpoint into current model
self.load_state_dict(restored_model.state_dict(), strict=False)
logging.info(f'Model checkpoint restored from pretrained checkpoint with name : `{model_name}`')
del restored_model
elif isinstance(cfg.init_from_pretrained_model, (DictConfig, dict)):
model_load_dict = cfg.init_from_pretrained_model
for model_load_cfg in model_load_dict.values():
model_name = model_load_cfg.name
# Restore model
restored_model = self.from_pretrained(
model_name, map_location=map_location, strict=cfg.get("init_strict", True)
)
include = model_load_cfg.pop('include', [""])
exclude = model_load_cfg.pop('exclude', [])
self.load_part_of_state_dict(
restored_model.state_dict(),
include,
exclude,
f'pretrained checkpoint with name `{model_name}`',
)
del restored_model
else:
raise TypeError("Invalid type: init_from_pretrained_model is not a string or a dict!")
if 'init_from_ptl_ckpt' in cfg and cfg.init_from_ptl_ckpt is not None:
with open_dict(cfg):
if isinstance(cfg.init_from_ptl_ckpt, str):
# Restore checkpoint
ckpt_path = cfg.pop('init_from_ptl_ckpt')
ckpt = torch.load(ckpt_path, map_location=map_location)
# Restore checkpoint into current model
self.load_state_dict(ckpt['state_dict'], strict=False)
logging.info(
f'Model checkpoint restored from pytorch lightning checkpoint with path : `{ckpt_path}`'
)
del ckpt
elif isinstance(cfg.init_from_ptl_ckpt, (DictConfig, dict)):
model_load_dict = cfg.init_from_ptl_ckpt
for model_load_cfg in model_load_dict.values():
ckpt_path = model_load_cfg.path
# Restore model
ckpt = torch.load(ckpt_path, map_location=map_location)
include = model_load_cfg.pop('include', [""])
exclude = model_load_cfg.pop('exclude', [])
self.load_part_of_state_dict(
                            ckpt['state_dict'], include, exclude, f'pytorch lightning checkpoint with path `{ckpt_path}`'
)
del ckpt
else:
raise TypeError("Invalid type: init_from_ptl_ckpt is not a string or a dict!")
def teardown(self, stage: str):
"""
Called at the end of fit and test.
Args:
stage: either 'fit' or 'test'
"""
if stage == 'fit':
# Update env variable to bypass multi gpu issue after training
# This fix affects usage of trainer.test() after trainer.train()
# If trainer.train() was done on multiple GPUs, then trainer.test()
            # will try to do ddp, even if it's a new Trainer object with just 1 GPU.
# Temporary patch to fix that
if 'PL_TRAINER_GPUS' in os.environ:
os.environ.pop('PL_TRAINER_GPUS')
super().teardown(stage)
@classmethod
def extract_state_dict_from(
cls,
restore_path: str,
save_dir: str,
split_by_module: bool = False,
save_restore_connector: SaveRestoreConnector = None,
):
"""
Extract the state dict(s) from a provided .nemo tarfile and save it to a directory.
Args:
restore_path: path to .nemo file from which state dict(s) should be extracted
save_dir: directory in which the saved state dict(s) should be stored
            split_by_module: bool flag, which determines whether the output checkpoint should
                be for the entire Model, or the individual modules that comprise the Model
            save_restore_connector (SaveRestoreConnector): Can be overridden to add custom save and restore logic.
Example:
To convert the .nemo tarfile into a single Model level PyTorch checkpoint
::
state_dict = nemo.collections.asr.models.EncDecCTCModel.extract_state_dict_from('asr.nemo', './asr_ckpts')
To restore a model from a Model level checkpoint
::
model = nemo.collections.asr.models.EncDecCTCModel(cfg) # or any other method of restoration
model.load_state_dict(torch.load("./asr_ckpts/model_weights.ckpt"))
To convert the .nemo tarfile into multiple Module level PyTorch checkpoints
::
state_dict = nemo.collections.asr.models.EncDecCTCModel.extract_state_dict_from('asr.nemo', './asr_ckpts', split_by_module=True)
To restore a module from a Module level checkpoint
::
model = nemo.collections.asr.models.EncDecCTCModel(cfg) # or any other method of restoration
# load the individual components
model.preprocessor.load_state_dict(torch.load("./asr_ckpts/preprocessor.ckpt"))
model.encoder.load_state_dict(torch.load("./asr_ckpts/encoder.ckpt"))
model.decoder.load_state_dict(torch.load("./asr_ckpts/decoder.ckpt"))
Returns:
The state dict that was loaded from the original .nemo checkpoint
"""
if save_restore_connector is None:
save_restore_connector = SaveRestoreConnector()
if not path.exists(restore_path):
            raise FileNotFoundError(f"Can't find {restore_path}")
cls.update_save_restore_connector(save_restore_connector)
state_dict = cls._save_restore_connector.extract_state_dict_from(restore_path, save_dir, split_by_module)
return state_dict
def prepare_test(self, trainer: 'Trainer') -> bool:
"""
Helper method to check whether the model can safely be tested
on a dataset after training (or loading a checkpoint).
::
trainer = Trainer()
if model.prepare_test(trainer):
trainer.test(model)
Returns:
bool which declares the model safe to test. Provides warnings if it has to
return False to guide the user.
"""
if not hasattr(self._cfg, 'test_ds'):
logging.info("No `test_ds` config found within the manifest.")
return False
# Replace ddp multi-gpu until PTL has a fix
DDP_WARN = """\n\nDuring testing, it is currently advisable to construct a new Trainer "
"with single GPU and no DDP to obtain accurate results.
"Following pattern should be used: "
"trainer = Trainer(devices=1, accelerator='gpu')"
"if model.prepare_test(trainer):"
" trainer.test(model)\n\n"""
if trainer is not None:
if trainer.num_devices > 1:
logging.warning(DDP_WARN)
return False
# Assign trainer to the model
self.set_trainer(trainer)
return True
def set_trainer(self, trainer: Trainer):
"""
Set an instance of Trainer object.
Args:
trainer: PyTorch Lightning Trainer object.
"""
self.trainer = trainer
self._trainer = trainer
self.set_world_size(trainer)
def set_world_size(self, trainer: Trainer):
"""
        Determines the world size from the PyTorch Lightning Trainer and then updates AppState.
Args:
trainer (Trainer): PyTorch Lightning Trainer object
"""
# Update AppState with world information from trainer
self.world_size = 1
if trainer is not None:
if isinstance(trainer, Trainer):
if trainer.num_devices and trainer.num_nodes:
self.world_size = trainer.num_devices * trainer.num_nodes
else:
logging.warning(f'World size can only be set by PyTorch Lightning Trainer.')
app_state = AppState()
app_state.world_size = self.world_size
def summarize(self, max_depth: int = 1) -> model_summary.ModelSummary:
"""Summarize this LightningModule.
Args:
max_depth: The maximum depth of layer nesting that the summary will include. A value of 0 turns the
layer summary off. Default: 1.
Return:
The model summary object
"""
return model_summary.summarize(self, max_depth=max_depth)
def _update_dataset_config(self, dataset_name: str, config: Optional[Union[DictConfig, Dict]]):
"""
Update the config (if not None) of the dataset by given name.
Preserves said config after updating.
Args:
dataset_name: str name of the dataset whose config is being updated.
Can be one of `train`, `validation` and `test`.
config: Optional DictConfig or dict. If None is passed, this method simply returns.
If dict is passed, it is cast into a DictConfig.
The internal config is updated with the passed config.
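        Example (a minimal sketch; the config keys and values below are placeholders):
        .. code::
            self._update_dataset_config('train', {'batch_size': 32, 'shuffle': True})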
"""
if hasattr(self, '_multi_dataset_mode') and self._multi_dataset_mode is True:
return
if config is not None:
if not isinstance(config, DictConfig):
config = OmegaConf.create(config)
if dataset_name in ['train', 'validation', 'test']:
OmegaConf.set_struct(self.cfg, False)
key_name = dataset_name + "_ds"
self.cfg[key_name] = config
OmegaConf.set_struct(self.cfg, True)
# Update hyper parameters by calling property setter
self.cfg = self._cfg
else:
raise ValueError("`dataset_name` when updating config must be one of [train, validation, test]")
@property
def num_weights(self):
"""
Utility property that returns the total number of parameters of the Model.
"""
num: int = 0
for p in self.parameters():
if p.requires_grad:
num += p.numel()
return num
@property
def cfg(self):
"""
Property that holds the finalized internal config of the model.
Note:
Changes to this config are not reflected in the state of the model.
Please create a new model using an updated config to properly update the model.
"""
return self._cfg
@LightningModule.trainer.getter
def trainer(self):
return self._trainer
@cfg.setter
def cfg(self, cfg):
"""
Property that holds the finalized internal config of the model.
Note:
Changes to this config are not reflected in the state of the model.
Please create a new model using an updated config to properly update the model.
"""
self._cfg = cfg
self._set_hparams(OmegaConf.create({'cfg': self._cfg}))
# TODO: Remove in NeMo 1.7 (or when PTL fixes this on their end)
if hasattr(self, '_hparams_initial') and 'cfg' in self._hparams_initial:
self._hparams_initial['cfg'] = OmegaConf.to_object(self._cfg)
@staticmethod
def _is_model_being_restored() -> bool:
app_state = AppState()
return app_state.is_model_being_restored
@staticmethod
def _set_model_restore_state(is_being_restored: bool, folder: str = None):
app_state = AppState()
app_state.is_model_being_restored = is_being_restored
app_state.nemo_file_folder = folder
def _set_model_guid(self):
if not hasattr(self, 'model_guid'):
appstate = AppState()
# Generate a unique uuid for the instance
# also determine if the model is being restored or not, and preserve the path
self.model_guid = str(uuid.uuid4())
if self._is_model_being_restored():
restore_path = appstate.model_restore_path
else:
restore_path = None
appstate.register_model_guid(self.model_guid, restoration_path=restore_path)
@classmethod
def update_save_restore_connector(cls, save_restore_connector):
if hasattr(cls, '_save_restore_connector'):
cls._save_restore_connector = save_restore_connector
else:
setattr(cls, '_save_restore_connector', save_restore_connector)
def _setup_nsys_profiling(self):
""" Enables nsys profiling
        To use, add the following options to the model config:
## Nsys profiling options
nsys_profile: False
start_step: 10 # Global batch to start profiling
end_step: 10 # Global batch to end profiling
ranks: [0] # Global rank IDs to profile
gen_shape: False # Generate model and kernel details including input shapes
And then wrap the model training script with:
nsys profile -s none -o <profile filepath> -t cuda,nvtx --force-overwrite true --capture-range=cudaProfilerApi --capture-range-end=stop python ./examples/...
See more options at: https://docs.nvidia.com/nsight-systems/UserGuide/index.html#cli-profiling
"""
if self.cfg.get('nsys_profile', None) is not None:
if self.cfg.nsys_profile.get('enabled', False):
# Nsys profiling options
self._nsys_profile_enabled = True
self._nsys_profile_start_step = self.cfg.nsys_profile.get('start_step', 0)
self._nsys_profile_end_step = self.cfg.nsys_profile.get('end_step', 0)
self._nsys_profile_ranks = self.cfg.nsys_profile.get('ranks', [0])
self._nsys_profile_gen_shape = self.cfg.nsys_profile.get('gen_shape', False)
                if isinstance(self._nsys_profile_start_step, int):
logging.info(f'Nsys profiling setup with start_step: {self._nsys_profile_start_step}')
else:
raise ValueError(
f'Nsys start_step must be of type int. Found: {type(self._nsys_profile_start_step)}'
)
                if isinstance(self._nsys_profile_end_step, int):
logging.info(f'Nsys profiling setup with end_step: {self._nsys_profile_end_step}')
else:
raise ValueError(f'Nsys end_step must be of type int. Found: {type(self._nsys_profile_end_step)}')
                if self._nsys_profile_end_step < self._nsys_profile_start_step:
                    raise ValueError('Nsys end_step must be greater than or equal to nsys start_step')
def on_train_start(self):
""" PyTorch Lightning hook:
https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_module.html#on-train-start
We use it here to copy the relevant config for dynamic freezing.
"""
# dynamic freezing
# should fire only once, on the very first batch of training and never again
if not hasattr(self, '_freeze_cfg'):
if (
hasattr(self.cfg, 'freeze_updates')
and self.cfg.freeze_updates is not None
and self.cfg.freeze_updates.get('enabled', False)
):
setattr(self, '_freeze_cfg', OmegaConf.to_container(self.cfg.freeze_updates))
self._freeze_cfg['is_frozen'] = {k: False for k in self._freeze_cfg['modules'].keys()}
else:
setattr(self, '_freeze_cfg', None)
def on_train_batch_start(self, batch: Any, batch_idx: int, unused: int = 0) -> Optional[int]:
""" PyTorch Lightning hook:
https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_module.html#on-train-batch-start
We use it here to enable nsys profiling and dynamic freezing.
"""
# nsys profiling
if self.device.type == 'cuda':
if hasattr(self, '_nsys_profile_enabled'):
if self._nsys_profile_enabled:
if batch_idx == self._nsys_profile_start_step and get_rank() in self._nsys_profile_ranks:
logging.info("====== Start nsys profiling ======")
torch.cuda.cudart().cudaProfilerStart()
if self._nsys_profile_gen_shape:
torch.autograd.profiler.emit_nvtx(record_shapes=True).__enter__()
# dynamic freezing
if hasattr(self, '_freeze_cfg') and self._freeze_cfg is not None:
if self.training and hasattr(self, "trainer") and self.trainer is not None:
num_updates = self.trainer.global_step + 1
for ml, m_steps in self._freeze_cfg['modules'].items():
# we could do hasattr check here, but it's too expensive for each step
# consequently you'll throw an error if the module name doesn't exist
# or was spelled wrong in the config.yaml
if isinstance(m_steps, list):
assert len(m_steps) == 2, "freeze_updates modules list cannot have more than two elements"
should_freeze = (num_updates >= m_steps[0]) and (num_updates <= m_steps[1] or m_steps[1] == -1)
else:
should_freeze = num_updates <= m_steps or m_steps == -1
if should_freeze and not self._freeze_cfg['is_frozen'][ml]:
getattr(self, ml).freeze()
getattr(self, ml).train()
self._freeze_cfg['is_frozen'][ml] = True
elif not should_freeze and self._freeze_cfg['is_frozen'][ml]:
getattr(self, ml).unfreeze()
self._freeze_cfg['is_frozen'][ml] = False
def on_train_batch_end(self, outputs, batch: Any, batch_idx: int, unused: int = 0) -> None:
""" PyTorch Lightning hook:
https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_module.html#on-train-batch-end
We use it here to enable nsys profiling.
"""
if self.device.type == 'cuda':
if hasattr(self, '_nsys_profile_enabled'):
if self._nsys_profile_enabled:
if batch_idx == self._nsys_profile_end_step and get_rank() in self._nsys_profile_ranks:
logging.info("====== End nsys profiling ======")
torch.cuda.cudart().cudaProfilerStop()
def on_train_end(self):
""" PyTorch Lightning hook:
https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_module.html#on-train-end
We use it here to cleanup the dynamic freezing config.
"""
# dynamic freezing cleanup
if hasattr(self, '_freeze_cfg'):
delattr(self, '_freeze_cfg')
# TODO: Remove in PTL 1.7.2
def cuda(self, device=None):
""" PTL is overriding this method and changing the pytorch behavior of a module.
        The PTL LightningModule override will move the module to device 0 if device is None.
See the PTL method here: https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/core/mixins/device_dtype_mixin.py#L113
Here we are overriding this to maintain the default Pytorch nn.module behavior:
https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/module.py#L728
Moves all model parameters and buffers to the GPU.
This also makes associated parameters and buffers different objects. So
it should be called before constructing optimizer if the module will
live on GPU while being optimized.
.. note::
This method modifies the module in-place.
Args:
device (int, optional): if specified, all parameters will be
copied to that device
Returns:
Module: self
"""
if device is None:
device = torch.device("cuda", torch.cuda.current_device())
elif isinstance(device, int):
device = torch.device("cuda", index=device)
return super().cuda(device=device)
|
NeMo-main
|
nemo/core/classes/modelPT.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from torch.nn import Module
from nemo.core.classes.common import FileIO, Serialization, Typing
__all__ = ['NeuralModule']
class NeuralModule(Module, Typing, Serialization, FileIO):
"""
Abstract class offering interface shared between all PyTorch Neural Modules.
"""
@property
def num_weights(self):
"""
Utility property that returns the total number of parameters of NeuralModule.
"""
num: int = 0
for p in self.parameters():
if p.requires_grad:
num += p.numel()
return num
def input_example(self, max_batch=None, max_dim=None):
"""
Override this method if random inputs won't work
Returns:
A tuple sample of valid input data.
"""
return None
def freeze(self) -> None:
r"""
Freeze all params for inference.
"""
for param in self.parameters():
param.requires_grad = False
self.eval()
def unfreeze(self) -> None:
"""
Unfreeze all parameters for training.
"""
for param in self.parameters():
param.requires_grad = True
self.train()
@contextmanager
def as_frozen(self):
"""
Context manager which temporarily freezes a module, yields control and finally unfreezes the module.
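        Example (a minimal sketch; `module` is any NeuralModule instance):
        .. code::
            with module.as_frozen():
                output = module(inputs)  # parameters stay frozen for the duration of the block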
"""
training_mode = self.training
grad_map = {}
for pname, param in self.named_parameters():
grad_map[pname] = param.requires_grad
self.freeze()
try:
yield
finally:
self.unfreeze()
for pname, param in self.named_parameters():
param.requires_grad = grad_map[pname]
if training_mode:
self.train()
else:
self.eval()
|
NeMo-main
|
nemo/core/classes/module.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.core.classes.mixins.access_mixins import AccessMixin, set_access_cfg
from nemo.core.classes.mixins.adapter_mixin_strategies import (
ResidualAddAdapterStrategy,
ResidualAddAdapterStrategyConfig,
ReturnResultAdapterStrategy,
ReturnResultAdapterStrategyConfig,
)
from nemo.core.classes.mixins.adapter_mixins import (
AdapterModelPTMixin,
AdapterModuleMixin,
get_registered_adapter,
register_adapter,
)
|
NeMo-main
|
nemo/core/classes/mixins/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from typing import Optional
import torch
from omegaconf import DictConfig
_ACCESS_CFG = DictConfig({"detach": False, "convert_to_cpu": False})
_ACCESS_ENABLED = False
def set_access_cfg(cfg: 'DictConfig'):
if cfg is None or not isinstance(cfg, DictConfig):
raise TypeError(f"cfg must be a DictConfig")
global _ACCESS_CFG
_ACCESS_CFG = cfg
class AccessMixin(ABC):
"""
Allows access to output of intermediate layers of a model
"""
def __init__(self):
super().__init__()
self._registry = {} # dictionary of lists
def register_accessible_tensor(self, name, tensor):
"""
Register tensor for later use.
"""
if self.access_cfg.get('convert_to_cpu', False):
tensor = tensor.cpu()
if self.access_cfg.get('detach', False):
tensor = tensor.detach()
if not hasattr(self, '_registry'):
self._registry = {}
if name not in self._registry:
self._registry[name] = []
self._registry[name].append(tensor)
@classmethod
def get_module_registry(cls, module: torch.nn.Module):
"""
Extract all registries from named submodules, return dictionary where
the keys are the flattened module names, the values are the internal registry
of each such module.
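        Example (a minimal sketch; `model` and `batch` are placeholders for a module built on AccessMixin
        and its input):
        .. code::
            AccessMixin.set_access_enabled(access_enabled=True)
            _ = model(batch)  # submodules may call `register_accessible_tensor(...)` during forward
            registry = AccessMixin.get_module_registry(model)
            # registry: {module_name: {tensor_name: [list of captured tensors]}}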
"""
module_registry = {}
for name, m in module.named_modules():
if hasattr(m, '_registry') and len(m._registry) > 0:
module_registry[name] = m._registry
return module_registry
def reset_registry(self: torch.nn.Module, registry_key: Optional[str] = None):
"""
Reset the registries of all named sub-modules
"""
if hasattr(self, "_registry"):
if registry_key is None:
self._registry.clear()
else:
if registry_key in self._registry:
self._registry.pop(registry_key)
else:
raise KeyError(
f"Registry key `{registry_key}` provided, but registry does not have this key.\n"
f"Available keys in registry : {list(self._registry.keys())}"
)
for _, m in self.named_modules():
if hasattr(m, "_registry"):
if registry_key is None:
m._registry.clear()
else:
                    if registry_key in m._registry:
                        m._registry.pop(registry_key)
                    else:
                        raise KeyError(
                            f"Registry key `{registry_key}` provided, but registry does not have this key.\n"
                            f"Available keys in registry : {list(m._registry.keys())}"
)
# Explicitly disable registry cache after reset
AccessMixin.set_access_enabled(access_enabled=False)
@property
def access_cfg(self):
"""
Returns:
The global access config shared across all access mixin modules.
"""
global _ACCESS_CFG
return _ACCESS_CFG
@classmethod
def update_access_cfg(cls, cfg: dict):
global _ACCESS_CFG
_ACCESS_CFG.update(cfg)
@classmethod
def is_access_enabled(cls):
global _ACCESS_ENABLED
return _ACCESS_ENABLED
@classmethod
def set_access_enabled(cls, access_enabled: bool):
global _ACCESS_ENABLED
_ACCESS_ENABLED = access_enabled
|
NeMo-main
|
nemo/core/classes/mixins/access_mixins.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from abc import ABC
from dataclasses import dataclass, is_dataclass
from typing import List, Optional, Set, Tuple, Union
import torch
import torch.nn as nn
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf, open_dict
from nemo.utils import logging, model_utils
# Global registry of all adapters
ADAPTER_REGISTRY = {}
@dataclass
class AdapterRegistryInfo:
base_class: type
adapter_class: type
# generated automatically
base_class_path: str = ""
adapter_class_path: str = ""
def __post_init__(self):
self.base_class_path = f'{self.base_class.__module__}.{self.base_class.__name__}'
self.adapter_class_path = f'{self.adapter_class.__module__}.{self.adapter_class.__name__}'
def register_adapter(base_class: type, adapter_class: type):
"""
Registers a pair (Base class, Adapter class) into the adapter registry, used for de-referencing.
Args:
base_class: A Class, which is the base class of the object.
adapter_class: A Class, which is the subclass of the base class, and implements the Adapter mixin methods.
"""
global ADAPTER_REGISTRY
base_class_path = f'{base_class.__module__}.{base_class.__name__}'
adapter_class_path = f'{adapter_class.__module__}.{adapter_class.__name__}'
# test if base class already in registry
if base_class_path in ADAPTER_REGISTRY:
raise ValueError(f"`{base_class_path}` has already been added to the adapter registry !")
# test if adapter is a subclass of the base class
if not issubclass(adapter_class, base_class):
raise ValueError(f"`{adapter_class_path}` is not a sub-class of {base_class_path} !")
# register the base class : adapter class pair
ADAPTER_REGISTRY[base_class_path] = AdapterRegistryInfo(base_class=base_class, adapter_class=adapter_class)
# attach adapter class to base class
base_class._meta_adapter_class = adapter_class
# attach base class to adapter class
adapter_class._meta_base_class = base_class
def get_registered_adapter(cls: Union[str, type]) -> Optional[AdapterRegistryInfo]:
"""
Resolves a provided `cls` (whether str path to class, a registered base or an adapter class)
to obtain the metadata for the adapter.
Args:
cls: Can be a str (absolute path to a class), a base class or an adapter class (which have already
been registered).
Returns:
        An AdapterRegistryInfo object if it could resolve successfully, otherwise None.
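    Example (a minimal sketch; the class path below is a placeholder):
    .. code::
        info = get_registered_adapter('my_pkg.modules.MyEncoder')
        if info is not None:
            print(info.base_class_path, '->', info.adapter_class_path)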
"""
global ADAPTER_REGISTRY
if isinstance(cls, str):
cls = model_utils.import_class_by_path(cls)
# If an adapter class was provided, de-reference its base class
if hasattr(cls, '_meta_base_class'):
cls = cls._meta_base_class
class_path = f'{cls.__module__}.{cls.__name__}'
# If base class, check registry
if class_path in ADAPTER_REGISTRY:
return ADAPTER_REGISTRY[class_path]
return None
def _prepare_default_adapter_config(*, global_key: str, meta_key: str, cfg: DictConfig = None) -> DictConfig:
if cfg is None:
cfg = OmegaConf.create({})
with open_dict(cfg):
if global_key not in cfg:
cfg[global_key] = OmegaConf.create({})
if meta_key not in cfg[global_key]:
cfg[global_key][meta_key] = OmegaConf.create({})
if 'modules' not in cfg[global_key][meta_key]:
cfg[global_key][meta_key]['modules'] = OmegaConf.create({})
return cfg
class AdapterModuleMixin(ABC):
""" Generic Adapter Mixin that can augment any torch.nn.Module with Adapter module support.
This mixin class adds a hierarchical way to add any type of Adapter modules to a pre-existing module.
Since Models are inherently also nn.Module, this mixin can be attached to any Model or Module.
This mixin class adds several utility methods which are utilized or overridden as necessary.
    An Adapter module is any Pytorch nn.Module that possesses a few properties:
    - Its input and output dimensions are the same, while the hidden dimension need not be the same.
- The final layer of the Adapter module is zero-initialized, so that the residual connection to the adapter
yields the original output.
This mixin adds the following instance variables to the class this inherits it:
- `adapter_layer`: A torch.nn.ModuleDict(), whose keys are the names of the adapter (globally unique),
and values are the Adapter nn.Module().
- `adapter_cfg`: A OmegaConf DictConfig object that holds the config of the adapters that are initialized.
    - `adapter_name`: A str resolved name which is a globally unique key, but more than one module may share
        this name.
- `adapter_global_cfg_key`: A str representing a key in the model config that can be provided by the user.
The value resolves to `global_cfg`, and can be overridden via `model.cfg.adapters.global_cfg.*`.
- `adapter_metadata_cfg_key`: A str representing a key in the model config that is used to preserve the
metadata of the adapter config.
.. note::
This module is **not** responsible for maintaining its config. Subclasses must ensure config is updated
or preserved as needed. It is the responsibility of the subclasses to propagate the most up to date config to
lower layers.
"""
adapter_global_cfg_key = "global_cfg"
adapter_metadata_cfg_key = "adapter_meta_cfg"
def add_adapter(self, name: str, cfg: DictConfig, **kwargs):
"""
Add an Adapter module to this module.
Args:
name: A globally unique name for the adapter. Will be used to access, enable and disable adapters.
            cfg: A DictConfig or Dataclass that contains at the bare minimum `_target_` to instantiate a
new Adapter module.
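        Example (a minimal sketch; the adapter class path and its arguments are placeholders):
        .. code::
            adapter_cfg = OmegaConf.create({'_target_': 'my_pkg.adapters.LinearAdapter', 'in_features': 512})
            module.add_adapter(name='adapter_0', cfg=adapter_cfg)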
"""
if not isinstance(cfg, DictConfig):
cfg = DictConfig(cfg)
adapter_types = self.get_accepted_adapter_types()
_pass_types = False
if len(adapter_types) > 0:
test = model_utils.import_class_by_path(cfg._target_)
for _type in adapter_types:
# TODO: (@adithyare) should revisit if subclass is the best check...
if issubclass(test, _type):
_pass_types = True
break
if not _pass_types:
raise ValueError(
f"Config: \n{OmegaConf.to_yaml(cfg)}\n"
f"It creates adapter class {test} \n"
f"that is not in the list of accepted adapter types.\n"
f"Accepted adapters: {[t for t in adapter_types]}"
)
# Convert to DictConfig from dict or Dataclass
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if not isinstance(cfg, DictConfig):
cfg = DictConfig(cfg)
# Add adapter_layer ModuleDict() if not present.
if not hasattr(self, 'adapter_layer'):
self.adapter_layer = nn.ModuleDict()
        # Add adapter_cfg if it doesn't exist or hasn't been assigned yet.
if not hasattr(self, 'adapter_cfg'):
self.adapter_cfg = OmegaConf.create({})
# Resolve the module name and adapter name (if module name is provided)
_, adapter_name = self.resolve_adapter_module_name_(name)
# Add adapter_name to this module for later identification
self.adapter_name = adapter_name
# Assert that name is globally unique to all adapters.
if adapter_name in self.adapter_layer:
raise ValueError(
f"Adapter with name `{name}` already exists ! Adapter names = {list(self.adapter_layer.keys())}"
)
# Assert that name is not `adapter_global_cfg_key`
if adapter_name == self.adapter_global_cfg_key:
raise ValueError(f"Adapters cannot have the reserved name : `{self.adapter_global_cfg_key}`")
# Update internal config and instantiate the Adapter module
with open_dict(cfg), open_dict(self.adapter_cfg):
adapter_enabled = cfg.pop('enabled', True)
self.adapter_layer[adapter_name] = instantiate(cfg, **kwargs)
cfg['enabled'] = adapter_enabled
self.adapter_cfg[adapter_name] = cfg
def is_adapter_available(self) -> bool:
"""
Checks if any Adapter module has been instantiated.
Returns:
            bool, determining if any Adapter module has been instantiated. Returns True whether the adapters
            are enabled or disabled, and False only if no adapters exist.
"""
if hasattr(self, 'adapter_layer'):
return self.adapter_layer is not None and len(self.adapter_layer) > 0
return False
def set_enabled_adapters(self, name: Optional[str] = None, enabled: bool = True):
"""
        Updates the internal adapter config, determining if an adapter (or all adapters) are either
enabled or disabled.
A common user pattern would be to disable all adapters (either after adding them, or restoring a model
with pre-existing adapters) and then simply enable one of the adapters.
.. code::
module.set_enabled_adapters(enabled=False)
module.set_enabled_adapters(name=<some adapter name>, enabled=True)
Args:
name: Optional str. If a str name is given, the config will be updated to the value of `enabled`.
If no name is given, then all adapters will be enabled/disabled.
enabled: Bool, determines if the adapter(s) will be enabled/disabled.
"""
if not self.is_adapter_available():
raise ValueError("No adapter is available to enable/disable")
# If name is None, enable/disable all adapters.
if name is None:
for key, config in self.adapter_cfg.items():
# Skip the global adapter config
if key == self.adapter_global_cfg_key:
continue
# Enable/Disable the current adapter
self.adapter_cfg[key]['enabled'] = enabled
else:
_, adapter_name = self.resolve_adapter_module_name_(name)
# Cannot set the state of the global config for adapters
if adapter_name == self.adapter_global_cfg_key:
raise ValueError(
f'Cannot set the state of the global config of adapters, '
f'given name = `{self.adapter_global_cfg_key}`'
)
# Enable/Disable just named adapter
self.adapter_cfg[adapter_name]['enabled'] = enabled
def get_enabled_adapters(self) -> List[str]:
"""
Returns a list of all enabled adapters names. The names will always be the resolved names, without
module info.
Returns:
            A list of str names of each enabled adapter(s).
"""
if not self.is_adapter_available():
return []
# populate set of available modules (by name)
available_module_names = set([])
if hasattr(self, 'adapter_layer'):
available_module_names.update(list(self.adapter_layer.keys()))
# populate list of allowed adapter classes
adapter_types = self.get_accepted_adapter_types()
enabled_adapters = []
for name, config in self.adapter_cfg.items():
# Skip the global adapter config
if name == self.adapter_global_cfg_key:
continue
# If name is in the current available modules, and it is enabled in the config
if name in available_module_names and self.adapter_cfg[name]['enabled']:
# Check if type is supported (if available) and is an enabled adapter
if len(adapter_types) > 0:
module = self.get_adapter_module(name)
for adapter_type in adapter_types:
if isinstance(module, adapter_type):
enabled_adapters.append(name)
break
else:
# Ignore type checking and fall back to adding all enabled adapters
enabled_adapters.append(name)
return enabled_adapters
# Inherited methods that don't need to be overridden
def get_adapter_module(self, name: str):
"""
Gets an adapter module by name if possible, otherwise returns None.
Args:
name: A str name (resolved or not) corresponding to an Adapter.
Returns:
            An nn.Module if the name could be resolved and matched, otherwise None.
"""
_, name = self.resolve_adapter_module_name_(name)
if hasattr(self, "adapter_layer"):
return self.adapter_layer[name] if name in self.adapter_layer else None
return None
def set_accepted_adapter_types(self, adapter_types: List[Union[type, str]]) -> None:
"""
The module with this mixin can define a list of adapter names that it will accept.
        This method should be called in the module's init method and sets the adapter types the module will expect to be added.
Args:
            adapter_types: A list of classes or str paths to classes. Any str path will be imported to
                ensure that the class path is correct.
"""
# Let user update and set accepted adapter types.
types = []
for s in adapter_types:
if inspect.isclass(s):
if not issubclass(s, nn.Module):
raise ValueError(f"Attempted to add class ({s}) but is not a subclass of torch.nn.Module")
types.append(s)
else:
types.append(model_utils.import_class_by_path(s))
self._accepted_adapter_types = set(types)
def get_accepted_adapter_types(self,) -> Set[type]:
"""
Utility function to get the set of all classes that are accepted by the module.
Returns:
Returns the set of accepted adapter types as classes, otherwise an empty set.
"""
if hasattr(self, '_accepted_adapter_types'):
return self._accepted_adapter_types
else:
return set([])
def unfreeze_enabled_adapters(self, freeze_batchnorm: bool = True) -> None:
"""
Utility method to unfreeze only the enabled Adapter module(s).
A common user pattern is to freeze all the modules (including all the adapters), and then
unfreeze just the required adapters.
.. code::
module.freeze() # only available to nemo.core.NeuralModule !
module.unfreeze_enabled_adapters()
Args:
freeze_batchnorm: An optional (and recommended) practice of freezing the updates to the moving average
buffers of any and all BatchNorm*D layers. This is necessary to ensure that disabling all adapters
will precisely yield the original (base) model's outputs.
"""
if freeze_batchnorm:
for mname, module in self.named_modules():
if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
if hasattr(module, 'weight'):
module.weight.requires_grad_(False)
if hasattr(module, 'bias'):
module.bias.requires_grad_(False)
module.eval()
                    module.track_running_stats = False  # prevent running stats from being updated during finetuning
logging.info(f"Froze module {mname}: {module}")
adapter_names = set([])
for module in self.modules(): # access PT subclass method via inheritance
if hasattr(module, 'adapter_layer') and module.is_adapter_available():
for name, config in self.adapter_cfg.items():
# Skip global adapter config
if name == self.adapter_global_cfg_key:
continue
# Check if adapter is enabled or not
if self.adapter_cfg[name]['enabled'] and name in module.adapter_layer:
# Recursively set training mode of submodules
module.adapter_layer[name].train()
# Recursively set grad required for submodules
module.adapter_layer[name].adapter_unfreeze()
# unfreeze batch norm if any in the adapter submodules
for mname, module_ in module.adapter_layer[name].named_modules():
if isinstance(module_, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
                                    module_.track_running_stats = (
                                        True  # allow running stats to update during finetuning
                                    )
logging.info(f"Unfroze adapter module {mname}: {module_}")
adapter_names.add(name)
for name in adapter_names:
logging.info(f"Unfrozen adapter : {name}")
def forward_enabled_adapters(self, input: 'torch.Tensor'):
"""
        Forwards all active adapters one by one with the provided input, chaining the output of each
        adapter layer to the next.
Utilizes the implicit merge strategy of each adapter when computing the adapter's output, and
how that output will be merged back with the original input.
Args:
input: The output tensor of the calling module is the input to the first adapter, whose output
is then chained to the next adapter until all adapters are consumed.
Returns:
The result tensor, after all active adapters have finished their forward passes.
"""
enabled_adapters = self.get_enabled_adapters()
for adapter_name in enabled_adapters:
adapter_module = self.adapter_layer[adapter_name]
if hasattr(adapter_module, 'adapter_strategy'):
strategy = (
adapter_module.adapter_strategy
) # type: 'nemo.core.classes.mixins.adapter_mixin_strategies.AbstractAdapterStrategy'
else:
raise AttributeError(
f"Adapter module `{adapter_name}` does not set the value `adapter_strategy` ! "
f"Please set the value of the adapter's strategy with the class "
f"{adapter_module.__class__.__module}.{adapter_module.__class__.__name__}."
)
# Call a single adapter's forward, and accept its output as the new input for the next adapter.
input = self.forward_single_enabled_adapter_(
input, adapter_module, adapter_name=adapter_name, adapter_strategy=strategy
)
return input
# Utility methods
def resolve_adapter_module_name_(self, name: str) -> Tuple[str, str]:
"""
Utility method to resolve a given global/module adapter name to its components.
Always returns a tuple representing (module_name, adapter_name). ":" is used as the
delimiter for denoting the module name vs the adapter name.
Will attempt to also resolve a given adapter_name alone back to (module_name, adapter_name)
if the metadata config exists for access.
Args:
name: A global adapter, or a module adapter name (with structure module_name:adapter_name).
Returns:
A tuple representing (module_name, adapter_name). If a global adapter is provided,
module_name is set to ''.
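        Example (a minimal sketch; assumes no module metadata is registered for the second call):
        .. code::
            self.resolve_adapter_module_name_('encoder:adapter_0')  # -> ('encoder', 'adapter_0')
            self.resolve_adapter_module_name_('adapter_0')          # -> ('', 'adapter_0')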
"""
# Attempt to split into module adapter name, iff : exists in the given name.
if ':' in name:
splits = name.split(":")
module_name = splits[0]
adapter_name = ":".join(splits[1:])
return (module_name, adapter_name)
else:
# Prepare default module name
module_name = ''
# Can be following cases:
# 1) Adapters are being restored. In this case, we need to resolve the module name from the config
if hasattr(self, 'adapter_cfg') and self.adapter_cfg is not None:
cfg = self.adapter_cfg.get(self.adapter_global_cfg_key, {})
cfg = cfg.get(self.adapter_metadata_cfg_key, {})
cfg = cfg.get('modules', {})
# Try to get the module for the given adapter name, if available, else use default.
module_name = cfg.get(name, '')
            # If the above cases don't hold, no module name was provided when the user added a new adapter.
# Just return whatever module name was resolved, or the default
return (module_name, name)
def forward_single_enabled_adapter_(
self,
input: torch.Tensor,
adapter_module: torch.nn.Module,
*,
adapter_name: str,
adapter_strategy: 'nemo.core.classes.mixins.adapter_mixin_strategies.AbstractAdapterStrategy',
):
"""
Perform the forward step of a single adapter module on some input data.
.. note::
            Subclasses can override this method to accommodate more complicated adapter forward steps.
Args:
            input: The output tensor of the calling module is the input to the first adapter, whose output
is then chained to the next adapter until all adapters are consumed.
adapter_module: The adapter module that is currently required to perform the forward pass.
adapter_name: The resolved name of the adapter that is undergoing the current forward pass.
adapter_strategy: A subclass of `AbstractAdapterStrategy`, that determines how the
output of the adapter should be merged with the input, or if it should be merged at all.
Returns:
The result tensor, after the current active adapter has finished its forward pass.
"""
# (input: torch.Tensor, adapter: torch.nn.Module, *, module: 'AdapterModuleMixin')
output = adapter_strategy(input, adapter_module, module=self)
return output
class AdapterModelPTMixin(AdapterModuleMixin):
""" Adapter Mixin that can augment a ModelPT subclass with Adapter support.
This mixin class should be used only with a top level ModelPT subclass.
    This mixin class adds several utility methods which should be subclassed and overridden to
    propagate changes to the submodules as necessary.
    An Adapter module is any Pytorch nn.Module that possesses a few properties:
    - Its input and output dimensions are the same, while the hidden dimension need not be the same.
- The final layer of the Adapter module is zero-initialized, so that the residual connection to the adapter
yields the original output.
This mixin adds the following instance variables to the class this inherits it:
- `adapter_layer`: A torch.nn.ModuleDict(), whose keys are the names of the adapter (globally unique),
and values are the Adapter nn.Module().
- `adapter_cfg`: A OmegaConf DictConfig object that holds the config of the adapters that are initialized.
- `adapter_global_cfg_key`: A str representing a key in the model config that can be provided by the user.
The value resolves to `global_cfg`, and can be overridden via `model.cfg.adapters.global_cfg.*`.
.. note::
This module **is** responsible for maintaining its config. At the ModelPT level, it will access and
write Adapter config information to `self.cfg.adapters`.
"""
def setup_adapters(self):
"""
        Utility method that is called in the ModelPT-implementation constructor, so as to restore any
adapters that were previously added.
        Should be overridden by the subclass for additional setup steps as required.
This method should be called just once at constructor time.
"""
# Test if `adapters` is part of the config (injected from previous Adapter additions)
if 'adapters' in self.cfg:
# Set the global config of adapters
self.update_adapter_cfg(self.cfg.adapters)
# Dispatch the call to the encoder, for every adapter contained in the config.
for adapter_name, adapter_cfg in self.cfg.adapters.items():
# reserve special key `model.adapters.cfg`
if adapter_name == self.adapter_global_cfg_key:
continue
# Add the adapters back to the model during setup
# Add a guard so that during restoration, unique name check is disabled
self._restoring_adapters = True
# Restore the unique adapter
self.add_adapter(name=adapter_name, cfg=adapter_cfg)
# Remove restoration guard
del self._restoring_adapters
# Log the setup adapter name
module_name, adapter_name = self.resolve_adapter_module_name_(adapter_name)
if module_name != '':
full_adapter_name = f'{module_name}:{adapter_name}'
else:
full_adapter_name = adapter_name
logging.info(
f"Finished setup of adapter : '{full_adapter_name}'. Enabled: {adapter_cfg.get('enabled', True)}."
)
def add_adapter(self, name: str, cfg: DictConfig):
"""
Add an Adapter module to this model.
Should be overridden by subclass and super() call must be used - this will setup the config.
After calling super(), forward this call to modules that implement the mixin.
Args:
name: A globally unique name for the adapter. Will be used to access, enable and disable adapters.
            cfg: A DictConfig that contains at the bare minimum `_target_` to instantiate a new Adapter module.
"""
# Convert to DictConfig from dict or Dataclass
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if not isinstance(cfg, DictConfig):
cfg = DictConfig(cfg)
# Resolve the module name and adapter name (if provided for the first time)
module_name, adapter_name = self.resolve_adapter_module_name_(name)
# Update the model.cfg with information about the new adapter from cfg
with open_dict(cfg), open_dict(self.cfg):
# Construct the minimum config required to be updated by adapter implementations
if 'adapters' not in self.cfg:
self.cfg.adapters = OmegaConf.create({})
self.cfg.adapters = _prepare_default_adapter_config(
global_key=self.adapter_global_cfg_key, meta_key=self.adapter_metadata_cfg_key, cfg=self.cfg.adapters,
)
# If the adapter is not being restored, force unique name to be provided for all adapters.
if hasattr(self, '_restoring_adapters') and self._restoring_adapters is not True:
if adapter_name in self.cfg.adapters:
raise ValueError(f"Attempting to add multiple adapters with the same name ({adapter_name}) !")
# Inject the module name in the adapter metadata cfg
gcfg = self.adapter_global_cfg_key
mcfg = self.adapter_metadata_cfg_key
self.cfg.adapters[gcfg][mcfg]['modules'][adapter_name] = module_name
# By default, enable the adapter that is being added
if 'enabled' not in cfg:
cfg['enabled'] = True
            # Assign the adapter config to the model config under the adapter's name
self.cfg.adapters[adapter_name] = OmegaConf.create(cfg)
# Set the global config of adapters
self.update_adapter_cfg(self.cfg.adapters)
self.check_valid_model_with_adapter_support_()
def is_adapter_available(self) -> bool:
"""
Checks if any Adapter module has been instantiated.
Should be overridden by the subclass.
Returns:
            bool, determining if any Adapter module has been instantiated. Returns True whether the adapters
            are enabled or disabled, and False only if no adapters exist.
"""
self.check_valid_model_with_adapter_support_()
if 'adapters' in self.cfg:
self.update_adapter_cfg(self.cfg.adapters)
return 'adapters' in self.cfg and len(self.get_enabled_adapters()) > 0
def set_enabled_adapters(self, name: Optional[str] = None, enabled: bool = True):
"""
        Updates the internal adapter config, determining if an adapter (or all adapters) are either
enabled or disabled.
A common user pattern would be to disable all adapters (either after adding them, or restoring a model
with pre-existing adapters) and then simply enable one of the adapters.
Should be overridden by subclass and super() call must be used - this will setup the config.
After calling super(), forward this call to modules that implement the mixin.
.. code::
model.set_enabled_adapters(enabled=False)
model.set_enabled_adapters(name=<some adapter name>, enabled=True)
Args:
name: Optional str. If a str name is given, the config will be updated to the value of `enabled`.
If no name is given, then all adapters will be enabled/disabled.
enabled: Bool, determines if the adapter(s) will be enabled/disabled.
"""
self.check_valid_model_with_adapter_support_()
# Update the adapter config with information about whether it is enabled/disabled.
with open_dict(self.cfg.adapters):
# If no name is provided, update all adapters.
if name is None:
for key in self.cfg.adapters.keys():
# Skip the global adapter config
if key == self.adapter_global_cfg_key:
continue
self.cfg.adapters[key]['enabled'] = enabled
logging.info(f"Setting adapter '{key}' status : Enabled = {enabled}")
else:
# Resolve the module name and adapter name
module_name, adapter_name = self.resolve_adapter_module_name_(name)
# Cannot set the state of the global config for adapters
if adapter_name == self.adapter_global_cfg_key:
raise ValueError(
f'Cannot set the state of the global config of adapters, '
f'given name = `{self.adapter_global_cfg_key}`'
)
# Otherwise, update just the specified adapter.
self.cfg.adapters[adapter_name]['enabled'] = enabled
logging.info(f"Setting adapter '{name}' status : Enabled = {enabled}")
self.update_adapter_cfg(self.cfg.adapters)
def get_enabled_adapters(self) -> List[str]:
"""
Returns a list of all enabled adapters.
Should be implemented by the subclass.
Returns:
A list of str names of each enabled adapter(s).
"""
self.check_valid_model_with_adapter_support_()
if 'adapters' in self.cfg:
self.update_adapter_cfg(self.cfg.adapters)
return []
def check_valid_model_with_adapter_support_(self):
"""
Utility method to test if the subclass of this mixin is an appropriate subclass of ModelPT itself.
Should be implemented by the subclass.
"""
pass
def save_adapters(self, filepath: str, name: str = None):
"""
Utility method that saves only the adapter module(s), and not the entire model itself.
This allows the sharing of adapters which are often just a fraction of the size of the full model,
        enabling easier delivery.
.. note::
The saved file is a pytorch compatible pickle file, containing the state dicts of the adapter(s),
as well as a binary representation of the adapter config.
Args:
filepath: A str filepath where the .pt file that will contain the adapter state dict.
name: Optional name of the adapter that will be saved to this file. If None is passed,
all adapters will be saved to the file. The name can be either the global name (adapter_name),
or the module level name (module:adapter_name).
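        Example (a minimal sketch; the file path and adapter name are placeholders):
        .. code::
            model.save_adapters('adapters.pt', name='adapter_0')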
"""
if not hasattr(self, 'cfg') or 'adapters' not in self.cfg:
raise AttributeError("No adapters have been added to this model, so no adapters can be saved.")
output_dict = {}
# Normalize the name to a list of strings
if isinstance(name, str):
name = [name]
if name is None:
name = self.cfg.adapters.keys()
# Assert that the config must be present to save and restore the adapters.
if not hasattr(self.cfg, 'adapters'):
raise ValueError(
"The model has no adapter config, therefore it cannot save any adapter. "
"Please first add one or more adapters to generate the config."
)
# For each adapter name (either global adapter or module adapters)
for adapter_name in name:
if adapter_name != self.adapter_global_cfg_key:
# Resolve the adapter name into its components
module_name, adapter_name = self.resolve_adapter_module_name_(adapter_name)
# Reconstruct a module adapter's original name. For global adapters, the '' is preserved.
if module_name == '':
key = adapter_name
else:
key = f'{module_name}:{adapter_name}'
output_dict[key] = []
# Search all modules with the following criterion -
# It must be an implementation of AdapterModuleMixin.
# It must have the attribute `adapter_name`.
# It must match the adapter name provided by the user.
for module in self.modules():
if isinstance(module, AdapterModuleMixin):
# If all match, extract the state dict into a list of state dicts.
# This is because one name can be shared within one model by multiple adapters bearing
# a common name. This can occur when the adapter is common to a module which has multiple
# layers and blocks, all of which require an adapter.
adapter_module = module.get_adapter_module(adapter_name)
if adapter_module is not None:
# If the module was found, then extract the entire adapter ModuleDict state_dict(),
# Then select only the parts of the state dict that correspond to the current adapter_name.
                            # This is done so that it preserves the relationship of module name : parameters
# inside of the state dict.
# It will be normalized in the corresponding `load_adapters()` call.
adapter_state_dict = module.adapter_layer.state_dict()
state_dict = {}
for k, v in adapter_state_dict.items():
if adapter_name in k:
state_dict[k] = v
output_dict[key].append(state_dict)
# Preserve the binary OmegaConf dictionary of the model's adapter config
output_dict['__cfg__'] = self.cfg.adapters
# Finally, save the adapter state dict(s).
torch.save(output_dict, filepath)
def load_adapters(self, filepath: str, name: str = None, map_location: str = None, strict: bool = True):
"""
Utility method that restores only the adapter module(s), and not the entire model itself.
This allows the sharing of adapters which are often just a fraction of the size of the full model,
        enabling easier delivery.
.. note::
During restoration, assumes that the model does not currently already have an adapter with
the name (if provided), or any adapter that shares a name with the state dict's modules
(if name is not provided). This is to ensure that each adapter name is globally unique
in a model.
Args:
filepath: Filepath of the .pt file.
name: Optional name of the adapter that will be saved to this file. If None is passed,
all adapters will be saved to the file. The name must be either the global name (adapter_name),
or the module level name (module:adapter_name), whichever exactly matches the state dict.
map_location: Pytorch flag, where to place the adapter(s) state dict(s).
strict: Pytorch flag, whether to load the weights of the adapter(s) strictly or not.
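        Example (a minimal sketch; the file path and adapter name are placeholders):
        .. code::
            model.load_adapters('adapters.pt', name='adapter_0', map_location='cpu')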
"""
# Determine device
if map_location is None:
if torch.cuda.is_available():
map_location = 'cuda'
else:
map_location = 'cpu'
# Load the state dict and extract the internal config
state_dict = torch.load(filepath, map_location=map_location)
config = state_dict.pop('__cfg__')
# Normalize the name to a list of names (exact match with the state dict)
if isinstance(name, str):
name = [name]
if name is None:
name = list(config.keys())
# For all module:adapter names (note, for global modules, we ignore the module: part)
for module_adapter_name in name:
# Extract current config as copy
internal_adapter_cfg = None
if hasattr(self, 'adapter_cfg') and self.adapter_cfg is not None:
internal_adapter_cfg = self.adapter_cfg
# Override internal adapter config with restoration config
self.adapter_cfg = config
# Resolve the adapter name and extract the adapter's config from the checkpoint.
module_name, adapter_name = self.resolve_adapter_module_name_(module_adapter_name)
adapter_cfg = config[adapter_name]
# Recreate the module:adapter_name
if module_name == '':
module_adapter_name = adapter_name
else:
module_adapter_name = f'{module_name}:{adapter_name}'
# Reset internal adapter config
self.adapter_cfg = internal_adapter_cfg
# Skip the global config key
if adapter_name == self.adapter_global_cfg_key:
continue
# Restore weights with exact key, if it fails, give useful error message.
try:
adapter_state = state_dict[module_adapter_name]
except KeyError:
all_keys = list(state_dict.keys())
raise KeyError(
f"Requested to load adapter with name `{module_adapter_name}`, but could not "
f"the adapter in the state dict. \nAvailable adapter names in state dict are: "
f"{all_keys}"
)
# If key was found, add a new adapter with random weights
self.add_adapter(name=module_adapter_name, cfg=adapter_cfg)
# Determine apriori how many modules must be loaded from the state dict
            # This is done to guarantee that a partial match does not occur; only an exact match
            # between the state dict and the adapter's parameters will be allowed.
modules_to_load = [] # type: List[torch.nn.Module]
for module in self.modules():
if isinstance(module, AdapterModuleMixin):
adapter_module = module.get_adapter_module(adapter_name)
if adapter_module is not None:
modules_to_load.append(adapter_module)
# Assert that the number of states in the state dict matches the newly created adapter
if len(adapter_state) != len(modules_to_load):
raise ValueError(
f"The number of adapters in current model ({len(modules_to_load)}) does not "
f"match the number of modules in the state dict for adapter `{adapter_name}`: "
f"({len(adapter_state)})"
)
# For the pair of (adapter_state_in_checkpoint, adapter_in_model), restore the weights
for state, module in zip(adapter_state, modules_to_load):
# Note that state is a list of multiple state dicts for 1:1 Module mapping.
# However, the state_dict keys are of the form `adapter_name.<module hierarchy with dots>`.
# We therefore strip the `adapter_name.` part of the state dict
# And then directly load each module with its 1:1 state dict.
sub_dict = {}
for k, v in state.items():
if adapter_name in k:
k_ = k.replace(f"{adapter_name}.", "")
sub_dict[k_] = v
module.load_state_dict(sub_dict, strict=strict)
del sub_dict
# delete the dictionaries to preserve memory for next adapter
del adapter_state, modules_to_load
def update_adapter_cfg(self, cfg: DictConfig):
"""
Utility method to recursively update all of the Adapter module configs with the provided config.
.. note::
It is not a (deep)copy, but a reference copy. Changes made to the config will be reflected to
adapter submodules, but it is still encouraged to explicitly update the adapter_cfg using this method.
Args:
cfg: DictConfig containing the value of `model.cfg.adapters`.
"""
for module in self.modules(): # access PT subclass method via inheritance
if isinstance(module, AdapterModuleMixin):
module.adapter_cfg = cfg
@property
def adapter_module_names(self) -> List[str]:
"""
List of valid adapter modules that are supported by the model.
.. note::
Subclasses should override this property and return a list of str names, of all the modules
that they support, which will enable users to determine where to place the adapter modules.
Returns:
A list of str, one for each of the adapter modules that are supported. By default, the subclass
should support the "global adapter" ('').
"""
return ['']
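
# --- Usage sketch (not part of the original file) ---
# Illustrates restoring adapter weights with `load_adapters()`. The `model` argument is
# assumed to be a ModelPT subclass that provides these adapter utilities, and the adapter
# name / file path below are purely hypothetical.
def _example_restore_adapter(model, adapter_filepath: str = "my_adapter.pt"):
    # Only the adapter weights are restored; the base model weights are untouched.
    model.load_adapters(adapter_filepath, name="encoder:my_adapter", map_location="cpu")
    return model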
|
NeMo-main
|
nemo/core/classes/mixins/adapter_mixins.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from dataclasses import dataclass
from typing import Any, Dict, List, Tuple, Union
import torch
from nemo.core.classes.mixins import AccessMixin
class AbstractAdapterStrategy(ABC):
def forward(self, input: torch.Tensor, adapter: torch.nn.Module, *, module: 'AdapterModuleMixin'):
"""
Forward method that defines how the output of the adapter should be merged with the input, or if it
should be merged at all.
Also provides the module that called this strategy - thereby allowing access to all other
adapters in the calling module. This can be useful if one adapter is a meta adapter, that
combines the outputs of various adapters. In such a case, the input can be forwarded across
all other adapters, collecting their outputs, and those outputs can then be merged via some
strategy. For example, refer to :
- [AdapterFusion: Non-Destructive Task Composition for Transfer Learning](https://arxiv.org/abs/2005.00247)
- [Exploiting Adapters for Cross-lingual Low-resource Speech Recognition](https://arxiv.org/abs/2105.11905)
Args:
input: Original output tensor of the module, or the output of the previous adapter (if more than
one adapters are enabled).
adapter: The adapter module that is currently required to perform the forward pass.
module: The calling module, in its entirety. It is a module that implements `AdapterModuleMixin`,
therefore the strategy can access all other adapters in this module via `module.adapter_layer`.
Returns:
The result tensor, after one of the active adapters has finished its forward passes.
"""
raise NotImplementedError()
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
class ReturnResultAdapterStrategy(AbstractAdapterStrategy):
"""
An implementation of an adapter strategy that simply returns the result of the adapter.
"""
def forward(self, input: torch.Tensor, adapter: torch.nn.Module, *, module: 'AdapterModuleMixin'):
"""
A basic strategy, which simply returns the result of the adapter's calculation as the output.
Args:
input: Original output tensor of the module, or the output of the previous adapter (if more than
one adapters are enabled).
adapter: The adapter module that is currently required to perform the forward pass.
module: The calling module, in its entirety. It is a module that implements `AdapterModuleMixin`,
therefore the strategy can access all other adapters in this module via `module.adapter_layer`.
Returns:
The result tensor, after one of the active adapters has finished its forward passes.
"""
result = self.compute_output(input, adapter, module=module)
return result
def compute_output(
self,
input: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor], Dict[str, Any]],
adapter: torch.nn.Module,
*,
module: 'AdapterModuleMixin',
) -> torch.Tensor:
"""
Compute the output of a single adapter to some input.
Args:
input: Original output tensor of the module, or the output of the previous adapter (if more than
one adapters are enabled).
adapter: The adapter module that is currently required to perform the forward pass.
module: The calling module, in its entirety. It is a module that implements `AdapterModuleMixin`,
therefore the strategy can access all other adapters in this module via `module.adapter_layer`.
Returns:
The result tensor, after one of the active adapters has finished its forward passes.
"""
if isinstance(input, (list, tuple)):
out = adapter(*input)
elif isinstance(input, dict):
out = adapter(**input)
else:
out = adapter(input)
return out
@dataclass
class ReturnResultAdapterStrategyConfig:
_target_: str = "{0}.{1}".format(
ReturnResultAdapterStrategy.__module__, ReturnResultAdapterStrategy.__name__
) # mandatory field
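
# --- Usage sketch (not part of the original file) ---
# Shows the input dispatch of `ReturnResultAdapterStrategy.compute_output`: tensors are passed
# positionally, lists/tuples are unpacked as *args, and dicts as **kwargs. The Bilinear/Identity
# modules below are stand-ins, not real NeMo adapters.
def _example_return_result_strategy():
    adapter = torch.nn.Bilinear(4, 4, 4)  # stand-in adapter that takes two inputs
    host_module = torch.nn.Identity()     # stand-in for the module hosting the adapter
    strategy = ReturnResultAdapterStrategy()
    x1, x2 = torch.randn(2, 4), torch.randn(2, 4)
    # The tuple input is unpacked, so this calls adapter(x1, x2) and returns its output as-is.
    return strategy((x1, x2), adapter, module=host_module)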
class ResidualAddAdapterStrategy(AbstractAdapterStrategy):
"""
An implementation of residual addition of an adapter module with its input.
Supports stochastic depth regularization.
"""
def __init__(self, stochastic_depth: float = 0.0, l2_lambda: float = 0.0):
"""
An implementation of residual addition of an adapter module with its input.
Performs output = input + adapter(input).
Args:
stochastic_depth: float, when greater than zero, randomly drops out the output of
the adapter's forward pass with this probability during training.
l2_lambda: L2 norm of the difference between the original input to the function, and the adapter's
output result. Disabled if set to 0.0.
"""
super().__init__()
self.stochastic_depth = stochastic_depth
self.l2_lambda = l2_lambda
def forward(self, input: torch.Tensor, adapter: torch.nn.Module, *, module: 'AdapterModuleMixin'):
"""
A basic strategy, comprising of a residual connection over the input, after forward pass by
the underlying adapter.
Args:
input: Original output tensor of the module, or the output of the previous adapter (if more than
one adapters are enabled).
adapter: The adapter module that is currently required to perform the forward pass.
module: The calling module, in its entirety. It is a module that implements `AdapterModuleMixin`,
therefore the strategy can access all other adapters in this module via `module.adapter_layer`.
Returns:
The result tensor, after one of the active adapters has finished its forward passes.
"""
out = self.compute_output(input, adapter, module=module)
# If not in training mode, or probability of stochastic depth is 0, skip step.
p = self.stochastic_depth
if not module.training or p == 0.0:
pass
else:
out = self.apply_stochastic_depth(out, input, adapter, module=module)
# Return the residual connection output = input + adapter(input)
result = input + out
# If l2_lambda is activated, register the loss value
self.compute_auxiliary_losses(result, input, adapter, module=module)
return result
def compute_output(
self, input: torch.Tensor, adapter: torch.nn.Module, *, module: 'AdapterModuleMixin'
) -> torch.Tensor:
"""
Compute the output of a single adapter to some input.
Args:
input: Original output tensor of the module, or the output of the previous adapter (if more than
one adapters are enabled).
adapter: The adapter module that is currently required to perform the forward pass.
module: The calling module, in its entirety. It is a module that implements `AdapterModuleMixin`,
therefore the strategy can access all other adapters in this module via `module.adapter_layer`.
Returns:
The result tensor, after one of the active adapters has finished its forward passes.
"""
out = adapter(input)
return out
def apply_stochastic_depth(
self, output: torch.Tensor, input: torch.Tensor, adapter: torch.nn.Module, *, module: 'AdapterModuleMixin'
):
"""
Compute and apply stochastic depth if probability is greater than 0.
Args:
output: The result tensor, after one of the active adapters has finished its forward passes.
input: Original output tensor of the module, or the output of the previous adapter (if more than
one adapters are enabled).
adapter: The adapter module that is currently required to perform the forward pass.
module: The calling module, in its entirety. It is a module that implements `AdapterModuleMixin`,
therefore the strategy can access all other adapters in this module via `module.adapter_layer`.
Returns:
The result tensor, after stochastic depth has been potentially applied to it.
"""
# Perform stochastic depth if needed.
p = self.stochastic_depth
if p < 0.0 or p > 1.0:
raise ValueError(f"Stochastic depth probability has to be between 0 and 1, but got {p}")
# Apply stochastic depth to the output of adapter.
keep_prob = 1.0 - p
shape = [1] * output.ndim
noise = torch.empty(shape, dtype=output.dtype, device=output.device)
noise = noise.bernoulli_(keep_prob)
if keep_prob > 0.0: # Done to normalize activation for inference mode
noise.div_(keep_prob)
output = noise * output
return output
def compute_auxiliary_losses(
self, output: torch.Tensor, input: torch.Tensor, adapter: torch.nn.Module, *, module: 'AdapterModuleMixin'
):
"""
Compute any auxiliary losses and preserve it in the tensor registry.
Args:
output: The result tensor, after one of the active adapters has finished its forward passes.
input: Original output tensor of the module, or the output of the previous adapter (if more than
one adapters are enabled).
adapter: The adapter module that is currently required to perform the forward pass.
module: The calling module, in its entirety. It is a module that implements `AdapterModuleMixin`,
therefore the strategy can access all other adapters in this module via `module.adapter_layer`.
"""
if module.training and self.l2_lambda > 0.0:
if not isinstance(adapter, AccessMixin):
raise ValueError(f"Module {adapter.__class__.__name__} does not implement AccessMixin !")
# Only add auxiliary loss if adapter has trainable parameters that require gradients
if next(adapter.parameters()).requires_grad is True:
# Check if globally allowed to compute aux loss
compute_aux_loss = adapter.access_cfg.get('compute_adapter_loss', True)
if compute_aux_loss:
# if l2 lambda is enabled, also enable AccessMixin
adapter.set_access_enabled(access_enabled=True)
l2_loss = self.l2_lambda * (input - output).square().reshape(input.size(0), -1).sum(dim=-1).mean()
adapter.register_accessible_tensor(name='adapter_loss', tensor=l2_loss)
@dataclass
class ResidualAddAdapterStrategyConfig:
stochastic_depth: float = 0.0
l2_lambda: float = 0.0
_target_: str = "{0}.{1}".format(
ResidualAddAdapterStrategy.__module__, ResidualAddAdapterStrategy.__name__
) # mandatory field
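
# --- Usage sketch (not part of the original file) ---
# Applies ResidualAddAdapterStrategy by hand: with stochastic_depth=0.0 and l2_lambda=0.0 the
# result is simply input + adapter(input). The Linear/Identity modules are stand-ins; in NeMo
# the strategy is normally invoked from an AdapterModuleMixin forward pass.
def _example_residual_strategy():
    adapter = torch.nn.Linear(8, 8)    # stand-in for a real adapter module
    host_module = torch.nn.Identity()  # stand-in for the module hosting the adapter
    host_module.eval()                 # ensures stochastic depth is skipped in this sketch
    strategy = ResidualAddAdapterStrategy(stochastic_depth=0.0, l2_lambda=0.0)
    x = torch.randn(4, 8)
    out = strategy(x, adapter, module=host_module)  # out == x + adapter(x)
    return out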
|
NeMo-main
|
nemo/core/classes/mixins/adapter_mixin_strategies.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import dataclasses
import inspect
import math
import warnings
from functools import partial
from typing import Any, Dict, Optional, Union
import hydra
import torch.optim as optim
import torch.optim.lr_scheduler as pt_scheduler
import torch.utils.data.dataloader as dataloader
from omegaconf import DictConfig, OmegaConf
from torch.optim.lr_scheduler import _LRScheduler
from nemo.core.config import SchedulerParams, get_scheduler_config, register_scheduler_params
from nemo.utils import logging
from nemo.utils.model_utils import maybe_update_config_version
class WarmupPolicy(_LRScheduler):
"""Adds warmup kwargs and warmup logic to lr policy.
All arguments should be passed as kwargs for clarity,
Args:
warmup_steps: Number of training steps in warmup stage
warmup_ratio: Ratio of warmup steps to total steps
max_steps: Total number of steps while training or `None` for
infinite training
"""
def __init__(self, optimizer, *, warmup_steps=None, warmup_ratio=None, max_steps=None, min_lr=0.0, last_epoch=-1):
assert not (
warmup_steps is not None and warmup_ratio is not None
), "Either use particular number of step or ratio"
assert warmup_ratio is None or max_steps is not None, "If there is a ratio, there should be a total steps"
# It is necessary to assign all attributes *before* __init__,
# as class is wrapped by an inner class.
self.max_steps = max_steps
if warmup_steps is not None:
self.warmup_steps = warmup_steps
elif warmup_ratio is not None:
self.warmup_steps = int(warmup_ratio * max_steps)
else:
self.warmup_steps = 0
self.min_lr = min_lr
super().__init__(optimizer, last_epoch)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn(
"To get the last learning rate computed by the scheduler, please use `get_last_lr()`.", UserWarning
)
step = self.last_epoch
if step <= self.warmup_steps and self.warmup_steps > 0:
return self._get_warmup_lr(step)
if (self.max_steps is not None) and (step > self.max_steps):
return [self.min_lr for _ in self.base_lrs]
return self._get_lr(step)
def _get_warmup_lr(self, step):
lr_val = (step + 1) / (self.warmup_steps + 1)
return [initial_lr * lr_val for initial_lr in self.base_lrs]
def _get_lr(self, step):
"""Simple const lr policy"""
return self.base_lrs
class SquareRootConstantPolicy(_LRScheduler):
"""Adds warmup kwargs and warmup logic to lr policy.
All arguments should be passed as kwargs for clarity,
Args:
warmup_steps: Number of training steps in warmup stage
warmup_ratio: Ratio of warmup steps to total steps
max_steps: Total number of steps while training or `None` for
infinite training
"""
def __init__(
self, optimizer, *, constant_steps=None, constant_ratio=None, max_steps=None, min_lr=0.0, last_epoch=-1
):
assert not (
constant_steps is not None and constant_ratio is not None
), "Either use particular number of step or ratio"
assert constant_ratio is None or max_steps is not None, "If there is a ratio, there should be a total steps"
# It is necessary to assign all attributes *before* __init__,
# as class is wrapped by an inner class.
self.max_steps = max_steps
if constant_steps is not None:
self.constant_steps = constant_steps
elif constant_ratio is not None:
self.constant_steps = int(constant_ratio * max_steps)
else:
self.constant_steps = 0
self.constant_lr = 1 / (self.constant_steps ** 0.5)
self.min_lr = min_lr
super().__init__(optimizer, last_epoch)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn(
"To get the last learning rate computed by the scheduler, please use `get_last_lr()`.", UserWarning
)
step = self.last_epoch
if step <= self.constant_steps:
return [self.constant_lr for _ in self.base_lrs]
if step > self.max_steps:
return [self.min_lr for _ in self.base_lrs]
return self._get_lr(step)
def _get_lr(self, step):
"""Simple const lr policy"""
return self.base_lrs
class WarmupHoldPolicy(WarmupPolicy):
"""Variant of WarmupPolicy which maintains high learning rate for a defined number of steps.
All arguments should be passed as kwargs for clarity,
Args:
warmup_steps: Number of training steps in warmup stage
warmup_ratio: Ratio of warmup steps to total steps
hold_steps: Number of training steps to hold the learning rate after warm up
hold_ratio: Ratio of hold steps to total steps
max_steps: Total number of steps while training or `None` for
infinite training
"""
def __init__(
self,
optimizer,
*,
warmup_steps=None,
warmup_ratio=None,
hold_steps=None,
hold_ratio=None,
max_steps=None,
min_lr=0.0,
last_epoch=-1,
):
assert not (hold_steps is not None and hold_ratio is not None), "Either use particular number of step or ratio"
assert hold_ratio is None or max_steps is not None, "If there is a ratio, there should be a total steps"
self.min_lr = min_lr
self._last_warmup_lr = 0.0
# Necessary to duplicate as class attributes are hidden in inner class
self.max_steps = max_steps
if warmup_steps is not None:
self.warmup_steps = warmup_steps
elif warmup_ratio is not None:
self.warmup_steps = int(warmup_ratio * max_steps)
else:
self.warmup_steps = 0
if hold_steps is not None:
self.hold_steps = hold_steps + self.warmup_steps
elif hold_ratio is not None:
self.hold_steps = int(hold_ratio * max_steps) + self.warmup_steps
else:
self.hold_steps = 0
super().__init__(
optimizer,
warmup_steps=warmup_steps,
warmup_ratio=warmup_ratio,
max_steps=max_steps,
last_epoch=last_epoch,
min_lr=min_lr,
)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn(
"To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning
)
step = self.last_epoch
# Warmup phase
if step <= self.warmup_steps and self.warmup_steps > 0:
return self._get_warmup_lr(step)
# Hold phase
if (step >= self.warmup_steps) and (step < self.hold_steps):
return self.base_lrs
if step > self.max_steps:
return [self.min_lr for _ in self.base_lrs]
return self._get_lr(step)
class WarmupAnnealHoldPolicy(_LRScheduler):
"""Adds warmup kwargs and warmup logic to lr policy.
All arguments should be passed as kwargs for clarity,
Args:
warmup_steps: Number of training steps in warmup stage
warmup_ratio: Ratio of warmup steps to total steps
max_steps: Total number of steps while training or `None` for
infinite training
min_lr: Minimum lr to hold the learning rate after decay at.
constant_steps: Number of steps to keep lr constant at.
constant_ratio: Ratio of steps to keep lr constant.
"""
def __init__(
self,
optimizer,
*,
warmup_steps=None,
warmup_ratio=None,
constant_steps=None,
constant_ratio=None,
max_steps=None,
min_lr=0.0,
last_epoch=-1,
):
assert not (
warmup_steps is not None and warmup_ratio is not None
), "Either use particular number of step or ratio"
assert not (
constant_steps is not None and constant_ratio is not None
), "Either use constant_steps or constant_ratio"
assert warmup_ratio is None or max_steps is not None, "If there is a ratio, there should be a total steps"
# It is necessary to assign all attributes *before* __init__,
# as class is wrapped by an inner class.
self.max_steps = max_steps
if warmup_steps is not None:
self.warmup_steps = warmup_steps
elif warmup_ratio is not None:
self.warmup_steps = int(warmup_ratio * max_steps)
else:
self.warmup_steps = 0
if constant_steps is not None:
self.constant_steps = constant_steps
elif constant_ratio is not None:
self.constant_steps = int(constant_ratio * max_steps)
else:
self.constant_steps = 0
self.decay_steps = max_steps - (self.constant_steps + self.warmup_steps)
self.min_lr = min_lr
super().__init__(optimizer, last_epoch)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn(
"To get the last learning rate computed by the scheduler, please use `get_last_lr()`.", UserWarning
)
step = self.last_epoch
# Warmup steps
if self.warmup_steps > 0 and step <= self.warmup_steps:
return self._get_warmup_lr(step)
# Constant steps after warmup and decay
if self.constant_steps > 0 and (self.warmup_steps + self.decay_steps) < step <= self.max_steps:
return self._get_constant_lr(step)
# Min lr after max steps of updates
if step > self.max_steps:
return [self.min_lr for _ in self.base_lrs]
return self._get_lr(step)
def _get_warmup_lr(self, step):
lr_val = (step + 1) / (self.warmup_steps + 1)
return [initial_lr * lr_val for initial_lr in self.base_lrs]
def _get_constant_lr(self, step):
return [self.min_lr for _ in self.base_lrs]
def _get_lr(self, step):
"""Simple const lr policy"""
return self.base_lrs
def _squareroot_annealing(initial_lr, step, max_steps, min_lr):
mult = ((max_steps - step) / max_steps) ** 0.5
out_lr = initial_lr * mult
out_lr = max(out_lr, min_lr)
return out_lr
def _square_annealing(initial_lr, step, max_steps, min_lr):
mult = ((max_steps - step) / max_steps) ** 2
out_lr = initial_lr * mult
out_lr = max(out_lr, min_lr)
return out_lr
def _cosine_annealing(initial_lr, step, max_steps, min_lr):
mult = 0.5 * (1 + math.cos(math.pi * step / max_steps))
out_lr = (initial_lr - min_lr) * mult + min_lr
return out_lr
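
# --- Worked example (not part of the original file) ---
# Illustrates the endpoints of `_cosine_annealing`: at step 0 the multiplier is 1.0 so the lr
# equals initial_lr, at step == max_steps it equals min_lr, and halfway it is the midpoint.
def _example_cosine_annealing_values():
    initial_lr, max_steps, min_lr = 1e-3, 100, 1e-5
    return [
        _cosine_annealing(initial_lr=initial_lr, step=step, max_steps=max_steps, min_lr=min_lr)
        for step in (0, 50, 100)
    ]  # -> [1e-3, 5.05e-4, 1e-5]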
def _linear_warmup_with_cosine_annealing(max_lr, warmup_steps, step, decay_steps, min_lr):
assert max_lr > min_lr
# Use linear warmup for the initial part.
if warmup_steps > 0 and step <= warmup_steps:
return max_lr * float(step) / float(warmup_steps)
# For any steps larger than `decay_steps`, use `min_lr`.
if step > warmup_steps + decay_steps:
return min_lr
# If we are done with the warmup period, use the decay style.
num_steps_ = step - warmup_steps
decay_steps_ = decay_steps
decay_ratio = float(num_steps_) / float(decay_steps_)
assert decay_ratio >= 0.0
assert decay_ratio <= 1.0
delta_lr = max_lr - min_lr
coeff = 0.5 * (math.cos(math.pi * decay_ratio) + 1.0)
return min_lr + coeff * delta_lr
def _poly_decay(initial_lr, step, decay_steps, power, min_lr, cycle):
if cycle:
multiplier = 1.0 if step == 0 else math.ceil(step / decay_steps)
decay_steps *= multiplier
else:
step = min(step, decay_steps)
p = step / decay_steps
lr = (initial_lr - min_lr) * math.pow(1.0 - p, power)
lr += min_lr
return lr
def _noam_hold_annealing(initial_lr, step, warmup_steps, hold_steps, decay_rate, min_lr):
# hold_steps = total number of steps to hold the LR, not the warmup + hold steps.
T_warmup_decay = max(1, warmup_steps ** decay_rate)
T_hold_decay = max(1, (step - hold_steps) ** decay_rate)
lr = (initial_lr * T_warmup_decay) / T_hold_decay
lr = max(lr, min_lr)
return lr
class SquareAnnealing(WarmupPolicy):
def __init__(self, optimizer, *, max_steps, min_lr=1e-5, last_epoch=-1, **kwargs):
super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)
def _get_lr(self, step):
new_lrs = [
_square_annealing(
initial_lr=initial_lr,
step=step - self.warmup_steps,
max_steps=self.max_steps - self.warmup_steps,
min_lr=self.min_lr,
)
for initial_lr in self.base_lrs
]
return new_lrs
class SquareRootAnnealing(WarmupPolicy):
def __init__(self, optimizer, *, max_steps, min_lr=0, last_epoch=-1, **kwargs):
super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)
def _get_lr(self, step):
new_lrs = [
_squareroot_annealing(initial_lr=initial_lr, step=step, max_steps=self.max_steps, min_lr=self.min_lr)
for initial_lr in self.base_lrs
]
return new_lrs
class CosineAnnealing(WarmupAnnealHoldPolicy):
def __init__(self, optimizer, *, max_steps, min_lr=0, last_epoch=-1, **kwargs):
super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)
def _get_lr(self, step):
for initial_lr in self.base_lrs:
if initial_lr < self.min_lr:
raise ValueError(
f"{self} received an initial learning rate that was lower than the minimum learning rate."
)
if self.constant_steps is None or self.constant_steps == 0:
new_lrs = [
_cosine_annealing(
initial_lr=initial_lr,
step=step - self.warmup_steps,
max_steps=self.max_steps - self.warmup_steps,
min_lr=self.min_lr,
)
for initial_lr in self.base_lrs
]
else:
new_lrs = self._get_linear_warmup_with_cosine_annealing_lr(step)
return new_lrs
def _get_warmup_lr(self, step):
if self.constant_steps is None or self.constant_steps == 0:
return super()._get_warmup_lr(step)
else:
# Use linear warmup for the initial part.
return self._get_linear_warmup_with_cosine_annealing_lr(step)
def _get_constant_lr(self, step):
# Only called when `constant_steps` > 0.
return self._get_linear_warmup_with_cosine_annealing_lr(step)
def _get_linear_warmup_with_cosine_annealing_lr(self, step):
# Cosine Schedule for Megatron LM, slightly different warmup schedule + constant LR at the end.
new_lrs = [
_linear_warmup_with_cosine_annealing(
max_lr=self.base_lrs[0],
warmup_steps=self.warmup_steps,
step=step,
decay_steps=self.decay_steps,
min_lr=self.min_lr,
)
for _ in self.base_lrs
]
return new_lrs
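
# --- Usage sketch (not part of the original file) ---
# Drives CosineAnnealing step-by-step against a plain torch optimizer; in NeMo this wiring is
# normally performed by `prepare_lr_scheduler()` below. The tiny Linear model is a stand-in.
def _example_cosine_annealing_schedule():
    import torch

    model = torch.nn.Linear(4, 4)
    optimizer = optim.SGD(model.parameters(), lr=1e-3)
    scheduler = CosineAnnealing(optimizer, max_steps=1000, warmup_steps=100, min_lr=1e-5)
    lrs = []
    for _ in range(1000):
        optimizer.step()
        scheduler.step()
        lrs.append(scheduler.get_last_lr()[0])
    # lrs warms up linearly for 100 steps, then follows the cosine decay down to min_lr.
    return lrs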
class NoamAnnealing(_LRScheduler):
def __init__(
self, optimizer, *, d_model, warmup_steps=None, warmup_ratio=None, max_steps=None, min_lr=0.0, last_epoch=-1
):
self._normalize = d_model ** (-0.5)
assert not (
warmup_steps is not None and warmup_ratio is not None
), "Either use particular number of step or ratio"
assert warmup_ratio is None or max_steps is not None, "If there is a ratio, there should be a total steps"
# It is necessary to assign all attributes *before* __init__,
# as class is wrapped by an inner class.
self.max_steps = max_steps
if warmup_steps is not None:
self.warmup_steps = warmup_steps
elif warmup_ratio is not None:
self.warmup_steps = int(warmup_ratio * max_steps)
else:
self.warmup_steps = 0
self.min_lr = min_lr
super().__init__(optimizer, last_epoch)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn(
"To get the last learning rate computed by the scheduler, please use `get_last_lr()`.", UserWarning
)
step = max(1, self.last_epoch)
for initial_lr in self.base_lrs:
if initial_lr < self.min_lr:
raise ValueError(
f"{self} received an initial learning rate that was lower than the minimum learning rate."
)
new_lrs = [self._noam_annealing(initial_lr=initial_lr, step=step) for initial_lr in self.base_lrs]
return new_lrs
def _noam_annealing(self, initial_lr, step):
if self.warmup_steps > 0:
mult = self._normalize * min(step ** (-0.5), step * (self.warmup_steps ** (-1.5)))
else:
mult = self._normalize * step ** (-0.5)
out_lr = initial_lr * mult
if step > self.warmup_steps:
out_lr = max(out_lr, self.min_lr)
return out_lr
class NoamHoldAnnealing(WarmupHoldPolicy):
def __init__(self, optimizer, *, max_steps, decay_rate=0.5, min_lr=0.0, last_epoch=-1, **kwargs):
"""
Implementation of the Noam Hold Annealing policy from the SqueezeFormer paper.
Unlike NoamAnnealing, the peak learning rate can be explicitly set for this scheduler.
The schedule first performs linear warmup, then holds the peak LR, then decays with some schedule for
the remainder of the steps. Therefore the min-lr is still dependent on the hyperparameters selected.
Its schedule is determined by three factors:
Warmup Steps: Initial stage, where linear warmup occurs until the peak LR is reached. Unlike NoamAnnealing,
the peak LR is explicitly stated here instead of a scaling factor.
Hold Steps: Intermediate stage, where the peak LR is maintained for some number of steps. In this region,
the high peak LR allows the model to converge faster if training is stable. However the high LR
may also cause instability during training. Should usually be a significant fraction of training
steps (around 30-40% of the entire training steps).
Decay Steps: Final stage, where the LR rapidly decays with some scaling rate (set by decay rate).
To attain Noam decay, use 0.5, for Squeezeformer recommended decay, use 1.0. The fast decay after
prolonged high LR during hold phase allows for rapid convergence.
References:
- [Squeezeformer: An Efficient Transformer for Automatic Speech Recognition](https://arxiv.org/abs/2206.00888)
Args:
optimizer: Pytorch compatible Optimizer object.
warmup_steps: Number of training steps in warmup stage
warmup_ratio: Ratio of warmup steps to total steps
hold_steps: Number of training steps to hold the learning rate after warm up
hold_ratio: Ratio of hold steps to total steps
max_steps: Total number of steps while training or `None` for
infinite training
decay_rate: Float value describing the polynomial decay after the hold period. Default value
of 0.5 corresponds to Noam decay.
min_lr: Minimum learning rate.
"""
self.decay_rate = decay_rate
super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)
def _get_lr(self, step):
if self.warmup_steps is None or self.warmup_steps == 0:
raise ValueError("Noam scheduler cannot be used without warmup steps")
if self.hold_steps > 0:
hold_steps = self.hold_steps - self.warmup_steps
else:
hold_steps = 0
new_lrs = [
_noam_hold_annealing(
initial_lr,
step=step,
warmup_steps=self.warmup_steps,
hold_steps=hold_steps,
decay_rate=self.decay_rate,
min_lr=self.min_lr,
)
for initial_lr in self.base_lrs
]
return new_lrs
class WarmupAnnealing(WarmupPolicy):
def __init__(self, optimizer, *, max_steps, last_epoch=-1, min_lr=0.0, **kwargs):
super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)
def _get_lr(self, step):
delta_lr = self.base_lrs[0] - self.min_lr
mult = (step - self.warmup_steps) / (self.max_steps - self.warmup_steps)
out_lr = [self.min_lr + (1 - mult) * delta_lr for _ in self.base_lrs]
return out_lr
class InverseSquareRootAnnealing(WarmupPolicy):
def __init__(self, optimizer, *, max_steps, last_epoch=-1, min_lr=0.0, **kwargs):
super().__init__(optimizer=optimizer, max_steps=max_steps, **kwargs, last_epoch=last_epoch, min_lr=min_lr)
def _get_lr(self, step):
denom = ((step + 1) / (self.warmup_steps + 1)) ** 0.5
out_lr = [initial_lr / denom for initial_lr in self.base_lrs]
return out_lr
class T5InverseSquareRootAnnealing(SquareRootConstantPolicy):
def __init__(self, optimizer, *, max_steps, last_epoch=-1, min_lr=0.0, **kwargs):
super().__init__(optimizer=optimizer, max_steps=max_steps, **kwargs, last_epoch=last_epoch, min_lr=min_lr)
def _get_lr(self, step):
return [1 / (step ** 0.5) for _ in self.base_lrs]
class PolynomialDecayAnnealing(WarmupPolicy):
def __init__(self, optimizer, *, max_steps, min_lr=0.0, power=1.0, cycle=False, last_epoch=-1, **kwargs):
self.power = power
self.cycle = cycle
super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)
def _get_lr(self, step):
new_lrs = [
_poly_decay(
initial_lr,
step=step - self.warmup_steps,
decay_steps=self.max_steps - self.warmup_steps,
power=self.power,
min_lr=self.min_lr,
cycle=self.cycle,
)
for initial_lr in self.base_lrs
]
return new_lrs
class PolynomialHoldDecayAnnealing(WarmupHoldPolicy):
def __init__(self, optimizer, *, max_steps, min_lr=0.0, power=1.0, cycle=False, last_epoch=-1, **kwargs):
self.power = power
self.cycle = cycle
super().__init__(optimizer=optimizer, max_steps=max_steps, last_epoch=last_epoch, min_lr=min_lr, **kwargs)
def _get_lr(self, step):
new_lrs = [
_poly_decay(
initial_lr,
step=step - self.hold_steps,
decay_steps=self.max_steps - max(self.warmup_steps, self.hold_steps),
power=self.power,
min_lr=self.min_lr,
cycle=self.cycle,
)
for initial_lr in self.base_lrs
]
return new_lrs
def register_scheduler(name: str, scheduler: _LRScheduler, scheduler_params: SchedulerParams):
"""
Checks if the scheduler name exists in the registry, and if it doesn't, adds it.
This allows custom schedulers to be added and called by name during instantiation.
Args:
name: Name of the scheduler. Will be used as the key to retrieve the scheduler.
scheduler: Scheduler class (inherits from _LRScheduler)
scheduler_params: The parameters as a dataclass of the scheduler
"""
if name in AVAILABLE_SCHEDULERS:
raise ValueError(f"Cannot override pre-existing schedulers. Conflicting scheduler name = {name}")
AVAILABLE_SCHEDULERS[name] = scheduler
sched_name = "{}_params".format(scheduler.__name__)
register_scheduler_params(name=sched_name, scheduler_params=scheduler_params)
def get_scheduler(name: str, **kwargs: Optional[Dict[str, Any]]) -> _LRScheduler:
"""
Convenience method to obtain an _LRScheduler class and partially instantiate it with optimizer kwargs.
Args:
name: Name of the scheduler in the registry.
kwargs: Optional kwargs of the scheduler used during instantiation.
Returns:
a partially instantiated _LRScheduler
"""
if name not in AVAILABLE_SCHEDULERS:
raise ValueError(
f"Cannot resolve scheduler{name}'. Available optimizers are : " f"{AVAILABLE_SCHEDULERS.keys()}"
)
scheduler_cls = AVAILABLE_SCHEDULERS[name]
# Pop 'max_steps' if it's not required by the scheduler
if 'max_steps' in kwargs and 'max_steps' not in inspect.signature(scheduler_cls).parameters:
kwargs.pop('max_steps')
scheduler = partial(scheduler_cls, **kwargs)
return scheduler
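
# --- Usage sketch (not part of the original file) ---
# Resolves a scheduler class by its registry name and completes the partial instantiation by
# supplying an optimizer; the Linear model and hyperparameters are illustrative only.
def _example_get_scheduler():
    import torch

    model = torch.nn.Linear(4, 4)
    optimizer = optim.SGD(model.parameters(), lr=1e-3)
    scheduler_cls = get_scheduler('CosineAnnealing', max_steps=1000, warmup_steps=100, min_lr=1e-5)
    return scheduler_cls(optimizer)  # the partial is completed with the optimizer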
def prepare_lr_scheduler(
optimizer: optim.Optimizer,
scheduler_config: Union[Dict[str, Any], DictConfig],
train_dataloader: Optional[dataloader.DataLoader] = None,
) -> Optional[Dict[str, Any]]:
"""
Constructs an LR Scheduler (optionally) for a given optimizer, based on a config with the following schema
optim:
name: <name of optimizer>
lr: <maximal learning rate>
# <additional optimizer arguments>
args:
name: auto # special keyword, resolves to correct optimizer config for given optimizer name
# cls: nemo.core.config.optimizers.NovogradParams # explicit instantiation by class path
params: # optional override parameters for the optimizer config
betas: [0.8, 0.5]
weight_decay: 0.001
# scheduler setup
sched:
name: <name of scheduler>
iters_per_batch: null # computed at runtime; mandatory to have
max_steps: -1 # computed at runtime or explicitly set here; mandatory to have
# pytorch lightning args <mandatory>
monitor: val_loss
reduce_on_plateau: false
# <scheduler config override>
args:
name: auto # special keyword, resolves to correct optimizer config for given optimizer name
# cls: nemo.core.config.schedulers.CosineAnnealingParams # explicit instantiation by class path
params: # optional override parameters for the optimizer config
warmup_steps: null
warmup_ratio: null
min_lr: 0.0
last_epoch: -1
Args:
optimizer: An instantiated Optimizer.
scheduler_config: A dictionary / config dict which follows the above schema.
train_dataloader: Optional requirement, must be passed if "iters_per_batch" is defined
instead of "max_steps". Used to compute effective "max_steps".
Returns:
A dictionary containing the LR Scheduler implementation if the config was successfully parsed
along with other parameters required by Pytorch Lightning, otherwise None.
"""
if scheduler_config is not None:
scheduler_config = maybe_update_config_version(scheduler_config)
# Build nested dictionary for convenience out of structured objects
if isinstance(scheduler_config, DictConfig):
scheduler_config = OmegaConf.to_container(scheduler_config, resolve=True)
elif dataclasses.is_dataclass(scheduler_config):
# Recursively transform data classes to basic dictionaries
scheduler_config = OmegaConf.create(scheduler_config)
scheduler_config = OmegaConf.to_container(scheduler_config, resolve=True)
# Test to see if config follows above schema
interval = 'step'
if scheduler_config is not None:
if 'args' in scheduler_config:
scheduler_args = scheduler_config.pop('args')
else:
scheduler_args = copy.deepcopy(scheduler_config)
# Remove extra parameters from scheduler_args nest
# Assume all other parameters are to be passed into scheduler constructor
scheduler_args.pop('name', None)
scheduler_args.pop('t_max_epochs', None)
scheduler_args.pop('t_accumulate_grad_batches', None)
scheduler_args.pop('t_limit_train_batches', None)
scheduler_args.pop('t_num_workers', None)
scheduler_args.pop('monitor', None)
scheduler_args.pop('reduce_on_plateau', None)
if 'name' in scheduler_config and scheduler_config['name'] in EPOCH_SCHEDULERS:
interval = 'epoch'
else:
# Return gracefully in case `sched` was not supplied; inform user
logging.info('Scheduler not initialized as no `sched` config supplied to setup_optimizer()')
return None
# Try instantiation of scheduler params from config class path
if '_target_' in scheduler_args:
scheduler_args_cfg = OmegaConf.create(scheduler_args)
scheduler_conf = hydra.utils.instantiate(scheduler_args_cfg)
scheduler_args = vars(scheduler_conf)
# Get name of the scheduler
scheduler_name = scheduler_conf.__class__.__name__
if 'Params' in scheduler_name:
scheduler_name = scheduler_name.replace('Params', '')
else:
# Class path instantiation failed; try resolving "name" component
# Get name of the scheduler
if 'name' in scheduler_config:
scheduler_name = scheduler_config['name']
else:
logging.warning(
"Could not resolve classpath for Scheduler Config, and `name` "
"was not provided either. \n"
"Scheduler cannot be instantiated !"
)
return None
# If class path was not provided, perhaps `name` is provided for resolution
if 'name' in scheduler_args:
# If `auto` is passed as name for resolution of optimizer name,
# then lookup optimizer name and resolve its parameter config
if scheduler_args['name'] == 'auto':
scheduler_params_name = "{}Params".format(scheduler_name)
else:
scheduler_params_name = scheduler_args['name']
# Get override arguments provided in the config yaml file / Dict Config
scheduler_params_override = scheduler_args.get('params', {})
# If params is itself a dict config object provided explicitly in Dict Config
# Resolve to dictionary for convenience
if isinstance(scheduler_params_override, DictConfig):
scheduler_params_override = OmegaConf.to_container(scheduler_params_override, resolve=True)
# Get and instantiate the Config dataclass for this scheduler
scheduler_params_cls = get_scheduler_config(scheduler_params_name, **scheduler_params_override)
scheduler_params = scheduler_params_cls() # instantiate the parameters object
scheduler_args = vars(scheduler_params) # extract just the dictionary from the Config object
else:
# assume the input dictionary already contains the scheduler args (from dataclasses / omegaconf)
pass
# Extract value to monitor in losses, if provided.
if 'monitor' in scheduler_config:
monitor = scheduler_config.get('monitor')
else:
# Default to train loss
monitor = 'loss'
# Store exact max_steps if it is provided
if 'max_steps' in scheduler_config and scheduler_config['max_steps'] is not None:
max_steps = scheduler_config['max_steps']
elif 't_max_epochs' in scheduler_config:
# Compute effective max_steps if t_max_epochs is provided
if train_dataloader is None:
logging.warning(
'As `t_max_epochs` is provided/computed, it is required to pass the train dataloader in order\n'
'to compute effective maximum number of steps.\n'
'Scheduler will not be instantiated !'
)
return None
# Raise exception if neither `max_steps` nor `t_max_epochs` is provided
if scheduler_config.get('t_max_epochs', None) is None:
logging.warning(
"`t_max_epochs` cannot be None when `max_steps` is not provided.\n"
"This can occur when `train dataloader` is not available to correctly "
"prepare the scheduler.\n"
"Scheduler will not be instantiated !"
)
return None
# Get iters_per_batch
max_epochs = scheduler_config.get('t_max_epochs')
accumulate_grad_batches = scheduler_config.get('t_accumulate_grad_batches')
limit_train_batches = scheduler_config.get('t_limit_train_batches')
num_workers = scheduler_config.get('t_num_workers')
# Compute effective num max_steps
num_samples = len(train_dataloader.dataset)
# TODO: not sure if this will be the correct LR schedule for Megatron
# we may need to override ModelPT setup_optimization
if train_dataloader.batch_size is not None:
batch_size = train_dataloader.batch_size
elif hasattr(train_dataloader, 'batch_sampler') and train_dataloader.batch_sampler is not None:
if train_dataloader.batch_sampler.micro_batch_size is not None:
batch_size = train_dataloader.batch_sampler.micro_batch_size
else:
raise ValueError(f'Could not find batch_size from batch_sampler: {train_dataloader.batch_sampler}')
else:
raise ValueError(f'Could not find batch_size from train_dataloader: {train_dataloader}')
drop_last = train_dataloader.drop_last
max_steps = compute_max_steps(
max_epochs=max_epochs,
accumulate_grad_batches=accumulate_grad_batches,
limit_train_batches=limit_train_batches,
num_workers=num_workers,
num_samples=num_samples,
batch_size=batch_size,
drop_last=drop_last,
)
else:
logging.warning(
"Neither `max_steps` nor `iters_per_batch` were provided to `optim.sched`, "
"cannot compute effective `max_steps` !\n"
"Scheduler will not be instantiated !"
)
return None
# Inject max_steps (effective or provided) into the scheduler config
scheduler_args['max_steps'] = max_steps
# Get the scheduler class from the config
scheduler_cls = get_scheduler(scheduler_name, **scheduler_args)
# Pop 'max_steps' if it's not required by the scheduler
if 'max_steps' not in inspect.signature(scheduler_cls).parameters:
scheduler_args.pop('max_steps')
# Instantiate the LR schedule
schedule = scheduler_cls(optimizer, **scheduler_args)
logging.info(
'Scheduler "%s" \nwill be used during training (effective maximum steps = %d) - \nParameters : \n(%s)',
str(schedule),
max_steps,
OmegaConf.to_yaml(OmegaConf.create(scheduler_args)),
)
# Wrap the schedule in PTL arguments to perform stepwise computation
# Rather than epoch level computation
if isinstance(schedule, optim.lr_scheduler.ReduceLROnPlateau):
reduce_lr_on_plateau = True
else:
reduce_lr_on_plateau = False
schedule_dict = {
'scheduler': schedule,
'interval': interval,
'frequency': 1,
'monitor': monitor,
'reduce_on_plateau': reduce_lr_on_plateau,
}
return schedule_dict
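
# --- Usage sketch (not part of the original file) ---
# Builds the Lightning-style scheduler dict from a minimal `sched`-like config with an explicit
# `max_steps`, so no train dataloader is needed; the names and values are illustrative only.
def _example_prepare_lr_scheduler():
    import torch

    model = torch.nn.Linear(4, 4)
    optimizer = optim.SGD(model.parameters(), lr=1e-3)
    scheduler_config = OmegaConf.create(
        {'name': 'CosineAnnealing', 'max_steps': 1000, 'warmup_steps': 100, 'min_lr': 1e-5, 'monitor': 'val_loss'}
    )
    return prepare_lr_scheduler(optimizer=optimizer, scheduler_config=scheduler_config)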
def compute_max_steps(
max_epochs, accumulate_grad_batches, limit_train_batches, num_workers, num_samples, batch_size, drop_last
):
_round = math.floor if drop_last else math.ceil
sampler_num_samples = math.ceil(num_samples / max(1, num_workers))
if drop_last and num_workers > 1:
logging.warning(
"Please note that drop_last is broken in pytorch 1.6.0. We will fix when pytorch 1.7.0 is released"
)
# TODO: Master version, not in pytorch 1.6.0
# sampler_num_samples = math.ceil((num_samples - num_workers)/ num_workers)
steps_per_epoch = _round(sampler_num_samples / batch_size)
if isinstance(limit_train_batches, int) or limit_train_batches == 0.0:
steps_per_epoch = min(steps_per_epoch, int(limit_train_batches))
elif steps_per_epoch != float('inf'):
# limit_train_batches is a percentage of batches per epoch
steps_per_epoch = int(steps_per_epoch * limit_train_batches)
return math.ceil(steps_per_epoch / accumulate_grad_batches) * max_epochs
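
# --- Worked example (not part of the original file) ---
# 10,000 samples, batch size 32, drop_last, a single worker, no gradient accumulation and no
# batch limiting give floor(10000 / 32) = 312 steps per epoch, i.e. 1560 steps over 5 epochs.
def _example_compute_max_steps():
    return compute_max_steps(
        max_epochs=5,
        accumulate_grad_batches=1,
        limit_train_batches=1.0,
        num_workers=1,
        num_samples=10_000,
        batch_size=32,
        drop_last=True,
    )  # -> 1560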
AVAILABLE_SCHEDULERS = {
'WarmupPolicy': WarmupPolicy,
'WarmupHoldPolicy': WarmupHoldPolicy,
'SquareAnnealing': SquareAnnealing,
'CosineAnnealing': CosineAnnealing,
'NoamAnnealing': NoamAnnealing,
'NoamHoldAnnealing': NoamHoldAnnealing,
'WarmupAnnealing': WarmupAnnealing,
'InverseSquareRootAnnealing': InverseSquareRootAnnealing,
'T5InverseSquareRootAnnealing': T5InverseSquareRootAnnealing,
'SquareRootAnnealing': SquareRootAnnealing,
'PolynomialDecayAnnealing': PolynomialDecayAnnealing,
'PolynomialHoldDecayAnnealing': PolynomialHoldDecayAnnealing,
'StepLR': pt_scheduler.StepLR,
'ExponentialLR': pt_scheduler.ExponentialLR,
'ReduceLROnPlateau': pt_scheduler.ReduceLROnPlateau,
'CyclicLR': pt_scheduler.CyclicLR,
}
EPOCH_SCHEDULERS = {
'ExponentialLR': pt_scheduler.ExponentialLR,
'ReduceLROnPlateau': pt_scheduler.ReduceLROnPlateau,
}
|
NeMo-main
|
nemo/core/optim/lr_scheduler.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import itertools
from typing import Callable, Iterable, Optional, Union
import torch
from apex.contrib.optimizers.distributed_fused_adam import DistributedFusedAdam, _disable_pre_forward_hook
from megatron.core import parallel_state
from megatron.core.dist_checkpointing.dict_utils import dict_list_map_inplace
from megatron.core.dist_checkpointing.mapping import ShardedTensor
from megatron.core.dist_checkpointing.optimizer import get_param_id_to_sharded_param_map, optim_state_to_sharding_state
def _str_to_dtype(dtype: Union[str, torch.dtype]) -> torch.dtype:
if isinstance(dtype, torch.dtype):
return dtype
name = str(dtype).strip().lower()
if name.startswith("torch."):
name = name.replace("torch.", "", 1)
if name.startswith("fp"):
name = name.replace("fp", "float", 1)
dtype = dict(
float32=torch.float32,
float=torch.float32,
float64=torch.float64,
double=torch.float64,
float16=torch.float16,
half=torch.float16,
bfloat16=torch.bfloat16,
bf16=torch.bfloat16,
uint8=torch.uint8,
byte=torch.uint8,
int8=torch.int8,
char=torch.int8,
int16=torch.int16,
short=torch.int16,
int32=torch.int32,
int=torch.int32,
int64=torch.int64,
long=torch.int64,
bool=torch.bool,
)[name]
return dtype
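
# --- Usage sketch (not part of the original file) ---
# `_str_to_dtype` accepts torch dtypes as well as their common string spellings, including the
# 'fp*' and 'torch.*' prefixes.
def _example_str_to_dtype():
    assert _str_to_dtype('bf16') is torch.bfloat16
    assert _str_to_dtype('fp16') is torch.float16
    assert _str_to_dtype('torch.float32') is torch.float32
    assert _str_to_dtype(torch.float64) is torch.float64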
class MegatronDistributedFusedAdam(DistributedFusedAdam):
"""Wrapper class that supports NeMo-Megatron optimizations
When O2-style optimizations are enabled, gradients are accumulated
into the main_grad buffer instead of the grad buffer.
"""
def __init__(
self,
params: Union[Iterable[torch.nn.Parameter], Iterable[dict]],
disable_distributed_parameters: bool = False,
**kwargs,
):
# Initialize process groups
if 'process_group' not in kwargs and not parallel_state.is_unitialized():
kwargs['process_group'] = parallel_state.get_data_parallel_group()
if disable_distributed_parameters:
world_size = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
self_groups = [torch.distributed.new_group(ranks=[i]) for i in range(world_size)]
kwargs['distributed_process_group'] = self_groups[rank]
kwargs['redundant_process_group'] = kwargs['process_group']
# Make sure dtypes are in right type
for keyword in ('dtype', 'grad_sync_dtype', 'param_sync_dtype'):
if keyword in kwargs:
kwargs[keyword] = _str_to_dtype(kwargs[keyword])
# Make sure params are in consistent format (list of param group dicts)
param_groups = list(params)
assert param_groups
if not isinstance(param_groups[0], dict):
param_groups = [{'params': param_groups}]
# Construct distributed optimizer
super().__init__(param_groups, **kwargs)
# Initialize weights that require FP32 grads
if self.dtype != torch.float32 or self.grad_sync_dtype != torch.float32:
fp32_params = []
for param_group in param_groups:
fp32_params.extend(
filter(lambda param: getattr(param, '_with_fp32_optimizer', False), param_group['params'],)
)
if fp32_params:
assert self.dtype == torch.float32, (
'Param requires FP32 state, ' f'but optimizer is initialized with {self.dtype}'
)
self.init_params_bucket(
fp32_params, grad_sync_dtype=torch.float32,
)
def _make_post_backward_hook(self, param: torch.nn.Parameter, param_group_id: int, param_id: int,) -> Callable:
def hook(*unused):
if getattr(param, '_pre_forward_hook_is_enabled', False):
raise RuntimeError(
'A parameter called its post-backward hook '
'before its pre-forward hook. '
'Please manually interact with the parameter '
'before the forward pass (e.g. by calling data_ptr) '
'or run DistributedFusedAdam with overlap_param_sync=False.'
)
with self._lock:
need_to_initialize = 'fragments' not in self.state[param]
if need_to_initialize:
self._init_param_state(param, param_group_id, param_id)
if self.greedy_grad_copy and not getattr(param, '_disable_greedy_grad_copy', False):
self._grad_copy(param)
if self.overlap_grad_sync and not getattr(param, '_disable_overlap_grad_sync', False):
self._try_start_bucket_grad_sync(
params=[param], ignore_last_bucket=need_to_initialize,
)
return hook
def try_grad_sync(self, params: Iterable[torch.nn.Parameter]) -> None:
def is_grad_copy_enabled(param: torch.nn.Parameter) -> bool:
return not getattr(param, '_disable_greedy_grad_copy', False) and not getattr(
param, '_disable_overlap_grad_sync', False
)
params = list(filter(is_grad_copy_enabled, params))
for p in params:
self._grad_copy(p)
self._try_start_bucket_grad_sync(params=params)
def zero_grad(self, *args, **kwargs) -> None:
super().zero_grad(*args, **kwargs)
# Reset main grads
if self.contiguous_grad_buffer:
for param in self.parameters():
with _disable_pre_forward_hook(param):
param.main_grad = self.grad_buffer_view(param)
def grad_norm(
self, parameters: Optional[Iterable[torch.nn.Parameter]] = None, norm_type: float = 2.0, force: bool = False,
) -> torch.Tensor:
assert norm_type == 2
if parameters is not None:
# Make sure we can access iterable multiple times
parameters = list(parameters)
# Compute grad norm
if force or self._grad_norm is None:
# Compute norm of local gradients for distributed optimizer
grad_norm_sq = self._local_grad_norm(parameters=parameters, norm_type=norm_type,)
if self.redundant_size > 1:
grad_norm_sq /= self.redundant_size
# Sum over all procs to get grad norm
torch.distributed.all_reduce(
grad_norm_sq, op=torch.distributed.ReduceOp.SUM,
)
self._grad_norm = grad_norm_sq.sqrt()
# Use cached grad norm
return super().grad_norm()
def sharded_state_dict(self, model_sharded_state_dict):
optimizer_state_dict = self.state_dict()
id_to_sharded_param_map = get_param_id_to_sharded_param_map(
model_sharded_state_dict=model_sharded_state_dict, optim_params_iter=self.parameters(),
)
# Convert state
step = optimizer_state_dict['state'].pop('step')
state_dict_format = optimizer_state_dict.pop('format', None)
optim_state_to_sharding_state(optimizer_state_dict, id_to_sharded_param_map)
optimizer_state_dict['state']['step'] = step
if state_dict_format is not None:
optimizer_state_dict['format'] = state_dict_format
def rename_fp32_params(x):
if isinstance(x, ShardedTensor) and x.key.startswith('optimizer.state.param'):
x.key = x.key.replace('optimizer.state.param', 'optimizer.state.fp32_param')
return x
dict_list_map_inplace(rename_fp32_params, optimizer_state_dict)
return optimizer_state_dict
|
NeMo-main
|
nemo/core/optim/distributed_adam.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Most of the code here has been copied from:
# https://github.com/pytorch/fairseq/blob/main/fairseq/optim/adafactor.py
import math
import torch
from torch.optim.optimizer import Optimizer
__all__ = ['Adafactor']
class Adafactor(Optimizer):
"""Implements Adafactor algorithm.
This implementation is based on:
`Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`
(see https://arxiv.org/abs/1804.04235)
Note that this optimizer internally adjusts the learning rate
depending on the *scale_parameter*, *relative_step* and
*warmup_init* options. To use a manual (external) learning rate
schedule you should set `scale_parameter=False` and
`relative_step=False`.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): external learning rate (default: None)
eps (tuple[float, float]): regularization constants for square gradient
and parameter scale respectively (default: (1e-30, 1e-3))
clip_threshold (float): threshold of root mean square of
final gradient update (default: 1.0)
decay_rate (float): coefficient used to compute running averages of square
gradient (default: -0.8)
beta1 (float): coefficient used for computing running averages of gradient
(default: None)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
scale_parameter (bool): if True, learning rate is scaled by root mean square of
parameter (default: True)
relative_step (bool): if True, time-dependent learning rate is computed
instead of external learning rate (default: True)
warmup_init (bool): time-dependent learning rate computation depends on
whether warm-up initialization is being used (default: False)
"""
def __init__(
self,
params,
lr=None,
eps=(1e-30, 1e-3),
clip_threshold=1.0,
decay_rate=-0.8,
beta1=None,
weight_decay=0.0,
scale_parameter=True,
relative_step=True,
warmup_init=False,
min_step=1e-2,
):
if lr is not None and relative_step:
raise ValueError("Cannot combine manual lr and relative_step options")
if warmup_init and not relative_step:
raise ValueError("warmup_init requires relative_step=True")
self.min_step = min_step
defaults = dict(
lr=lr,
eps=eps,
clip_threshold=clip_threshold,
decay_rate=decay_rate,
beta1=beta1,
weight_decay=weight_decay,
scale_parameter=scale_parameter,
relative_step=relative_step,
warmup_init=warmup_init,
min_step=min_step,
)
super(Adafactor, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return False
def _get_lr(self, param_group, param_state):
rel_step_sz = param_group["lr"]
if param_group["relative_step"]:
min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else self.min_step
rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"]))
param_scale = 1.0
if param_group["scale_parameter"]:
param_scale = max(param_group["eps"][1], param_state["RMS"])
return param_scale * rel_step_sz
def _get_options(self, param_group, param_shape):
factored = len(param_shape) >= 2
use_first_moment = param_group["beta1"] is not None
return factored, use_first_moment
def _rms(self, tensor):
return tensor.norm(2) / (tensor.numel() ** 0.5)
def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):
r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
return torch.mul(r_factor, c_factor)
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError("Adafactor does not support sparse gradients.")
state = self.state[p]
grad_shape = grad.shape
factored, use_first_moment = self._get_options(group, grad_shape)
# State Initialization
if len(state) == 0:
state["step"] = 0
if use_first_moment:
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(grad)
if factored:
state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad)
state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)
else:
state["exp_avg_sq"] = torch.zeros_like(grad)
state["RMS"] = 0
else:
if use_first_moment:
state["exp_avg"] = state["exp_avg"].to(grad)
if factored:
state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad)
state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad)
else:
state["exp_avg_sq"] = state["exp_avg_sq"].to(grad)
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state["step"] += 1
state["RMS"] = self._rms(p_data_fp32)
group["lr"] = self._get_lr(group, state)
beta2t = 1.0 - math.pow(state["step"], group["decay_rate"])
update = (grad ** 2) + group["eps"][0]
if factored:
exp_avg_sq_row = state["exp_avg_sq_row"]
exp_avg_sq_col = state["exp_avg_sq_col"]
exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t)
exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t)
# Approximation of exponential moving average of square of gradient
update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
update.mul_(grad)
else:
exp_avg_sq = state["exp_avg_sq"]
exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t)
update = exp_avg_sq.rsqrt().mul_(grad)
update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0))
update.mul_(group["lr"])
if use_first_moment:
exp_avg = state["exp_avg"]
exp_avg.mul_(group["beta1"]).add_(update, alpha=1 - group["beta1"])
update = exp_avg
if group["weight_decay"] != 0:
p_data_fp32.add_(p_data_fp32, alpha=-group["weight_decay"] * group["lr"])
p_data_fp32.add_(-update)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
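# --- Editor's note: a minimal usage sketch appended for illustration; it is not part of the
# original NeMo source. The toy model and shapes below are assumptions, not NeMo defaults.
if __name__ == "__main__":
    _model = torch.nn.Linear(16, 4)
    # With relative_step=True the external lr must stay None (see the constructor check above);
    # the step size is then derived from the step count and the parameter RMS.
    _opt = Adafactor(_model.parameters(), lr=None, relative_step=True, scale_parameter=True)
    _loss = _model(torch.randn(8, 16)).pow(2).mean()
    _loss.backward()
    _opt.step()
    _opt.zero_grad()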
|
NeMo-main
|
nemo/core/optim/adafactor.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.core.optim.adafactor import Adafactor
from nemo.core.optim.lr_scheduler import (
CosineAnnealing,
InverseSquareRootAnnealing,
NoamAnnealing,
PolynomialDecayAnnealing,
PolynomialHoldDecayAnnealing,
SquareAnnealing,
SquareRootAnnealing,
T5InverseSquareRootAnnealing,
WarmupAnnealing,
WarmupHoldPolicy,
WarmupPolicy,
prepare_lr_scheduler,
)
from nemo.core.optim.novograd import Novograd
from nemo.core.optim.optimizer_with_main_params import MainParamsOptimizerWrapper
from nemo.core.optim.optimizers import get_optimizer, parse_optimizer_args, register_optimizer
|
NeMo-main
|
nemo/core/optim/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from functools import partial
from typing import Any, Dict, Optional, Union
import hydra
import torch
import torch.optim as optim
from omegaconf import DictConfig, OmegaConf
from torch.optim import adadelta, adagrad, adamax, rmsprop, rprop
from torch.optim.optimizer import Optimizer
from nemo.core.config import OptimizerParams, get_optimizer_config, register_optimizer_params
from nemo.core.optim.adafactor import Adafactor
from nemo.core.optim.novograd import Novograd
from nemo.utils import logging
from nemo.utils.model_utils import maybe_update_config_version
AVAILABLE_OPTIMIZERS = {
'sgd': optim.SGD,
'adam': optim.Adam,
'adamw': optim.AdamW,
'adadelta': adadelta.Adadelta,
'adamax': adamax.Adamax,
'adagrad': adagrad.Adagrad,
'rmsprop': rmsprop.RMSprop,
'rprop': rprop.Rprop,
'novograd': Novograd,
'adafactor': Adafactor,
}
try:
from apex.optimizers import FusedAdam, FusedLAMB
HAVE_APEX = True
AVAILABLE_OPTIMIZERS['lamb'] = FusedLAMB
AVAILABLE_OPTIMIZERS['fused_adam'] = FusedAdam
except ModuleNotFoundError:
HAVE_APEX = False
HAVE_APEX_DISTRIBUTED_ADAM = False
if HAVE_APEX:
try:
# Try importing wrapper for Apex distributed Adam optimizer
from nemo.core.optim.distributed_adam import MegatronDistributedFusedAdam
HAVE_APEX_DISTRIBUTED_ADAM = True
AVAILABLE_OPTIMIZERS['distributed_fused_adam'] = MegatronDistributedFusedAdam
except (ImportError, ModuleNotFoundError):
HAVE_APEX_DISTRIBUTED_ADAM = False
__all__ = ['get_optimizer', 'register_optimizer', 'parse_optimizer_args']
def parse_optimizer_args(
optimizer_name: str, optimizer_kwargs: Union[DictConfig, Dict[str, Any]]
) -> Union[Dict[str, Any], DictConfig]:
"""
Parses a list of strings, of the format "key=value" or "key2=val1,val2,..."
into a dictionary of type {key=value, key2=[val1, val2], ...}
This dictionary is then used to instantiate the chosen Optimizer.
Args:
optimizer_name: string name of the optimizer, used for auto resolution of params
optimizer_kwargs: Either a list of strings in a specified format,
or a dictionary. If a dictionary is provided, it is assumed the dictionary
is the final parsed value, and simply returned.
If a list of strings is provided, each item in the list is parsed into a
new dictionary.
Returns:
A dictionary
"""
kwargs = {}
if optimizer_kwargs is None:
return kwargs
optimizer_kwargs = copy.deepcopy(optimizer_kwargs)
optimizer_kwargs = maybe_update_config_version(optimizer_kwargs)
if isinstance(optimizer_kwargs, DictConfig):
optimizer_kwargs = OmegaConf.to_container(optimizer_kwargs, resolve=True)
# If it is a dictionary, perform stepwise resolution
if hasattr(optimizer_kwargs, 'keys'):
# Attempt class path resolution
if '_target_' in optimizer_kwargs: # captures (target, _target_)
optimizer_kwargs_config = OmegaConf.create(optimizer_kwargs)
optimizer_instance = hydra.utils.instantiate(optimizer_kwargs_config) # type: DictConfig
optimizer_instance = vars(optimizer_instance)
return optimizer_instance
# If class path was not provided, perhaps `name` is provided for resolution
if 'name' in optimizer_kwargs:
# If `auto` is passed as name for resolution of optimizer name,
# then lookup optimizer name and resolve its parameter config
if optimizer_kwargs['name'] == 'auto':
optimizer_params_name = "{}_params".format(optimizer_name)
optimizer_kwargs.pop('name')
else:
optimizer_params_name = optimizer_kwargs.pop('name')
# Override arguments provided in the config yaml file
if 'params' in optimizer_kwargs:
# If optimizer kwarg overrides are wrapped in yaml `params`
optimizer_params_override = optimizer_kwargs.get('params')
else:
# If the kwargs themselves are a DictConfig
optimizer_params_override = optimizer_kwargs
if isinstance(optimizer_params_override, DictConfig):
optimizer_params_override = OmegaConf.to_container(optimizer_params_override, resolve=True)
optimizer_params_cls = get_optimizer_config(optimizer_params_name, **optimizer_params_override)
# If we are provided just a Config object, simply return the dictionary of that object
if optimizer_params_name is None:
optimizer_params = vars(optimizer_params_cls)
return optimizer_params
else:
# If we are provided a partial class instantiation of a Config,
# Instantiate it and retrieve its vars as a dictionary
optimizer_params = optimizer_params_cls() # instantiate the parameters object
optimizer_params = vars(optimizer_params)
return optimizer_params
# simply return the dictionary that was provided
return optimizer_kwargs
return kwargs
def register_optimizer(name: str, optimizer: Optimizer, optimizer_params: OptimizerParams):
"""
    Checks if the optimizer name exists in the registry, and if it doesn't, adds it.
This allows custom optimizers to be added and called by name during instantiation.
Args:
name: Name of the optimizer. Will be used as key to retrieve the optimizer.
optimizer: Optimizer class
optimizer_params: The parameters as a dataclass of the optimizer
"""
if name in AVAILABLE_OPTIMIZERS:
raise ValueError(f"Cannot override pre-existing optimizers. Conflicting optimizer name = {name}")
AVAILABLE_OPTIMIZERS[name] = optimizer
optim_name = "{}_params".format(optimizer.__name__)
register_optimizer_params(name=optim_name, optimizer_params=optimizer_params)
def get_optimizer(name: str, **kwargs: Optional[Dict[str, Any]]) -> Optimizer:
"""
Convenience method to obtain an Optimizer class and partially instantiate it with optimizer kwargs.
Args:
name: Name of the Optimizer in the registry.
kwargs: Optional kwargs of the optimizer used during instantiation.
Returns:
a partially instantiated Optimizer
"""
if name not in AVAILABLE_OPTIMIZERS:
raise ValueError(
f"Cannot resolve optimizer '{name}'. Available optimizers are : " f"{AVAILABLE_OPTIMIZERS.keys()}"
)
if name == 'fused_adam':
if not torch.cuda.is_available():
            raise ValueError('CUDA must be available to use fused_adam.')
optimizer = AVAILABLE_OPTIMIZERS[name]
optimizer = partial(optimizer, **kwargs)
return optimizer
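# --- Editor's note: a minimal usage sketch appended for illustration; not part of the original
# NeMo source. It assumes a plain dict of final kwargs is passed (no Hydra `_target_` or `name`
# resolution), in which case parse_optimizer_args simply returns the dict unchanged.
if __name__ == "__main__":
    import torch.nn as nn

    _net = nn.Linear(10, 2)
    _kwargs = parse_optimizer_args('adamw', {'lr': 1e-3, 'weight_decay': 0.01})
    # get_optimizer returns a functools.partial over the optimizer class; call it with the params.
    _optimizer = get_optimizer('adamw', **_kwargs)(_net.parameters())
    print(type(_optimizer).__name__)  # -> AdamW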
|
NeMo-main
|
nemo/core/optim/optimizers.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.optim.optimizer import Optimizer
__all__ = ['Novograd']
def _check_valid_opt_params(lr, eps, betas):
if lr < 0:
raise ValueError(f"Invalid learning rate: {lr}")
if eps < 0:
raise ValueError(f"Invalid epsilon value: {eps}")
if not (0.0 <= betas[0] < 1.0 and 0.0 <= betas[1] < 1.0):
raise ValueError(f"Betas have to be between 0 and 1: {betas}")
class Novograd(Optimizer):
"""Implements Novograd algorithm.
It has been proposed in "Stochastic Gradient Methods with Layer-wise
Adaptive Moments for Training of Deep Networks"
(https://arxiv.org/abs/1905.11286)
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.95, 0.98))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        grad_averaging (boolean, optional): whether to scale the current gradient
            by (1 - beta1) before adding it to the first moment (default: False)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper "On the Convergence of Adam and Beyond"
            (default: False)
        luc (boolean, optional): whether to apply layer-wise update clipping,
            capping the step size at luc_trust * ||param|| / ||update|| (default: False)
        luc_trust (float, optional): trust coefficient used by LUC (default: 1e-3)
        luc_eps (float, optional): epsilon added to the update norm for LUC (default: 1e-8)
    """
def __init__(
self,
params,
lr=1e-3,
betas=(0.95, 0.98),
eps=1e-8,
weight_decay=0,
grad_averaging=False,
amsgrad=False,
luc=False,
luc_trust=1e-3,
luc_eps=1e-8,
):
_check_valid_opt_params(lr, eps, betas)
defaults = dict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, grad_averaging=grad_averaging, amsgrad=amsgrad,
)
self.luc = luc
self.luc_trust = luc_trust
self.luc_eps = luc_eps
super(Novograd, self).__init__(params, defaults)
def __setstate__(self, state):
super(Novograd, self).__setstate__(state)
for group in self.param_groups:
group.setdefault("amsgrad", False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("Sparse gradients are not supported.")
amsgrad = group["amsgrad"]
state = self.state[p]
# State initialization
if not state:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros([]).to(state["exp_avg"].device)
if amsgrad:
# Maintains max of all exp moving avg of squared grad
state["max_exp_avg_sq"] = torch.zeros([]).to(state["exp_avg"].device)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
norm = grad.norm().pow(2)
if exp_avg_sq == 0:
exp_avg_sq.copy_(norm)
else:
exp_avg_sq.mul_(beta2).add_(norm, alpha=1.0 - beta2)
if amsgrad:
# Maintains max of all 2nd moment running avg till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group["eps"])
else:
denom = exp_avg_sq.sqrt().add_(group["eps"])
grad.div_(denom)
if group["weight_decay"] != 0:
grad.add_(p.data, alpha=group["weight_decay"])
if group["grad_averaging"]:
grad.mul_(1 - beta1)
exp_avg.mul_(beta1).add_(grad)
if self.luc:
# Clip update so that updates are less than eta*weights
data_norm = torch.norm(p.data)
grad_norm = torch.norm(exp_avg.data)
luc_factor = self.luc_trust * data_norm / (grad_norm + self.luc_eps)
luc_factor = min(luc_factor, group["lr"])
p.data.add_(exp_avg, alpha=-luc_factor)
else:
p.data.add_(exp_avg, alpha=-group["lr"])
return loss
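# --- Editor's note: a minimal usage sketch appended for illustration; not part of the original
# NeMo source. Hyperparameters below are illustrative only.
if __name__ == "__main__":
    _model = torch.nn.Linear(8, 2)
    _opt = Novograd(_model.parameters(), lr=1e-2, weight_decay=1e-3)
    _loss = _model(torch.randn(4, 8)).pow(2).mean()
    _loss.backward()
    _opt.step()
    _opt.zero_grad()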
|
NeMo-main
|
nemo/core/optim/novograd.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import torch
from nemo.utils import logging
try:
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core.parallel_state import get_data_parallel_group, get_data_parallel_world_size
from megatron.core.tensor_parallel import copy_tensor_model_parallel_attributes
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
def _zero_grad_group_helper(group, set_to_none):
"""Zero out the gradient for a group of parameters.
Note: copied from torch.optim.optimizer."""
for param in group:
if param.grad is not None:
if set_to_none:
param.grad = None
else:
if param.grad.grad_fn is not None:
param.grad.detach_()
else:
param.grad.requires_grad_(False)
param.grad.zero_()
def _multi_tensor_copy_this_to_that(this, that, overflow_buf):
"""Use multi-tensor-applier to copy values from one list to another.
    We don't have a bfloat16 implementation so for now if the overflow_buf
is not provided, we default back to simple loop copy to be compatible
with bfloat16."""
if overflow_buf:
# Scaling with factor `1.0` is equivalent to copy.
multi_tensor_applier(amp_C.multi_tensor_scale, overflow_buf, [this, that], 1.0)
else:
# FIXME: use multi-tensor applier for bf16
for this_, that_ in zip(this, that):
that_.copy_(this_)
class GradBucket(object):
"""
Persistent buffer for main gradients that remains allocated between training iterations
"""
def __init__(self, numel, chunk_size_mb):
if not HAVE_APEX:
raise ImportError(
"Apex was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
if not HAVE_MEGATRON_CORE:
raise ImportError(
"megatron-core was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
self.numel = numel
self.data = torch.zeros(self.numel, dtype=torch.float, device=torch.cuda.current_device(), requires_grad=False)
self.chunk_size_mb = chunk_size_mb
if self.chunk_size_mb > 0:
chunk_size_bytes = chunk_size_mb * 1024 * 1024
self.chunk_size_numel = chunk_size_bytes // 4
self.num_chunks = self.numel // self.chunk_size_numel
self.numel_per_chunk = [self.chunk_size_numel] * self.num_chunks
if self.numel % self.chunk_size_numel != 0:
self.num_chunks += 1
self.numel_per_chunk.append(self.numel % self.chunk_size_numel)
self.start_index_per_chunk = torch.cumsum(torch.tensor([0] + self.numel_per_chunk[:-1]), dim=0)
self.current_chunk = 0
self.computed_numel_per_chunk = [0] * self.num_chunks
def zero(self):
"""Reset the buffer to zero."""
self.data.zero_()
def allreduce_buffer(self):
"""Synchronous buffer data allreduce """
self.data.div_(get_data_parallel_world_size())
torch.distributed.all_reduce(self.data, group=get_data_parallel_group())
def get(self, shape, start_index):
"""Return a tensor with the input `shape` as a view into the
1-D data starting at `start_index`."""
end_index = start_index + shape.numel()
assert end_index <= self.numel, 'requested tensor is out of the buffer range.'
buffer_tensor = self.data[start_index:end_index]
buffer_tensor = buffer_tensor.view(shape)
grad_chunk_info = None
if self.chunk_size_mb > 0:
grad_chunk_info = {}
chunk = start_index // self.chunk_size_numel
chunk_start_index = self.start_index_per_chunk[chunk]
chunk_end_index = chunk_start_index + self.numel_per_chunk[chunk]
grad_chunk_info[chunk] = min(chunk_end_index, end_index) - start_index
while chunk_end_index < end_index:
chunk += 1
chunk_start_index = self.start_index_per_chunk[chunk]
chunk_end_index = chunk_start_index + self.numel_per_chunk[chunk]
grad_chunk_info[chunk] = min(chunk_end_index, end_index) - chunk_start_index
return buffer_tensor, grad_chunk_info
def update_chunk_info(self, grad_chunk_info):
for chunk in grad_chunk_info.keys():
self.computed_numel_per_chunk[chunk] += grad_chunk_info[chunk]
def get_allreduce_tensor(self):
if self.computed_numel_per_chunk[self.current_chunk] == self.numel_per_chunk[self.current_chunk]:
chunk_start_index = self.start_index_per_chunk[self.current_chunk]
chunk_end_index = chunk_start_index + self.numel_per_chunk[self.current_chunk]
allreduce_tensor = self.data[chunk_start_index:chunk_end_index]
self.computed_numel_per_chunk[self.current_chunk] = 0
self.current_chunk += 1
if self.current_chunk == self.num_chunks:
self.current_chunk = 0
return allreduce_tensor
return None
class MainParamsOptimizerWrapper(torch.optim.Optimizer):
"""
Float16 optimizer wrapper for half precision (fp16 and bf16) data types.
This optimizer wrapper holds main parameters and gradients in fp32 to support
stable convergence.
Arguments:
optimizer: base optimizer such as Adam or SGD.
fp32_grad_accum: to enable the use of fp32 in gradient accumulation and allreduce.
contiguous_grad_bucket: to enable allocating the master gradients in the
contiguous memory space to reduce memory fragmentation.
async_grad_allreduce: enable asynchronous gradient allreduce that is executed
along with the training step backprop.
"""
def __init__(
self,
optimizer,
fp32_grad_accum=False,
contiguous_grad_bucket=False,
async_grad_allreduce=False,
grad_div_ar_fusion=True,
grad_allreduce_chunk_size_mb=0,
):
if not HAVE_APEX:
raise ImportError(
"Apex was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
if not HAVE_MEGATRON_CORE:
raise ImportError(
"megatron-core was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
self.optimizer = optimizer
assert self.optimizer, 'no optimizer is provided.'
if contiguous_grad_bucket:
assert fp32_grad_accum, 'contiguous gradient buffer assumes using fp32 grad.'
if async_grad_allreduce:
assert fp32_grad_accum, (
'async allreduce applies to master gradients only, '
'which is supposed to be accumulated after grad op.'
)
assert contiguous_grad_bucket, (
'currently async_grad_allreduce is supported only ' 'with contiguous_grad_bucket.'
)
self._fp32_grad_accum = fp32_grad_accum
self._contiguous_grad_bucket = contiguous_grad_bucket
# used with tensor parallel only (no pipeline parallelism)
# be careful, weight update cannot start until all async grad AR works are done
self._async_grad_allreduce = async_grad_allreduce and get_data_parallel_world_size() > 1
self._grad_divisor = 1 / get_data_parallel_world_size()
if self._async_grad_allreduce:
# use @no_sync to disable backward grad sync during gradient accumulation
self._require_backward_grad_sync = True
self._grad_div_ar_fusion = grad_div_ar_fusion
self._grad_allreduce_chunk_size_mb = grad_allreduce_chunk_size_mb
else:
self._require_backward_grad_sync = False
self._grad_div_ar_fusion = False
self._grad_allreduce_chunk_size_mb = 0
# Dummy tensor needed for apex multi-apply tensor.
self._dummy_overflow_buf = None
# Create persistent buffers for main gradients in contiguous memory space
# - Chunked element-wise and allreduce ops without creating a temporary buffer for merged operation
# - Low memory fragmentation
self._main_grad_buffers = None
if self._contiguous_grad_bucket:
self._main_grad_buffers = {}
# get the size of buffers
num_elements = {}
for i, param_group in enumerate(self.optimizer.param_groups):
for param in param_group['params']:
if param.requires_grad:
num_elements[i] = num_elements.get(i, 0) + param.data.nelement()
# Allocate gradient memory buffers for each data type
if any(param.requires_grad for param in param_group['params']):
self._main_grad_buffers[i] = GradBucket(num_elements[i], self._grad_allreduce_chunk_size_mb)
# Three groups of parameters:
self.float16_groups = [] # original float16 parameters
self.fp32_from_float16_groups = [] # fp32 copy of float16 parameters
self.fp32_from_fp32_groups = [] # original fp32 parameters
# gradient function hooks
if self._fp32_grad_accum:
self.grad_accs = []
# For all the groups in the original optimizer:
for i, param_group in enumerate(self.optimizer.param_groups):
float16_params_this_group = []
fp32_params_this_group = []
fp32_from_float16_params_this_group = []
# For all the parameters in this group:
for j, param in enumerate(param_group['params']):
if param.requires_grad:
# float16 params:
if param.type() in ['torch.cuda.HalfTensor', 'torch.cuda.BFloat16Tensor']:
float16_params_this_group.append(param)
# Allocate the main parameter
main_param = param.detach().clone().float()
# Copy tensor model parallel attributes.
copy_tensor_model_parallel_attributes(main_param, param)
if hasattr(param, 'shared'):
main_param.shared = param.shared
# Assign the grad buffer offset to main parameters
if self._contiguous_grad_bucket:
num_elements[i] -= param.data.nelement()
main_param.grad, grad_chunk_info = self._main_grad_buffers[i].get(
param.data.shape, num_elements[i]
)
# Add a pointer to main_grad in model param for first-last stage embedding param reduction
param.main_grad = main_param.grad
# Replace the optimizer params with the new fp32 copy.
param_group['params'][j] = main_param
fp32_from_float16_params_this_group.append(main_param)
# Reset existing state dict key to the new main param.
if param in self.optimizer.state:
self.optimizer.state[main_param] = self.optimizer.state.pop(param)
# fp32 params.
elif param.type() == 'torch.cuda.FloatTensor':
fp32_params_this_group.append(param)
param_group['params'][j] = param
else:
raise TypeError(
'Wrapped parameters must be one of '
'torch.cuda.FloatTensor, '
'torch.cuda.HalfTensor, or '
'torch.cuda.BFloat16Tensor. '
'Received {}'.format(param.type())
)
# Add gradient accumulation hook for fp32 grad accumulation
if self._fp32_grad_accum and param.requires_grad:
# Expand so we get access to grad_fn
param_tmp = param.expand_as(param)
# Get the gradient accumulator function.
grad_acc = param_tmp.grad_fn.next_functions[0][0]
grad_acc.register_hook(self._make_param_hook(param, main_param, i, grad_chunk_info))
self.grad_accs.append(grad_acc)
self.float16_groups.append(float16_params_this_group)
self.fp32_from_float16_groups.append(fp32_from_float16_params_this_group)
self.fp32_from_fp32_groups.append(fp32_params_this_group)
# init exp_avg and exp_avg_sq before loading optimizer state, needed for dist checkpointing
self._init_opt_state()
# Leverage state_dict() and load_state_dict() to
# recast preexisting per-param state tensors
self.optimizer.load_state_dict(self.optimizer.state_dict())
def _make_param_hook(self, param, main_param, i, grad_chunk_info):
"""Create the grad accumulation and all-reduce hook for backprop."""
# Hook used for back-prop.
def param_hook(*unused):
# Accumulates gradients on main gradients
if param.grad is not None:
if main_param.grad is None:
main_param.grad = param.grad.float()
else:
main_param.grad.add_(param.grad.data)
# Deallocate grad memory.
param.grad = None
            # Asynchronous gradient allreduce across data-parallel ranks
if self._require_backward_grad_sync:
if self._grad_allreduce_chunk_size_mb > 0:
self._main_grad_buffers[i].update_chunk_info(grad_chunk_info)
while True:
allreduce_tensor = self._main_grad_buffers[i].get_allreduce_tensor()
if allreduce_tensor is None:
break
if self._grad_div_ar_fusion:
torch.distributed.all_reduce(
allreduce_tensor,
group=get_data_parallel_group(),
async_op=True,
op=torch.distributed._make_nccl_premul_sum(self._grad_divisor),
)
else:
allreduce_tensor.div_(get_data_parallel_world_size())
torch.distributed.all_reduce(
allreduce_tensor, group=get_data_parallel_group(), async_op=True,
)
else:
if self._grad_div_ar_fusion:
torch.distributed.all_reduce(
main_param.grad,
group=get_data_parallel_group(),
async_op=True,
op=torch.distributed._make_nccl_premul_sum(self._grad_divisor),
)
else:
main_param.grad.div_(get_data_parallel_world_size())
torch.distributed.all_reduce(
main_param.grad, group=get_data_parallel_group(), async_op=True,
)
return param_hook
def zero_grad(self, set_to_none=True):
"""We only need to zero the model related parameters, i.e.,
float16_groups & fp32_from_fp32_groups. We additionally zero
fp32_from_float16_groups as a memory optimization to reduce
fragmentation; in the case of set_to_none==True, the space
used by this field can be safely deallocated at this point."""
for group in self.float16_groups:
_zero_grad_group_helper(group, set_to_none)
if self._contiguous_grad_bucket:
for i in self._main_grad_buffers:
self._main_grad_buffers[i].zero()
else:
for group in self.fp32_from_float16_groups:
_zero_grad_group_helper(group, set_to_none)
for group in self.fp32_from_fp32_groups:
_zero_grad_group_helper(group, set_to_none)
def copy_model_grads_to_main_grads(self):
# This only needs to be done for the float16 group.
for model_group, main_group in zip(self.float16_groups, self.fp32_from_float16_groups):
for model_param, main_param in zip(model_group, main_group):
if model_param.grad is not None:
main_param.grad = model_param.grad.float()
# Safe to deallocate model's grad after copying.
# (If using contiguous buffers, main_grad's memory should
# persist and therefore should not be deallocated.)
model_param.grad = None
def _get_model_and_main_params_data_float16(self):
model_data = []
main_data = []
half_dtype = None
for model_group, main_group in zip(self.float16_groups, self.fp32_from_float16_groups):
for model_param, main_param in zip(model_group, main_group):
if half_dtype is None:
half_dtype = model_param.data.dtype
model_data.append(model_param.data)
main_data.append(main_param.data)
return model_data, main_data, half_dtype
def _set_overflow_buffer(self, half_dtype):
if half_dtype == torch.float16:
if self._dummy_overflow_buf is None:
self._dummy_overflow_buf = torch.cuda.IntTensor([0])
else:
self._dummy_overflow_buf.fill_(0)
def _copy_main_params_to_model_params(self):
# Only needed for the float16 params.
model_data, main_data, half_dtype = self._get_model_and_main_params_data_float16()
self._set_overflow_buffer(half_dtype)
_multi_tensor_copy_this_to_that(this=main_data, that=model_data, overflow_buf=self._dummy_overflow_buf)
def _copy_model_params_to_main_params(self):
# Only needed for the float16 params.
model_data, main_data, half_dtype = self._get_model_and_main_params_data_float16()
self._set_overflow_buffer(half_dtype)
_multi_tensor_copy_this_to_that(this=model_data, that=main_data, overflow_buf=self._dummy_overflow_buf)
def reload_model_params(self):
self._copy_model_params_to_main_params()
@torch.no_grad()
def step(self, **kwargs):
        # While async grad allreduce is enabled, backprop keeps moving forward without waiting
        # for the async grad allreduce work to finish. Hence, to guarantee correct gradient
        # reduction, we cannot start the weight update until all async allreduce work is done.
if self._async_grad_allreduce:
torch.cuda.synchronize()
# Step the optimizer.
self.optimizer.step(closure=None, **kwargs)
# Update params from main params.
with torch.no_grad():
self._copy_main_params_to_model_params()
# Successful update.
return True
def state_dict(self):
state_dict = {}
state_dict['optimizer'] = self.optimizer.state_dict()
state_dict['fp32_from_fp16_params'] = self.fp32_from_float16_groups
return state_dict
def load_state_dict(self, state_dict):
# Optimizer.
optimizer_key = 'optimizer'
if optimizer_key not in state_dict:
optimizer_key = 'optimizer_state_dict'
logging.info('***WARNING*** loading optimizer from ' 'an old checkpoint ...')
self.optimizer.load_state_dict(state_dict[optimizer_key])
# Copy data for the main params.
fp32_from_float16_params_key = 'fp32_from_fp16_params'
if fp32_from_float16_params_key not in state_dict:
fp32_from_float16_params_key = 'fp32_from_fp16'
for current_group, saved_group in zip(self.fp32_from_float16_groups, state_dict[fp32_from_float16_params_key]):
for current_param, saved_param in zip(current_group, saved_group):
current_param.data.copy_(saved_param.data)
def allreduce_main_grads(self):
for i in self._main_grad_buffers:
self._main_grad_buffers[i].allreduce_buffer()
@contextmanager
def no_sync(self):
""" A context manager to disable gradient synchronizations across
data-parallel ranks."""
old_require_backward_grad_sync = self._require_backward_grad_sync
self._require_backward_grad_sync = False
try:
yield
finally:
self._require_backward_grad_sync = old_require_backward_grad_sync
@property
def async_master_grads_allreudce(self):
return self._async_grad_allreduce
@property
def fp32_grad_accumulation(self):
return self._fp32_grad_accum
def get_parameters_with_grad(self):
params = []
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
if param.grad is not None: # (@adithyare) added to enable pp>1 training for adapters
params.append(param)
return params
# Promote state so it can be retrieved or set via
# "optimizer_instance.state"
def _get_state(self):
if hasattr(self, 'optimizer'):
return self.optimizer.state
else:
return []
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via
# "optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
if hasattr(self, 'optimizer'):
return self.optimizer.param_groups
else:
return []
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
# Promote defaults so it can be retrieved or set via
# "optimizer_instance.defaults
def _get_defaults(self):
if hasattr(self, 'optimizer'):
return self.optimizer.defaults
else:
return []
def _set_defaults(self, value):
self.optimizer.defaults = value
defaults = property(_get_defaults, _set_defaults)
def _init_opt_state(self):
"""
Initialize the optimizer state with zero tensors for 'exp_avg' and 'exp_avg_sq' of each parameter.
"""
for group in self.optimizer.param_groups:
for p in group['params']:
if len(self.optimizer.state[p]) == 0:
self.optimizer.state[p]['exp_avg'] = torch.zeros_like(p.data)
self.optimizer.state[p]['exp_avg_sq'] = torch.zeros_like(p.data)
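# --- Editor's note: an illustrative sketch appended for documentation; not part of the original
# NeMo source. MainParamsOptimizerWrapper requires Apex, megatron-core and an initialized
# model-/data-parallel state, so the intended usage pattern is shown as comments only
# (model, loss and batch names below are placeholders):
#
#   base_opt = torch.optim.Adam(half_precision_model.parameters(), lr=1e-4)
#   opt = MainParamsOptimizerWrapper(
#       base_opt,
#       fp32_grad_accum=True,          # accumulate/allreduce grads in fp32
#       contiguous_grad_bucket=True,   # keep main grads in one persistent buffer
#       async_grad_allreduce=True,     # overlap allreduce with backprop
#   )
#   for micro_batch in micro_batches[:-1]:
#       with opt.no_sync():            # skip grad allreduce while accumulating
#           loss_fn(half_precision_model, micro_batch).backward()
#   loss_fn(half_precision_model, micro_batches[-1]).backward()   # last backward triggers allreduce
#   opt.step()
#   opt.zero_grad()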
|
NeMo-main
|
nemo/core/optim/optimizer_with_main_params.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RAdam
Original source taken from https://github.com/LiyuanLucasLiu/RAdam
Copyright 2019 Liyuan Liu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import torch
from torch.optim.optimizer import Optimizer
class RAdam(Optimizer):
"""RAdam optimizer"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
"""
Init
:param params: parameters to optimize
:param lr: learning rate
:param betas: beta
:param eps: numerical precision
:param weight_decay: weight decay weight
"""
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for _ in range(10)]
super().__init__(params, defaults)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1.0 - beta2))
exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1))
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = (
group['lr']
* math.sqrt(
(1 - beta2_t)
* (N_sma - 4)
/ (N_sma_max - 4)
* (N_sma - 2)
/ N_sma
* N_sma_max
/ (N_sma_max - 2)
)
/ (1 - beta1 ** state['step'])
)
else:
step_size = group['lr'] / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
# more conservative since it's an approximated value
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
else:
                    p_data_fp32.add_(exp_avg, alpha=-step_size)
p.data.copy_(p_data_fp32)
return loss
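# --- Editor's note: a minimal usage sketch appended for illustration; not part of the original
# NeMo source. Hyperparameters below are illustrative only.
if __name__ == "__main__":
    _model = torch.nn.Linear(8, 2)
    _opt = RAdam(_model.parameters(), lr=1e-3)
    _loss = _model(torch.randn(4, 8)).pow(2).mean()
    _loss.backward()
    _opt.step()
    _opt.zero_grad()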
|
NeMo-main
|
nemo/core/optim/radam.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.core.utils import numba_utils, process_launcher
|
NeMo-main
|
nemo/core/utils/__init__.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging as pylogger
import operator
import os
from typing import Tuple, Union
from nemo.utils import model_utils
# Prevent Numba CUDA logs from showing at info level
cuda_logger = pylogger.getLogger('numba.cuda.cudadrv.driver')
cuda_logger.setLevel(pylogger.ERROR) # only show error
__NUMBA_DEFAULT_MINIMUM_VERSION__ = "0.53.0"
__NUMBA_MINIMUM_VERSION__ = os.environ.get("NEMO_NUMBA_MINVER", __NUMBA_DEFAULT_MINIMUM_VERSION__)
__NUMBA_MINIMUM_VERSION_FP16_SUPPORTED__ = "0.57.0"
NUMBA_INSTALLATION_MESSAGE = (
"Could not import `numba`.\n"
"Please install numba in one of the following ways."
"1) If using conda, simply install it with conda using `conda install -c numba numba`\n"
"2) If using pip (not recommended), `pip install --upgrade numba`\n"
"followed by `export NUMBAPRO_LIBDEVICE='/usr/local/cuda/nvvm/libdevice/'` and \n"
"`export NUMBAPRO_NVVM='/usr/local/cuda/nvvm/lib64/libnvvm.so'`.\n"
"It is advised to always install numba using conda only, "
"as pip installations might interfere with other libraries such as llvmlite.\n"
"If pip install does not work, you can also try adding `--ignore-installed` to the pip command,\n"
"but this is not advised."
)
STRICT_NUMBA_COMPAT_CHECK = True
# Get environment key if available
if 'STRICT_NUMBA_COMPAT_CHECK' in os.environ:
check_str = os.environ.get('STRICT_NUMBA_COMPAT_CHECK')
check_bool = str(check_str).lower() in ("yes", "true", "t", "1")
STRICT_NUMBA_COMPAT_CHECK = check_bool
def is_numba_compat_strict() -> bool:
"""
Returns strictness level of numba cuda compatibility checks.
If value is true, numba cuda compatibility matrix must be satisfied.
If value is false, only cuda availability is checked, not compatibility.
Numba Cuda may still compile and run without issues in such a case, or it may fail.
"""
return STRICT_NUMBA_COMPAT_CHECK
def set_numba_compat_strictness(strict: bool):
"""
Sets the strictness level of numba cuda compatibility checks.
If value is true, numba cuda compatibility matrix must be satisfied.
If value is false, only cuda availability is checked, not compatibility.
Numba Cuda may still compile and run without issues in such a case, or it may fail.
Args:
strict: bool value, whether to enforce strict compatibility checks or relax them.
"""
global STRICT_NUMBA_COMPAT_CHECK
STRICT_NUMBA_COMPAT_CHECK = strict
@contextlib.contextmanager
def with_numba_compat_strictness(strict: bool):
initial_strictness = is_numba_compat_strict()
set_numba_compat_strictness(strict=strict)
yield
set_numba_compat_strictness(strict=initial_strictness)
def numba_cpu_is_supported(min_version: str) -> bool:
"""
Tests if an appropriate version of numba is installed.
Args:
min_version: The minimum version of numba that is required.
Returns:
bool, whether numba CPU supported with this current installation or not.
"""
module_available, msg = model_utils.check_lib_version('numba', checked_version=min_version, operator=operator.ge)
# If numba is not installed
if module_available is None:
return False
else:
return True
def numba_cuda_is_supported(min_version: str) -> bool:
"""
Tests if an appropriate version of numba is installed, and if it is,
if cuda is supported properly within it.
Args:
min_version: The minimum version of numba that is required.
Returns:
bool, whether cuda is supported with this current installation or not.
"""
module_available = numba_cpu_is_supported(min_version)
# If numba is not installed
if module_available is None:
return False
# If numba version is installed and available
if module_available is True:
from numba import cuda
# this method first arrived in 0.53, and that's the minimum version required
if hasattr(cuda, 'is_supported_version'):
try:
cuda_available = cuda.is_available()
if cuda_available:
cuda_compatible = cuda.is_supported_version()
else:
cuda_compatible = False
if is_numba_compat_strict():
return cuda_available and cuda_compatible
else:
return cuda_available
except OSError:
# dlopen(libcudart.dylib) might fail if CUDA was never installed in the first place.
return False
else:
# assume cuda is supported, but it may fail due to CUDA incompatibility
return False
else:
return False
def is_numba_cuda_fp16_supported(return_reason: bool = False) -> Union[bool, Tuple[bool, str]]:
"""
Utility method that returns a bool, stating if FP16 is supported for numba cuda kernels or not.
Returns:
bool, whether Numba CUDA will support fp16 or not.
"""
reason = ""
use_nvidia_binding = os.environ.get('NUMBA_CUDA_USE_NVIDIA_BINDING', None)
if use_nvidia_binding is not None:
use_nvidia_binding = use_nvidia_binding.lower() == "1"
reason += "Env variable `NUMBA_CUDA_USE_NVIDIA_BINDING` is available and set to `1`. "
else:
use_nvidia_binding = False
reason += "Env variable `NUMBA_CUDA_USE_NVIDIA_BINDING` is not available or has not set to `1`."
numba_fp16_version_correct = model_utils.check_lib_version(
'numba', __NUMBA_MINIMUM_VERSION_FP16_SUPPORTED__, operator=operator.ge
)[0]
if numba_fp16_version_correct:
reason += f"Numba CUDA FP16 is supported in installed numba version."
else:
reason += f"Numba CUDA FP16 is not supported in installed numba version."
result = use_nvidia_binding and numba_fp16_version_correct
if return_reason:
return result, reason
else:
return result
def skip_numba_cuda_test_if_unsupported(min_version: str):
"""
Helper method to skip pytest test case if numba cuda is not supported.
Args:
min_version: The minimum version of numba that is required.
"""
numba_cuda_support = numba_cuda_is_supported(min_version)
if not numba_cuda_support:
import pytest
pytest.skip(f"Numba cuda test is being skipped. Minimum version required : {min_version}")
|
NeMo-main
|
nemo/core/utils/numba_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
K2_INSTALLATION_MESSAGE = (
"Could not import `k2`.\n"
"Please install k2 in one of the following ways:\n"
"1) (recommended) Run `bash scripts/speech_recognition/k2/setup.sh`\n"
"2) Use any approach from https://k2-fsa.github.io/k2/installation/index.html "
"if your your cuda and pytorch versions are supported.\n"
"It is advised to always install k2 using setup.sh only, "
"as different versions of k2 may not interact with the NeMo code as expected."
)
|
NeMo-main
|
nemo/core/utils/k2_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Guard for importing optional NeMo dependency k2.
Contains checks for k2 availability and version.
Use `from nemo.core.utils.k2_guard import k2` to import k2 instead of direct import.
If there is an error, the module will raise an exception with a helpful message.
"""
import textwrap
from packaging.version import Version
from pytorch_lightning.utilities.imports import package_available
from nemo.core.utils.k2_utils import K2_INSTALLATION_MESSAGE
__K2_MINIMUM_MAJOR_VERSION = 1
__K2_MINIMUM_MINOR_VERSION = 14
__K2_MINIMUM_VERSION = Version(f"{__K2_MINIMUM_MAJOR_VERSION}.{__K2_MINIMUM_MINOR_VERSION}")
if not package_available("k2"):
raise ModuleNotFoundError("Module k2 is not available.\n" + K2_INSTALLATION_MESSAGE)
import k2 # noqa: E402
try:
__k2_version = Version(k2.__dev_version__)
except AttributeError:
raise ImportError("Module k2 is corrupted.\n" + K2_INSTALLATION_MESSAGE)
if __k2_version < __K2_MINIMUM_VERSION:
raise ImportError(
textwrap.dedent(
f"""
Minimum required k2 version: {__K2_MINIMUM_VERSION};
Installed k2 version: {__k2_version}
"""
)
)
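# --- Editor's note: usage sketch appended for illustration; not part of the original NeMo source.
# Downstream code imports k2 through this guard so that a missing or outdated k2 fails with the
# installation message above rather than a bare ImportError:
#
#   from nemo.core.utils.k2_guard import k2   # raises with K2_INSTALLATION_MESSAGE if unusable
#   # ... then use the k2 API as usual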
|
NeMo-main
|
nemo/core/utils/k2_guard.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from nemo.core.neural_types import AxisKind, NeuralType
def get_io_names(types, disabled_names):
names = list(types.keys())
for name in disabled_names:
if name in names:
names.remove(name)
return names
def extract_dynamic_axes(name: str, ntype: NeuralType):
"""
This method will extract BATCH and TIME dimension ids from each provided input/output name argument.
For example, if module/model accepts argument named "input_signal" with type corresponding to [Batch, Time, Dim]
shape, then the returned result should contain "input_signal" -> [0, 1] because Batch and Time are dynamic axes
as they can change from call to call during inference.
Args:
name: Name of input or output parameter
ntype: Corresponding Neural Type
    Returns:
        A dict mapping `name` to the list of its dynamic axis indices (Batch, Time, Width and Height axes).
    """
def unpack_nested_neural_type(neural_type):
if type(neural_type) in (list, tuple):
return unpack_nested_neural_type(neural_type[0])
return neural_type
dynamic_axes = defaultdict(list)
if type(ntype) in (list, tuple):
ntype = unpack_nested_neural_type(ntype)
if ntype.axes:
for ind, axis in enumerate(ntype.axes):
if axis.kind in [AxisKind.Batch, AxisKind.Time, AxisKind.Width, AxisKind.Height]:
dynamic_axes[name].append(ind)
return dynamic_axes
def get_dynamic_axes(types, names):
dynamic_axes = defaultdict(list)
if names is not None:
for name in names:
if name in types:
dynamic_axes.update(extract_dynamic_axes(name, types[name]))
return dynamic_axes
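# --- Editor's note: a minimal usage sketch appended for illustration; not part of the original
# NeMo source. It assumes the string-axes form of the NeuralType constructor ('B', 'T', 'D').
if __name__ == "__main__":
    _signal_type = NeuralType(axes=('B', 'T', 'D'))
    # Batch and Time are dynamic while Dim is not, so only indices 0 and 1 are returned.
    print(dict(extract_dynamic_axes('input_signal', _signal_type)))  # {'input_signal': [0, 1]}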
|
NeMo-main
|
nemo/core/utils/neural_type_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.core.utils.process_launcher.launcher import ProcessLauncher, ProcessLauncherConfig
|
NeMo-main
|
nemo/core/utils/process_launcher/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import hashlib
import os
import subprocess
import sys
import threading
import time
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, Optional, Sequence
import torch
from hydra.core.config_store import ConfigStore
from hydra.core.hydra_config import HydraConfig
from hydra.core.plugins import Plugins
from hydra.core.singleton import Singleton
from hydra.core.utils import JobReturn, JobStatus, configure_log, filter_overrides, setup_globals
from hydra.plugins.launcher import Launcher
from hydra.types import HydraContext, TaskFunction
from omegaconf import DictConfig, OmegaConf, open_dict
from nemo.utils import logging
# monkey-patch hydra func
def is_in_toplevel_plugins_module(*args, **kwargs) -> bool:
return True
# Monkey-patch Hydra
Plugins.instance().is_in_toplevel_plugins_module = is_in_toplevel_plugins_module
@dataclass
class ProcessLauncherConfig:
_target_: str = "nemo.core.utils.process_launcher.launcher.ProcessLauncher"
num_gpus: int = -1
jobs_per_gpu: int = 1
def execute_job(
idx: int,
overrides: Sequence[str],
hydra_context: HydraContext,
config: DictConfig,
singleton_state: Dict[Any, Any],
gpu_idx: int,
):
"""
Creates a process that launches a "single" job that is identical in config + updated with sweep hyperparams.
Since a different process is being used, CUDA can work in non-ddp mode without issue.
Attempting ddp when using this script will not work as ddp cannot be used in shared contexts.
Args:
idx: Global index of the job.
overrides: List of str overrides that correspond to this job
hydra_context: Hydra Context used to load the sweep params into the global config
config: Global config that will be updated with sweep hyper parameters.
singleton_state: Hydra state.
gpu_idx: The GPU ID on which this process will be run.
Returns:
- The Process object that corresponds to this sweep
- The JobReturn object holding some metadata about this run
"""
# Required by Hydra (lookup other Hydra Launchers for details)
setup_globals()
Singleton.set_state(singleton_state)
# Update base config with overrides to create sweep config
sweep_config = hydra_context.config_loader.load_sweep_config(config, list(overrides))
with open_dict(sweep_config):
sweep_config.hydra.job.id = "{}_{}".format(sweep_config.hydra.job.name, idx)
sweep_config.hydra.job.num = idx
HydraConfig.instance().set_config(sweep_config)
# Setup a directory where the config will temporarily be stored.
script_path = os.path.join(os.getcwd(), sys.argv[0])
script_path = os.path.abspath(script_path)
hash_salt = "|".join([script_path, str(OmegaConf.to_yaml(config))]).encode('utf-8')
hash_val = hashlib.sha256(hash_salt).hexdigest()
config_dir = os.path.join(os.getcwd(), "hydra_cfg", str(hash_val))
if not os.path.exists(config_dir):
os.makedirs(config_dir, exist_ok=True)
task_cfg = copy.deepcopy(sweep_config)
# Remove hydra from sweep config
# This is done to prevent recursive call to multirun launcher in Hydra.
with open_dict(task_cfg):
task_cfg.pop('hydra', '')
# Save the current jobs config to directory
temp_config_name = f"config_{idx}.yaml"
temp_config = os.path.join(config_dir, temp_config_name)
OmegaConf.save(task_cfg, temp_config)
    # Compute the overrides as a list of "key=value" strings
overrides = OmegaConf.to_container(config.hydra.overrides.task)
# Check and replace trainer.devices in config with gpu_idx
found_devices = False
gpu_override = f'trainer.devices=[{gpu_idx}]'
for oidx, val in enumerate(overrides):
if 'trainer.devices' in val:
overrides[oidx] = gpu_override
found_devices = True
if not found_devices:
overrides.append(gpu_override)
# Build launch command
# Note: We depend on PTL doing the right thing since this command has global visibility of all CUDA_VISIBLE_DEVICES
cmd = [
'python',
script_path,
"--config-path",
config_dir,
"--config-name",
temp_config_name,
*overrides,
]
# Launch the subprocess; pipe the stderr
    # NOTE: If this hangs for some reason after prolonged training, it means that the stderr pipe buffer
    # has become full at the OS level and we need to explicitly empty it (either with a parallel thread or by
    # manually calling proc.communicate()). It should not happen in the general case, as stderr is filled only
    # when retcode != 0. If it does happen though, implement the code here:
# https://stackoverflow.com/questions/39607172/python-subprocess-popen-poll-seems-to-hang-but-communicate-works
proc = subprocess.Popen(cmd, stderr=subprocess.PIPE)
# Setup data thread for stderr
std_error_buffer = []
# Trivial thread just reads lines from stdout into the list
drainerthread = threading.Thread(target=std_error_buffer.extend, args=(proc.stderr,))
drainerthread.daemon = True
drainerthread.start()
# Construct JobReturn object for Hydra
res = JobReturn()
res.cfg = task_cfg
res.overrides = overrides
res.hydra_cfg = config
res.working_dir = os.getcwd()
res.return_value = None
return proc, res, (std_error_buffer, drainerthread)
def launch(launcher, job_overrides: Sequence[Sequence[str]], initial_job_idx: int,) -> Sequence[JobReturn]:
"""
Args:
        launcher: Reference to the Launcher subclass
job_overrides: A List of List<String>, where each inner list is the arguments for one job run
initial_job_idx: Initial job idx in batch
Returns:
A list of JobReturn objects.
"""
# Needed for Hydra, lookup JoblibLauncher in Hydra
setup_globals()
assert launcher.config is not None
assert launcher.task_function is not None
assert launcher.hydra_context is not None
configure_log(launcher.config.hydra.hydra_logging, launcher.config.hydra.verbose)
sweep_dir = Path(str(launcher.config.hydra.sweep.dir))
sweep_dir.mkdir(parents=True, exist_ok=True)
    # Extract the runner's config (it's actually a DictConfig, but the type comment is used for autocomplete)
runner_cfg = launcher.runner # type: ProcessLauncherConfig
logging.info(
"ProcessLauncher({}) is launching {} jobs".format(
",".join([f"{k}={v}" for k, v in runner_cfg.items()]), len(job_overrides),
)
)
logging.info("Launching jobs, sweep output dir : {}".format(sweep_dir))
for idx, overrides in enumerate(job_overrides):
logging.info("\t#{} : {}".format(idx, " ".join(filter_overrides(overrides))))
# Needed by Hydra
singleton_state = Singleton.get_state()
# Process the runner's config to build up the multiplex config
num_gpus = runner_cfg.get('num_gpus', -1)
jobs_per_gpu = runner_cfg.get('jobs_per_gpu', 1)
# Only GPUs are supported for now.
if num_gpus <= 0:
if torch.cuda.is_available():
num_gpus = torch.cuda.device_count()
else:
raise ValueError(f"{launcher.__class__.__name__} only supports GPU operations.")
# Setup arguments for multiplex runner
overrides = list(job_overrides)
num_overrides = len(overrides)
job_idx = 0
batch_size = num_gpus * jobs_per_gpu
gpu_idx = 0
ret = [] # List of returned JobResult
subprocess_list = [] # Buffer of subprocess
results = [] # Buffer of JobResult
# STD ERROR cache
std_error_buffers = [] # type: List[List[str]]
    std_error_threads = []  # type: List[threading.Thread]
# Run over all job combinations
while job_idx < num_overrides:
# Fill up subprocess buffer while its size is smaller than multiplex batch size
while len(subprocess_list) < batch_size:
# If we run out of jobs, stop trying to submit more jobs
if job_idx >= num_overrides:
break
# Submit a job as a new process
process, res, error_tup = execute_job(
initial_job_idx + job_idx,
overrides[job_idx],
launcher.hydra_context,
launcher.config,
singleton_state,
gpu_idx % num_gpus, # This will evenly distribute GPU load
)
# Store the subprocesses and JobResults
subprocess_list.append(process)
results.append(res)
            # Manage stderr thread data
std_error_buffers.append(error_tup[0])
std_error_threads.append(error_tup[1])
job_idx += 1
gpu_idx += 1
# Poll for samples in batch to finish.
if len(subprocess_list) > 0:
finished_processes = [0] * len(subprocess_list)
            # Check if all processes are completed or not.
            # This is busy waiting, which is actually necessary here:
            # it turns out that when you call proc.communicate(), you block all other threads immediately;
            # i.e., they may fill up their buffers entirely and hang while they wait for the first thread
            # that called communicate() to finish its work or crash.
            # Effectively it entirely stops multiprocessing jobs or multiplexed runs.
            # Must poll and busy wait to keep threads alive, along with draining the pipes via thread buffers.
while sum(finished_processes) < len(subprocess_list):
# Check all processes to make sure they have a retcode (doesn't matter yet whether it is 0 or not)
for proc_idx, proc in enumerate(subprocess_list):
# poll() is cheaper op than communicate()
retcode = proc.poll()
if retcode is not None:
# Log that the process with some ID has finished
if finished_processes[proc_idx] == 0:
logging.info(f"Processed job : {len(ret) + proc_idx} :: Ret code = {retcode}")
finished_processes[proc_idx] = 1
# Join this process's stderr drainer thread and merge its buffer
proc.wait()
std_error_threads[proc_idx].join()
error_data = std_error_buffers[proc_idx]
error_data = [
str(data, encoding='utf-8').encode('utf-8').decode('utf-8').encode('utf-8')
for data in error_data
]
std_error_buffers[proc_idx] = error_data
time.sleep(1.0)
# Process all the subprocess results
for proc_idx, (proc, res) in enumerate(zip(subprocess_list, results)):
# Wait until completion of process
output, error = proc.communicate()
# 0 is for successful run
if proc.returncode == 0:
res.status = JobStatus.COMPLETED
else:
# A non-zero return code indicates an error; log it.
# Note: although an exception is recorded for every failed job, only the exception of the
# first failed job is ultimately raised.
# If multiple jobs fail, every job is still executed to completion before the error for the
# first failure is surfaced.
# This is done so that even if some jobs fail (say due to OOM),
# the other jobs can still run.
err_buffer = std_error_buffers[proc_idx]
if isinstance(err_buffer, (list, tuple)):
err_string = ""
for err_line in err_buffer:
err_string = (
err_string + f"{str(err_line, encoding='utf-8').encode('utf-8').decode('utf-8')}"
)
else:
err_string = err_buffer
error_msg = (
f"\nHyperparameter Arguments : {proc.args}\n"
f"Process Return code : {proc.returncode}\n"
f"Error Trace :\n"
f"{err_string}"
)
res.return_value = Exception(error_msg)
res.status = JobStatus.FAILED
logging.info(f"Finished executing job : {len(ret)}. Return Code = {proc.returncode}")
ret.append(res)
# Reset for next batch
subprocess_list.clear()
results.clear()
return ret
class ProcessLauncher(Launcher):
def __init__(self, **kwargs: Any) -> None:
"""Process Launcher
Based on the JoblibLauncher, but uses processes to scatter jobs in a multiplexed manner across
some number of GPUs on a single machine.
"""
self.config: Optional[DictConfig] = None
self.task_function: Optional[TaskFunction] = None
self.hydra_context: Optional[HydraContext] = None
self.runner = kwargs # type: ProcessLauncherConfig
def setup(self, *, hydra_context: HydraContext, task_function: TaskFunction, config: DictConfig,) -> None:
self.config = config
self.task_function = task_function
self.hydra_context = hydra_context
def launch(self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int) -> Sequence[JobReturn]:
return launch(launcher=self, job_overrides=job_overrides, initial_job_idx=initial_job_idx)
ConfigStore.instance().store(
group="hydra/launcher", name="nemo_launcher", node=ProcessLauncherConfig, provider="nemo_process_launcher",
)
Plugins.instance().register(ProcessLauncher)
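# Minimal usage sketch: with the launcher registered above under "nemo_launcher", a Hydra
# multirun can select it from the command line. The script name and swept override below are
# hypothetical examples; `num_gpus` and `jobs_per_gpu` are the ProcessLauncherConfig fields
# consumed in `launch()` above.
#
#   python train.py -m hydra/launcher=nemo_launcher \
#       hydra.launcher.num_gpus=2 hydra.launcher.jobs_per_gpu=1 \
#       model.optim.lr=1e-3,1e-4
#
# Each combination of swept overrides is then executed as a separate subprocess, multiplexed
# across the available GPUs.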
|
NeMo-main
|
nemo/core/utils/process_launcher/launcher.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib
import os
from dataclasses import dataclass, is_dataclass
from enum import Enum
from functools import lru_cache
from pathlib import Path
from typing import List, Optional, Tuple, Union
import wrapt
from nemo.utils import AppState, logging
from nemo.utils.data_utils import resolve_cache_dir # imported for compatibility: model_utils.resolve_cache_dir()
from nemo.utils.data_utils import is_datastore_path
# TODO @blisc: Perhaps refactor instead of import guarding
_HAS_HYDRA = True
try:
from omegaconf import DictConfig, ListConfig, OmegaConf
from omegaconf import errors as omegaconf_errors
from packaging import version
except ModuleNotFoundError:
_HAS_HYDRA = False
_VAL_TEST_FASTPATH_KEY = 'ds_item'
class ArtifactPathType(Enum):
"""
ArtifactPathType refers to the type of the path that the artifact is located at.
LOCAL_PATH: A user local filepath that exists on the file system.
TAR_PATH: A (generally flattened) filepath that exists inside of an archive (that may have its own full path).
"""
LOCAL_PATH = 0
TAR_PATH = 1
@dataclass(init=False)
class ArtifactItem:
path: str
path_type: ArtifactPathType
hashed_path: Optional[str] = None
def resolve_dataset_name_from_cfg(cfg: 'DictConfig') -> Optional[str]:
"""
Parses items of the provided sub-config to find the first potential key that
resolves to an existing file or directory.
# Fast-path Resolution
In order to handle cases where we need to resolve items that are not paths, a fastpath
key can be provided as defined in the global `_VAL_TEST_FASTPATH_KEY`.
This key can be used in two ways :
## _VAL_TEST_FASTPATH_KEY points to another key in the config
If this _VAL_TEST_FASTPATH_KEY points to another key in this config itself,
then we assume we want to loop through the values of that key.
This allows for any key in the config to become a fastpath key.
Example:
validation_ds:
splits: "val"
...
<_VAL_TEST_FASTPATH_KEY>: "splits" <-- this points to the key name "splits"
Then we can write the following when overriding in hydra:
```python
python train_file.py ... \
model.validation_ds.splits=[val1, val2, dev1, dev2] ...
```
## _VAL_TEST_FASTPATH_KEY itself acts as the resolved key
If this _VAL_TEST_FASTPATH_KEY does not point to another key in the config, then
it is assumed that the items of this key itself are used for resolution.
Example:
validation_ds:
...
<_VAL_TEST_FASTPATH_KEY>: "val" <-- the value itself is used for resolution
Then we can write the following when overriding in hydra:
```python
python train_file.py ... \
model.validation_ds.<_VAL_TEST_FASTPATH_KEY>=[val1, val2, dev1, dev2] ...
```
# IMPORTANT NOTE:
It <can> potentially mismatch if there exists more than one valid path, and the
first path does *not* resolve to the path of the data file (but does resolve to
some other valid path).
To avoid this side effect, place the data path as the first item in the config file.
Args:
cfg: DictConfig (Sub-config) that should be parsed.
Returns:
A str representing the `key` of the config which hosts the filepath(s),
or None if the path could not be resolved.
"""
if _VAL_TEST_FASTPATH_KEY in cfg and cfg[_VAL_TEST_FASTPATH_KEY] is not None:
fastpath_key = cfg[_VAL_TEST_FASTPATH_KEY]
if isinstance(fastpath_key, str) and fastpath_key in cfg:
return cfg[fastpath_key]
else:
return _VAL_TEST_FASTPATH_KEY
for key, value in cfg.items():
if type(value) in [list, tuple, ListConfig]:
# Count the number of valid paths in the list
values_are_paths = 0
for val_i in value:
val_i = str(val_i)
if os.path.exists(val_i) or os.path.isdir(val_i) or is_datastore_path(val_i):
values_are_paths += 1
else:
# reset counter and break inner loop
break
if values_are_paths == len(value):
return key
else:
if os.path.exists(str(value)) or os.path.isdir(str(value)) or is_datastore_path(str(value)):
return key
return None
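# Minimal illustrative sketch of the fast-path resolution described above. It uses the
# `ds_item` fast-path key directly so that the example does not depend on files existing on
# disk; the filenames are hypothetical. Assumes omegaconf is installed (see _HAS_HYDRA above).
def _example_resolve_dataset_name_from_cfg():
    cfg = OmegaConf.create({'ds_item': ['val_set_1.json', 'val_set_2.json'], 'batch_size': 8})
    # 'ds_item' does not point to another key in the config, so the fast-path key itself
    # is returned and its items are later treated as the dataset values.
    return resolve_dataset_name_from_cfg(cfg)  # -> 'ds_item'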
def parse_dataset_as_name(name: str) -> str:
"""
Constructs a valid prefix-name from a provided file path.
Args:
name: str path to some valid data/manifest file or a python object that
will be used as a name for the data loader (via str() cast).
Returns:
str prefix used to identify uniquely this data/manifest file.
"""
if os.path.exists(str(name)) or os.path.isdir(str(name)) or is_datastore_path(str(name)):
name = Path(name).stem
else:
name = str(name)
# cleanup name
name = name.replace('-', '_')
if 'manifest' in name:
name = name.replace('manifest', '')
if 'dataset' in name:
name = name.replace('dataset', '')
# Test if the manifest/dataset name was simply `manifest.json` or `dataset.json`: such names are invalid.
if name == '':
raise ValueError(
"Provided dataset / manifest filename was `manifest.json` or `dataset.json`.\n"
"Such a name is invalid, since multiple datasets/manifests can share the same name,\n"
"thereby overriding their results during logging. Please pick a more discriptive filename \n"
"for the provided dataset / manifest file."
)
if '_' != name[-1]:
name = name + '_'
return name
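# Minimal illustrative sketch of name normalization. The input below is a hypothetical string
# that is assumed not to exist on disk, so only the string-cleanup branch is exercised.
def _example_parse_dataset_as_name():
    return parse_dataset_as_name('dev-other')  # -> 'dev_other_'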
def unique_names_check(name_list: Optional[List[str]]):
"""
Performs a uniqueness check on the name list resolved, so that it can warn users
about non-unique keys.
Args:
name_list: List of strings resolved for data loaders.
"""
if name_list is None:
return
# Name uniqueness checks
names = set()
for name in name_list:
if name in names:
logging.warning(
"Name resolution has found more than one data loader having the same name !\n"
"In such cases, logs will nor be properly generated. "
"Please rename the item to have unique names.\n"
f"Resolved name : {name}"
)
else:
names.add(name) # we need just hash key check, value is just a placeholder
def resolve_validation_dataloaders(model: 'ModelPT'):
"""
Helper method that operates on the ModelPT class to automatically support
multiple dataloaders for the validation set.
It does so by first resolving the path to one/more data files via `resolve_dataset_name_from_cfg()`.
If this resolution fails, it assumes the data loader is prepared to manually support / not support
multiple data loaders and simply calls the appropriate setup method.
If resolution succeeds:
Checks if provided path is to a single file or a list of files.
If a single file is provided, simply tags that file as such and loads it via the setup method.
If multiple files are provided:
Inject a new manifest path at index "i" into the resolved key.
Calls the appropriate setup method to set the data loader.
Collects the initialized data loader in a list and preserves it.
Once all data loaders are processed, assigns the list of loaded loaders to the ModelPT.
Finally assigns a list of unique names resolved from the file paths to the ModelPT.
Args:
model: ModelPT subclass, which requires >=1 Validation Dataloaders to be setup.
"""
if not _HAS_HYDRA:
logging.error("This function requires Hydra/Omegaconf and it was not installed.")
exit(1)
cfg = copy.deepcopy(model._cfg)
dataloaders = []
# process val_dl_idx
if 'val_dl_idx' in cfg.validation_ds:
cfg = OmegaConf.to_container(cfg)
val_dl_idx = cfg['validation_ds'].pop('val_dl_idx')
cfg = OmegaConf.create(cfg)
else:
val_dl_idx = 0
# Set val_dl_idx
model._val_dl_idx = val_dl_idx
ds_key = resolve_dataset_name_from_cfg(cfg.validation_ds)
if ds_key is None or val_dl_idx < 0:
logging.debug(
"Could not resolve file path from provided config - {}. "
"Disabling support for multi-dataloaders.".format(cfg.validation_ds)
)
model.setup_validation_data(cfg.validation_ds)
return
ds_values = cfg.validation_ds[ds_key]
if isinstance(ds_values, (list, tuple, ListConfig)):
for ds_value in ds_values:
if isinstance(ds_value, (dict, DictConfig)):
# this is a nested dataset
cfg.validation_ds = ds_value
else:
cfg.validation_ds[ds_key] = ds_value
model.setup_validation_data(cfg.validation_ds)
dataloaders.append(model._validation_dl)
model._validation_dl = dataloaders
if len(ds_values) > 0 and isinstance(ds_values[0], (dict, DictConfig)):
# using the name of each of the nested dataset
model._validation_names = [ds.name for ds in ds_values]
else:
model._validation_names = [parse_dataset_as_name(ds) for ds in ds_values]
unique_names_check(name_list=model._validation_names)
return
else:
model.setup_validation_data(cfg.validation_ds)
model._validation_names = [parse_dataset_as_name(ds_values)]
unique_names_check(name_list=model._validation_names)
def resolve_test_dataloaders(model: 'ModelPT'):
"""
Helper method that operates on the ModelPT class to automatically support
multiple dataloaders for the test set.
It does so by first resolving the path to one/more data files via `resolve_dataset_name_from_cfg()`.
If this resolution fails, it assumes the data loader is prepared to manually support / not support
multiple data loaders and simply calls the appropriate setup method.
If resolution succeeds:
Checks if provided path is to a single file or a list of files.
If a single file is provided, simply tags that file as such and loads it via the setup method.
If multiple files are provided:
Inject a new manifest path at index "i" into the resolved key.
Calls the appropriate setup method to set the data loader.
Collects the initialized data loader in a list and preserves it.
Once all data loaders are processed, assigns the list of loaded loaders to the ModelPT.
Finally assigns a list of unique names resolved from the file paths to the ModelPT.
Args:
model: ModelPT subclass, which requires >=1 Test Dataloaders to be setup.
"""
if not _HAS_HYDRA:
logging.error("This function requires Hydra/Omegaconf and it was not installed.")
exit(1)
cfg = copy.deepcopy(model._cfg)
dataloaders = []
# process test_dl_idx
if 'test_dl_idx' in cfg.test_ds:
cfg = OmegaConf.to_container(cfg)
test_dl_idx = cfg['test_ds'].pop('test_dl_idx')
cfg = OmegaConf.create(cfg)
else:
test_dl_idx = 0
# Set test_dl_idx
model._test_dl_idx = test_dl_idx
ds_key = resolve_dataset_name_from_cfg(cfg.test_ds)
if ds_key is None:
logging.debug(
"Could not resolve file path from provided config - {}. "
"Disabling support for multi-dataloaders.".format(cfg.test_ds)
)
model.setup_test_data(cfg.test_ds)
return
ds_values = cfg.test_ds[ds_key]
if isinstance(ds_values, (list, tuple, ListConfig)):
for ds_value in ds_values:
if isinstance(ds_value, (dict, DictConfig)):
# this is a nested dataset
cfg.test_ds = ds_value
else:
cfg.test_ds[ds_key] = ds_value
model.setup_test_data(cfg.test_ds)
dataloaders.append(model._test_dl)
model._test_dl = dataloaders
if len(ds_values) > 0 and isinstance(ds_values[0], (dict, DictConfig)):
# using the name of each of the nested dataset
model._test_names = [ds.name for ds in ds_values]
else:
model._test_names = [parse_dataset_as_name(ds) for ds in ds_values]
unique_names_check(name_list=model._test_names)
return
else:
model.setup_test_data(cfg.test_ds)
model._test_names = [parse_dataset_as_name(ds_values)]
unique_names_check(name_list=model._test_names)
@wrapt.decorator
def wrap_training_step(wrapped, instance: 'pl.LightningModule', args, kwargs):
output_dict = wrapped(*args, **kwargs)
if isinstance(output_dict, dict) and output_dict is not None and 'log' in output_dict:
log_dict = output_dict.pop('log')
instance.log_dict(log_dict, on_step=True)
return output_dict
def convert_model_config_to_dict_config(cfg: Union['DictConfig', 'NemoConfig']) -> 'DictConfig':
"""
Converts its input into a standard DictConfig.
Possible input values are:
- DictConfig
- A dataclass which is a subclass of NemoConfig
Args:
cfg: A dict-like object.
Returns:
The equivalent DictConfig
"""
if not _HAS_HYDRA:
logging.error("This function requires Hydra/Omegaconf and it was not installed.")
exit(1)
if not isinstance(cfg, (OmegaConf, DictConfig)) and is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if not isinstance(cfg, DictConfig):
raise ValueError(f"cfg constructor argument must be of type DictConfig/dict but got {type(cfg)} instead.")
config = OmegaConf.to_container(cfg, resolve=True)
config = OmegaConf.create(config)
return config
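# Minimal illustrative sketch: converting a small, hypothetical dataclass into a resolved
# DictConfig via the helper above. Assumes omegaconf is installed (see _HAS_HYDRA above).
@dataclass
class _ToyModelConfig:
    hidden_size: int = 128
    dropout: float = 0.1
def _example_convert_model_config_to_dict_config():
    cfg = convert_model_config_to_dict_config(_ToyModelConfig())
    assert isinstance(cfg, DictConfig) and cfg.hidden_size == 128
    return cfg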
def _convert_config(cfg: 'OmegaConf'):
""" Recursive function convertint the configuration from old hydra format to the new one. """
if not _HAS_HYDRA:
logging.error("This function requires Hydra/Omegaconf and it was not installed.")
exit(1)
# Get rid of cls -> _target_.
if 'cls' in cfg and '_target_' not in cfg:
cfg._target_ = cfg.pop('cls')
# Get rid of params.
if 'params' in cfg:
params = cfg.pop('params')
for param_key, param_val in params.items():
cfg[param_key] = param_val
# Recursion.
try:
for _, sub_cfg in cfg.items():
if isinstance(sub_cfg, DictConfig):
_convert_config(sub_cfg)
except omegaconf_errors.OmegaConfBaseException as e:
logging.warning(f"Skipped conversion for config/subconfig:\n{cfg}\n Reason: {e}.")
def maybe_update_config_version(cfg: 'DictConfig'):
"""
Recursively convert Hydra 0.x configs to Hydra 1.x configs.
Changes include:
- `cls` -> `_target_`.
- `params` -> drop params and shift all arguments to parent.
- `target` -> `_target_` cannot be performed due to ModelPT injecting `target` inside class.
Args:
cfg: Any Hydra compatible DictConfig
Returns:
An updated DictConfig that conforms to Hydra 1.x format.
"""
if not _HAS_HYDRA:
logging.error("This function requires Hydra/Omegaconf and it was not installed.")
exit(1)
if cfg is not None and not isinstance(cfg, DictConfig):
try:
temp_cfg = OmegaConf.create(cfg)
cfg = temp_cfg
except omegaconf_errors.OmegaConfBaseException:
# Cannot be cast to DictConfig, skip updating.
return cfg
# Make a copy of model config.
cfg = copy.deepcopy(cfg)
OmegaConf.set_struct(cfg, False)
# Convert config.
_convert_config(cfg)
# Update model config.
OmegaConf.set_struct(cfg, True)
return cfg
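# Minimal illustrative sketch: upgrading a hypothetical Hydra 0.x style sub-config
# (`cls` + `params`) into the Hydra 1.x layout handled by the helpers above.
def _example_maybe_update_config_version():
    old_cfg = OmegaConf.create({'encoder': {'cls': 'some.module.Encoder', 'params': {'dim': 256}}})
    new_cfg = maybe_update_config_version(old_cfg)
    # new_cfg.encoder now holds `_target_: some.module.Encoder` and `dim: 256`,
    # with the nested `params` block flattened away.
    return new_cfg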
@lru_cache(maxsize=1024)
def import_class_by_path(path: str):
"""
Recursive import of class by path string.
"""
paths = path.split('.')
path = ".".join(paths[:-1])
class_name = paths[-1]
mod = __import__(path, fromlist=[class_name])
mod = getattr(mod, class_name)
return mod
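# Minimal illustrative sketch: resolving a fully qualified class name. A standard-library
# class is used here so the example carries no extra dependencies.
def _example_import_class_by_path():
    ordered_dict_cls = import_class_by_path('collections.OrderedDict')
    return ordered_dict_cls()  # an empty OrderedDict instance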
def resolve_subclass_pretrained_model_info(base_class) -> List['PretrainedModelInfo']:
"""
Recursively traverses the inheritance graph of subclasses to extract all pretrained model info.
First constructs a set of unique pretrained model info by performing DFS over the inheritance graph.
All model info belonging to the same class is added together.
Args:
base_class: The root class, whose subclass graph will be traversed.
Returns:
A list of unique pretrained model infos belonging to all of the inherited subclasses of
this baseclass.
"""
list_of_models = set()
def recursive_subclass_walk(cls):
for subclass in cls.__subclasses__():
# step into its immediate subclass
recursive_subclass_walk(subclass)
subclass_models = subclass.list_available_models()
if subclass_models is not None and len(subclass_models) > 0:
# Inject subclass info into pretrained model info
# if not already overridden by the subclass
for model_info in subclass_models:
# If the subclass manually injects class_, don't override it.
if model_info.class_ is None:
model_info.class_ = subclass
for model_info in subclass_models:
list_of_models.add(model_info)
recursive_subclass_walk(base_class)
list_of_models = list(sorted(list_of_models))
return list_of_models
def check_lib_version(lib_name: str, checked_version: str, operator) -> Tuple[Optional[bool], str]:
"""
Checks if a library is installed, and if it is, evaluates operator(lib.__version__, checked_version).
The boolean result, along with a string describing the outcome of the check, is returned.
If the library is not installed at all, None is returned instead, along with a string explaining
that the library is not installed.
Args:
lib_name: lower case str name of the library that must be imported.
checked_version: semver string that is compared against lib.__version__.
operator: binary callable function func(a, b) -> bool; that compares lib.__version__ against version in
some manner. Must return a boolean.
Returns:
A tuple of results:
- Bool or None. Bool if the library could be imported, and the result of
operator(lib.__version__, checked_version) or False if __version__ is not implemented in lib.
None is passed if the library is not installed at all.
- A string analysis of the check.
"""
try:
if '.' in lib_name:
mod = import_class_by_path(lib_name)
else:
mod = importlib.import_module(lib_name)
if hasattr(mod, '__version__'):
lib_ver = version.Version(mod.__version__)
match_ver = version.Version(checked_version)
if operator(lib_ver, match_ver):
msg = f"Lib {lib_name} version is satisfied !"
return True, msg
else:
msg = (
f"Lib {lib_name} version ({lib_ver}) is not {operator.__name__} than required version {checked_version}.\n"
f"Please upgrade the lib using either pip or conda to the latest version."
)
return False, msg
else:
msg = (
f"Lib {lib_name} does not implement __version__ in its init file. "
f"Could not check version compatibility."
)
return False, msg
except (AttributeError, ImportError, ModuleNotFoundError):
pass
msg = f"Lib {lib_name} has not been installed. Please use pip or conda to install this package."
return None, msg
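# Minimal illustrative sketch: checking that an installed library satisfies a minimum version.
# `torch` is used purely as an example and is assumed to be present in a NeMo environment.
def _example_check_lib_version():
    import operator
    ok, msg = check_lib_version('torch', '1.0.0', operator.ge)
    # `ok` is True/False when torch is importable, or None if it is not installed at all.
    return ok, msg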
def uninject_model_parallel_rank(filepath):
filepath = str(filepath)
if 'mp_rank' in filepath or 'tp_rank' in filepath:
dirname = os.path.dirname(os.path.dirname(filepath))
basename = os.path.basename(filepath)
filepath = os.path.join(dirname, basename)
return filepath
else:
return filepath
def inject_model_parallel_rank(filepath):
"""
Injects tensor/pipeline model parallel ranks into the filepath.
Does nothing if not using model parallelism.
"""
# first make sure filepath does not have rank
filepath = uninject_model_parallel_rank(filepath)
app_state = AppState()
if app_state.model_parallel_size is not None and app_state.model_parallel_size > 1:
# filepath needs to be updated to include mp_rank
dirname = os.path.dirname(filepath)
basename = os.path.basename(filepath)
if app_state.pipeline_model_parallel_size is None or app_state.pipeline_model_parallel_size == 1:
filepath = f'{dirname}/mp_rank_{app_state.tensor_model_parallel_rank:02d}/{basename}'
else:
filepath = f'{dirname}/tp_rank_{app_state.tensor_model_parallel_rank:02d}_pp_rank_{app_state.pipeline_model_parallel_rank:03d}/{basename}'
return filepath
else:
return filepath
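# Minimal illustrative sketch of the round trip between a plain checkpoint path and its
# model-parallel variant. The path below is hypothetical.
def _example_model_parallel_rank_paths():
    plain = uninject_model_parallel_rank('/results/checkpoints/mp_rank_00/model.ckpt')
    # -> '/results/checkpoints/model.ckpt'
    # Re-injecting the rank is a no-op unless AppState reports model_parallel_size > 1.
    return inject_model_parallel_rank(plain)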
def ckpt_to_dir(filepath: Union[str, Path]) -> Path:
""" PTL considers checkpoints as .ckpt files.
This method removes the extension and returns a path
to be used as a directory for distributed checkpoints
"""
filepath = Path(filepath)
# adding this assert because we will later remove directories based on the return value of this method
assert filepath.suffix == ".ckpt", f'filepath: {filepath} must have .ckpt extension'
# create a new path whose name is the original filepath without the .ckpt extension
checkpoint_dir = filepath.with_name(filepath.stem)
return checkpoint_dir
|
NeMo-main
|
nemo/utils/model_utils.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from contextlib import nullcontext
from enum import Enum
from typing import Callable, Dict, Optional, Type
import onnx
import torch
import torch.nn as nn
import torch.nn.functional as F
from nemo.utils import CastToFloat, CastToFloatAll, logging
try:
import onnxruntime
ort_available = True
except (ImportError, ModuleNotFoundError):
ort_available = False
class ExportFormat(Enum):
"""Which format to use when exporting a Neural Module for deployment"""
ONNX = (1,)
TORCHSCRIPT = (2,)
_EXT_DICT = {
".pt": ExportFormat.TORCHSCRIPT,
".ts": ExportFormat.TORCHSCRIPT,
".onnx": ExportFormat.ONNX,
}
class TorchRMSNorm(nn.Module):
def __init__(self, weight, eps=1e-6):
"""
RMS normalization (LayerNorm without mean subtraction or bias)
"""
super().__init__()
self.weight = weight
self.variance_epsilon = eps
def forward(self, hidden_states):
# can be only calculated with precision=32
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
class LinearWithBiasSkip(nn.Module):
def __init__(self, weight, bias, skip_bias_add):
super(LinearWithBiasSkip, self).__init__()
self.bias = bias
self.weight = weight
self.skip_bias_add = skip_bias_add
def forward(self, x):
if self.skip_bias_add:
return F.linear(x, self.weight), self.bias
return F.linear(x, self.weight, self.bias), None
def get_export_format(filename: str):
_, ext = os.path.splitext(filename)
try:
return _EXT_DICT[ext.lower()]
except KeyError:
raise ValueError(f"Export file {filename} extension does not correspond to any export format!")
def augment_filename(output: str, prepend: str):
if prepend == 'self':
return output
path, filename = os.path.split(output)
filename = f"{prepend}-{filename}"
return os.path.join(path, filename)
def forward_method(self):
if hasattr(self, "forward_for_export"):
return self.forward_for_export
else:
return self.forward
def wrap_forward_method(self):
tp = type(self)
old_forward_method = None
if hasattr(tp, "forward_for_export"):
forward_method = tp.forward_for_export
old_forward_method = tp.forward
tp.forward = forward_method
else:
forward_method = None
return forward_method, old_forward_method
def parse_input_example(input_example):
input_list = list(input_example)
input_dict = {}
# process possible kwargs
if isinstance(input_list[-1], dict):
input_dict = input_list[-1]
input_list = input_list[:-1]
return input_list, input_dict
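# Minimal illustrative sketch: splitting an export input example into positional tensors and
# keyword arguments, the layout expected by the verification helpers below. Shapes are arbitrary.
def _example_parse_input_example():
    example = (torch.randn(2, 16), {'length': torch.tensor([16, 12])})
    input_list, input_dict = parse_input_example(example)
    # input_list == [the (2, 16) tensor]; input_dict == {'length': tensor([16, 12])}
    return input_list, input_dict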
def to_onnxrt_input(ort_input_names, input_names, input_dict, input_list):
odict = {}
for k in reversed(input_names):
val = None
if k in input_dict:
val = input_dict[k].cpu().numpy()
elif len(input_list) > 0:
val = input_list.pop().cpu().numpy()
if k in ort_input_names and val is not None:
odict[k] = val
return odict
def verify_torchscript(model, output, input_examples, check_tolerance=0.01):
all_good = True
for input_example in input_examples:
input_list, input_dict = parse_input_example(input_example)
# We disable autocast here to make sure exported TS will run under Triton or other C++ env
with torch.cuda.amp.autocast(enabled=False):
output_example = model.forward(*input_list, **input_dict)
ts_model = torch.jit.load(output)
all_good = all_good and run_ts_and_compare(
ts_model, input_list, input_dict, output_example, check_tolerance
)
status = "SUCCESS" if all_good else "FAIL"
logging.info(f"Torchscript generated at {output} verified with torchscript forward : " + status)
return all_good
def verify_runtime(model, output, input_examples, input_names, check_tolerance=0.01):
onnx_model = onnx.load(output)
ort_input_names = [node.name for node in onnx_model.graph.input]
global ort_available
if not ort_available:
logging.warning(f"ONNX generated at {output}, not verified - please install onnxruntime_gpu package.\n")
onnx.checker.check_model(onnx_model, full_check=True)
return
onnx_session_opt = onnxruntime.SessionOptions()
onnx_session_opt.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_BASIC
sess = onnxruntime.InferenceSession(
onnx_model.SerializeToString(), sess_options=onnx_session_opt, providers=['CUDAExecutionProvider']
)
del onnx_model
all_good = True
for input_example in input_examples:
input_list, input_dict = parse_input_example(input_example)
output_example = model.forward(*input_list, **input_dict)
ort_input = to_onnxrt_input(ort_input_names, input_names, input_dict, input_list)
all_good = all_good and run_ort_and_compare(sess, ort_input, output_example, check_tolerance)
status = "SUCCESS" if all_good else "FAIL"
logging.info(f"ONNX generated at {output} verified with onnxruntime : " + status)
return all_good
def run_ts_and_compare(ts_model, ts_input_list, ts_input_dict, output_example, check_tolerance=0.01):
# Verify the model can be read, and is valid
ts_out = ts_model(*ts_input_list, **ts_input_dict)
all_good = True
for i, out in enumerate(ts_out):
expected = output_example[i]
if torch.is_tensor(expected):
tout = out.to('cpu')
logging.debug(f"Checking output {i}, shape: {expected.shape}:\n")
this_good = True
try:
if not torch.allclose(tout, expected.cpu(), rtol=check_tolerance, atol=check_tolerance):
this_good = False
except Exception:  # there may be a size mismatch and it may be OK
this_good = False
if not this_good:
logging.info(f"Results mismatch! PyTorch(expected):\n{expected}\nTorchScript:\n{tout}")
all_good = False
return all_good
def run_ort_and_compare(sess, ort_input, output_example, check_tolerance=0.01):
# Verify the model can be read, and is valid
ort_out = sess.run(None, ort_input)
all_good = True
for i, out in enumerate(ort_out):
expected = output_example[i]
if torch.is_tensor(expected):
tout = torch.from_numpy(out)
logging.debug(f"Checking output {i}, shape: {expected.shape}:\n")
this_good = True
try:
if not torch.allclose(tout, expected.cpu(), rtol=check_tolerance, atol=100 * check_tolerance):
this_good = False
except Exception:  # there may be a size mismatch and it may be OK
this_good = False
if not this_good:
logging.info(f"onnxruntime results mismatch! PyTorch(expected):\n{expected}\nONNXruntime:\n{tout}")
all_good = False
return all_good
apex_available = True
try:
from apex.contrib.layer_norm.layer_norm import FastLayerNorm
from apex.normalization import MixedFusedRMSNorm
from apex.normalization.fused_layer_norm import FusedLayerNorm, MixedFusedLayerNorm
from apex.transformer.functional.fused_softmax import FusedScaleMaskSoftmax
from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear
def replace_FusedLayerNorm(n: nn.Module) -> Optional[nn.LayerNorm]:
"""
Replaces Apex's FusedLayerNorm with nn.LayerNorm. This is required for ONNX export.
Args:
n: the FusedLayerNorm pytorch module to replace
Returns:
Equivalent LayerNorm module
"""
p = next(n.parameters())
if isinstance(n, FusedLayerNorm) or isinstance(n, MixedFusedLayerNorm):
shape, eps, affine = n.normalized_shape, n.eps, n.elementwise_affine
n_state = n.state_dict()
elif isinstance(n, FastLayerNorm):
shape, eps, affine = n.weight.shape, n.epsilon, True
n_state = n.state_dict()
elif isinstance(n, MixedFusedRMSNorm):
shape, eps, affine = n.normalized_shape, n.eps, n.elementwise_affine
tmp_n_state = n.state_dict()
n_state = {'weight': tmp_n_state['weight'], 'bias': torch.zeros_like(tmp_n_state['weight'])}
else:
return None
mod = nn.LayerNorm(shape, eps=eps, elementwise_affine=affine, device=p.device, dtype=p.dtype)
mod.load_state_dict(n_state)
return mod
def replace_MixedFusedRMSNorm(n: nn.Module):
"""
Replaces Apex's MixedFusedRMSNorm with equivalent Pytorch layer. This is required for ONNX export.
Args:
n: the MixedFusedRMSNorm pytorch module to replace
Returns:
Equivalent module
"""
p = next(n.parameters())
if isinstance(n, MixedFusedRMSNorm):
mod = TorchRMSNorm(n.state_dict()['weight'], n.eps).to(p.device)
else:
return None
return mod
def replace_ParallelLinear(n: nn.Module) -> Optional[nn.Linear]:
"""
Replaces ColumnParallelLinear or RowParallelLinear with an equivalent LinearWithBiasSkip module
Args:
n: the nn.Module pytorch module to replace
Returns:
Equivalent Linear module
"""
if not (isinstance(n, ColumnParallelLinear) or isinstance(n, RowParallelLinear)):
raise ValueError("This function can only change the ColumnParallelLinear or RowParallelLinear module.")
dev = next(n.parameters()).device
mod = LinearWithBiasSkip(n.weight, n.bias, n.skip_bias_add).to(dev)
n_state = n.state_dict()
mod.load_state_dict(n_state)
return mod
def replace_FusedScaleMaskSoftmax(n: nn.Module) -> Optional[nn.Linear]:
"""
Replaces Apex's FusedScaleMaskSoftmax with an equivalent module that has kernel fusion disabled. This is required for ONNX export.
Args:
n: the FusedScaleMaskSoftmax module to replace
Returns:
Equivalent FusedScaleMaskSoftmax module with fusion disabled
"""
if not isinstance(n, FusedScaleMaskSoftmax):
logging.warning("This function can only change the FusedScaleMaskSoftmax module.")
return n
# disable the fusion only
mod = FusedScaleMaskSoftmax(
n.input_in_fp16, n.input_in_bf16, n.attn_mask_type, False, n.mask_func, n.softmax_in_fp32, n.scale
)
return mod
default_Apex_replacements = {
"FusedLayerNorm": replace_FusedLayerNorm,
"MixedFusedLayerNorm": replace_FusedLayerNorm,
"FastLayerNorm": replace_FusedLayerNorm,
"RowParallelLinear": replace_ParallelLinear,
"ColumnParallelLinear": replace_ParallelLinear,
"FusedScaleMaskSoftmax": replace_FusedScaleMaskSoftmax,
"MixedFusedRMSNorm": replace_MixedFusedRMSNorm,
}
except Exception as e:
default_Apex_replacements = {}
apex_available = False
def simple_replace(BaseT: Type[nn.Module], DestT: Type[nn.Module]) -> Callable[[nn.Module], Optional[nn.Module]]:
"""
Generic function generator to replace a BaseT module with DestT. BaseT and DestT should have the same attributes. No weights are copied.
Args:
BaseT : module type to replace
DestT : destination module type
Returns:
swap function to replace BaseT module with DestT
"""
def expansion_fn(mod: nn.Module) -> Optional[nn.Module]:
if not isinstance(mod, BaseT):
return None
args = [getattr(mod, name, None) for name in mod.__constants__]
out = DestT(*args)
return out
return expansion_fn
def replace_MatchedScaleMaskSoftmax(n: nn.Module) -> Optional[nn.Linear]:
"""
Replaces MatchedScaleMaskSoftmax with exportable softmax layer
Args:
n: module to replace
Returns:
exportable module
"""
# including the import here to avoid circular imports
from nemo.collections.nlp.modules.common.megatron.fused_softmax import MatchedScaleMaskSoftmax
# disabling fusion for the MatchedScaleMaskSoftmax
mod = MatchedScaleMaskSoftmax(
n.input_in_fp16, n.input_in_bf16, n.attn_mask_type, False, n.mask_func, n.softmax_in_fp32, n.scale
)
return mod
def wrap_module(BaseT: Type[nn.Module], DestT: Type[nn.Module]) -> Callable[[nn.Module], Optional[nn.Module]]:
"""
Generic function generator to replace BaseT module with DestT wrapper.
Args:
BaseT : module type to replace
DestT : destination module type
Returns:
swap function to replace BaseT module with DestT
"""
def expansion_fn(mod: nn.Module) -> Optional[nn.Module]:
out = DestT(mod)
return out
return expansion_fn
def swap_modules(model: nn.Module, mapping: Dict[str, nn.Module]):
"""
This function swaps nested modules, specified by "dot paths" into `model`, with the desired replacements. This allows
for swapping nested modules through arbitrary levels of children.
NOTE: This occurs in place, if you want to preserve model then make sure to copy it first.
"""
for path, new_mod in mapping.items():
expanded_path = path.split(".")
parent_mod = model
for sub_path in expanded_path[:-1]:
parent_mod = parent_mod._modules[sub_path] # noqa
parent_mod._modules[expanded_path[-1]] = new_mod # noqa
return model
def replace_modules(
model: nn.Module, expansions: Dict[str, Callable[[nn.Module], Optional[nn.Module]]] = None
) -> nn.Module:
"""
Top-level function to replace modules in model, specified by class name with a desired replacement.
NOTE: This occurs in place, if you want to preserve model then make sure to copy it first.
Args:
model : top level module
expansions : replacement dictionary: module class name -> replacement function generator
Returns:
model, possibly modified in-place
"""
mapping: Dict[str, nn.Module] = {}
for name, m in model.named_modules():
m_type = type(m).__name__
if m_type in expansions:
swapped = expansions[m_type](m)
if swapped:
mapping[name] = swapped
if len(mapping) > 0:
logging.info(f"Swapped {len(mapping)} modules")
swap_modules(model, mapping)
return model
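# Minimal illustrative sketch: wrapping every LayerNorm of a toy model in CastToFloat via
# `wrap_module` + `replace_modules`, mirroring what `add_casts_around_norms` below does for a
# larger default set of normalization layers. The toy model is hypothetical.
def _example_replace_modules():
    toy = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
    replace_modules(toy, {"LayerNorm": wrap_module(nn.LayerNorm, CastToFloat)})
    # toy[1] is now a CastToFloat wrapper around the original LayerNorm.
    return toy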
def script_module(m: nn.Module):
return torch.jit.script(m)
script_replacements = {}
def replace_for_export(model: nn.Module) -> nn.Module:
"""
Top-level function to replace 'default set' of modules in model, called from _prepare_for_export.
NOTE: This occurs in place, if you want to preserve model then make sure to copy it first.
Args:
model : top level module
Returns:
model, possibly modified in-place
"""
from nemo.collections.tts.modules.submodules import MaskedInstanceNorm1d
default_replacements = {
"MatchedScaleMaskSoftmax": wrap_module(None, replace_MatchedScaleMaskSoftmax),
}
replace_modules(model, default_Apex_replacements)
replace_modules(model, default_replacements)
# This one has to be the last
replace_modules(model, script_replacements)
def add_casts_around_norms(model: nn.Module):
"""
Function to put additional to/from float32 casts around operations known to require full precision.
It was used with an extra post-parse script to have TRT preserve extra precision when --fp16 is needed.
Should not be needed with TRT 8.6.1 or later.
"""
# Local import to avoid circular imports; MaskedInstanceNorm1d is referenced in the mapping below.
from nemo.collections.tts.modules.submodules import MaskedInstanceNorm1d
default_cast_replacements = {
"BatchNorm1d": wrap_module(nn.BatchNorm1d, CastToFloat),
"BatchNorm2d": wrap_module(nn.BatchNorm2d, CastToFloat),
"LayerNorm": wrap_module(nn.LayerNorm, CastToFloat),
"InstanceNorm1d": wrap_module(nn.InstanceNorm1d, CastToFloat),
"MaskedInstanceNorm1d": wrap_module(MaskedInstanceNorm1d, CastToFloatAll),
}
replace_modules(model, default_cast_replacements)
|
NeMo-main
|
nemo/utils/export_utils.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import logging as _logging
import sys
import threading
import warnings
from contextlib import contextmanager
from logging.handlers import MemoryHandler
from nemo.constants import NEMO_ENV_VARNAME_REDIRECT_LOGS_TO_STDERR, NEMO_ENV_VARNAME_TESTING
from nemo.utils.env_var_parsing import get_envbool
from nemo.utils.formatters.base import BaseNeMoFormatter, DebugNeMoFormatter
from nemo.utils.get_rank import is_global_rank_zero
from nemo.utils.metaclasses import Singleton
__all__ = ["Logger", "LogMode"]
class LogMode(enum.IntEnum):
EACH = 0 # Log the message each time
ONCE = 1 # Log the message only once. The same message will not be logged again.
class Logger(metaclass=Singleton):
# Level 0
NOTSET = _logging.NOTSET
# Level 10
DEBUG = _logging.DEBUG
# Level 20
INFO = _logging.INFO
# Level 30
WARNING = _logging.WARNING
# Level 40
ERROR = _logging.ERROR
# Level 50
CRITICAL = _logging.CRITICAL
_level_names = {
0: "NOTSET",
10: "DEBUG",
20: "INFO",
30: "WARNING",
40: "ERROR",
50: "CRITICAL",
}
def __init__(self, capture_warnings=True):
self._logger = None
# Multi-GPU runs run in separate processes, thread locks shouldn't be needed
self._logger_lock = threading.Lock()
self._handlers = dict()
self.old_warnings_showwarning = None
self._define_logger(capture_warnings)
self.once_logged = set()
self.rank = 0 if is_global_rank_zero() else "UNK"
def _define_logger(self, capture_warnings=True):
""" Creates the logger if not already created. Called in init"""
# Use double-checked locking to avoid taking lock unnecessarily.
if self._logger is not None:
return self._logger
with self._logger_lock:
try:
self._logger = _logging.getLogger("nemo_logger")
# By default, silence all loggers except the logger for rank 0
self.remove_stream_handlers()
# If NEMO_TESTING is set, add a streamhandler to all ranks
if get_envbool(NEMO_ENV_VARNAME_TESTING, False):
old_factory = _logging.getLogRecordFactory()
def record_factory(*args, **kwargs):
record = old_factory(*args, **kwargs)
record.rank = self.rank
return record
_logging.setLogRecordFactory(record_factory)
self.add_stream_handlers(formatter=DebugNeMoFormatter)
elif is_global_rank_zero():
self.add_stream_handlers()
# Add memoryhandlers, essentially buffers. They are used to save messages that we will flush to file
# once the appropriate file handlers are added.
if is_global_rank_zero():
# Add a memoryhandler for error messages. Only logged on rank 0
self._handlers["memory_err"] = MemoryHandler(-1)
self._handlers["memory_err"].addFilter(lambda record: record.levelno > _logging.INFO)
formatter = BaseNeMoFormatter
self._handlers["memory_err"].setFormatter(formatter())
self._logger.addHandler(self._handlers["memory_err"])
# Add a memoryhandler for all messages on all ranks
self._handlers["memory_all"] = MemoryHandler(-1)
formatter = BaseNeMoFormatter
self._handlers["memory_all"].setFormatter(formatter())
self._logger.addHandler(self._handlers["memory_all"])
finally:
level = Logger.INFO
if get_envbool(NEMO_ENV_VARNAME_TESTING, False):
level = Logger.DEBUG
self.set_verbosity(verbosity_level=level)
self.captureWarnings(capture_warnings)
self._logger.propagate = False
def remove_stream_handlers(self):
""" Removes StreamHandler that log to stdout and stderr from the logger."""
if self._logger is None:
raise RuntimeError("Impossible to set handlers if the Logger is not predefined")
# ======== Remove Handler if already existing ========
try:
self._logger.removeHandler(self._handlers["stream_stdout"])
del self._handlers["stream_stdout"]
except KeyError:
pass
try:
self._logger.removeHandler(self._handlers["stream_stderr"])
del self._handlers["stream_stderr"]
except KeyError:
pass
def add_stream_handlers(self, formatter=BaseNeMoFormatter):
"""Add StreamHandler that log to stdout and stderr to the logger. INFO and lower logs are streamed to stdout
while WARNING and higher are streamed to stderr. If the NEMO_ENV_VARNAME_REDIRECT_LOGS_TO_STDERR environment
variable is set, all logs are sent to stderr instead.
"""
if self._logger is None:
raise RuntimeError("Impossible to set handlers if the Logger is not predefined")
# Add the output handler.
if get_envbool(NEMO_ENV_VARNAME_REDIRECT_LOGS_TO_STDERR, False):
self._handlers["stream_stdout"] = _logging.StreamHandler(sys.stderr)
else:
self._handlers["stream_stdout"] = _logging.StreamHandler(sys.stdout)
self._handlers["stream_stdout"].addFilter(lambda record: record.levelno <= _logging.INFO)
self._handlers["stream_stderr"] = _logging.StreamHandler(sys.stderr)
self._handlers["stream_stderr"].addFilter(lambda record: record.levelno > _logging.INFO)
self._handlers["stream_stdout"].setFormatter(formatter())
self._logger.addHandler(self._handlers["stream_stdout"])
try:
self._handlers["stream_stderr"].setFormatter(formatter())
self._logger.addHandler(self._handlers["stream_stderr"])
except KeyError:
pass
def reset_stream_handler(self, formatter=BaseNeMoFormatter):
"""Removes then adds stream handlers."""
self.remove_stream_handlers()
self.add_stream_handlers(formatter=formatter)
def add_file_handler(self, log_file):
"""Add a FileHandler to logger that logs all messages to a file. If the logger had a MemoryHandler at
self._handlers["memory_all"], those buffered messages are flushed to the new file, and the MemoryHandler is
closed."""
if self._logger is None:
raise RuntimeError("Impossible to set handlers if the Logger is not predefined")
self._handlers["file"] = _logging.FileHandler(log_file)
formatter = BaseNeMoFormatter
self._handlers["file"].setFormatter(formatter())
self._logger.addHandler(self._handlers["file"])
if self._handlers.get("memory_all", None):
self._handlers["memory_all"].setTarget(self._handlers["file"])
self._handlers["memory_all"].close() # flush and remove
del self._handlers["memory_all"]
def add_err_file_handler(self, log_file):
"""Add a FileHandler to logger that logs all WARNING and higher messages to a file. If the logger had a
MemoryHandler at self._handlers["memory_err"], those buffered messages are flushed to the new file, and the
MemoryHandler is closed."""
if self._logger is None:
raise RuntimeError("Impossible to set handlers if the Logger is not predefined")
self._handlers["file_err"] = _logging.FileHandler(log_file)
self._handlers["file_err"].addFilter(lambda record: record.levelno > _logging.INFO)
formatter = BaseNeMoFormatter
self._handlers["file_err"].setFormatter(formatter())
self._logger.addHandler(self._handlers["file_err"])
if self._handlers.get("memory_err", None):
self._handlers["memory_err"].setTarget(self._handlers["file_err"])
self._handlers["memory_err"].close() # flush and remove
del self._handlers["memory_err"]
def getEffectiveLevel(self):
"""Return how much logging output will be produced."""
if self._logger is not None:
return self._logger.getEffectiveLevel()
def get_verbosity(self):
"""See getEffectiveLevel"""
return self.getEffectiveLevel()
def setLevel(self, verbosity_level):
"""Sets the threshold for what messages will be logged."""
if self._logger is not None:
self._logger.setLevel(verbosity_level)
for handler in self._logger.handlers:
handler.setLevel(verbosity_level)
def set_verbosity(self, verbosity_level):
"""See setLevel"""
self.setLevel(verbosity_level)
@contextmanager
def patch_stderr_handler(self, stream):
""" Sends messages that should log to stderr to stream instead. Useful for unittests """
if self._logger is not None:
try:
old_stream = self._handlers["stream_stderr"].stream
if old_stream is None:
raise ValueError
# Port backwards set_stream() from python 3.7
self._handlers["stream_stderr"].acquire()
try:
self._handlers["stream_stderr"].flush()
self._handlers["stream_stderr"].stream = stream
finally:
self._handlers["stream_stderr"].release()
yield stream
except (KeyError, ValueError):
raise RuntimeError("Impossible to patch logging handlers if handler does not exist")
finally:
# Port backwards set_stream() from python 3.7
self._handlers["stream_stderr"].acquire()
try:
self._handlers["stream_stderr"].flush()
self._handlers["stream_stderr"].stream = old_stream
finally:
self._handlers["stream_stderr"].release()
else:
raise RuntimeError("Impossible to patch logging handlers if handler does not exist")
@contextmanager
def patch_stdout_handler(self, stream):
""" Sends messages that should log to stdout to stream instead. Useful for unittests """
if self._logger is not None:
try:
old_stream = self._handlers["stream_stdout"].stream
if old_stream is None:
raise ValueError
# Port backwards set_stream() from python 3.7
self._handlers["stream_stdout"].acquire()
try:
self._handlers["stream_stdout"].flush()
self._handlers["stream_stdout"].stream = stream
finally:
self._handlers["stream_stdout"].release()
yield stream
except (KeyError, ValueError):
raise RuntimeError("Impossible to patch logging handlers if handler does not exist")
finally:
# Port backwards set_stream() from python 3.7
self._handlers["stream_stdout"].acquire()
try:
self._handlers["stream_stdout"].flush()
self._handlers["stream_stdout"].stream = old_stream
finally:
self._handlers["stream_stdout"].release()
else:
raise RuntimeError("Impossible to patch logging handlers if handler does not exist")
@contextmanager
def temp_verbosity(self, verbosity_level):
"""Sets the a temporary threshold for what messages will be logged."""
if self._logger is not None:
old_verbosity = self.get_verbosity()
try:
self.set_verbosity(verbosity_level)
yield
finally:
self.set_verbosity(old_verbosity)
else:
try:
yield
finally:
pass
def captureWarnings(self, capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
if self._logger is not None:
if capture and self.old_warnings_showwarning is None:
# Backup Method
self.old_warnings_showwarning = warnings.showwarning
warnings.showwarning = self._showwarning
elif not capture and self.old_warnings_showwarning is not None:
# Restore Method
warnings.showwarning = self.old_warnings_showwarning
self.old_warnings_showwarning = None
def _showwarning(self, message, category, filename, lineno, file=None, line=None):
"""
Implementation of showwarnings which redirects to logging.
It will call warnings.formatwarning and will log the resulting string
with level logging.WARNING.
"""
s = warnings.formatwarning(message, category, filename, lineno, line)
self.warning("%s", s)
def _logged_once(self, msg, mode):
PREFIX_LEN = 12
if mode == LogMode.ONCE:
if msg[PREFIX_LEN:] in self.once_logged:
return True
self.once_logged.add(msg[PREFIX_LEN:])
return False
def debug(self, msg, *args, mode=LogMode.EACH, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self._logger is not None and self._logger.isEnabledFor(Logger.DEBUG) and not self._logged_once(msg, mode):
self._logger._log(Logger.DEBUG, msg, args, **kwargs)
def info(self, msg, *args, mode=LogMode.EACH, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self._logger is not None and self._logger.isEnabledFor(Logger.INFO) and not self._logged_once(msg, mode):
self._logger._log(Logger.INFO, msg, args, **kwargs)
def warning(self, msg, *args, mode=LogMode.EACH, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self._logger is not None and self._logger.isEnabledFor(Logger.WARNING) and not self._logged_once(msg, mode):
self._logger._log(Logger.WARNING, msg, args, **kwargs)
def error(self, msg, *args, mode=LogMode.EACH, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self._logger is not None and self._logger.isEnabledFor(Logger.ERROR) and not self._logged_once(msg, mode):
self._logger._log(Logger.ERROR, msg, args, **kwargs)
def critical(self, msg, *args, mode=LogMode.EACH, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if (
self._logger is not None
and self._logger.isEnabledFor(Logger.CRITICAL)
and not self._logged_once(msg, mode)
):
self._logger._log(Logger.CRITICAL, msg, args, **kwargs)
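# Minimal illustrative sketch: the singleton Logger above is normally accessed as
# `nemo.utils.logging`; constructing it again returns the same instance. LogMode.ONCE
# suppresses repeats of an identical message.
def _example_logger_usage():
    logger = Logger()
    logger.info("Dataset prepared")  # logged on every call
    for _ in range(3):
        logger.warning("Falling back to CPU execution", mode=LogMode.ONCE)  # emitted only once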
|
NeMo-main
|
nemo/utils/nemo_logging.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import nullcontext
import torch
def avoid_bfloat16_autocast_context():
"""
If the current autocast context is bfloat16,
cast it to float32
"""
if torch.is_autocast_enabled() and torch.get_autocast_gpu_dtype() == torch.bfloat16:
return torch.cuda.amp.autocast(dtype=torch.float32)
else:
return nullcontext()
def avoid_float16_autocast_context():
"""
If the current autocast context is float16, cast it to bfloat16
if available (unless we're in jit) or float32
"""
if torch.is_autocast_enabled() and torch.get_autocast_gpu_dtype() == torch.float16:
if torch.jit.is_scripting() or torch.jit.is_tracing():
return torch.cuda.amp.autocast(dtype=torch.float32)
if torch.cuda.is_bf16_supported():
return torch.cuda.amp.autocast(dtype=torch.bfloat16)
else:
return torch.cuda.amp.autocast(dtype=torch.float32)
else:
return nullcontext()
def cast_tensor(x, from_dtype=torch.float16, to_dtype=torch.float32):
return x.to(dtype=to_dtype) if x.dtype == from_dtype else x
def cast_all(x, from_dtype=torch.float16, to_dtype=torch.float32):
if isinstance(x, torch.Tensor):
return cast_tensor(x, from_dtype=from_dtype, to_dtype=to_dtype)
else:
if isinstance(x, dict):
new_dict = {}
for k in x.keys():
new_dict[k] = cast_all(x[k], from_dtype=from_dtype, to_dtype=to_dtype)
return new_dict
elif isinstance(x, tuple):
return tuple(cast_all(y, from_dtype=from_dtype, to_dtype=to_dtype) for y in x)
class CastToFloat(torch.nn.Module):
def __init__(self, mod):
super(CastToFloat, self).__init__()
self.mod = mod
def forward(self, x):
if torch.is_autocast_enabled() and x.dtype != torch.float32:
with torch.cuda.amp.autocast(enabled=False):
ret = self.mod.forward(x.to(torch.float32)).to(x.dtype)
else:
ret = self.mod.forward(x)
return ret
class CastToFloatAll(torch.nn.Module):
def __init__(self, mod):
super(CastToFloatAll, self).__init__()
self.mod = mod
def forward(self, *args):
if torch.is_autocast_enabled():
from_dtype = args[0].dtype
with torch.cuda.amp.autocast(enabled=False):
ret = self.mod.forward(*cast_all(args, from_dtype=from_dtype, to_dtype=torch.float32))
return cast_all(ret, from_dtype=torch.float32, to_dtype=from_dtype)
else:
return self.mod.forward(*args)
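# Minimal illustrative sketch: wrapping a LayerNorm in CastToFloat so that, under float16
# autocast on GPU, the normalization itself still runs in float32. Shapes are arbitrary.
def _example_cast_to_float():
    norm = CastToFloat(torch.nn.LayerNorm(32))
    x = torch.randn(4, 32)
    if torch.cuda.is_available():
        norm, x = norm.cuda(), x.cuda()
        with torch.cuda.amp.autocast(dtype=torch.float16):
            return norm(x.half())  # output is float16, but the norm is computed in float32
    return norm(x)  # without autocast, CastToFloat simply forwards to the wrapped module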
|
NeMo-main
|
nemo/utils/cast_utils.py
|
# The MIT Licence (MIT)
#
# Copyright (c) 2016 YunoJuno Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Vendored dependency from : https://github.com/yunojuno/python-env-utils/blob/master/env_utils/utils.py
#
#
# Modified by NVIDIA
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import decimal
import json
import os
from dateutil import parser
__all__ = [
"get_env",
"get_envbool",
"get_envint",
"get_envfloat",
"get_envdecimal",
"get_envdate",
"get_envdatetime",
"get_envlist",
"get_envdict",
"CoercionError",
"RequiredSettingMissingError",
]
class CoercionError(Exception):
"""Custom error raised when a value cannot be coerced."""
def __init__(self, key, value, func):
msg = "Unable to coerce '{}={}' using {}.".format(key, value, func.__name__)
super(CoercionError, self).__init__(msg)
class RequiredSettingMissingError(Exception):
"""Custom error raised when a required env var is missing."""
def __init__(self, key):
msg = "Required env var '{}' is missing.".format(key)
super(RequiredSettingMissingError, self).__init__(msg)
def _get_env(key, default=None, coerce=lambda x: x, required=False):
"""
Return env var coerced into a type other than string.
This function extends the standard os.getenv function to enable
the coercion of values into data types other than string (all env
vars are strings by default).
Args:
key: string, the name of the env var to look up
Kwargs:
default: the default value to return if the env var does not exist. NB the
default value is **not** coerced, and is assumed to be of the correct type.
coerce: a function that is used to coerce the value returned into
another type
required: bool, if True, then a RequiredSettingMissingError error is raised
if the env var does not exist.
Returns the env var, passed through the coerce function
"""
try:
value = os.environ[key]
except KeyError:
if required is True:
raise RequiredSettingMissingError(key)
else:
return default
try:
return coerce(value)
except Exception:
raise CoercionError(key, value, coerce)
# standard type coercion functions
def _bool(value):
if isinstance(value, bool):
return value
return not (value is None or value.lower() in ("false", "0", "no", "n", "f", "none"))
def _int(value):
return int(value)
def _float(value):
return float(value)
def _decimal(value):
return decimal.Decimal(value)
def _dict(value):
return json.loads(value)
def _datetime(value):
return parser.parse(value)
def _date(value):
return parser.parse(value).date()
def get_env(key, *default, **kwargs):
"""
Return env var.
This is the parent function of all other get_foo functions,
and is responsible for unpacking args/kwargs into the values
that _get_env expects (it is the root function that actually
interacts with environ).
Args:
key: string, the env var name to look up.
default: (optional) the value to use if the env var does not
exist. If this value is not supplied, then the env var is
considered to be required, and a RequiredSettingMissingError
error will be raised if it does not exist.
Kwargs:
coerce: a func that may be supplied to coerce the value into
something else. This is used by the default get_foo functions
to cast strings to builtin types, but could be a function that
returns a custom class.
Returns the env var, coerced if required, and a default if supplied.
"""
assert len(default) in (0, 1), "Too many args supplied."
func = kwargs.get('coerce', lambda x: x)
required = len(default) == 0
default = default[0] if not required else None
return _get_env(key, default=default, coerce=func, required=required)
def get_envbool(key, *default):
"""Return env var cast as boolean."""
return get_env(key, *default, coerce=_bool)
def get_envint(key, *default):
"""Return env var cast as integer."""
return get_env(key, *default, coerce=_int)
def get_envfloat(key, *default):
"""Return env var cast as float."""
return get_env(key, *default, coerce=_float)
def get_envdecimal(key, *default):
"""Return env var cast as Decimal."""
return get_env(key, *default, coerce=_decimal)
def get_envdate(key, *default):
"""Return env var as a date."""
return get_env(key, *default, coerce=_date)
def get_envdatetime(key, *default):
"""Return env var as a datetime."""
return get_env(key, *default, coerce=_datetime)
def get_envlist(key, *default, **kwargs):
"""Return env var as a list."""
separator = kwargs.get('separator', ' ')
return get_env(key, *default, coerce=lambda x: x.split(separator))
def get_envdict(key, *default):
"""Return env var as a dict."""
return get_env(key, *default, coerce=_dict)
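# A minimal usage sketch (not part of the original module) showing how the
# get_env* helpers coerce environment variables; the variable names below are
# made up for illustration.
if __name__ == "__main__":
    os.environ["DEMO_FLAG"] = "true"
    os.environ["DEMO_WORKERS"] = "4"
    os.environ["DEMO_HOSTS"] = "a,b,c"
    assert get_envbool("DEMO_FLAG") is True
    assert get_envint("DEMO_WORKERS") == 4
    assert get_envlist("DEMO_HOSTS", separator=",") == ["a", "b", "c"]
    # Supplying a default makes the variable optional; omitting it makes it
    # required and raises RequiredSettingMissingError when absent.
    assert get_envint("DEMO_MISSING", 10) == 10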
|
NeMo-main
|
nemo/utils/env_var_parsing.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
from time import sleep
import wget
from pytorch_lightning.plugins.environments import LightningEnvironment
from pytorch_lightning.strategies import DDPStrategy, StrategyRegistry
from nemo.utils import logging
def maybe_download_from_cloud(url, filename, subfolder=None, cache_dir=None, refresh_cache=False) -> str:
"""
Helper function to download pre-trained weights from the cloud
Args:
url: (str) URL of storage
filename: (str) what to download. The request will be issued to url/filename
subfolder: (str) subfolder within cache_dir. The file will be stored in cache_dir/subfolder. Subfolder can
be empty
cache_dir: (str) a cache directory where to download. If not present, this function will attempt to create it.
If None (default), then it will be $HOME/.cache/torch/NeMo
refresh_cache: (bool) if True and cached file is present, it will delete it and re-fetch
Returns:
If successful - absolute local path to the downloaded file
else - empty string
"""
# try:
if cache_dir is None:
cache_location = Path.joinpath(Path.home(), ".cache/torch/NeMo")
else:
cache_location = cache_dir
if subfolder is not None:
destination = Path.joinpath(cache_location, subfolder)
else:
destination = cache_location
if not os.path.exists(destination):
os.makedirs(destination, exist_ok=True)
destination_file = Path.joinpath(destination, filename)
if os.path.exists(destination_file):
logging.info(f"Found existing object {destination_file}.")
if refresh_cache:
logging.info("Asked to refresh the cache.")
logging.info(f"Deleting file: {destination_file}")
os.remove(destination_file)
else:
logging.info(f"Re-using file from: {destination_file}")
return str(destination_file)
# download file
wget_uri = url + filename
logging.info(f"Downloading from: {wget_uri} to {str(destination_file)}")
# NGC links do not work every time, so we retry with a short wait
i = 0
max_attempts = 3
while i < max_attempts:
i += 1
try:
wget.download(wget_uri, str(destination_file))
if os.path.exists(destination_file):
return str(destination_file)
else:
return ""
except Exception:
logging.info(f"Download from cloud failed. Attempt {i} of {max_attempts}")
sleep(0.05)
continue
raise ValueError("Not able to download url right now, please try again.")
class SageMakerDDPStrategy(DDPStrategy):
@property
def cluster_environment(self):
env = LightningEnvironment()
env.world_size = lambda: int(os.environ["WORLD_SIZE"])
env.global_rank = lambda: int(os.environ["RANK"])
return env
@cluster_environment.setter
def cluster_environment(self, env):
# prevents Lightning from overriding the Environment required for SageMaker
pass
def initialize_sagemaker() -> None:
"""
Helper function to initiate sagemaker with NeMo.
This function installs the system libraries that NeMo's ASR toolkit requires and initializes the SageMaker DDP strategy.
"""
StrategyRegistry.register(
name='smddp', strategy=SageMakerDDPStrategy, process_group_backend="smddp", find_unused_parameters=False,
)
def _install_system_libraries() -> None:
os.system('chmod 777 /tmp && apt-get update && apt-get install -y libsndfile1 ffmpeg')
def _patch_torch_metrics() -> None:
"""
Patches torchmetrics to not rely on internal state.
This is because sagemaker DDP overrides the `__init__` function of the modules to do automatic-partitioning.
"""
from torchmetrics import Metric
def __new_hash__(self):
hash_vals = [self.__class__.__name__, id(self)]
return hash(tuple(hash_vals))
Metric.__hash__ = __new_hash__
_patch_torch_metrics()
if os.environ.get("RANK") and os.environ.get("WORLD_SIZE"):
import smdistributed.dataparallel.torch.distributed as dist
# has to be imported, as it overrides torch modules and such when DDP is enabled.
import smdistributed.dataparallel.torch.torch_smddp
dist.init_process_group()
if dist.get_local_rank():
_install_system_libraries()
return dist.barrier() # wait for main process
_install_system_libraries()
return
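# Illustrative sketch (not part of the original file): after initialize_sagemaker()
# registers the 'smddp' strategy, it can be selected by name when building the
# Lightning Trainer. Shown commented out since it requires a SageMaker DDP job.
#
#   import pytorch_lightning as pl
#   initialize_sagemaker()
#   trainer = pl.Trainer(strategy='smddp', accelerator='gpu', devices=8)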
|
NeMo-main
|
nemo/utils/cloud.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from typing import Any, Dict, List, Optional, Union
def add_optimizer_args(
parent_parser: ArgumentParser,
optimizer: str = 'adam',
default_lr: float = None,
default_opt_args: Optional[Union[Dict[str, Any], List[str]]] = None,
) -> ArgumentParser:
"""Extends existing argparse with support for optimizers.
# Example of adding optimizer args to the command line:
python train_script.py ... --optimizer "novograd" --lr 0.01 \
--opt_args betas=0.95,0.5 weight_decay=0.001
Args:
parent_parser (ArgumentParser): Custom CLI parser that will be extended.
optimizer (str): Default optimizer required.
default_lr (float): Default learning rate that can be overridden during training.
default_opt_args (list(str)): List of overriding arguments for the instantiated optimizer.
Returns:
ArgumentParser: Parser extended by Optimizers arguments.
"""
if default_opt_args is None:
default_opt_args = []
parser = ArgumentParser(parents=[parent_parser], add_help=True, conflict_handler='resolve')
parser.add_argument('--optimizer', type=str, default=optimizer, help='Name of the optimizer. Defaults to Adam.')
parser.add_argument('--lr', type=float, default=default_lr, help='Learning rate of the optimizer.')
parser.add_argument(
'--opt_args',
default=default_opt_args,
nargs='+',
type=str,
help='Overriding arguments for the optimizer. \n Must follow the pattern : \n name=value separated by spaces.'
'Example: --opt_args weight_decay=0.001 eps=1e-8 betas=0.9,0.999',
)
return parser
def add_scheduler_args(parent_parser: ArgumentParser) -> ArgumentParser:
"""Extends existing argparse with default LR scheduler args.
Args:
parent_parser (ArgumentParser): Custom CLI parser that will be extended.
Returns:
ArgumentParser: Parser extended by LR Scheduler arguments.
"""
parser = ArgumentParser(parents=[parent_parser], add_help=False, conflict_handler='resolve')
parser.add_argument("--warmup_steps", type=int, required=False, default=None, help="Number of warmup steps")
parser.add_argument(
"--warmup_ratio",
type=float,
required=False,
default=None,
help="Number of warmup steps as a percentage of total training steps",
)
parser.add_argument("--hold_steps", type=int, required=False, default=None, help="Number of hold LR steps")
parser.add_argument(
"--hold_ratio",
type=float,
required=False,
default=None,
help="Number of hold LR steps as a percentage of total training steps",
)
parser.add_argument("--min_lr", type=float, required=False, default=0.0, help="Minimum learning rate")
parser.add_argument(
"--last_epoch", type=int, required=False, default=-1, help="Last epoch id. -1 indicates training from scratch"
)
return parser
def add_asr_args(parent_parser: ArgumentParser) -> ArgumentParser:
"""Extends existing argparse with default ASR collection args.
Args:
parent_parser (ArgumentParser): Custom CLI parser that will be extended.
Returns:
ArgumentParser: Parser extended by NeMo ASR Collection arguments.
"""
parser = ArgumentParser(parents=[parent_parser], add_help=False, conflict_handler='resolve')
parser.add_argument("--asr_model", type=str, required=True, default="bad_quartznet15x5.yaml", help="")
parser.add_argument("--train_dataset", type=str, required=True, default=None, help="training dataset path")
parser.add_argument("--eval_dataset", type=str, required=True, help="evaluation dataset path")
return parser
def add_nlp_args(parent_parser: ArgumentParser) -> ArgumentParser:
"""Extends existing argparse with default NLP collection args.
Args:
parent_parser (ArgumentParser): Custom CLI parser that will be extended.
Returns:
ArgumentParser: Parser extended by NeMo NLP Collection arguments.
"""
parser = ArgumentParser(parents=[parent_parser], add_help=False, conflict_handler='resolve')
parser.add_argument(
"--data_dir", type=str, required=False, help="data directory to training or/and evaluation dataset"
)
parser.add_argument(
"--config_file", type=str, required=False, default=None, help="Huggingface model configuration file"
)
parser.add_argument(
"--pretrained_model_name", default='bert-base-uncased', type=str, required=False, help="pretrained model name"
)
parser.add_argument(
"--tokenizer_name", default='nemobert', type=str, choices=['sentencepiece', 'nemobert'], help="Tokenizer type"
)
parser.add_argument("--tokenizer_model", default=None, type=str, help="Tokenizer file for sentence piece")
parser.add_argument("--do_lower_case", action='store_true', required=False, help="lower case data")
return parser
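# A small runnable sketch (not part of the original file) showing how the
# helpers above can be chained to extend a base parser; the argument values
# are illustrative only.
if __name__ == "__main__":
    base_parser = ArgumentParser(add_help=False)
    parser = add_optimizer_args(base_parser, optimizer='novograd', default_lr=0.01)
    parser = add_scheduler_args(parser)
    args = parser.parse_args(['--lr', '0.001', '--warmup_steps', '1000', '--min_lr', '1e-5'])
    print(args.optimizer, args.lr, args.warmup_steps, args.min_lr)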
|
NeMo-main
|
nemo/utils/arguments.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.utils.app_state import AppState
from nemo.utils.cast_utils import (
CastToFloat,
CastToFloatAll,
avoid_bfloat16_autocast_context,
avoid_float16_autocast_context,
cast_all,
cast_tensor,
)
from nemo.utils.nemo_logging import Logger as _Logger
from nemo.utils.nemo_logging import LogMode as logging_mode
logging = _Logger()
try:
from nemo.utils.lightning_logger_patch import add_memory_handlers_to_pl_logger
add_memory_handlers_to_pl_logger()
except ModuleNotFoundError:
pass
|
NeMo-main
|
nemo/utils/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.utils.env_var_parsing import get_envint
def is_global_rank_zero():
""" Helper function to determine if the current process is global_rank 0 (the main process)
"""
# Try to get the pytorch RANK env var
# RANK is set by torch.distributed.launch
rank = get_envint("RANK", None)
if rank is not None:
return rank == 0
# Try to get the SLURM global rank env var
# SLURM_PROCID is set by SLURM
slurm_rank = get_envint("SLURM_PROCID", None)
if slurm_rank is not None:
return slurm_rank == 0
# if neither the pytorch nor the SLURM env vars are set
# check NODE_RANK/GROUP_RANK and LOCAL_RANK env vars
# assume global_rank is zero if undefined
node_rank = get_envint("NODE_RANK", get_envint("GROUP_RANK", 0))
local_rank = get_envint("LOCAL_RANK", 0)
return node_rank == 0 and local_rank == 0
def get_rank():
""" Helper function that returns torch.distributed.get_rank() if DDP has been initialized otherwise it returns 0.
"""
if is_global_rank_zero():
return 0
else:
return torch.distributed.get_rank()
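# A brief runnable check (not part of the original file): in a plain
# single-process run none of the rank environment variables are set, so the
# current process is treated as global rank zero.
if __name__ == "__main__":
    if is_global_rank_zero():
        print(f"Running as rank {get_rank()} (main process)")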
|
NeMo-main
|
nemo/utils/get_rank.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging as _logging
from logging.handlers import MemoryHandler
import pytorch_lightning as pl
HANDLERS = {}
PATCHED = False
def add_memory_handlers_to_pl_logger():
"""
Adds two MemoryHandlers to pytorch_lightning's logger. These two handlers are essentially message buffers. This
function is called in nemo.utils.__init__.py. These handlers are used in add_filehandlers_to_pl_logger to flush
buffered messages to files.
"""
if not HANDLERS:
HANDLERS["memory_err"] = MemoryHandler(-1)
HANDLERS["memory_err"].addFilter(lambda record: record.levelno > _logging.INFO)
HANDLERS["memory_all"] = MemoryHandler(-1)
pl._logger.addHandler(HANDLERS["memory_err"])
pl._logger.addHandler(HANDLERS["memory_all"])
def add_filehandlers_to_pl_logger(all_log_file, err_log_file):
"""
Adds two filehandlers to pytorch_lightning's logger. Called in nemo.utils.exp_manager(). The first filehandler
logs all messages to all_log_file while the second filehandler logs all WARNING and higher messages to err_log_file.
If "memory_err" and "memory_all" exist in HANDLERS, then those buffers are flushed to err_log_file and all_log_file
respectively, and then closed.
"""
HANDLERS["file"] = _logging.FileHandler(all_log_file)
pl._logger.addHandler(HANDLERS["file"])
HANDLERS["file_err"] = _logging.FileHandler(err_log_file)
HANDLERS["file_err"].addFilter(lambda record: record.levelno > _logging.INFO)
pl._logger.addHandler(HANDLERS["file_err"])
if HANDLERS.get("memory_all", None):
HANDLERS["memory_all"].setTarget(HANDLERS["file"])
HANDLERS["memory_all"].close()
del HANDLERS["memory_all"]
if HANDLERS.get("memory_err", None):
HANDLERS["memory_err"].setTarget(HANDLERS["file_err"])
HANDLERS["memory_err"].close()
del HANDLERS["memory_err"]
|
NeMo-main
|
nemo/utils/lightning_logger_patch.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from nemo.utils import logging
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
def initialize_distributed(args, backend='nccl'):
"""Initialize torch.distributed."""
# Get local rank in case it is provided.
local_rank = args.local_rank
# Get rank and world size.
rank = int(os.getenv('RANK', '0'))
world_size = int(os.getenv("WORLD_SIZE", '1'))
logging.info(
f'Initializing torch.distributed with local_rank: {local_rank}, rank: {rank}, world_size: {world_size}'
)
# Set the device id.
device = rank % torch.cuda.device_count()
if local_rank is not None:
device = local_rank
torch.cuda.set_device(device)
# Call the init process.
init_method = 'tcp://'
master_ip = os.getenv('MASTER_ADDR', 'localhost')
master_port = os.getenv('MASTER_PORT', '6000')
init_method += master_ip + ':' + master_port
torch.distributed.init_process_group(backend=backend, world_size=world_size, rank=rank, init_method=init_method)
return local_rank, rank, world_size
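# Illustrative sketch (not part of the original file): initialize_distributed
# expects an object exposing `local_rank` and NCCL-capable GPUs, so it is shown
# commented out. The Namespace below is a stand-in for parsed CLI args.
#
#   from argparse import Namespace
#   args = Namespace(local_rank=int(os.getenv('LOCAL_RANK', '0')))
#   local_rank, rank, world_size = initialize_distributed(args, backend='nccl')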
def gather_objects(partial_results_list, main_rank=None):
"""
Collect objects (e.g., results) from all GPUs.
Useful for inference over multiple GPUs with DDP.
Use main_rank to specify which rank will be used to gather results.
This allows to continue execution on the main_rank only after the gather.
Args:
partial_results_list: list of partial results from each GPU
main_rank: rank of the main process to collect results from all GPUs (useful for collecting results in a target rank)
Example:
predictions = gather_objects(predictions, main_rank=0)
# all but rank 0 will return None
if predictions is None:
return
# from here only rank 0 should continue
pickle.dump(predictions, open(output_fname, "wb"))
"""
# do not fail when DDP is not initialized
if parallel_state.is_unitialized():
return partial_results_list
rank = parallel_state.get_data_parallel_rank()
world_size = parallel_state.get_data_parallel_world_size()
# return input when no DDP is used
if world_size == 1:
return partial_results_list
gathered_results = [None for _ in range(world_size)]
torch.distributed.all_gather_object(gathered_results, partial_results_list)
# return None to non-main ranks
if main_rank is not None:
if rank != main_rank:
return None
# return collected results
results_list = []
for r in gathered_results:
results_list.extend(r)
return results_list
|
NeMo-main
|
nemo/utils/distributed.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import shutil
import subprocess
from typing import Tuple
from nemo import __version__ as NEMO_VERSION
from nemo import constants
from nemo.utils import logging
def resolve_cache_dir() -> pathlib.Path:
"""
Utility method to resolve a cache directory for NeMo that can be overridden by an environment variable.
Example:
NEMO_CACHE_DIR="~/nemo_cache_dir/" python nemo_example_script.py
Returns:
A Path object, resolved to the absolute path of the cache directory. If no override is provided,
uses an inbuilt default that adapts to the NeMo version string.
"""
override_dir = os.environ.get(constants.NEMO_ENV_CACHE_DIR, "")
if override_dir == "":
path = pathlib.Path.joinpath(pathlib.Path.home(), f'.cache/torch/NeMo/NeMo_{NEMO_VERSION}')
else:
path = pathlib.Path(override_dir).resolve()
return path
def is_datastore_path(path) -> bool:
"""Check if a path is from a data object store.
Currently, only AIStore is supported.
"""
return path.startswith('ais://')
def is_tarred_path(path) -> bool:
"""Check if a path is for a tarred file.
"""
return path.endswith('.tar')
def is_datastore_cache_shared() -> bool:
"""Check if store cache is shared.
"""
# Assume cache is shared by default, e.g., as in resolve_cache_dir (~/.cache)
cache_shared = int(os.environ.get(constants.NEMO_ENV_DATA_STORE_CACHE_SHARED, 1))
if cache_shared == 0:
return False
elif cache_shared == 1:
return True
else:
raise ValueError(f'Unexpected value of env {constants.NEMO_ENV_DATA_STORE_CACHE_SHARED}')
def ais_cache_base() -> str:
"""Return path to local cache for AIS.
"""
override_dir = os.environ.get(constants.NEMO_ENV_DATA_STORE_CACHE_DIR, "")
if override_dir == "":
cache_dir = resolve_cache_dir().as_posix()
else:
cache_dir = pathlib.Path(override_dir).resolve().as_posix()
if cache_dir.endswith(NEMO_VERSION):
# Prevent re-caching dataset after upgrading NeMo
cache_dir = os.path.dirname(cache_dir)
return os.path.join(cache_dir, 'ais')
def ais_endpoint() -> str:
"""Get configured AIS endpoint.
"""
return os.getenv('AIS_ENDPOINT')
def bucket_and_object_from_uri(uri: str) -> Tuple[str, str]:
"""Parse a path to determine bucket and object path.
Args:
uri: Full path to an object on an object store
Returns:
Tuple of strings (bucket_name, object_path)
"""
if not is_datastore_path(uri):
raise ValueError(f'Provided URI is not a valid store path: {uri}')
uri_parts = pathlib.PurePath(uri).parts
bucket = uri_parts[1]
object_path = pathlib.PurePath(*uri_parts[2:])
return str(bucket), str(object_path)
def ais_endpoint_to_dir(endpoint: str) -> str:
"""Convert AIS endpoint to a valid dir name.
Used to build cache location.
Args:
endpoint: AIStore endpoint in format https://host:port
Returns:
Directory formed as `host/port`.
"""
if not endpoint.startswith('http://'):
raise ValueError(f'Unexpected format for ais endpoint: {endpoint}')
endpoint = endpoint.replace('http://', '')
host, port = endpoint.split(':')
return os.path.join(host, port)
def ais_binary() -> str:
"""Return location of `ais` binary.
"""
path = shutil.which('ais')
if path is not None:
logging.debug('Found AIS binary at %s', path)
return path
logging.warning('AIS binary not found with `which ais`.')
# Double-check if it exists at the default path
default_path = '/usr/local/bin/ais'
if os.path.isfile(default_path):
logging.info('ais available at the default path: %s', default_path)
return default_path
else:
raise RuntimeError(f'AIS binary not found.')
def datastore_path_to_local_path(store_path: str) -> str:
"""Convert a data store path to a path in a local cache.
Args:
store_path: a path to an object on an object store
Returns:
Path to the same object in local cache.
"""
if store_path.startswith('ais://'):
endpoint = ais_endpoint()
if endpoint is None:
raise RuntimeError(f'AIS endpoint not set, cannot resolve {store_path}')
local_ais_cache = os.path.join(ais_cache_base(), ais_endpoint_to_dir(endpoint))
store_bucket, store_object = bucket_and_object_from_uri(store_path)
local_path = os.path.join(local_ais_cache, store_bucket, store_object)
else:
raise ValueError(f'Unexpected store path format: {store_path}')
return local_path
def get_datastore_object(path: str, force: bool = False, num_retries: int = 5) -> str:
"""Download an object from a store path and return the local path.
If the input `path` is a local path, then nothing will be done, and
the original path will be returned.
Args:
path: path to an object
force: force download, even if a local file exists
num_retries: number of retries if the get command fails
Returns:
Local path of the object.
"""
if path.startswith('ais://'):
endpoint = ais_endpoint()
if endpoint is None:
raise RuntimeError(f'AIS endpoint not set, cannot resolve {path}')
local_path = datastore_path_to_local_path(store_path=path)
if not os.path.isfile(local_path) or force:
# Either we don't have the file in cache or we force download it
# Enhancement: if local file is present, check some tag and compare against remote
local_dir = os.path.dirname(local_path)
if not os.path.isdir(local_dir):
os.makedirs(local_dir, exist_ok=True)
cmd = [ais_binary(), 'get', path, local_path]
# for now info, later debug
logging.debug('Downloading from AIS')
logging.debug('\tendpoint %s', endpoint)
logging.debug('\tpath: %s', path)
logging.debug('\tlocal path: %s', local_path)
logging.debug('\tcmd: %s', subprocess.list2cmdline(cmd))
done = False
for n in range(num_retries):
if not done:
try:
# Use stdout=subprocess.DEVNULL to prevent showing AIS command on each line
subprocess.check_call(cmd, stdout=subprocess.DEVNULL)
done = True
except subprocess.CalledProcessError as err:
logging.warning('Attempt %d of %d failed with: %s', n + 1, num_retries, str(err))
if not done:
raise RuntimeError(f'Download failed: {subprocess.list2cmdline(cmd)}')
return local_path
else:
# Assume the file is local
return path
class DataStoreObject:
"""A simple class for handling objects in a data store.
Currently, this class supports objects on AIStore.
Args:
store_path: path to a store object
local_path: path to a local object, may be used to upload local object to store
get: get the object from a store
"""
def __init__(self, store_path: str, local_path: str = None, get: bool = False):
if local_path is not None:
raise NotImplementedError('Specifying a local path is currently not supported.')
self._store_path = store_path
self._local_path = local_path
if get:
self.get()
@property
def store_path(self) -> str:
"""Return store path of the object.
"""
return self._store_path
@property
def local_path(self) -> str:
"""Return local path of the object.
"""
return self._local_path
def get(self, force: bool = False) -> str:
"""Get an object from the store to local cache and return the local path.
Args:
force: force download, even if a local file exists
Returns:
Path to a local object.
"""
if not self.local_path:
# Assume the object needs to be downloaded
self._local_path = get_datastore_object(self.store_path, force=force)
return self.local_path
def put(self, force: bool = False) -> str:
"""Push to remote and return the store path
Args:
force: force download, even if a local file exists
Returns:
Path to a (remote) object on the object store.
"""
raise NotImplementedError()
def __str__(self):
"""Return a human-readable description of the object.
"""
description = f'{type(self)}: store_path={self.store_path}, local_path={self.local_path}'
return description
def datastore_path_to_webdataset_url(store_path: str):
"""Convert store_path to a WebDataset URL.
Args:
store_path: path to buckets on store
Returns:
URL which can be directly used with WebDataset.
"""
if store_path.startswith('ais://'):
url = f'pipe:ais get {store_path} - || true'
else:
raise ValueError(f'Unknown store path format: {store_path}')
return url
def datastore_object_get(store_object: DataStoreObject) -> bool:
"""A convenience wrapper for multiprocessing.imap.
Args:
store_object: An instance of DataStoreObject
Returns:
True if get() returned a path.
"""
return store_object.get() is not None
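# Illustrative sketch (not part of the original file): fetching an AIStore
# object requires AIS_ENDPOINT and the `ais` CLI, so the calls are commented
# out and the bucket/object path below is made up.
#
#   obj = DataStoreObject('ais://my-bucket/manifests/train.json')
#   local_file = obj.get()  # downloads into the local AIS cache and returns the path
#   url = datastore_path_to_webdataset_url('ais://my-bucket/shards/shard-000000.tar')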
|
NeMo-main
|
nemo/utils/data_utils.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class NeMoBaseException(Exception):
""" NeMo Base Exception. All exceptions created in NeMo should inherit from this class"""
class LightningNotInstalledException(NeMoBaseException):
def __init__(self, obj):
message = (
f" You are trying to use {obj} without installing all of pytorch_lightning, hydra, and "
f"omegaconf. Please install those packages before trying to access {obj}."
)
super().__init__(message)
class CheckInstall:
def __init__(self, *args, **kwargs):
raise LightningNotInstalledException(self)
def __call__(self, *args, **kwargs):
raise LightningNotInstalledException(self)
def __getattr__(self, *args, **kwargs):
raise LightningNotInstalledException(self)
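# Illustrative sketch (not part of the original file) of the import-guard
# pattern these classes support: when an optional dependency is missing, a
# CheckInstall subclass stands in for it and raises a helpful error on first
# use. The Trainer name below is only an example.
#
#   try:
#       from pytorch_lightning import Trainer
#   except (ImportError, ModuleNotFoundError):
#       class Trainer(CheckInstall):
#           pass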
|
NeMo-main
|
nemo/utils/exceptions.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
class PrettyStrEnum(Enum):
"""
Pretty enum to work with string values for config options with choices
Provides an automatic error message with the possible values if the value is not in the enum
Converting to string will show the actual string value, which makes serialization/deserialization straightforward
Example:
class ASRModelType(PrettyStrEnum):
CTC = "ctc"
RNNT = "rnnt"
...
model_type = ASRModelType(model_type_string) # automatically validated
if model_type == ASRModelType.CTC: # less error-prone (to typos) than comparing pure string literals
... # do something specific to CTC model
"""
def __str__(self):
return self.value
@classmethod
def _missing_(cls, value: object):
choices = ', '.join(map(str, cls))
raise ValueError(f"{value} is not a valid {cls.__name__}. Possible choices: {choices}")
|
NeMo-main
|
nemo/utils/enum.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
def get_forward_hook(name, trainer, rank, logger, dump_to_file=False):
"""
A forward hook to dump all of the module input and output norms. It is called every time after forward() has computed an output.
Only float type input/output tensor norms are computed.
For more details about the forward hook, check https://pytorch.org/docs/stable/generated/torch.nn.modules.module.register_module_forward_hook.html
Args:
name: tensor name
trainer: PTL trainer
rank: worker rank
logger: PTL log function
dump_to_file: whether to dump the csv file to disk
"""
if dump_to_file:
os.makedirs('debug_info', exist_ok=True)
fp = open(f'debug_info/forward_{name}_rank{rank}.txt', 'w')
header = False
def forward_hook(module, inputs, outputs):
nonlocal header
nonlocal fp
if trainer.training:
values = []
headers = []
for n, i in enumerate(inputs):
if isinstance(i, torch.Tensor) and (
i.dtype == torch.float or i.dtype == torch.half or i.dtype == torch.bfloat16
):
if not header:
headers.append('input')
input_norm = i.data.norm()
values.append(f'{input_norm}')
logger(f'debug_info_forward/{name}_rank{rank}_input{n}', input_norm)
if isinstance(outputs, tuple):
for n, i in enumerate(outputs):
if isinstance(i, torch.Tensor) and (
i.dtype == torch.float or i.dtype == torch.half or i.dtype == torch.bfloat16
):
if not header:
headers.append('output')
output_norm = i.data.norm()
values.append(f'{output_norm}')
logger(f'debug_info_forward/{name}_rank{rank}_output{n}', output_norm)
else:
headers.append('output')
values.append(f'{outputs.data.norm()}')
values.append(f'{trainer.global_step}')
if not header:
headers.append('step')
fp.write(','.join(headers) + '\n')
header = True
fp.write(','.join(values) + '\n')
fp.flush()
return forward_hook
def get_backward_hook(name, trainer, rank, logger, dump_to_file=False):
"""
A backward hook to dump all of the module input and output grad norms. The hook will be called every time the gradients with respect to module inputs are computed.
Only float type input/output grad tensor norms are computed.
For more details about the backward hook, check https://pytorch.org/docs/stable/generated/torch.nn.modules.module.register_module_full_backward_hook.html
Args:
name: tensor name
trainer: PTL trainer
rank: worker rank
logger: PTL log function
dump_to_file: whether to dump the csv file to disk
"""
if dump_to_file:
os.makedirs('debug_info', exist_ok=True)
fp = open(f'debug_info/backward_{name}_rank{rank}.txt', 'w')
header = False
def backward_hook(module, inputs, outputs):
nonlocal header
nonlocal fp
if trainer.training:
values = []
headers = []
for n, i in enumerate(inputs):
if isinstance(i, torch.Tensor) and (
i.dtype == torch.float or i.dtype == torch.half or i.dtype == torch.bfloat16
):
if not header:
headers.append('input')
input_norm = i.data.norm()
values.append(f'{input_norm}')
logger(f'debug_info_backward/{name}_rank{rank}_input{n}', input_norm)
if isinstance(outputs, tuple):
for n, i in enumerate(outputs):
if isinstance(i, torch.Tensor) and (
i.dtype == torch.float or i.dtype == torch.half or i.dtype == torch.bfloat16
):
if not header:
headers.append('output')
output_norm = i.data.norm()
values.append(f'{output_norm}')
logger(f'debug_info_backward/{name}_rank{rank}_output{n}', output_norm)
else:
headers.append('output')
values.append(f'{outputs.data.norm()}')
values.append(f'{trainer.global_step}')
if not header:
headers.append('step')
fp.write(','.join(headers) + '\n')
header = True
fp.write(','.join(values) + '\n')
fp.flush()
return backward_hook
def get_tensor_hook(module, name, trainer, rank, logger, dump_to_file=False):
"""
A tensor hook to dump all of the tensor weight norms and grad norms at the end of each of the backward steps.
For more details about the tensor hook, check https://pytorch.org/docs/stable/generated/torch.Tensor.register_hook.html
Args:
module: the model module
name: tensor name
trainer: PTL trainer
rank: worker rank
logger: PTL log function
dump_to_file: whether to dump the csv file to disk
"""
if dump_to_file:
os.makedirs('debug_info', exist_ok=True)
fp = open(f'debug_info/tensor_{name}_rank{rank}.csv', 'w')
header = False
def tensor_hook(grad):
nonlocal header
nonlocal fp
values = []
headers = []
weight = module.get_parameter(name)
weight_norm = weight.data.norm()
grad_norm = grad.data.norm()
logger(f'debug_info_tensors/{name}_rank{rank}_grad_norm', grad_norm)
logger(f'debug_info_tensors/{name}_rank{rank}_weight_norm', weight_norm)
values.append(f'{weight_norm}')
values.append(f'{grad_norm}')
values.append(f'{trainer.global_step}')
if dump_to_file:
if not header:
headers.append('weight')
headers.append('grad')
headers.append('step')
fp.write(','.join(headers) + '\n')
header = True
fp.write(','.join(values) + '\n')
fp.flush()
return grad
return tensor_hook
def register_debug_hooks(module, trainer, logger, dump_to_file=False):
"""
Register debug hooks. It can
1. track the module forward step input/output norm
2. track the module backward step input/output grad norm
3. track the parameter weight norm and grad norm.
"""
# default rank 0
rank = 0
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
for name, tensor in module.named_parameters():
if name != '':
tensor.register_hook(get_tensor_hook(module, name, trainer, rank, logger, dump_to_file))
for name, layer in module.named_modules():
if name != '':
layer.register_forward_hook(get_forward_hook(name, trainer, rank, logger, dump_to_file))
layer.register_full_backward_hook(get_backward_hook(name, trainer, rank, logger, dump_to_file))
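# Illustrative sketch (not part of the original file): the hooks expect a
# Lightning trainer and its logging function, so the wiring is shown commented
# out. `MyLightningModule` is a hypothetical module.
#
#   import pytorch_lightning as pl
#   model = MyLightningModule()
#   trainer = pl.Trainer(max_steps=10)
#   register_debug_hooks(model, trainer, model.log, dump_to_file=True)
#   trainer.fit(model)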
|
NeMo-main
|
nemo/utils/debug_hook.py
|
"""
This module supports timing of code blocks.
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
import torch
__all__ = ["NamedTimer"]
class NamedTimer(object):
"""
A timer class that supports multiple named timers.
A named timer can be used multiple times, in which case the average
dt will be returned.
A named timer cannot be started if it is already currently running.
Use case: measuring execution of multiple code blocks.
"""
_REDUCTION_TYPE = ["mean", "sum", "min", "max", "none"]
def __init__(self, reduction="mean", sync_cuda=False, buffer_size=-1):
"""
Args:
reduction (str): reduction over multiple timings of the same timer
(none - returns the list instead of a scalar)
sync_cuda (bool): if True torch.cuda.synchronize() is called for start/stop
buffer_size (int): if positive, limits the number of stored measures per name
"""
if reduction not in self._REDUCTION_TYPE:
raise ValueError(f"Unknown reduction={reduction} please use one of {self._REDUCTION_TYPE}")
self._reduction = reduction
self._sync_cuda = sync_cuda
self._buffer_size = buffer_size
self.reset()
def __getitem__(self, k):
return self.get(k)
@property
def buffer_size(self):
return self._buffer_size
@property
def _reduction_fn(self):
if self._reduction == "none":
fn = lambda x: x
else:
fn = getattr(np, self._reduction)
return fn
def reset(self, name=None):
"""
Resets all timers, or a specific named timer
Args:
name (str): timer name to reset (if None all timers are reset)
"""
if name is None:
self.timers = {}
else:
self.timers[name] = {}
def start(self, name=""):
"""
Starts measuring a named timer.
Args:
name (str): timer name to start
"""
timer_data = self.timers.get(name, {})
if "start" in timer_data:
raise RuntimeError(f"Cannot start timer = '{name}' since it is already active")
# synchronize pytorch cuda execution if supported
if self._sync_cuda and torch.cuda.is_initialized():
torch.cuda.synchronize()
timer_data["start"] = time.time()
self.timers[name] = timer_data
def stop(self, name=""):
"""
Stops measuring a named timer.
Args:
name (str): timer name to stop
"""
timer_data = self.timers.get(name, None)
if (timer_data is None) or ("start" not in timer_data):
raise RuntimeError(f"Cannot end timer = '{name}' since it is not active")
# synchronize pytorch cuda execution if supported
if self._sync_cuda and torch.cuda.is_initialized():
torch.cuda.synchronize()
# compute dt and make timer inactive
dt = time.time() - timer_data.pop("start")
# store dt
timer_data["dt"] = timer_data.get("dt", []) + [dt]
# enforce buffer_size if positive
if self._buffer_size > 0:
timer_data["dt"] = timer_data["dt"][-self._buffer_size :]
self.timers[name] = timer_data
def active_timers(self):
"""
Return list of all active named timers
"""
return [k for k, v in self.timers.items() if ("start" in v)]
def get(self, name=""):
"""
Returns the value of a named timer
Args:
name (str): timer name to return
"""
dt_list = self.timers[name].get("dt", [])
return self._reduction_fn(dt_list)
def export(self):
"""
Exports a dictionary with average/all dt per named timer
"""
fn = self._reduction_fn
data = {k: fn(v["dt"]) for k, v in self.timers.items() if ("dt" in v)}
return data
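# A short runnable example (not part of the original file): the same named
# timer is started and stopped twice, and the configured reduction ("mean")
# is applied when reading it back.
if __name__ == "__main__":
    timer = NamedTimer(reduction="mean")
    for _ in range(2):
        timer.start("block")
        time.sleep(0.01)
        timer.stop("block")
    print(f"mean dt for 'block': {timer.get('block'):.4f}s")
    print(timer.export())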
|
NeMo-main
|
nemo/utils/timers.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from threading import Lock
from typing import Dict, Optional
from nemo.utils.metaclasses import Singleton
@dataclass()
class ModelMetadataRegistry:
guid: str
gidx: int
restoration_path: Optional[str] = None
class AppState(metaclass=Singleton):
def __init__(self):
# method call lock
self.__lock = Lock()
# TODO: should we store global config in hydra_runner?
self._app_cfg = None
# World info
self._device_id = None
self._local_rank = None
self._global_rank = None
self._tensor_model_parallel_rank = None
self._pipeline_model_parallel_rank = None
self._data_parallel_rank = None
self._world_size = None
self._model_parallel_size = None
self._tensor_model_parallel_size = None
self._tensor_model_parallel_group = None
self._pipeline_model_parallel_size = None
self._virtual_pipeline_model_parallel_size = None
self._pipeline_model_parallel_group = None
self._pipeline_model_parallel_split_rank = None
self._is_megatron_initialized = False
self._data_parallel_size = None
self._data_parallel_group = None
self._megatron_checkpoint_version = None
self._use_fp8 = False
self._init_mpi_proc_group = False
self._random_seed = None
# Logging info
self._log_dir = None
self._exp_dir = None
self._name = None
self._checkpoint_name = None
self._version = None
self._create_checkpoint_callback = None
self._checkpoint_callback_params = None
# Save and Restore (.nemo)
self._tmpdir_name = None
self._is_model_being_restored = False
self._nemo_file_folder = None
self._model_restore_path = None
self._all_model_restore_paths = []
self._model_guid_map = {} # type: Dict[str, ModelMetadataRegistry]
@property
def device_id(self):
""" Property returns the device_id
Returns:
device_id
"""
return self._device_id
@device_id.setter
def device_id(self, id):
""" Property sets the device_id.
Args:
id (int): The device id.
"""
self._device_id = id
@property
def world_size(self):
""" Property returns the total number of GPUs.
Returns:
Total number of GPUs.
"""
return self._world_size
@world_size.setter
def world_size(self, size):
""" Property sets the total number of GPUs.
Args:
size (int): Total number of GPUs.
"""
self._world_size = size
@property
def model_parallel_size(self):
""" Property returns the number of GPUs in each model parallel group.
Returns:
Number of GPUs in each model parallel group.
"""
return self._model_parallel_size
@model_parallel_size.setter
def model_parallel_size(self, size):
""" Property sets the number of GPUs in each model parallel group.
Args:
size (int): Number of GPUs in each model parallel group.
"""
self._model_parallel_size = size
@property
def tensor_model_parallel_size(self):
""" Property returns the number of GPUs in each model parallel group.
Returns:
Number of GPUs in each model parallel group.
"""
return self._tensor_model_parallel_size
@tensor_model_parallel_size.setter
def tensor_model_parallel_size(self, size):
""" Property sets the number of GPUs in each model parallel group.
Args:
size (int): Number of GPUs in each model parallel group.
"""
self._tensor_model_parallel_size = size
@property
def pipeline_model_parallel_size(self):
""" Property returns the number of GPUs in each model parallel group.
Returns:
Number of GPUs in each model parallel group.
"""
return self._pipeline_model_parallel_size
@pipeline_model_parallel_size.setter
def pipeline_model_parallel_size(self, size):
""" Property sets the number of GPUs in each model parallel group.
Args:
size (int): Number of GPUs in each model parallel group.
"""
self._pipeline_model_parallel_size = size
@property
def virtual_pipeline_model_parallel_size(self):
""" Property returns the number of GPUs in each model parallel group.
Returns:
Number of GPUs in each model parallel group.
"""
return self._virtual_pipeline_model_parallel_size
@virtual_pipeline_model_parallel_size.setter
def virtual_pipeline_model_parallel_size(self, size):
""" Property sets the size of the virtual pipeline parallel model.
Args:
size (int): Number of modules in each pipeline parallel model.
"""
self._virtual_pipeline_model_parallel_size = size
@property
def data_parallel_size(self):
""" Property returns the number of GPUs in each data parallel group.
Returns:
Number of GPUs in each data parallel group.
"""
return self._data_parallel_size
@data_parallel_size.setter
def data_parallel_size(self, size):
""" Property sets the number of GPUs in each data parallel group.
Args:
size (int): Number of GPUs in each data parallel group.
"""
self._data_parallel_size = size
@property
def local_rank(self):
""" Property returns the local rank.
Returns:
Local rank.
"""
return self._local_rank
@local_rank.setter
def local_rank(self, rank):
""" Property sets the local rank.
Args:
rank (int): Local rank.
"""
self._local_rank = rank
@property
def global_rank(self):
""" Property returns the global rank.
Returns:
Global rank.
"""
return self._global_rank
@global_rank.setter
def global_rank(self, rank):
""" Property sets the global rank.
Args:
rank (int): Global rank.
"""
self._global_rank = rank
@property
def tensor_model_parallel_rank(self):
""" Property returns the tensor model parallel rank.
Returns:
Tensor model parallel rank.
"""
return self._tensor_model_parallel_rank
@tensor_model_parallel_rank.setter
def tensor_model_parallel_rank(self, rank):
""" Property sets the tensor model parallel rank.
Args:
rank (int): Tensor model parallel rank.
"""
self._tensor_model_parallel_rank = rank
@property
def tensor_model_parallel_group(self):
""" Property returns the tensor model parallel group.
Returns:
Tensor model parallel group.
"""
return self._tensor_model_parallel_group
@tensor_model_parallel_group.setter
def tensor_model_parallel_group(self, group):
""" Property sets the tensor model parallel group.
Args:
group: Tensor model parallel group.
"""
self._tensor_model_parallel_group = group
@property
def pipeline_model_parallel_rank(self):
""" Property returns the pipeline model parallel rank.
Returns:
Pipeline model parallel rank.
"""
return self._pipeline_model_parallel_rank
@pipeline_model_parallel_rank.setter
def pipeline_model_parallel_rank(self, rank):
""" Property sets the pipeline model parallel rank.
Args:
rank (int): Pipeline model parallel rank.
"""
self._pipeline_model_parallel_rank = rank
@property
def virtual_pipeline_model_parallel_rank(self):
""" Property returns the virtual pipeline parallel rank.
Returns:
Virtual pipeline parallel rank.
"""
return self._virtual_pipeline_model_parallel_rank
@virtual_pipeline_model_parallel_rank.setter
def virtual_pipeline_model_parallel_rank(self, rank):
""" Property sets the virtual pipeline parallel rank.
Args:
rank (int): Virtual pipeline parallel rank.
"""
self._virtual_pipeline_model_parallel_rank = rank
@property
def pipeline_model_parallel_split_rank(self):
""" Property returns the rank at which Encoder and Decoder are split into different pipelines for Megatrron Encoder-Decoder models.
Returns:
Pipeline model parallel split rank.
"""
return self._pipeline_model_parallel_split_rank
@pipeline_model_parallel_split_rank.setter
def pipeline_model_parallel_split_rank(self, rank):
""" Property sets the rank at which Encoder and Decoder are split into different pipelines for Megatrron Encoder-Decoder models.
Args:
rank (int): Model parallel split rank.
"""
self._pipeline_model_parallel_split_rank = rank
@property
def pipeline_model_parallel_group(self):
""" Property returns the pipeline model parallel group.
Returns:
Pipeline model parallel group.
"""
return self._pipeline_model_parallel_group
@pipeline_model_parallel_group.setter
def pipeline_model_parallel_group(self, group):
""" Property sets the pipeline model parallel group.
Args:
group: Pipeline model parallel group.
"""
self._pipeline_model_parallel_group = group
@property
def data_parallel_rank(self):
""" Property returns the data parallel rank.
Returns:
Data parallel rank.
"""
return self._data_parallel_rank
@data_parallel_rank.setter
def data_parallel_rank(self, rank):
""" Property sets the data parallel rank.
Args:
rank (int): Data parallel rank.
"""
self._data_parallel_rank = rank
@property
def data_parallel_group(self):
""" Property returns the data parallel group.
Returns:
Data parallel group.
"""
return self._data_parallel_group
@data_parallel_group.setter
def data_parallel_group(self, group):
""" Property sets the data parallel group.
Args:
group: Data parallel group.
"""
self._data_parallel_group = group
@property
def use_fp8(self):
""" Property returns the use of fp8 precision.
Returns:
Use of FP8.
"""
return self._use_fp8
@use_fp8.setter
def use_fp8(self, use_fp8):
""" Property sets the use of fp8 precision.
Args:
use_fp8: Use of FP8.
"""
self._use_fp8 = use_fp8
@property
def init_mpi_proc_group(self):
""" Property sets the initialization of mpi process group.
Returns:
Initialize mpi process group.
"""
return self._init_mpi_proc_group
@init_mpi_proc_group.setter
def init_mpi_proc_group(self, init_mpi_proc_group):
""" Property sets the initialization of mpi process group.
Args:
init_mpi_proc_group: Initialize mpi process group.
"""
self._init_mpi_proc_group = init_mpi_proc_group
@property
def random_seed(self):
""" Property returns the random seed.
Returns:
Random seed.
"""
return self._random_seed
@random_seed.setter
def random_seed(self, seed):
""" Property sets the random seed.
Args:
seed (int): Random seed.
"""
self._random_seed = seed
@property
def log_dir(self):
"""Returns the log_dir set by exp_manager.
"""
return self._log_dir
@log_dir.setter
def log_dir(self, dir):
"""Sets the log_dir property.
Args:
dir (str): Log_dir set by exp_manager.
"""
self._log_dir = dir
@property
def exp_dir(self):
"""Returns the exp_dir set by exp_manager.
"""
return self._exp_dir
@exp_dir.setter
def exp_dir(self, dir):
"""Sets the log_dir property.
Args:
dir (str): Log_dir set by exp_manager.
"""
self._exp_dir = dir
@property
def name(self):
"""Returns the name set by exp_manager.
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name property.
Args:
name (str): name set by exp_manager.
"""
self._name = name
@property
def checkpoint_name(self):
"""Returns the name set by exp_manager.
"""
return self._checkpoint_name
@checkpoint_name.setter
def checkpoint_name(self, name):
"""Sets the name property.
Args:
name (str): checkpoint name set by exp_manager.
"""
self._checkpoint_name = name
@property
def version(self):
"""Returns the version set by exp_manager.
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version property.
Args:
version (str): version set by exp_manager.
"""
self._version = version
@property
def create_checkpoint_callback(self):
"""Returns the create_checkpoint_callback set by exp_manager.
"""
return self._create_checkpoint_callback
@create_checkpoint_callback.setter
def create_checkpoint_callback(self, create_checkpoint_callback):
"""Sets the create_checkpoint_callback property.
Args:
create_checkpoint_callback (bool): create_checkpoint_callback set by exp_manager.
"""
self._create_checkpoint_callback = create_checkpoint_callback
@property
def checkpoint_callback_params(self):
"""Returns the version set by exp_manager.
"""
return self._checkpoint_callback_params
@checkpoint_callback_params.setter
def checkpoint_callback_params(self, params):
"""Sets the name property.
Args:
params (dict): checkpoint_callback_params set by exp_manager.
"""
self._checkpoint_callback_params = params
@property
def model_restore_path(self):
restore_path = self._all_model_restore_paths[-1] if len(self._all_model_restore_paths) > 0 else None
return restore_path
@model_restore_path.setter
def model_restore_path(self, path):
with self.__lock:
self._model_restore_path = path
self._all_model_restore_paths.append(path)
def register_model_guid(self, guid: str, restoration_path: Optional[str] = None):
# Maps a guid to its restore path (None or last absolute path)
with self.__lock:
if guid in self._model_guid_map:
idx = self._model_guid_map[guid].gidx
else:
idx = len(self._model_guid_map)
self._model_guid_map[guid] = ModelMetadataRegistry(guid, idx, restoration_path=restoration_path)
def reset_model_guid_registry(self):
# Reset the guid mapping
with self.__lock:
self._model_guid_map.clear()
def get_model_metadata_from_guid(self, guid) -> ModelMetadataRegistry:
# Returns the global model idx and restoration path
metadata = self._model_guid_map[guid]
return metadata
@property
def is_model_being_restored(self) -> bool:
return self._is_model_being_restored
@is_model_being_restored.setter
def is_model_being_restored(self, is_restored: bool):
self._is_model_being_restored = is_restored
@property
def nemo_file_folder(self) -> str:
return self._nemo_file_folder
@nemo_file_folder.setter
def nemo_file_folder(self, path: str):
self._nemo_file_folder = path
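# A brief runnable example (not part of the original file): AppState uses the
# Singleton metaclass, so separate constructions return the same instance and
# share state across the process.
if __name__ == "__main__":
    state_a = AppState()
    state_a.world_size = 8
    state_b = AppState()
    assert state_a is state_b
    print(state_b.world_size)  # 8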
|
NeMo-main
|
nemo/utils/app_state.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
from dataclasses import is_dataclass
from typing import Dict, List, Optional
from nemo.utils import logging
# TODO @blisc: Perhaps refactor instead of import guarding
_HAS_HYDRA = True
try:
from omegaconf import DictConfig, OmegaConf, open_dict
except ModuleNotFoundError:
_HAS_HYDRA = False
def update_model_config(
model_cls: 'nemo.core.config.modelPT.NemoConfig', update_cfg: 'DictConfig', drop_missing_subconfigs: bool = True
):
"""
Helper function that updates the default values of a ModelPT config class with the values
in a DictConfig that mirrors the structure of the config class.
Assumes the `update_cfg` is a DictConfig (either generated manually, via hydra or instantiated via yaml/model.cfg).
This update_cfg is then used to override the default values preset inside the ModelPT config class.
If `drop_missing_subconfigs` is set, certain sub-configs of the ModelPT config class will be removed if
they are not found in the mirrored `update_cfg`. The following sub-configs are subject to potential removal:
- `train_ds`
- `validation_ds`
- `test_ds`
- `optim` + nested `sched`.
Args:
model_cls: A subclass of NemoConfig, that details in entirety all of the parameters that constitute
the NeMo Model.
update_cfg: A DictConfig that mirrors the structure of the NemoConfig data class. Used to update the
default values of the config class.
drop_missing_subconfigs: Bool which determines whether to drop certain sub-configs from the NemoConfig
class, if the corresponding sub-config is missing from `update_cfg`.
Returns:
A DictConfig with updated values that can be used to instantiate the NeMo Model along with supporting
infrastructure.
"""
if not _HAS_HYDRA:
logging.error("This function requires Hydra/Omegaconf and it was not installed.")
exit(1)
if not (is_dataclass(model_cls) or isinstance(model_cls, DictConfig)):
raise ValueError("`model_cfg` must be a dataclass or a structured OmegaConf object")
if not isinstance(update_cfg, DictConfig):
update_cfg = OmegaConf.create(update_cfg)
if is_dataclass(model_cls):
model_cls = OmegaConf.structured(model_cls)
# Update optional configs
model_cls = _update_subconfig(
model_cls, update_cfg, subconfig_key='train_ds', drop_missing_subconfigs=drop_missing_subconfigs
)
model_cls = _update_subconfig(
model_cls, update_cfg, subconfig_key='validation_ds', drop_missing_subconfigs=drop_missing_subconfigs
)
model_cls = _update_subconfig(
model_cls, update_cfg, subconfig_key='test_ds', drop_missing_subconfigs=drop_missing_subconfigs
)
model_cls = _update_subconfig(
model_cls, update_cfg, subconfig_key='optim', drop_missing_subconfigs=drop_missing_subconfigs
)
# Add optim and sched additional keys to model cls
model_cls = _add_subconfig_keys(model_cls, update_cfg, subconfig_key='optim')
# Perform full merge of model config class and update config
# Remove ModelPT artifact `target`
if 'target' in update_cfg.model:
# Assume artifact from ModelPT and pop
if 'target' not in model_cls.model:
with open_dict(update_cfg.model):
update_cfg.model.pop('target')
# Remove ModelPT artifact `nemo_version`
if 'nemo_version' in update_cfg.model:
# Assume artifact from ModelPT and pop
if 'nemo_version' not in model_cls.model:
with open_dict(update_cfg.model):
update_cfg.model.pop('nemo_version')
model_cfg = OmegaConf.merge(model_cls, update_cfg)
return model_cfg
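# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition, not part of the original module):
# how `update_model_config` is typically called. `MyNemoConfig` and the YAML
# path are hypothetical placeholders, not names defined in NeMo.
#
#     from omegaconf import OmegaConf
#
#     default_cfg = MyNemoConfig()                      # a NemoConfig subclass (hypothetical)
#     user_cfg = OmegaConf.load("conf/my_model.yaml")   # mirrors the NemoConfig layout (hypothetical path)
#     cfg = update_model_config(default_cfg, user_cfg, drop_missing_subconfigs=True)
#     # `cfg` is a DictConfig: defaults from the dataclass, overridden by the user values,
#     # with train_ds / validation_ds / test_ds / optim dropped if absent from user_cfg.model.
# ---------------------------------------------------------------------------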
def _update_subconfig(
model_cfg: 'DictConfig', update_cfg: 'DictConfig', subconfig_key: str, drop_missing_subconfigs: bool
):
"""
Updates the NemoConfig DictConfig such that:
1) If the sub-config key exists in the `update_cfg`, but does not exist in ModelPT config:
- Add the sub-config from update_cfg to ModelPT config
2) If the sub-config key does not exist in `update_cfg`, but exists in ModelPT config:
- Remove the sub-config from the ModelPT config, but only if the `drop_missing_subconfigs` flag is set.
Args:
model_cfg: A DictConfig instantiated from the NemoConfig subclass.
update_cfg: A DictConfig that mirrors the structure of `model_cfg`, used to update its default values.
subconfig_key: A str key used to check and update the sub-config.
drop_missing_subconfigs: A bool flag, whether to allow deletion of the NemoConfig sub-config,
if its mirror sub-config does not exist in the `update_cfg`.
Returns:
The updated DictConfig for the NemoConfig
"""
if not _HAS_HYDRA:
logging.error("This function requires Hydra/Omegaconf and it was not installed.")
exit(1)
with open_dict(model_cfg.model):
# If update config has the key, but model cfg doesn't have the key
# Add the update cfg subconfig to the model cfg
if subconfig_key in update_cfg.model and subconfig_key not in model_cfg.model:
model_cfg.model[subconfig_key] = update_cfg.model[subconfig_key]
# If update config does not have the key, but model cfg has the key
# Remove the model cfg subconfig in order to match layout of update cfg
if subconfig_key not in update_cfg.model and subconfig_key in model_cfg.model:
if drop_missing_subconfigs:
model_cfg.model.pop(subconfig_key)
return model_cfg
def _add_subconfig_keys(model_cfg: 'DictConfig', update_cfg: 'DictConfig', subconfig_key: str):
"""
For certain sub-configs, the default values specified by the NemoConfig class are insufficient.
In order to support every potential value in the merge with `update_cfg`, it would require
explicit definition of all possible cases.
An example of such a case is Optimizers, and their equivalent Schedulers. All optimizers share a few basic
details - such as name and lr, but almost all require additional parameters - such as weight decay.
It is impractical to create a config for every single optimizer + every single scheduler combination.
In such a case, we perform a dual merge. The Optim and Sched Dataclass contain the bare minimum essential
components. The extra values are provided via update_cfg.
In order to enable the merge, we first need to update the update sub-config to incorporate the keys,
with dummy temporary values (merge update config with model config). This is done on a copy of the
update sub-config, as the actual override values might be overridden by the NemoConfig defaults.
Then we perform a merge of this temporary sub-config with the actual override config in a later step
(merge model_cfg with original update_cfg, done outside this function).
Args:
model_cfg: A DictConfig instantiated from the NemoConfig subclass.
update_cfg: A DictConfig that mirrors the structure of `model_cfg`, used to update its default values.
subconfig_key: A str key used to check and update the sub-config.
Returns:
A ModelPT DictConfig with additional keys added to the sub-config.
"""
if not _HAS_HYDRA:
logging.error("This function requires Hydra/Omegaconf and it was not installed.")
exit(1)
with open_dict(model_cfg.model):
# Create copy of original model sub config
if subconfig_key in update_cfg.model:
if subconfig_key not in model_cfg.model:
# create the key as a placeholder
model_cfg.model[subconfig_key] = None
subconfig = copy.deepcopy(model_cfg.model[subconfig_key])
update_subconfig = copy.deepcopy(update_cfg.model[subconfig_key])
# Add the keys and update temporary values, will be updated during full merge
subconfig = OmegaConf.merge(update_subconfig, subconfig)
# Update sub config
model_cfg.model[subconfig_key] = subconfig
return model_cfg
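# Hedged illustration (editorial addition) of the dual merge described in the docstring
# above, using plain DictConfigs; `weight_decay` stands in for an "extra" optimizer key
# that OptimConfig does not declare.
#
#     from omegaconf import OmegaConf
#
#     model_cfg = OmegaConf.create({'model': {'optim': {'name': '???', 'sched': None}}})
#     update_cfg = OmegaConf.create({'model': {'optim': {'name': 'adamw', 'lr': 1e-3, 'weight_decay': 0.01}}})
#     model_cfg = _add_subconfig_keys(model_cfg, update_cfg, 'optim')
#     # model_cfg.model.optim now also carries 'lr' and 'weight_decay' placeholders. In the real
#     # flow, where model_cfg comes from OmegaConf.structured(...), these added keys keep the
#     # final OmegaConf.merge(model_cfg, update_cfg) from failing on unknown fields.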
def assert_dataclass_signature_match(
cls: 'class_type',
datacls: 'dataclass',
ignore_args: Optional[List[str]] = None,
remap_args: Optional[Dict[str, str]] = None,
):
"""
Analyses the signature of a provided class and its respective data class,
asserting that the dataclass signature matches the class __init__ signature.
Note:
This is not a value based check. This function only checks if all argument
names exist on both class and dataclass and logs mismatches.
Args:
cls: Any class type - but not an instance of a class. Pass type(x) where x is an instance
if class type is not easily available.
datacls: A corresponding dataclass for the above class.
ignore_args: (Optional) A list of string argument names which are forcibly ignored,
even if mismatched in the signature. Useful when a dataclass is a superset of the
arguments of a class.
remap_args: (Optional) A dictionary, mapping an argument name that exists (in either the
class or its dataclass), to another name. Useful when argument names are mismatched between
a class and its dataclass due to indirect instantiation via a helper method.
Returns:
A tuple containing information about the analysis:
1) A bool value which is True if the signatures matched exactly / after ignoring values.
False otherwise.
2) A set of arguments names that exist in the class, but *do not* exist in the dataclass.
If exact signature match occurs, this will be None instead.
3) A set of argument names that exist in the data class, but *do not* exist in the class itself.
If exact signature match occurs, this will be None instead.
"""
class_sig = inspect.signature(cls.__init__)
class_params = dict(**class_sig.parameters)
class_params.pop('self')
dataclass_sig = inspect.signature(datacls)
dataclass_params = dict(**dataclass_sig.parameters)
dataclass_params.pop("_target_", None)
class_params = set(class_params.keys())
dataclass_params = set(dataclass_params.keys())
if remap_args is not None:
for original_arg, new_arg in remap_args.items():
if original_arg in class_params:
class_params.remove(original_arg)
class_params.add(new_arg)
logging.info(f"Remapped {original_arg} -> {new_arg} in {cls.__name__}")
if original_arg in dataclass_params:
dataclass_params.remove(original_arg)
dataclass_params.add(new_arg)
logging.info(f"Remapped {original_arg} -> {new_arg} in {datacls.__name__}")
if ignore_args is not None:
ignore_args = set(ignore_args)
class_params = class_params - ignore_args
dataclass_params = dataclass_params - ignore_args
logging.info(f"Removing ignored arguments - {ignore_args}")
intersection = set.intersection(class_params, dataclass_params)
subset_cls = class_params - intersection
subset_datacls = dataclass_params - intersection
if (len(class_params) != len(dataclass_params)) or len(subset_cls) > 0 or len(subset_datacls) > 0:
logging.error(f"Class {cls.__name__} arguments do not match " f"Dataclass {datacls.__name__}!")
if len(subset_cls) > 0:
logging.error(f"Class {cls.__name__} has additional arguments :\n" f"{subset_cls}")
if len(subset_datacls):
logging.error(f"Dataclass {datacls.__name__} has additional arguments :\n{subset_datacls}")
return False, subset_cls, subset_datacls
else:
return True, None, None
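# Hedged usage sketch (editorial addition): a typical test-time call. The class /
# dataclass pair is hypothetical; any class and the dataclass mirroring its __init__
# can be checked this way.
#
#     match, cls_extra, dataclass_extra = assert_dataclass_signature_match(
#         MyAugmentor,            # hypothetical class under test
#         MyAugmentorConfig,      # hypothetical dataclass mirroring its __init__
#         ignore_args=['rng'],    # args intentionally absent from the dataclass
#     )
#     assert match, f"Signature mismatch: {cls_extra} vs {dataclass_extra}"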
|
NeMo-main
|
nemo/utils/config_utils.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import subprocess
import sys
import time
import warnings
from dataclasses import dataclass
from datetime import timedelta
from pathlib import Path
from shutil import copy, move
from typing import Any, Dict, List, Optional, Tuple, Union
import pytorch_lightning
import torch
from hydra.core.hydra_config import HydraConfig
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning.callbacks import Callback, ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.timer import Interval, Timer
from pytorch_lightning.loggers import MLFlowLogger, TensorBoardLogger, WandbLogger
from pytorch_lightning.loops import _TrainingEpochLoop
from pytorch_lightning.strategies.ddp import DDPStrategy
from nemo.collections.common.callbacks import EMA
from nemo.constants import NEMO_ENV_VARNAME_TESTING, NEMO_ENV_VARNAME_VERSION
from nemo.utils import logging, timers
from nemo.utils.app_state import AppState
from nemo.utils.callbacks import NeMoModelCheckpoint, PreemptionCallback
from nemo.utils.env_var_parsing import get_envbool
from nemo.utils.exceptions import NeMoBaseException
from nemo.utils.get_rank import is_global_rank_zero
from nemo.utils.lightning_logger_patch import add_filehandlers_to_pl_logger
from nemo.utils.loggers import ClearMLLogger, ClearMLParams, DLLogger, DLLoggerParams, MLFlowParams
from nemo.utils.model_utils import uninject_model_parallel_rank
class NotFoundError(NeMoBaseException):
""" Raised when a file or folder is not found"""
class LoggerMisconfigurationError(NeMoBaseException):
""" Raised when a mismatch between trainer.logger and exp_manager occurs"""
def __init__(self, message):
message = (
message
+ " You can disable lighning's trainer from creating a logger by passing logger=False to its constructor."
)
super().__init__(message)
class CheckpointMisconfigurationError(NeMoBaseException):
""" Raised when a mismatch between trainer.callbacks and exp_manager occurs"""
@dataclass
class EarlyStoppingParams:
monitor: str = "val_loss" # The metric that early stopping should consider.
mode: str = "min" # inform early stopping whether to look for increase or decrease in monitor.
min_delta: float = 0.001 # smallest change to consider as improvement.
patience: int = 10 # how many (consecutive) validation cycles to wait with no improvement before stopping training.
verbose: bool = True
strict: bool = True
check_finite: bool = True
stopping_threshold: Optional[float] = None
divergence_threshold: Optional[float] = None
check_on_train_epoch_end: Optional[bool] = None
log_rank_zero_only: bool = False
@dataclass
class CallbackParams:
filepath: Optional[str] = None # Deprecated
dirpath: Optional[str] = None # If None, exp_manager will attempt to handle the filepath
filename: Optional[str] = None # If None, exp_manager will attempt to handle the filepath
monitor: Optional[str] = "val_loss"
verbose: Optional[bool] = True
save_last: Optional[bool] = True
save_top_k: Optional[int] = 3
save_weights_only: Optional[bool] = False
mode: Optional[str] = "min"
auto_insert_metric_name: bool = True
every_n_epochs: Optional[int] = 1
every_n_train_steps: Optional[int] = None
train_time_interval: Optional[str] = None
prefix: Optional[str] = None # If None, exp_manager will attempt to handle the filepath
postfix: str = ".nemo"
save_best_model: bool = False
always_save_nemo: bool = False
save_nemo_on_train_end: Optional[bool] = True # Whether to automatically save .nemo file during the on_train_end hook
model_parallel_size: Optional[int] = None # tensor parallel size * pipeline parallel size
save_on_train_epoch_end: Optional[bool] = False # Save after training, not after validation
@dataclass
class StepTimingParams:
reduction: Optional[str] = "mean"
# if True torch.cuda.synchronize() is called on start/stop
sync_cuda: Optional[bool] = False
# if positive, defines the size of a sliding window for computing mean
buffer_size: Optional[int] = 1
@dataclass
class EMAParams:
enable: Optional[bool] = False
decay: Optional[float] = 0.999
cpu_offload: Optional[bool] = False
validate_original_weights: Optional[bool] = False
every_n_steps: int = 1
@dataclass
class ExpManagerConfig:
"""Experiment Manager config for validation of passed arguments.
"""
# Log dir creation parameters
explicit_log_dir: Optional[str] = None
exp_dir: Optional[str] = None
name: Optional[str] = None
version: Optional[str] = None
use_datetime_version: Optional[bool] = True
resume_if_exists: Optional[bool] = False
resume_past_end: Optional[bool] = False
resume_ignore_no_checkpoint: Optional[bool] = False
resume_from_checkpoint: Optional[str] = None
# Logging parameters
create_tensorboard_logger: Optional[bool] = True
summary_writer_kwargs: Optional[Dict[Any, Any]] = None
create_wandb_logger: Optional[bool] = False
wandb_logger_kwargs: Optional[Dict[Any, Any]] = None
create_mlflow_logger: Optional[bool] = False
mlflow_logger_kwargs: Optional[MLFlowParams] = MLFlowParams()
create_dllogger_logger: Optional[bool] = False
dllogger_logger_kwargs: Optional[DLLoggerParams] = DLLoggerParams()
create_clearml_logger: Optional[bool] = False
clearml_logger_kwargs: Optional[ClearMLParams] = ClearMLParams()
# Checkpointing parameters
create_checkpoint_callback: Optional[bool] = True
checkpoint_callback_params: Optional[CallbackParams] = CallbackParams()
create_early_stopping_callback: Optional[bool] = False
early_stopping_callback_params: Optional[EarlyStoppingParams] = EarlyStoppingParams()
create_preemption_callback: Optional[bool] = True
# Additional exp_manager arguments
files_to_copy: Optional[List[str]] = None
# logs timing of train/val/test steps
log_step_timing: Optional[bool] = True
step_timing_kwargs: Optional[StepTimingParams] = StepTimingParams()
# Configures creation of log files for different ranks
log_local_rank_0_only: Optional[bool] = False
log_global_rank_0_only: Optional[bool] = False
# disable initial validation when resuming from a checkpoint saved during validation
disable_validation_on_resume: Optional[bool] = True
ema: Optional[EMAParams] = EMAParams()
# Wall clock time limit
max_time_per_run: Optional[str] = None
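# Hedged configuration sketch (editorial addition): a plain dict that validates against
# ExpManagerConfig above. All values are illustrative, not recommended defaults.
#
#     example_exp_manager_cfg = {
#         'exp_dir': 'nemo_experiments',
#         'name': 'my_experiment',
#         'create_tensorboard_logger': True,
#         'create_checkpoint_callback': True,
#         'checkpoint_callback_params': {'monitor': 'val_loss', 'save_top_k': 3},
#         'resume_if_exists': True,
#         'resume_ignore_no_checkpoint': True,
#     }
#     # exp_manager(trainer, example_exp_manager_cfg) merges this dict with the schema above.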
class TimingCallback(Callback):
"""
Logs execution time of train/val/test steps
"""
def __init__(self, timer_kwargs={}):
self.timer = timers.NamedTimer(**timer_kwargs)
def _on_batch_start(self, name):
# reset only if we do not return mean of a sliding window
if self.timer.buffer_size <= 0:
self.timer.reset(name)
self.timer.start(name)
def _on_batch_end(self, name, pl_module):
self.timer.stop(name)
# Set the `batch_size=1` as WAR for `dataloader_iter`, which is not used for any metric
pl_module.log(
name + ' in s',
self.timer[name],
on_step=True,
on_epoch=False,
batch_size=1,
prog_bar=(name == "train_step_timing"),
)
def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
self._on_batch_start("train_step_timing")
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
self._on_batch_end("train_step_timing", pl_module)
def on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx=0):
self._on_batch_start("validation_step_timing")
def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx=0):
self._on_batch_end("validation_step_timing", pl_module)
def on_test_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx=0):
self._on_batch_start("test_step_timing")
def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx=0):
self._on_batch_end("test_step_timing", pl_module)
def on_before_backward(self, trainer, pl_module, loss):
self._on_batch_start("train_backward_timing")
def on_after_backward(self, trainer, pl_module):
self._on_batch_end("train_backward_timing", pl_module)
def exp_manager(trainer: 'pytorch_lightning.Trainer', cfg: Optional[Union[DictConfig, Dict]] = None) -> Optional[Path]:
"""
exp_manager is a helper function used to manage folders for experiments. It follows the pytorch lightning paradigm
of exp_dir/model_or_experiment_name/version. If the lightning trainer has a logger, exp_manager will get exp_dir,
name, and version from the logger. Otherwise it will use the exp_dir and name arguments to create the logging
directory. exp_manager also allows for explicit folder creation via explicit_log_dir.
The version can be a datetime string or an integer. Datetime version can be disabled if use_datetime_version is set
to False. It optionally creates TensorBoardLogger, WandBLogger, DLLogger, MLFlowLogger, ClearMLLogger,
ModelCheckpoint objects from pytorch lightning.
It copies sys.argv, and git information if available to the logging directory. It creates a log file for each
process to log their output into.
exp_manager additionally has a resume feature (resume_if_exists) which can be used to continue training from
the constructed log_dir. When you need to continue the training repeatedly (like on a cluster where you need
multiple consecutive jobs), you need to avoid creating the version folders. Therefore from v1.0.0, when
resume_if_exists is set to True, creating the version folders is ignored.
Args:
trainer (pytorch_lightning.Trainer): The lightning trainer.
cfg (DictConfig, dict): Can have the following keys:
- explicit_log_dir (str, Path): Can be used to override exp_dir/name/version folder creation. Defaults to
None, which will use exp_dir, name, and version to construct the logging directory.
- exp_dir (str, Path): The base directory to create the logging directory. Defaults to None, which logs to
./nemo_experiments.
- name (str): The name of the experiment. Defaults to None which turns into "default" via name = name or
"default".
- version (str): The version of the experiment. Defaults to None which uses either a datetime string or
lightning's TensorboardLogger system of using version_{int}.
- use_datetime_version (bool): Whether to use a datetime string for version. Defaults to True.
- resume_if_exists (bool): Whether this experiment is resuming from a previous run. If True, it sets
trainer._checkpoint_connector._ckpt_path so that the trainer should auto-resume. exp_manager will move files
under log_dir to log_dir/run_{int}. Defaults to False. From v1.0.0, when resume_if_exists is True,
we would not create version folders to make it easier to find the log folder for next runs.
- resume_past_end (bool): exp_manager errors out if resume_if_exists is True and a checkpoint matching
``*end.ckpt`` is found, indicating that a previous training run fully completed. This behaviour can be disabled, in which
case the ``*end.ckpt`` will be loaded by setting resume_past_end to True. Defaults to False.
- resume_ignore_no_checkpoint (bool): exp_manager errors out if resume_if_exists is True and no checkpoint
could be found. This behaviour can be disabled, in which case exp_manager will print a message and
continue without restoring, by setting resume_ignore_no_checkpoint to True. Defaults to False.
- resume_from_checkpoint (str): Can be used to specify a path to a specific checkpoint file to load from. This will
override any checkpoint found when resume_if_exists is True. Defaults to None.
- create_tensorboard_logger (bool): Whether to create a tensorboard logger and attach it to the pytorch
lightning trainer. Defaults to True.
- summary_writer_kwargs (dict): A dictionary of kwargs that can be passed to lightning's TensorboardLogger
class. Note that log_dir is passed by exp_manager and cannot exist in this dict. Defaults to None.
- create_wandb_logger (bool): Whether to create a Weights and Biases logger and attach it to the pytorch
lightning trainer. Defaults to False.
- wandb_logger_kwargs (dict): A dictionary of kwargs that can be passed to lightning's WandBLogger
class. Note that name and project are required parameters if create_wandb_logger is True.
Defaults to None.
- create_mlflow_logger (bool): Whether to create an MLFlow logger and attach it to the pytorch lightning
trainer. Defaults to False.
- mlflow_logger_kwargs (dict): optional parameters for the MLFlow logger.
- create_dllogger_logger (bool): Whether to create a DLLogger logger and attach it to the pytorch lightning
trainer. Defaults to False.
- dllogger_logger_kwargs (dict): optional parameters for the DLLogger logger.
- create_clearml_logger (bool): Whether to create a ClearML logger and attach it to the pytorch lightning
trainer. Defaults to False.
- clearml_logger_kwargs (dict): optional parameters for the ClearML logger.
- create_checkpoint_callback (bool): Whether to create a ModelCheckpoint callback and attach it to the
pytorch lightning trainer. The ModelCheckpoint saves the top 3 models with the best "val_loss", the most
recent checkpoint under ``*last.ckpt``, and the final checkpoint after training completes under ``*end.ckpt``.
Defaults to True.
- create_early_stopping_callback (bool): Flag to decide if early stopping should be used to stop training. Default is False.
See EarlyStoppingParams dataclass above.
- create_preemption_callback (bool): Flag to decide whether to enable preemption callback to save checkpoints and exit training
immediately upon preemption. Default is True.
- files_to_copy (list): A list of files to copy to the experiment logging directory. Defaults to None which
copies no files.
- log_local_rank_0_only (bool): Whether to only create log files for local rank 0. Defaults to False.
Set this to True if you are using DDP with many GPUs and do not want many log files in your exp dir.
- log_global_rank_0_only (bool): Whether to only create log files for global rank 0. Defaults to False.
Set this to True if you are using DDP with many GPUs and do not want many log files in your exp dir.
- max_time_per_run (str): The maximum wall clock time *per run*. This is intended to be used on clusters where you want
a checkpoint to be saved after this specified time and to be able to resume from that checkpoint. Defaults to None.
returns:
log_dir (Path): The final logging directory where logging files are saved. Usually the concatenation of
exp_dir, name, and version.
"""
# Add rank information to logger
# Note: trainer.global_rank and trainer.is_global_zero are not set until trainer.fit, so have to hack around it
local_rank = int(os.environ.get("LOCAL_RANK", 0))
global_rank = trainer.node_rank * trainer.num_devices + local_rank
logging.rank = global_rank
if cfg is None:
logging.error("exp_manager did not receive a cfg argument. It will be disabled.")
return
if trainer.fast_dev_run:
logging.info("Trainer was called with fast_dev_run. exp_manager will return without any functionality.")
return
# Ensure passed cfg is compliant with ExpManagerConfig
schema = OmegaConf.structured(ExpManagerConfig)
if isinstance(cfg, dict):
cfg = OmegaConf.create(cfg)
elif not isinstance(cfg, DictConfig):
raise ValueError(f"cfg was type: {type(cfg)}. Expected either a dict or a DictConfig")
cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=True))
cfg = OmegaConf.merge(schema, cfg)
error_checks(trainer, cfg) # Ensures that trainer options are compliant with NeMo and exp_manager arguments
log_dir, exp_dir, name, version = get_log_dir(
trainer=trainer,
exp_dir=cfg.exp_dir,
name=cfg.name,
version=cfg.version,
explicit_log_dir=cfg.explicit_log_dir,
use_datetime_version=cfg.use_datetime_version,
resume_if_exists=cfg.resume_if_exists,
)
check_resume(
trainer,
log_dir,
cfg.resume_if_exists,
cfg.resume_past_end,
cfg.resume_ignore_no_checkpoint,
cfg.checkpoint_callback_params.dirpath,
cfg.resume_from_checkpoint,
)
checkpoint_name = name
# If name returned from get_log_dir is "", use cfg.name for checkpointing
if checkpoint_name is None or checkpoint_name == '':
checkpoint_name = cfg.name or "default"
# Set mlflow name if it's not set, before the main name is erased
if cfg.create_mlflow_logger and (not cfg.mlflow_logger_kwargs.get("experiment_name", None)):
cfg.mlflow_logger_kwargs.experiment_name = cfg.name
logging.warning(
'mlflow logger specified but no experiment name set. Using the same as Tensorboard: %s',
cfg.mlflow_logger_kwargs.experiment_name,
)
cfg.name = name # Used for configure_loggers so that the log_dir is properly set even if name is ""
cfg.version = version
# update app_state with log_dir, exp_dir, etc
app_state = AppState()
app_state.log_dir = log_dir
app_state.exp_dir = exp_dir
app_state.name = name
app_state.version = version
app_state.checkpoint_name = checkpoint_name
app_state.create_checkpoint_callback = cfg.create_checkpoint_callback
app_state.checkpoint_callback_params = cfg.checkpoint_callback_params
# Create the logging directory if it does not exist
os.makedirs(log_dir, exist_ok=True) # Cannot limit creation to global zero as all ranks write to own log file
logging.info(f'Experiments will be logged at {log_dir}')
trainer._default_root_dir = log_dir
if cfg.log_local_rank_0_only is True and cfg.log_global_rank_0_only is True:
raise ValueError(
f"Cannot set both log_local_rank_0_only and log_global_rank_0_only to True. Please set either one or neither."
)
# This is set if the env var NEMO_TESTING is set to True.
nemo_testing = get_envbool(NEMO_ENV_VARNAME_TESTING, False)
# Handle logging to file
log_file = log_dir / f'nemo_log_globalrank-{global_rank}_localrank-{local_rank}.txt'
if cfg.log_local_rank_0_only is True and not nemo_testing:
if local_rank == 0:
logging.add_file_handler(log_file)
elif cfg.log_global_rank_0_only is True and not nemo_testing:
if global_rank == 0:
logging.add_file_handler(log_file)
else:
# Logs on all ranks.
logging.add_file_handler(log_file)
# For some reason, LearningRateLogger requires trainer to have a logger. Safer to create logger on all ranks
# not just global rank 0.
if (
cfg.create_tensorboard_logger
or cfg.create_wandb_logger
or cfg.create_mlflow_logger
or cfg.create_dllogger_logger
or cfg.create_clearml_logger
):
configure_loggers(
trainer,
exp_dir,
log_dir,
cfg.name,
cfg.version,
cfg.checkpoint_callback_params,
cfg.create_tensorboard_logger,
cfg.summary_writer_kwargs,
cfg.create_wandb_logger,
cfg.wandb_logger_kwargs,
cfg.create_mlflow_logger,
cfg.mlflow_logger_kwargs,
cfg.create_dllogger_logger,
cfg.dllogger_logger_kwargs,
cfg.create_clearml_logger,
cfg.clearml_logger_kwargs,
)
# add loggers timing callbacks
if cfg.log_step_timing:
timing_callback = TimingCallback(timer_kwargs=cfg.step_timing_kwargs or {})
trainer.callbacks.insert(0, timing_callback)
if cfg.ema.enable:
ema_callback = EMA(
decay=cfg.ema.decay,
validate_original_weights=cfg.ema.validate_original_weights,
cpu_offload=cfg.ema.cpu_offload,
every_n_steps=cfg.ema.every_n_steps,
)
trainer.callbacks.append(ema_callback)
if cfg.create_early_stopping_callback:
early_stop_callback = EarlyStopping(**cfg.early_stopping_callback_params)
trainer.callbacks.append(early_stop_callback)
if cfg.create_checkpoint_callback:
configure_checkpointing(
trainer,
log_dir,
checkpoint_name,
cfg.resume_if_exists,
cfg.checkpoint_callback_params,
cfg.create_preemption_callback,
)
if cfg.disable_validation_on_resume:
# extend training loop to skip initial validation when resuming from checkpoint
configure_no_restart_validation_training_loop(trainer)
# Setup a stateless timer for use on clusters.
if cfg.max_time_per_run is not None:
found_ptl_timer = False
for idx, callback in enumerate(trainer.callbacks):
if isinstance(callback, Timer):
# NOTE: PTL does not expose a `trainer.max_time`. By the time we are in this function, PTL has already setup a timer if the user specifies `trainer.max_time` so best we can do is replace that.
# Working: If only `trainer.max_time` is set - it behaves as a normal PTL timer. If only `exp_manager.max_time_per_run` is set - it behaves as a StateLessTimer. If both are set, it also behaves as a StateLessTimer.
logging.warning(
f'Found a PTL Timer callback, replacing with a StatelessTimer callback. This will happen if you set trainer.max_time as well as exp_manager.max_time_per_run.'
)
trainer.callbacks[idx] = StatelessTimer(cfg.max_time_per_run)
found_ptl_timer = True
break
if not found_ptl_timer:
trainer.max_time = cfg.max_time_per_run
trainer.callbacks.append(StatelessTimer(cfg.max_time_per_run))
if is_global_rank_zero():
# Move files_to_copy to folder and add git information if present
if cfg.files_to_copy:
for _file in cfg.files_to_copy:
copy(Path(_file), log_dir)
# Create files for cmd args and git info
with open(log_dir / 'cmd-args.log', 'w', encoding='utf-8') as _file:
_file.write(" ".join(sys.argv))
# Try to get git hash
git_repo, git_hash = get_git_hash()
if git_repo:
with open(log_dir / 'git-info.log', 'w', encoding='utf-8') as _file:
_file.write(f'commit hash: {git_hash}')
_file.write(get_git_diff())
# Add err_file logging to global_rank zero
logging.add_err_file_handler(log_dir / 'nemo_error_log.txt')
# Add lightning file logging to global_rank zero
add_filehandlers_to_pl_logger(log_dir / 'lightning_logs.txt', log_dir / 'nemo_error_log.txt')
return log_dir
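# Hedged usage sketch (editorial addition): the usual call order. The model class is
# hypothetical; only exp_manager(trainer, cfg) reflects this module.
#
#     import pytorch_lightning as pl
#
#     trainer = pl.Trainer(devices=1, accelerator='gpu', max_epochs=10,
#                          logger=False, enable_checkpointing=False)
#     log_dir = exp_manager(trainer, {'name': 'my_experiment', 'create_wandb_logger': False})
#     # log_dir resolves to something like <cwd>/nemo_experiments/my_experiment/<datetime-version>
#     # model = MyNeMoModel(cfg=model_cfg, trainer=trainer)   # hypothetical model class
#     # trainer.fit(model)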
def error_checks(trainer: 'pytorch_lightning.Trainer', cfg: Optional[Union[DictConfig, Dict]] = None):
"""
Checks that the passed trainer is compliant with NeMo and exp_manager's passed configuration. Checks that:
- Throws error when hydra has changed the working directory. This causes issues with lightning's DDP
- Throws error when trainer has loggers defined but create_tensorboard_logger or create_wandb_logger
or create_mlflow_logger or create_dllogger_logger is True
- Prints error messages when 1) run on multi-node and not Slurm, and 2) run on multi-gpu without DDP
"""
if HydraConfig.initialized() and get_original_cwd() != os.getcwd():
raise ValueError(
"Hydra changed the working directory. This interferes with ExpManger's functionality. Please pass "
"hydra.run.dir=. to your python script."
)
if trainer.logger is not None and (
cfg.create_tensorboard_logger or cfg.create_wandb_logger or cfg.create_mlflow_logger
):
raise LoggerMisconfigurationError(
"The pytorch lightning trainer that was passed to exp_manager contained a logger, and either "
f"create_tensorboard_logger: {cfg.create_tensorboard_logger} or create_wandb_logger: "
f"{cfg.create_wandb_logger} or create_mlflow_logger: {cfg.create_mlflow_logger}"
f"or create_dllogger_logger: {cfg.create_mlflow_logger} was set to True. "
"These can only be used if trainer does not already have a logger."
)
if trainer.num_nodes > 1 and not check_slurm(trainer):
logging.error(
"You are running multi-node training without SLURM handling the processes."
" Please note that this is not tested in NeMo and could result in errors."
)
if trainer.num_devices > 1 and not isinstance(trainer.strategy, DDPStrategy):
logging.error(
"You are running multi-gpu without ddp.Please note that this is not tested in NeMo and could result in "
"errors."
)
def check_resume(
trainer: 'pytorch_lightning.Trainer',
log_dir: str,
resume_if_exists: bool = False,
resume_past_end: bool = False,
resume_ignore_no_checkpoint: bool = False,
dirpath: str = None,
resume_from_checkpoint: str = None,
):
"""Checks that resume=True was used correctly with the arguments pass to exp_manager. Sets
trainer._checkpoint_connector._ckpt_path as necessary.
Returns:
log_dir (Path): The log_dir
exp_dir (str): The base exp_dir without name nor version
name (str): The name of the experiment
version (str): The version of the experiment
Raises:
NotFoundError: If resume is True, resume_ignore_no_checkpoint is False, and checkpoints could not be found.
ValueError: If resume is True, and there were more than 1 checkpoint could found.
"""
if not log_dir:
raise ValueError(f"Resuming requires the log_dir {log_dir} to be passed to exp_manager")
checkpoint = None
if resume_from_checkpoint:
checkpoint = resume_from_checkpoint
if resume_if_exists:
# Use <log_dir>/checkpoints/ unless `dirpath` is set
checkpoint_dir = Path(dirpath) if dirpath else Path(Path(log_dir) / "checkpoints")
# when using distributed checkpointing, checkpoint_dir is a directory of directories
# we check for this here
dist_checkpoints = [d for d in list(checkpoint_dir.glob("*")) if d.is_dir()]
end_dist_checkpoints = [d for d in dist_checkpoints if d.match("*end")]
last_dist_checkpoints = [d for d in dist_checkpoints if d.match("*last")]
end_checkpoints = end_dist_checkpoints if end_dist_checkpoints else list(checkpoint_dir.glob("*end.ckpt"))
last_checkpoints = last_dist_checkpoints if last_dist_checkpoints else list(checkpoint_dir.glob("*last.ckpt"))
if not checkpoint_dir.exists() or (not len(end_checkpoints) > 0 and not len(last_checkpoints) > 0):
if resume_ignore_no_checkpoint:
warn = f"There were no checkpoints found in checkpoint_dir or no checkpoint folder at checkpoint_dir :{checkpoint_dir}. "
if checkpoint is None:
warn += "Training from scratch."
elif checkpoint == resume_from_checkpoint:
warn += f"Training from {resume_from_checkpoint}."
logging.warning(warn)
else:
raise NotFoundError(
f"There were no checkpoints found in checkpoint_dir or no checkpoint folder at checkpoint_dir :{checkpoint_dir}. Cannot resume."
)
elif len(end_checkpoints) > 0:
if resume_past_end:
if len(end_checkpoints) > 1:
if 'mp_rank' in str(end_checkpoints[0]):
checkpoint = end_checkpoints[0]
else:
raise ValueError(f"Multiple checkpoints {end_checkpoints} that matches *end.ckpt.")
else:
raise ValueError(
f"Found {end_checkpoints[0]} indicating that the last training run has already completed."
)
elif len(last_checkpoints) > 1:
if 'mp_rank' in str(last_checkpoints[0]) or 'tp_rank' in str(last_checkpoints[0]):
checkpoint = last_checkpoints[0]
checkpoint = uninject_model_parallel_rank(checkpoint)
else:
raise ValueError(f"Multiple checkpoints {last_checkpoints} that matches *last.ckpt.")
else:
checkpoint = last_checkpoints[0]
# PTL 2.0 supports ckpt_path instead of resume_from_checkpoint as the trainer flag
if checkpoint is not None:
trainer.ckpt_path = str(checkpoint)
logging.info(f'Resuming training from checkpoint: {trainer.ckpt_path}')
if is_global_rank_zero():
# Check to see if any files exist that need to be moved
files_to_move = []
if Path(log_dir).exists():
for child in Path(log_dir).iterdir():
if child.is_file():
files_to_move.append(child)
if len(files_to_move) > 0:
# Move old files to a new folder
other_run_dirs = Path(log_dir).glob("run_*")
run_count = 0
for fold in other_run_dirs:
if fold.is_dir():
run_count += 1
new_run_dir = Path(Path(log_dir) / f"run_{run_count}")
new_run_dir.mkdir()
for _file in files_to_move:
move(str(_file), str(new_run_dir))
def check_explicit_log_dir(
trainer: 'pytorch_lightning.Trainer', explicit_log_dir: Union[Path, str], exp_dir: str, name: str, version: str
) -> Tuple[Path, str, str, str]:
""" Checks that the passed arguments are compatible with explicit_log_dir.
Returns:
log_dir (Path): the log_dir
exp_dir (str): the base exp_dir without name nor version
name (str): The name of the experiment
version (str): The version of the experiment
Raises:
LoggerMisconfigurationError
"""
if trainer.logger is not None:
raise LoggerMisconfigurationError(
"The pytorch lightning trainer that was passed to exp_manager contained a logger and explicit_log_dir: "
f"{explicit_log_dir} was pass to exp_manager. Please remove the logger from the lightning trainer."
)
# Checking only (explicit_log_dir) vs (exp_dir and version).
# The `name` will be used as the actual name of checkpoint/archive.
if exp_dir or version:
logging.error(
f"exp_manager received explicit_log_dir: {explicit_log_dir} and at least one of exp_dir: {exp_dir}, "
f"or version: {version}. Please note that exp_dir, name, and version will be ignored."
)
if is_global_rank_zero() and Path(explicit_log_dir).exists():
logging.warning(f"Exp_manager is logging to {explicit_log_dir}, but it already exists.")
return Path(explicit_log_dir), str(explicit_log_dir), "", ""
def get_log_dir(
trainer: 'pytorch_lightning.Trainer',
exp_dir: str = None,
name: str = None,
version: str = None,
explicit_log_dir: str = None,
use_datetime_version: bool = True,
resume_if_exists: bool = False,
) -> Tuple[Path, str, str, str]:
"""
Obtains the log_dir used for exp_manager.
Args:
explicit_log_dir (str): The explicit path to the log folder. Defaults to None.
use_datetime_version (bool): Uses date and time as the version of the log folder. Defaults to True.
resume_if_exists (bool): Whether resume_if_exists is enabled in the exp_manager config. When enabled, the
version folders are not created.
Returns:
log_dir (Path): the log_dir
exp_dir (str): the base exp_dir without name nor version
name (str): The name of the experiment
version (str): The version of the experiment
Raises:
LoggerMisconfigurationError: If trainer is incompatible with arguments
NotFoundError: If resume is True, resume_ignore_no_checkpoint is False, and checkpoints could not be found.
ValueError: If resume is True, and more than one checkpoint was found.
"""
if explicit_log_dir: # If explicit log_dir was passed, short circuit
return check_explicit_log_dir(trainer, explicit_log_dir, exp_dir, name, version)
# Default exp_dir to ./nemo_experiments if None was passed
_exp_dir = exp_dir
if exp_dir is None:
_exp_dir = str(Path.cwd() / 'nemo_experiments')
# If the user has already defined a logger for the trainer, use the logger defaults for logging directory
if trainer.logger is not None:
if trainer.logger.save_dir:
if exp_dir:
raise LoggerMisconfigurationError(
"The pytorch lightning trainer that was passed to exp_manager contained a logger, the logger's "
f"save_dir was not None, and exp_dir ({exp_dir}) was not None. If trainer.logger.save_dir "
"exists, exp_manager will use trainer.logger.save_dir as the logging directory and exp_dir "
"must be None."
)
_exp_dir = trainer.logger.save_dir
if name:
raise LoggerMisconfigurationError(
"The pytorch lightning trainer that was passed to exp_manager contained a logger, and name: "
f"{name} was also passed to exp_manager. If the trainer contains a "
"logger, exp_manager will use trainer.logger.name, and name passed to exp_manager must be None."
)
name = trainer.logger.name
version = f"version_{trainer.logger.version}"
# Use user-defined exp_dir, project_name, exp_name, and versioning options
else:
name = name or "default"
version = version or os.environ.get(NEMO_ENV_VARNAME_VERSION, None)
if not version:
if resume_if_exists:
logging.warning(
"No version folders would be created under the log folder as 'resume_if_exists' is enabled."
)
version = None
elif is_global_rank_zero():
if use_datetime_version:
version = time.strftime('%Y-%m-%d_%H-%M-%S')
else:
tensorboard_logger = TensorBoardLogger(save_dir=Path(_exp_dir), name=name, version=version)
version = f"version_{tensorboard_logger.version}"
os.environ[NEMO_ENV_VARNAME_VERSION] = "" if version is None else version
log_dir = Path(_exp_dir) / Path(str(name)) / Path("" if version is None else str(version))
return log_dir, str(_exp_dir), name, version
def get_git_hash():
"""
Helper function that tries to get the commit hash if running inside a git folder
returns:
Bool: Whether the git subprocess ran without error
str: git subprocess output or error message
"""
try:
return (
True,
subprocess.check_output(['git', 'rev-parse', 'HEAD'], stderr=subprocess.STDOUT).decode(),
)
except subprocess.CalledProcessError as err:
return False, "{}\n".format(err.output.decode("utf-8"))
def get_git_diff():
"""
Helper function that tries to get the git diff if running inside a git folder
returns:
str: git subprocess output or error message
"""
try:
return subprocess.check_output(['git', 'diff'], stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
return "{}\n".format(err.output.decode("utf-8"))
def configure_loggers(
trainer: 'pytorch_lightning.Trainer',
exp_dir: Union[Path, str],
log_dir: Union[Path, str],
name: str,
version: str,
checkpoint_callback_params: dict,
create_tensorboard_logger: bool,
summary_writer_kwargs: dict,
create_wandb_logger: bool,
wandb_kwargs: dict,
create_mlflow_logger: bool,
mlflow_kwargs: dict,
create_dllogger_logger: bool,
dllogger_kwargs: dict,
create_clearml_logger: bool,
clearml_kwargs: dict,
):
"""
Creates TensorboardLogger and/or WandBLogger / MLFlowLogger / DLLogger / ClearMLLogger and attaches them to the trainer.
Raises ValueError if summary_writer_kwargs or wandb_kwargs are misconfigured.
"""
# Potentially create tensorboard logger and/or WandBLogger / MLFlowLogger / DLLogger
logger_list = []
if create_tensorboard_logger:
if summary_writer_kwargs is None:
summary_writer_kwargs = {}
elif "log_dir" in summary_writer_kwargs:
raise ValueError(
"You cannot pass `log_dir` as part of `summary_writer_kwargs`. `log_dir` is handled by lightning's "
"TensorBoardLogger logger."
)
tensorboard_logger = TensorBoardLogger(save_dir=exp_dir, name=name, version=version, **summary_writer_kwargs)
logger_list.append(tensorboard_logger)
logging.info("TensorboardLogger has been set up")
if create_wandb_logger:
if wandb_kwargs is None:
wandb_kwargs = {}
if "name" not in wandb_kwargs and "project" not in wandb_kwargs:
raise ValueError("name and project are required for wandb_logger")
# Update the wandb save_dir
if wandb_kwargs.get('save_dir', None) is None:
wandb_kwargs['save_dir'] = exp_dir
os.makedirs(wandb_kwargs['save_dir'], exist_ok=True)
wandb_logger = WandbLogger(version=version, **wandb_kwargs)
logger_list.append(wandb_logger)
logging.info("WandBLogger has been set up")
if create_mlflow_logger:
mlflow_logger = MLFlowLogger(run_name=version, **mlflow_kwargs)
logger_list.append(mlflow_logger)
logging.info("MLFlowLogger has been set up")
if create_dllogger_logger:
dllogger_logger = DLLogger(**dllogger_kwargs)
logger_list.append(dllogger_logger)
logging.info("DLLogger has been set up")
if create_clearml_logger:
clearml_logger = ClearMLLogger(
clearml_cfg=clearml_kwargs,
log_dir=log_dir,
prefix=name,
save_best_model=checkpoint_callback_params.save_best_model,
)
logger_list.append(clearml_logger)
logging.info("ClearMLLogger has been set up")
trainer._logger_connector.configure_logger(logger_list)
def configure_checkpointing(
trainer: 'pytorch_lightning.Trainer',
log_dir: Path,
name: str,
resume: bool,
params: 'DictConfig',
create_preemption_callback: bool,
):
""" Adds ModelCheckpoint to trainer. Raises CheckpointMisconfigurationError if trainer already has a ModelCheckpoint
callback
"""
for callback in trainer.callbacks:
if isinstance(callback, ModelCheckpoint):
raise CheckpointMisconfigurationError(
"The pytorch lightning trainer that was passed to exp_manager contained a ModelCheckpoint "
"and create_checkpoint_callback was set to True. Please either set create_checkpoint_callback "
"to False, or remove ModelCheckpoint from the lightning trainer"
)
# Create the callback and attach it to trainer
if "filepath" in params:
if params.filepath is not None:
logging.warning("filepath is deprecated. Please switch to dirpath and filename instead")
if params.dirpath is None:
params.dirpath = Path(params.filepath).parent
if params.filename is None:
params.filename = Path(params.filepath).name
with open_dict(params):
del params["filepath"]
if params.dirpath is None:
params.dirpath = Path(log_dir / 'checkpoints')
if params.filename is None:
params.filename = f'{name}--{{{params.monitor}:.4f}}-{{epoch}}'
if params.prefix is None:
params.prefix = name
NeMoModelCheckpoint.CHECKPOINT_NAME_LAST = params.filename + '-last'
logging.debug(params.dirpath)
logging.debug(params.filename)
logging.debug(params.prefix)
if "val" in params.monitor:
if (
trainer.max_epochs is not None
and trainer.max_epochs != -1
and trainer.max_epochs < trainer.check_val_every_n_epoch
):
logging.error(
"The checkpoint callback was told to monitor a validation value but trainer.max_epochs("
f"{trainer.max_epochs}) was less than trainer.check_val_every_n_epoch({trainer.check_val_every_n_epoch}"
f"). It is very likely this run will fail with ModelCheckpoint(monitor='{params.monitor}') not found "
"in the returned metrics. Please ensure that validation is run within trainer.max_epochs."
)
elif trainer.max_steps is not None and trainer.max_steps != -1:
logging.warning(
"The checkpoint callback was told to monitor a validation value and trainer's max_steps was set to "
f"{trainer.max_steps}. Please ensure that max_steps will run for at least "
f"{trainer.check_val_every_n_epoch} epochs to ensure that checkpointing will not error out."
)
checkpoint_callback = NeMoModelCheckpoint(n_resume=resume, **params)
checkpoint_callback.last_model_path = trainer.ckpt_path or ""
if 'mp_rank' in checkpoint_callback.last_model_path or 'tp_rank' in checkpoint_callback.last_model_path:
checkpoint_callback.last_model_path = uninject_model_parallel_rank(checkpoint_callback.last_model_path)
trainer.callbacks.append(checkpoint_callback)
if create_preemption_callback:
# Check if CUDA is available, as preemption is supported only on GPUs
if torch.cuda.is_available():
## By default PreemptionCallback handles SIGTERM. To handle other signals pass the signal in the call as below:
## PreemptionCallback(checkpoint_callback, signal.SIGCHLD)
preemption_callback = PreemptionCallback(checkpoint_callback)
trainer.callbacks.append(preemption_callback)
else:
logging.info("Preemption is supported only on GPUs, disabling preemption")
def check_slurm(trainer):
try:
return trainer.accelerator_connector.is_slurm_managing_tasks
except AttributeError:
return False
class StatelessTimer(Timer):
"""Extension of PTL timers to be per run."""
def __init__(self, duration: timedelta = None, interval: str = Interval.step, verbose: bool = True,) -> None:
super().__init__(duration, interval, verbose)
# Override PTL Timer's state dict to not store elapsed time information so that we can restore and continue training.
def state_dict(self) -> Dict[str, Any]:
return {}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
return
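# Hedged usage note (editorial addition): StatelessTimer is normally installed by
# exp_manager when `max_time_per_run` is set, e.g.
#
#     exp_manager(trainer, {'max_time_per_run': '00:03:45:00'})   # DD:HH:MM:SS, illustrative value
#
# Because state_dict() / load_state_dict() are no-ops, the elapsed time is not restored
# from checkpoints, so every resumed run gets the full wall-clock budget again.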
def configure_no_restart_validation_training_loop(trainer: pytorch_lightning.Trainer) -> None:
if type(trainer.fit_loop.epoch_loop) != _TrainingEpochLoop:
warnings.warn("Detected custom epoch loop. Skipping no validation on restart support.", UserWarning)
return
## Pass trainer object to avoid trainer getting overwritten as None
loop = SkipResumeTrainingValidationLoop(trainer, trainer.min_steps, trainer.max_steps)
trainer.fit_loop.epoch_loop = loop
class SkipResumeTrainingValidationLoop(_TrainingEpochLoop):
"""
Extend the PTL Epoch loop to skip validating when resuming.
This happens when resuming a checkpoint that has already run validation, but loading restores
the training state before validation has run.
"""
def _should_check_val_fx(self) -> bool:
if self.restarting and self.global_step % self.trainer.val_check_batch == 0:
return False
return super()._should_check_val_fx()
def clean_exp_ckpt(exp_log_dir: Union[str, Path], remove_ckpt: bool = True, remove_nemo: bool = False):
"""
Helper method that removes Pytorch Lightning .ckpt files or NeMo .nemo files from the checkpoint directory
Args:
exp_log_dir: str path to the root directory of the current experiment.
remove_ckpt: bool, whether to remove all *.ckpt files in the checkpoints directory.
remove_nemo: bool, whether to remove all *.nemo files in the checkpoints directory.
"""
exp_log_dir = str(exp_log_dir)
if remove_ckpt:
logging.info("Deleting *.ckpt files ...")
ckpt_files = glob.glob(os.path.join(exp_log_dir, "checkpoints", "*.ckpt"))
for filepath in ckpt_files:
os.remove(filepath)
logging.info(f"Deleted file : {filepath}")
if remove_nemo:
logging.info("Deleting *.nemo files ...")
nemo_files = glob.glob(os.path.join(exp_log_dir, "checkpoints", "*.nemo"))
for filepath in nemo_files:
os.remove(filepath)
logging.info(f"Deleted file : {filepath}")
|
NeMo-main
|
nemo/utils/exp_manager.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
import os.path
import subprocess
import tarfile
from typing import Optional
import wget
# Function to build a manifest
def build_manifest(transcripts_path, manifest_path, data_dir, mount_dir, wav_path):
# create manifest with reference to this directory. This is useful when mounting the dataset.
mount_dir = mount_dir if mount_dir else data_dir
with open(transcripts_path, 'r') as fin:
with open(manifest_path, 'w') as fout:
for line in fin:
# Lines look like this:
# <s> transcript </s> (fileID)
transcript = line[: line.find('(') - 1].lower()
transcript = transcript.replace('<s>', '').replace('</s>', '')
transcript = transcript.strip()
file_id = line[line.find('(') + 1 : -2] # e.g. "cen4-fash-b"
audio_path = os.path.join(
data_dir, wav_path, file_id[file_id.find('-') + 1 : file_id.rfind('-')], file_id + '.wav'
)
mounted_audio_path = os.path.join(
mount_dir, wav_path, file_id[file_id.find('-') + 1 : file_id.rfind('-')], file_id + '.wav'
)
# import sox here to not require sox to be available for importing all utils.
import sox
duration = sox.file_info.duration(audio_path)
# Write the metadata to the manifest
metadata = {"audio_filepath": mounted_audio_path, "duration": duration, "text": transcript}
json.dump(metadata, fout)
fout.write('\n')
def download_an4(data_dir: str = "./", train_mount_dir: Optional[str] = None, test_mount_dir: Optional[str] = None):
"""
Function to download the AN4 dataset. This hides pre-processing boilerplate for notebook ASR examples.
Args:
data_dir: Path to store the data.
train_mount_dir: If you plan to mount the dataset, use this to prepend the mount directory to the
audio filepath in the train manifest.
test_mount_dir: If you plan to mount the dataset, use this to prepend the mount directory to the
audio filepath in the test manifest.
"""
print("******")
os.makedirs(data_dir, exist_ok=True)
if not os.path.exists(data_dir + '/an4_sphere.tar.gz'):
an4_url = 'https://dldata-public.s3.us-east-2.amazonaws.com/an4_sphere.tar.gz'
an4_path = wget.download(an4_url, data_dir)
print(f"Dataset downloaded at: {an4_path}")
else:
print("Tarfile already exists.")
an4_path = data_dir + '/an4_sphere.tar.gz'
if not os.path.exists(data_dir + '/an4/'):
tar = tarfile.open(an4_path)
tar.extractall(path=data_dir)
print("Converting .sph to .wav...")
sph_list = glob.glob(data_dir + '/an4/**/*.sph', recursive=True)
for sph_path in sph_list:
wav_path = sph_path[:-4] + '.wav'
cmd = ["sox", sph_path, wav_path]
subprocess.run(cmd)
print("Finished conversion.\n******")
# Building Manifests
print("******")
train_transcripts = data_dir + '/an4/etc/an4_train.transcription'
train_manifest = data_dir + '/an4/train_manifest.json'
if not os.path.isfile(train_manifest):
build_manifest(train_transcripts, train_manifest, data_dir, train_mount_dir, 'an4/wav/an4_clstk')
print("Training manifest created.")
test_transcripts = data_dir + '/an4/etc/an4_test.transcription'
test_manifest = data_dir + '/an4/test_manifest.json'
if not os.path.isfile(test_manifest):
build_manifest(test_transcripts, test_manifest, data_dir, test_mount_dir, 'an4/wav/an4test_clstk')
print("Test manifest created.")
print("***Done***")
|
NeMo-main
|
nemo/utils/notebook_utils.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
class Singleton(type):
""" Implementation of a generic, tread-safe singleton meta-class.
Can be used as meta-class, i.e. will create
"""
# List of instances - one per class.
__instances = {}
# Lock used for accessing the instance.
__lock = threading.Lock()
def __call__(cls, *args, **kwargs):
""" Returns singleton instance. A thread safe implementation. """
if cls not in cls.__instances:
# Enter critical section.
with cls.__lock:
# Check once again.
if cls not in cls.__instances:
# Create a new object instance - one per class.
cls.__instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
# Return the instance.
return cls.__instances[cls]
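# Hedged usage sketch (editorial addition): any class declaring `metaclass=Singleton`
# returns the same lazily created instance on every call (NeMo's AppState uses this
# pattern). The class below is a standalone illustration, not part of NeMo.
#
#     class GlobalRegistry(metaclass=Singleton):
#         def __init__(self):
#             self.items = []
#
#     assert GlobalRegistry() is GlobalRegistry()   # one shared instance per class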
|
NeMo-main
|
nemo/utils/metaclasses.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Dict, Optional
@dataclass
class MLFlowParams:
# name of experiment, if none, defaults to the globally set experiment name
experiment_name: Optional[str] = None
# no run_name because it's set by version
# local or remote tracking server. If tracking_uri is not set, it defaults to save_dir
tracking_uri: Optional[str] = None
tags: Optional[Dict[str, Any]] = None
save_dir: Optional[str] = "./mlruns"
prefix: str = ""
artifact_location: Optional[str] = None
# provide run_id if resuming a previously started run
run_id: Optional[str] = None
# Log checkpoints created by ModelCheckpoint as MLFlow artifacts.
log_model: bool = False
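# Hedged usage sketch (editorial addition): enabling MLflow logging through exp_manager.
# The tracking URI is a placeholder.
#
#     exp_manager_cfg = {
#         'create_mlflow_logger': True,
#         'mlflow_logger_kwargs': {'experiment_name': 'my_experiment',
#                                  'tracking_uri': 'http://localhost:5000'},
#     }
#     # exp_manager validates these kwargs against MLFlowParams and builds an MLFlowLogger
#     # with run_name set to the experiment version.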
|
NeMo-main
|
nemo/utils/loggers/mlflow_logger.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.utils.loggers.clearml_logger import ClearMLLogger, ClearMLParams
from nemo.utils.loggers.dllogger import DLLogger, DLLoggerParams
from nemo.utils.loggers.mlflow_logger import MLFlowParams
|
NeMo-main
|
nemo/utils/loggers/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import Namespace
from dataclasses import dataclass
from pathlib import Path
from typing import Any, List, Literal, Mapping, Optional, Union
import pandas as pd
from lightning_utilities.core.apply_func import apply_to_collection
from omegaconf import DictConfig, ListConfig, OmegaConf
from pytorch_lightning.callbacks import Checkpoint
from pytorch_lightning.loggers import Logger
from pytorch_lightning.utilities.parsing import AttributeDict
from torch import Tensor
from nemo.utils import logging
try:
from clearml import OutputModel, Task
HAVE_CLEARML_LOGGER = True
except (ImportError, ModuleNotFoundError):
HAVE_CLEARML_LOGGER = False
@dataclass
class ClearMLParams:
project: Optional[str] = None
task: Optional[str] = None
connect_pytorch: Optional[bool] = False
model_name: Optional[str] = None
tags: Optional[List[str]] = None
log_model: Optional[bool] = False
log_cfg: Optional[bool] = False
log_metrics: Optional[bool] = False
class ClearMLLogger(Logger):
@property
def name(self) -> str:
return self.clearml_task.name
@property
def version(self) -> str:
return self.clearml_task.id
def __init__(
self, clearml_cfg: DictConfig, log_dir: str, prefix: str, save_best_model: bool, postfix: str = ".nemo"
) -> None:
if not HAVE_CLEARML_LOGGER:
raise ImportError(
"Found create_clearml_logger is True."
"But ClearML not found. Please see the README for installation instructions:"
"https://github.com/allegroai/clearml"
)
self.clearml_task = None
self.clearml_model = None
self.clearml_cfg = clearml_cfg
self.path_nemo_model = os.path.abspath(
os.path.expanduser(os.path.join(log_dir, "checkpoints", prefix + postfix))
)
self.save_best_model = save_best_model
self.prefix = prefix
        self.previous_best_model_path = None
self.last_metrics = None
self.save_blocked = True
self.project_name = os.getenv("CLEARML_PROJECT", clearml_cfg.project if clearml_cfg.project else "NeMo")
self.task_name = os.getenv("CLEARML_TASK", clearml_cfg.task if clearml_cfg.task else f"Trainer {self.prefix}")
tags = ["NeMo"]
if clearml_cfg.tags:
tags.extend(clearml_cfg.tags)
self.clearml_task: Task = Task.init(
project_name=self.project_name,
task_name=self.task_name,
auto_connect_frameworks={"pytorch": clearml_cfg.connect_pytorch},
output_uri=True,
tags=tags,
)
if clearml_cfg.model_name:
model_name = clearml_cfg.model_name
elif self.prefix:
model_name = self.prefix
else:
model_name = self.task_name
if clearml_cfg.log_model:
self.clearml_model: OutputModel = OutputModel(
name=model_name, task=self.clearml_task, tags=tags, framework="NeMo"
)
def log_hyperparams(self, params, *args, **kwargs) -> None:
if self.clearml_model and self.clearml_cfg.log_cfg:
if isinstance(params, Namespace):
params = vars(params)
elif isinstance(params, AttributeDict):
params = dict(params)
params = apply_to_collection(params, (DictConfig, ListConfig), OmegaConf.to_container, resolve=True)
params = apply_to_collection(params, Path, str)
params = OmegaConf.to_yaml(params)
self.clearml_model.update_design(config_text=params)
def log_metrics(self, metrics: Mapping[str, float], step: Optional[int] = None) -> None:
if self.clearml_model and self.clearml_cfg.log_metrics:
metrics = {
k: {
"value": str(v.item() if type(v) == Tensor else v),
"type": str(type(v.item() if type(v) == Tensor else v)),
}
for k, v in metrics.items()
}
self.last_metrics = metrics
def log_table(
self,
key: str,
columns: List[str] = None,
data: List[List[Any]] = None,
dataframe: Any = None,
step: Optional[int] = None,
) -> None:
table: Optional[Union[pd.DataFrame, List[List[Any]]]] = None
if dataframe is not None:
table = dataframe
if columns is not None:
table.columns = columns
if data is not None:
table = data
assert len(columns) == len(table[0]), "number of column names should match the total number of columns"
table.insert(0, columns)
if table is not None:
self.clearml_task.logger.report_table(title=key, series=key, iteration=step, table_plot=table)
def after_save_checkpoint(self, checkpoint_callback: Checkpoint) -> None:
if self.clearml_model:
if self.save_best_model:
if self.save_blocked:
self.save_blocked = False
return None
if not os.path.exists(checkpoint_callback.best_model_path):
return None
                if self.previous_best_model_path == checkpoint_callback.best_model_path:
return None
                self.previous_best_model_path = checkpoint_callback.best_model_path
self._log_model(self.path_nemo_model)
def finalize(self, status: Literal["success", "failed", "aborted"] = "success") -> None:
if status == "success":
self.clearml_task.mark_completed()
elif status == "failed":
self.clearml_task.mark_failed()
elif status == "aborted":
self.clearml_task.mark_stopped()
def _log_model(self, save_path: str) -> None:
if self.clearml_model:
if os.path.exists(save_path):
self.clearml_model.update_weights(
weights_filename=save_path,
upload_uri=self.clearml_task.storage_uri or self.clearml_task._get_default_report_storage_uri(),
auto_delete_file=False,
is_package=True,
)
if self.clearml_cfg.log_metrics and self.last_metrics:
self.clearml_model.set_all_metadata(self.last_metrics)
self.save_blocked = True
else:
logging.warning((f"Logging model enabled, but cant find .nemo file!" f" Path: {save_path}"))
|
NeMo-main
|
nemo/utils/loggers/clearml_logger.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import Namespace
from dataclasses import dataclass
from pathlib import Path
from typing import Optional
from lightning_utilities.core.apply_func import apply_to_collection
from omegaconf import DictConfig, ListConfig, OmegaConf
from pytorch_lightning.loggers import Logger
from pytorch_lightning.utilities.parsing import AttributeDict
from nemo.utils import logging
try:
import dllogger
from dllogger import Verbosity
HAVE_DLLOGGER = True
except (ImportError, ModuleNotFoundError):
HAVE_DLLOGGER = False
try:
from lightning_fabric.utilities.logger import _convert_params, _flatten_dict, _sanitize_callable_params
PL_LOGGER_UTILITIES = True
except (ImportError, ModuleNotFoundError):
PL_LOGGER_UTILITIES = False
@dataclass
class DLLoggerParams:
verbose: Optional[bool] = False
stdout: Optional[bool] = False
json_file: Optional[str] = "./dllogger.json"
class DLLogger(Logger):
@property
def name(self):
return self.__class__.__name__
@property
def version(self):
return None
def __init__(self, stdout: bool, verbose: bool, json_file: str):
if not HAVE_DLLOGGER:
raise ImportError(
"DLLogger was not found. Please see the README for installation instructions: "
"https://github.com/NVIDIA/dllogger"
)
if not PL_LOGGER_UTILITIES:
raise ImportError(
"DLLogger utilities were not found. You probably need to update PyTorch Lightning>=1.9.0. "
"pip install pytorch-lightning -U"
)
verbosity = Verbosity.VERBOSE if verbose else Verbosity.DEFAULT
backends = []
if json_file:
Path(json_file).parent.mkdir(parents=True, exist_ok=True)
backends.append(dllogger.JSONStreamBackend(verbosity, json_file))
if stdout:
backends.append(dllogger.StdOutBackend(verbosity))
if not backends:
logging.warning(
"Neither stdout nor json_file DLLogger parameters were specified." "DLLogger will not log anything."
)
dllogger.init(backends=backends)
def log_hyperparams(self, params, *args, **kwargs):
if isinstance(params, Namespace):
params = vars(params)
elif isinstance(params, AttributeDict):
params = dict(params)
params = apply_to_collection(params, (DictConfig, ListConfig), OmegaConf.to_container, resolve=True)
params = apply_to_collection(params, Path, str)
params = _sanitize_callable_params(_flatten_dict(_convert_params(params)))
dllogger.log(step="PARAMETER", data=params)
def log_metrics(self, metrics, step=None):
if step is None:
step = tuple()
dllogger.log(step=step, data=metrics)
def save(self):
dllogger.flush()
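# --- Illustrative usage sketch (hypothetical file path and values, not part of the original file) ---
# Logs one hyperparameter record and one metrics step to a JSON file when dllogger is installed.
if HAVE_DLLOGGER and PL_LOGGER_UTILITIES:
    _dl_logger = DLLogger(stdout=False, verbose=False, json_file="./dllogger_example.json")
    _dl_logger.log_hyperparams({"lr": 1e-3, "batch_size": 32})
    _dl_logger.log_metrics({"loss": 0.42}, step=1)
    _dl_logger.save()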
|
NeMo-main
|
nemo/utils/loggers/dllogger.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['experimental']
import wrapt
from nemo.utils import logging
@wrapt.decorator
def experimental(wrapped, instance, args, kwargs):
logging.warning(f"`{wrapped}` is experimental and not ready for production yet. Use at your own risk.")
return wrapped(*args, **kwargs)
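# --- Illustrative usage sketch (hypothetical function, not part of the original file) ---
# Calling the decorated function logs the "experimental" warning, then runs it as usual.
@experimental
def _toy_feature(x):
    return x * 2
assert _toy_feature(3) == 6  # a warning is logged on every call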
|
NeMo-main
|
nemo/utils/decorators/experimental.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'deprecated',
]
import functools
import inspect
import wrapt
from nemo.utils import logging
# Remember which deprecation warnings have been printed already.
_PRINTED_WARNING = {}
def deprecated(wrapped=None, version=None, explanation=None):
"""
Decorator which can be used for indicating that a function/class is deprecated and going to be removed.
    Tracks which function/class has already printed the warning, so that the warning is printed only once.
Args:
version: Version in which the function/class will be removed (optional).
explanation: Additional explanation, e.g. "Please, ``use another_function`` instead." (optional).
"""
if wrapped is None:
return functools.partial(deprecated, version=version, explanation=explanation)
@wrapt.decorator
def wrapper(wrapped, instance, args, kwargs):
# Check if we already warned about that function.
if wrapped.__name__ not in _PRINTED_WARNING.keys():
# Add to list so we won't print it again.
_PRINTED_WARNING[wrapped.__name__] = True
# Prepare the warning message.
entity_name = "Class" if inspect.isclass(wrapped) else "Function"
msg = f"{entity_name} ``{wrapped.__name__}`` is deprecated."
# Optionally, add version and explanation.
if version is not None:
msg = f"{msg} It is going to be removed in the {version} version."
if explanation is not None:
msg = f"{msg} {explanation}"
# Display the deprecated warning.
logging.warning(msg)
# Call the function.
return wrapped(*args, **kwargs)
return wrapper(wrapped)
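# --- Illustrative usage sketch (hypothetical function and version, not part of the original file) ---
# The deprecation warning is emitted only on the first call; later calls run silently.
@deprecated(version="2.0", explanation="Please use `_new_helper` instead.")
def _old_helper(x):
    return x + 1
_old_helper(1)  # logs the deprecation warning
_old_helper(2)  # no warning the second time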
|
NeMo-main
|
nemo/utils/decorators/deprecated.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.utils.decorators.deprecated import deprecated
from nemo.utils.decorators.experimental import experimental
from nemo.utils.decorators.port_docs import add_port_docs
|
NeMo-main
|
nemo/utils/decorators/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The "add_port_docs" decorator is needed to nicely generate neural types in Sphynx for input and output ports
__all__ = [
'add_port_docs',
]
import functools
import sys
import wrapt
def _normalize_docstring(docstring):
"""Normalizes the docstring.
    Replaces tabs with spaces, removes leading and trailing blank lines, and
removes any indentation.
Copied from PEP-257:
https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
Args:
docstring: the docstring to normalize
Returns:
The normalized docstring
"""
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
# (we use sys.maxsize because sys.maxint doesn't exist in Python 3)
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
def add_port_docs(wrapped=None, instance=None, value=''):
if wrapped is None:
return functools.partial(add_port_docs, value=value)
@wrapt.decorator
def wrapper(wrapped, instance=None, args=None, kwargs=None):
return wrapped(*args, **kwargs)
decorated = wrapper(wrapped)
try:
port_2_ntype = decorated(instance)
    except Exception:
port_2_ntype = None
port_description = ""
if port_2_ntype is not None:
for port, ntype in port_2_ntype.items():
port_description += "* *" + port + "* : " + str(ntype)
port_description += "\n\n"
__doc__ = _normalize_docstring(wrapped.__doc__) + '\n\n' + str(port_description)
__doc__ = _normalize_docstring(__doc__)
wrapt.FunctionWrapper.__setattr__(decorated, "__doc__", __doc__)
return decorated
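# --- Illustrative sketch (not part of the original file) ---
# _normalize_docstring strips common indentation and surrounding blank lines,
# so the normalized value below compares equal to the plain string.
_normalized = _normalize_docstring(
    """Example docstring.

        Indented body line.
    """
)
assert _normalized == "Example docstring.\n\nIndented body line."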
|
NeMo-main
|
nemo/utils/decorators/port_docs.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
import pytorch_lightning
import torch
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_info
from nemo.collections.common.callbacks import EMA
from nemo.utils import logging
from nemo.utils.app_state import AppState
from nemo.utils.get_rank import is_global_rank_zero
from nemo.utils.model_utils import ckpt_to_dir, inject_model_parallel_rank, uninject_model_parallel_rank
class NeMoModelCheckpoint(ModelCheckpoint):
""" Light wrapper around Lightning's ModelCheckpoint to force a saved checkpoint on train_end.
Extends Lightning's on_save_checkpoint func to save the .nemo file. Saves the .nemo file based
on the best checkpoint saved (according to the monitor value).
Also contains func to save the EMA copy of the model.
"""
def __init__(
self,
always_save_nemo: bool = False,
save_nemo_on_train_end: bool = True,
save_best_model: bool = False,
postfix: str = ".nemo",
n_resume: bool = False,
model_parallel_size: int = None,
**kwargs,
):
# Parse and store "extended" parameters: save_best model and postfix.
self.always_save_nemo = always_save_nemo
self.save_nemo_on_train_end = save_nemo_on_train_end
self.save_best_model = save_best_model
if self.save_best_model and not self.save_nemo_on_train_end:
logging.warning(
(
"Found save_best_model is True and save_nemo_on_train_end is False. "
"Set save_nemo_on_train_end to True to automatically save the best model."
)
)
self.postfix = postfix
self.previous_best_path = ""
self.model_parallel_size = model_parallel_size
# `prefix` is deprecated
if 'prefix' in kwargs:
self.prefix = kwargs.pop('prefix')
else:
self.prefix = ""
# Call the parent class constructor with the remaining kwargs.
super().__init__(**kwargs)
if self.save_top_k != -1 and n_resume:
logging.debug("Checking previous runs")
self.nemo_topk_check_previous_run()
def nemo_topk_check_previous_run(self):
try:
self.best_k_models
self.kth_best_model_path
self.best_model_score
self.best_model_path
except AttributeError:
raise AttributeError("Lightning's ModelCheckpoint was updated. NeMoModelCheckpoint will need an update.")
self.best_k_models = {}
self.kth_best_model_path = ""
self.best_model_score = None
self.best_model_path = ""
checkpoints = list(path for path in self._saved_checkpoint_paths if not self._is_ema_filepath(path))
for checkpoint in checkpoints:
if 'mp_rank' in str(checkpoint) or 'tp_rank' in str(checkpoint):
checkpoint = uninject_model_parallel_rank(checkpoint)
checkpoint = str(checkpoint)
# second case is for distributed checkpoints, since they are a directory there's no extension
if checkpoint[-10:] == '-last.ckpt' or checkpoint[-5:] == '-last':
continue
index = checkpoint.find(self.monitor) + len(self.monitor) + 1 # Find monitor in str + 1 for '='
if index != len(self.monitor):
match = re.search('[A-z]', checkpoint[index:])
if match:
                    value = checkpoint[index : index + match.start() - 1]  # -1 due to separator hyphen
self.best_k_models[checkpoint] = float(value)
if len(self.best_k_models) < 1:
return # No saved checkpoints yet
_reverse = False if self.mode == "min" else True
best_k_models = sorted(self.best_k_models, key=self.best_k_models.get, reverse=_reverse)
# This section should be ok as rank zero will delete all excess checkpoints, since all other ranks are
# instantiated after rank zero. models_to_delete should be 0 for all other ranks.
if self.model_parallel_size is not None:
# check for distributed checkpoint
if checkpoints[0].is_dir():
models_to_delete = len(best_k_models) - self.save_top_k
else:
models_to_delete = len(best_k_models) - self.model_parallel_size * self.save_top_k
else:
models_to_delete = len(best_k_models) - self.save_top_k
models_to_delete = max(0, models_to_delete)
logging.debug(f'Number of models to delete: {models_to_delete}')
# If EMA enabled, delete the additional EMA weights
ema_enabled = self._has_ema_ckpts(self._saved_checkpoint_paths)
for _ in range(models_to_delete):
model = best_k_models.pop(-1)
self.best_k_models.pop(model)
self._del_model_without_trainer(model)
if ema_enabled and self._fs.exists(self._ema_format_filepath(model)):
self._del_model_without_trainer(self._ema_format_filepath(model))
logging.debug(f"Removed checkpoint: {model}")
self.kth_best_model_path = best_k_models[-1]
self.best_model_path = best_k_models[0]
self.best_model_score = self.best_k_models[self.best_model_path]
def on_save_checkpoint(self, trainer, pl_module, checkpoint):
output = super().on_save_checkpoint(trainer, pl_module, checkpoint)
if not self.always_save_nemo:
return output
# Load the best model and then re-save it
app_state = AppState()
if app_state.model_parallel_size is not None and app_state.model_parallel_size > 1:
logging.warning(f'always_save_nemo will slow down training for model_parallel > 1.')
# since we are creating tarfile artifacts we need to update .nemo path
app_state.model_restore_path = os.path.abspath(
os.path.expanduser(os.path.join(self.dirpath, self.prefix + self.postfix))
)
if app_state.model_parallel_size is not None and app_state.model_parallel_size > 1:
maybe_injected_best_model_path = inject_model_parallel_rank(self.best_model_path)
else:
maybe_injected_best_model_path = self.best_model_path
if self.save_best_model:
if not os.path.exists(maybe_injected_best_model_path):
return
if self.best_model_path == self.previous_best_path:
return output
            self.previous_best_path = self.best_model_path
old_state_dict = deepcopy(pl_module.state_dict())
checkpoint = torch.load(maybe_injected_best_model_path, map_location='cpu')
if 'state_dict' in checkpoint:
checkpoint = checkpoint['state_dict']
            # load the best checkpoint's state dict into the module
pl_module.load_state_dict(checkpoint, strict=True)
if torch.distributed.is_initialized():
torch.distributed.barrier()
pl_module.save_to(save_path=app_state.model_restore_path)
logging.info(f"New best .nemo model saved to: {app_state.model_restore_path}")
pl_module.load_state_dict(old_state_dict, strict=True)
else:
if torch.distributed.is_initialized():
torch.distributed.barrier()
pl_module.save_to(save_path=app_state.model_restore_path)
logging.info(f"New .nemo model saved to: {app_state.model_restore_path}")
return output
def on_train_end(self, trainer, pl_module):
if trainer.fast_dev_run:
return None
# check if we need to save a last checkpoint manually as validation isn't always run based on the interval
if self.save_last and trainer.val_check_interval != 0:
should_save_last_checkpoint = False
if isinstance(trainer.val_check_interval, float) and trainer.val_check_interval % trainer.global_step != 0:
should_save_last_checkpoint = True
if isinstance(trainer.val_check_interval, int) and trainer.global_step % trainer.val_check_interval != 0:
should_save_last_checkpoint = True
if should_save_last_checkpoint:
monitor_candidates = self._monitor_candidates(trainer)
super()._save_last_checkpoint(trainer, monitor_candidates)
# Call parent on_train_end() to save the -last checkpoint
super().on_train_end(trainer, pl_module)
# Load the best model and then re-save it
if self.save_best_model:
# wait for all processes
trainer.strategy.barrier("SaveBestCheckpointConnector.resume_end")
if self.best_model_path == "":
logging.warning(
f"{self} was told to save the best checkpoint at the end of training, but no saved checkpoints "
"were found. Saving latest model instead."
)
else:
self.best_model_path = trainer.strategy.broadcast(self.best_model_path)
trainer._checkpoint_connector.restore(self.best_model_path)
if self.save_nemo_on_train_end:
pl_module.save_to(save_path=os.path.join(self.dirpath, self.prefix + self.postfix))
def _del_model_without_trainer(self, filepath: str) -> None:
filepath = Path(filepath)
        # check if filepath is a distributed checkpoint
if ckpt_to_dir(filepath).is_dir():
if is_global_rank_zero():
try:
dist_ckpt = ckpt_to_dir(filepath)
shutil.rmtree(dist_ckpt)
logging.info(f"Removed distributed checkpoint: {dist_ckpt}")
except:
logging.info(f"Tried to remove distributed checkpoint: {dist_ckpt} but failed.")
else:
app_state = AppState()
# legacy model parallel checkpoint
if app_state.model_parallel_size is not None and app_state.model_parallel_size > 1:
# filepath needs to be updated to include mp_rank
filepath = inject_model_parallel_rank(filepath)
# each model parallel rank needs to remove its model
if is_global_rank_zero() or (
app_state.model_parallel_size is not None and app_state.data_parallel_rank == 0
):
try:
self._fs.rm(filepath)
logging.info(f"Removed checkpoint: {filepath}")
except:
logging.info(f"Tried to remove checkpoint: {filepath} but failed.")
def _ema_callback(self, trainer: 'pytorch_lightning.Trainer') -> Optional[EMA]:
ema_callback = None
for callback in trainer.callbacks:
if isinstance(callback, EMA):
ema_callback = callback
return ema_callback
def _save_checkpoint(self, trainer: 'pytorch_lightning.Trainer', filepath: str) -> None:
ema_callback = self._ema_callback(trainer)
if ema_callback is not None:
with ema_callback.save_original_optimizer_state(trainer):
super()._save_checkpoint(trainer, filepath)
# save EMA copy of the model as well.
with ema_callback.save_ema_model(trainer):
filepath = self._ema_format_filepath(filepath)
if self.verbose:
rank_zero_info(f"Saving EMA weights to separate checkpoint {filepath}")
super()._save_checkpoint(trainer, filepath)
else:
super()._save_checkpoint(trainer, filepath)
def _remove_checkpoint(self, trainer: "pytorch_lightning.Trainer", filepath: str) -> None:
super()._remove_checkpoint(trainer, filepath)
ema_callback = self._ema_callback(trainer)
if ema_callback is not None:
# remove EMA copy of the state dict as well.
filepath = self._ema_format_filepath(filepath)
super()._remove_checkpoint(trainer, filepath)
def _ema_format_filepath(self, filepath: str) -> str:
return filepath.replace(self.FILE_EXTENSION, f'-EMA{self.FILE_EXTENSION}')
def _has_ema_ckpts(self, checkpoints: Iterable[Path]) -> bool:
return any(self._is_ema_filepath(checkpoint_path) for checkpoint_path in checkpoints)
def _is_ema_filepath(self, filepath: Union[Path, str]) -> bool:
return str(filepath).endswith(f'-EMA{self.FILE_EXTENSION}')
@property
def _saved_checkpoint_paths(self) -> Iterable[Path]:
# distributed checkpoints are directories so we check for them here
dist_checkpoints = [d for d in list(Path(self.dirpath).glob("*")) if d.is_dir()]
if dist_checkpoints:
return dist_checkpoints
else:
return Path(self.dirpath).rglob("*.ckpt")
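# --- Illustrative usage sketch (hypothetical paths/arguments, not part of the original file) ---
# exp_manager usually builds this callback; a direct construction looks like a regular
# Lightning ModelCheckpoint plus the .nemo-related options.
_example_ckpt_callback = NeMoModelCheckpoint(
    dirpath="./nemo_experiments/example/checkpoints",
    monitor="val_loss",
    mode="min",
    save_top_k=3,
    save_best_model=True,
)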
|
NeMo-main
|
nemo/utils/callbacks/nemo_model_checkpoint.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.utils.callbacks.nemo_model_checkpoint import NeMoModelCheckpoint
from nemo.utils.callbacks.preemption import PreemptionCallback
|
NeMo-main
|
nemo/utils/callbacks/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import signal
import sys
import torch
from pytorch_lightning.callbacks import Callback
from nemo.utils import logging
class PreemptionCallback(Callback):
"""
PreemptionCallback class creates a callback that checks for preemption during training at the end of every step.
    Upon preemption it saves the current training state in a checkpoint ending in *last.ckpt and then exits training
    gracefully, so that the run can later resume from the same step without wasting compute.
    PreemptionCallback is enabled by default via the arg create_preemption_callback under ExpManagerConfig. To disable it,
    pass create_preemption_callback: False in your config file.
"""
def __init__(self, checkpoint_callback, sig=None):
self.sig = sig
if self.sig is None:
self.sig = signal.SIGTERM
self.checkpoint_callback = checkpoint_callback
self.preemption_enabled = False
@property
def interrupted(self):
interrupted = torch.tensor(self._interrupted, device=torch.cuda.current_device(), dtype=torch.int32)
torch.distributed.broadcast(interrupted, 0)
interrupted = bool(interrupted.item())
return interrupted
def on_train_start(self, trainer, pl_module):
"""
Defines custom handlers at the beginning of training to be executed when the
preemption signal is received.
"""
        # Check if torch distributed is initialized, as it's needed for broadcasting the preemption signal to all ranks
if not (torch.distributed.is_available() and torch.distributed.is_initialized()):
logging.info("Preemption requires torch distributed to be initialized, disabling preemption")
else:
self.preemption_enabled = True
            # Bool flag initialized to False and set to True when the preemption signal is received
self._interrupted = False
self.released = False
self.original_handler = signal.getsignal(self.sig)
            # Master handler executed only by rank 0 when the preemption signal is received, to avoid deadlock conditions
def master_handler(signum, frame):
self.release()
self._interrupted = True
# Handler executed by the non zero ranks
def ignoring_handler(signum, frame):
self.release()
self.private_rank = torch.distributed.get_rank()
if self.private_rank == 0:
signal.signal(self.sig, master_handler)
else:
signal.signal(self.sig, ignoring_handler)
return self
def on_train_end(self, trainer, pl_module):
if self.preemption_enabled:
self.release()
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx: int):
if self.preemption_enabled:
# check if the job was preempted at the end of every training step/iteration
# NOTE: "self.interrupted" is a property which triggers a
# distributed broadcast of "_interrupted" flag from rank 0 to all other
# ranks, to avoid performance overheads it's best to store the result in
# a regular local variable
interrupted = self.interrupted
if interrupted:
logging.info("Received SIGTERM, saving checkpoint and exiting")
monitor_candidates = self.checkpoint_callback._monitor_candidates(trainer)
self.checkpoint_callback._save_last_checkpoint(trainer, monitor_candidates)
sys.exit(0)
def release(self):
if self.released:
return False
signal.signal(self.sig, self.original_handler)
self.released = True
return True
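# --- Illustrative usage sketch (hypothetical paths, not part of the original file) ---
# The callback wraps an existing checkpoint callback and is passed to the Trainer
# alongside it; exp_manager performs the equivalent wiring automatically.
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
_example_ckpt = ModelCheckpoint(dirpath="./checkpoints", monitor="val_loss", save_last=True)
_example_trainer = Trainer(callbacks=[_example_ckpt, PreemptionCallback(checkpoint_callback=_example_ckpt)])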
|
NeMo-main
|
nemo/utils/callbacks/preemption.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
NeMo-main
|
nemo/utils/formatters/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from nemo.constants import NEMO_ENV_VARNAME_ENABLE_COLORING
from nemo.utils.env_var_parsing import get_envbool
__all__ = ["check_color_support", "to_unicode"]
def check_color_support():
    # Colors can be forced with an env variable (and only on non-Windows platforms)
    if not sys.platform.lower().startswith("win") and get_envbool(NEMO_ENV_VARNAME_ENABLE_COLORING, False):
        return True
    return False
def to_unicode(value):
"""
Converts a string argument to a unicode string.
If the argument is already a unicode string or None, it is returned
unchanged. Otherwise it must be a byte string and is decoded as utf8.
"""
try:
if isinstance(value, (str, type(None))):
return value
if not isinstance(value, bytes):
raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
return value.decode("utf-8")
except UnicodeDecodeError:
return repr(value)
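# --- Illustrative sketch (not part of the original file) ---
# to_unicode passes str/None through unchanged and decodes UTF-8 byte strings.
assert to_unicode("already text") == "already text"
assert to_unicode(b"caf\xc3\xa9") == "café"
assert to_unicode(None) is None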
|
NeMo-main
|
nemo/utils/formatters/utils.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from nemo.utils.formatters.colors import Fore as ForegroundColors
from nemo.utils.formatters.utils import check_color_support, to_unicode
__all__ = ["BaseNeMoFormatter"]
class BaseFormatter(logging.Formatter):
"""
Log formatter used in Tornado. Key features of this formatter are:
* Color support when logging to a terminal that supports it.
* Timestamps on every log line.
* Robust against str/bytes encoding problems.
"""
DEFAULT_FORMAT = "%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s"
DEFAULT_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
DEFAULT_COLORS = {
logging.DEBUG: ForegroundColors.CYAN,
logging.INFO: ForegroundColors.GREEN,
logging.WARNING: ForegroundColors.YELLOW,
logging.ERROR: ForegroundColors.MAGENTA,
logging.CRITICAL: ForegroundColors.RED,
}
def __init__(self, color=True, fmt=None, datefmt=None, colors=None):
r"""
:arg bool color: Enables color support.
:arg string fmt: Log message format.
It will be applied to the attributes dict of log records. The
text between ``%(color)s`` and ``%(end_color)s`` will be colored
depending on the level if color support is on.
:arg dict colors: color mappings from logging level to terminal color
code
:arg string datefmt: Datetime format.
Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
.. versionchanged:: 3.2
Added ``fmt`` and ``datefmt`` arguments.
"""
if fmt is None:
fmt = self.DEFAULT_FORMAT
if datefmt is None:
datefmt = self.DEFAULT_DATE_FORMAT
if colors is None:
colors = self.DEFAULT_COLORS
logging.Formatter.__init__(self, datefmt=datefmt)
self._fmt = fmt
self._colors = {}
self._normal = ""
if color and check_color_support():
self._colors = colors
self._normal = ForegroundColors.RESET
def format(self, record):
try:
message = record.getMessage()
assert isinstance(message, str) # guaranteed by logging
# Encoding notes: The logging module prefers to work with character
# strings, but only enforces that log messages are instances of
# basestring. In python 2, non-ascii bytestrings will make
# their way through the logging framework until they blow up with
# an unhelpful decoding error (with this formatter it happens
# when we attach the prefix, but there are other opportunities for
# exceptions further along in the framework).
#
# If a byte string makes it this far, convert it to unicode to
# ensure it will make it out to the logs. Use repr() as a fallback
# to ensure that all byte strings can be converted successfully,
# but don't do it by default so we don't add extra quotes to ascii
# bytestrings. This is a bit of a hacky place to do this, but
# it's worth it since the encoding errors that would otherwise
# result are so useless (and tornado is fond of using utf8-encoded
# byte strings wherever possible).
record.message = to_unicode(message)
except Exception as e:
record.message = "Bad message (%r): %r" % (e, record.__dict__)
record.asctime = self.formatTime(record, self.datefmt)
if record.levelno in self._colors:
record.color = self._colors[record.levelno]
record.end_color = self._normal
else:
record.color = record.end_color = ""
formatted = self._fmt % record.__dict__
if record.exc_info:
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
# exc_text contains multiple lines. We need to _safe_unicode
# each line separately so that non-utf8 bytes don't cause
# all the newlines to turn into '\n'.
lines = [formatted.rstrip()]
lines.extend(to_unicode(ln) for ln in record.exc_text.split("\n"))
formatted = "\n".join(lines)
return formatted.replace("\n", "\n ")
class BaseNeMoFormatter(BaseFormatter):
DEFAULT_FORMAT = "%(color)s[NeMo %(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s"
class DebugNeMoFormatter(BaseFormatter):
DEFAULT_FORMAT = (
"%(color)s[NeMo %(levelname)1.1s %(asctime)s %(module)s:%(lineno)d rank:%(rank)s]%(end_color)s %(message)s"
)
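# --- Illustrative usage sketch (hypothetical logger name, not part of the original file) ---
# Attaches the NeMo formatter to a plain stdlib handler; colour codes are only
# emitted when check_color_support() allows them.
_example_handler = logging.StreamHandler()
_example_handler.setFormatter(BaseNeMoFormatter(color=True))
_example_logger = logging.getLogger("nemo_formatter_example")
_example_logger.addHandler(_example_handler)
_example_logger.warning("formatted through BaseNeMoFormatter")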
|
NeMo-main
|
nemo/utils/formatters/base.py
|
# Source: https://github.com/tartley/colorama/blob/master/colorama/ansi.py
# Copyright: Jonathan Hartley 2013. BSD 3-Clause license.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CSI = "\033["
OSC = "\033]"
BEL = "\007"
def code_to_chars(code):
return CSI + str(code) + "m"
def set_title(title):
return OSC + "2;" + title + BEL
def clear_screen(mode=2):
return CSI + str(mode) + "J"
def clear_line(mode=2):
return CSI + str(mode) + "K"
class AnsiCodes(object):
def __init__(self):
# the subclasses declare class attributes which are numbers.
# Upon instantiation we define instance attributes, which are the same
# as the class attributes but wrapped with the ANSI escape sequence
for name in dir(self):
if not name.startswith("_"):
value = getattr(self, name)
setattr(self, name, code_to_chars(value))
class AnsiCursor(object):
def UP(self, n=1):
return CSI + str(n) + "A"
def DOWN(self, n=1):
return CSI + str(n) + "B"
def FORWARD(self, n=1):
return CSI + str(n) + "C"
def BACK(self, n=1):
return CSI + str(n) + "D"
def POS(self, x=1, y=1):
return CSI + str(y) + ";" + str(x) + "H"
class AnsiFore(AnsiCodes):
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 90
LIGHTRED_EX = 91
LIGHTGREEN_EX = 92
LIGHTYELLOW_EX = 93
LIGHTBLUE_EX = 94
LIGHTMAGENTA_EX = 95
LIGHTCYAN_EX = 96
LIGHTWHITE_EX = 97
class AnsiBack(AnsiCodes):
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 100
LIGHTRED_EX = 101
LIGHTGREEN_EX = 102
LIGHTYELLOW_EX = 103
LIGHTBLUE_EX = 104
LIGHTMAGENTA_EX = 105
LIGHTCYAN_EX = 106
LIGHTWHITE_EX = 107
class AnsiStyle(AnsiCodes):
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiFore()
Back = AnsiBack()
Style = AnsiStyle()
Cursor = AnsiCursor()
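# --- Illustrative sketch (not part of the original file) ---
# Wrapping text in a foreground code plus the reset style prints it in colour on
# ANSI-capable terminals.
print(Fore.GREEN + "ready" + Style.RESET_ALL)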
|
NeMo-main
|
nemo/utils/formatters/colors.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
NeMo-main
|
nemo/collections/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nemo.collections.common.callbacks
from nemo.collections.common import data, losses, parts, tokenizers
from nemo.package_info import __version__
# Set collection version equal to NeMo version.
__version = __version__
# Authorship.
__author__ = "NVIDIA Corporation"
# Set collection name.
__description__ = "Common collection"
|
NeMo-main
|
nemo/collections/common/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import string
from collections import Counter
from typing import List, Union
import torch
from torchmetrics import Metric
__all__ = ['TopKClassificationAccuracy']
class TopKClassificationAccuracy(Metric):
"""
This metric computes numerator and denominator for Overall Accuracy between logits and labels.
When doing distributed training/evaluation the result of res=TopKClassificationAccuracy(logits, labels) calls
will be all-reduced between all workers using SUM operations.
Here contains two numbers res=[correctly_predicted, total_samples]. Accuracy=correctly_predicted/total_samples.
    If used with a PyTorch Lightning LightningModule, include correct_count and total_count inside validation_step results.
    Then aggregate (sum) them at the end of the validation epoch to correctly compute the validation accuracy.
Example:
def validation_step(self, batch, batch_idx):
...
correct_count, total_count = self._accuracy(logits, labels)
self.val_outputs = {'val_loss': loss_value, 'val_correct_count': correct_count, 'val_total_count': total_count}
return self.val_outputs
def on_validation_epoch_end(self):
...
val_loss_mean = torch.stack([x['val_loss'] for x in self.val_outputs]).mean()
            correct_counts = torch.stack([x['val_correct_count'] for x in self.val_outputs])
            total_counts = torch.stack([x['val_total_count'] for x in self.val_outputs])
topk_scores = compute_topk_accuracy(correct_counts, total_counts)
tensorboard_log = {'val_loss': val_loss_mean}
for top_k, score in zip(self._accuracy.top_k, topk_scores):
tensorboard_log['val_epoch_top@{}'.format(top_k)] = score
self.val_outputs.clear() # free memory
return {'log': tensorboard_log}
Args:
top_k: Optional list of integers. Defaults to [1].
Returns:
res: a torch.Tensor object with two elements: [correct_count, total_count]. To correctly compute average
accuracy, compute acc=correct_count/total_count
"""
full_state_update = True
def __init__(self, top_k=None, dist_sync_on_step=False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
if top_k is None:
top_k = [1]
self.top_k = top_k
self.add_state(
"correct_counts_k", default=torch.zeros(len(self.top_k)), dist_reduce_fx='sum', persistent=False
)
self.add_state("total_counts_k", default=torch.zeros(len(self.top_k)), dist_reduce_fx='sum', persistent=False)
@torch.no_grad()
def top_k_predicted_labels(self, logits: torch.Tensor) -> torch.Tensor:
max_k = max(self.top_k)
_, predictions = logits.topk(max_k, dim=1, largest=True, sorted=True)
return predictions
def update(self, logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
predictions = self.top_k_predicted_labels(logits)
predictions = predictions.t()
correct = predictions.eq(labels.view(1, -1)).expand_as(predictions)
correct_counts_k = []
total_counts_k = []
for k in self.top_k:
correct_k = correct[:k].reshape(-1).long().sum()
total_k = labels.shape[0]
correct_counts_k.append(correct_k)
total_counts_k.append(total_k)
self.correct_counts_k = torch.tensor(correct_counts_k, dtype=labels.dtype, device=labels.device)
self.total_counts_k = torch.tensor(total_counts_k, dtype=labels.dtype, device=labels.device)
def compute(self):
"""
Computes the top-k accuracy.
Returns:
A list of length `K`, such that k-th index corresponds to top-k accuracy
over all distributed processes.
"""
if not len(self.correct_counts_k) == len(self.top_k) == len(self.total_counts_k):
raise ValueError("length of counts must match to topk length")
if self.top_k == [1]:
return [self.correct_counts_k.float() / self.total_counts_k]
else:
top_k_scores = compute_topk_accuracy(self.correct_counts_k, self.total_counts_k)
return top_k_scores
@property
def top_k(self) -> List[int]:
return self._top_k
@top_k.setter
def top_k(self, value: List[int]):
if value is None:
value = [1]
if type(value) == int:
value = [value]
if type(value) != list:
value = list(value)
self._top_k = value
def compute_topk_accuracy(correct_counts_k, total_counts_k):
"""
Computes the top-k accuracy
Args:
correct_counts: Tensor of shape [K], K being the top-k parameter.
total_counts: Tensor of shape [K], and K being the top-k parameter.
Returns:
A list of length `K`, such that k-th index corresponds to top-k accuracy
over all distributed processes.
"""
top_k_scores = []
for ki in range(len(correct_counts_k)):
correct_count = correct_counts_k[ki].item()
total_count = total_counts_k[ki].item()
top_k_scores.append(correct_count / float(total_count))
return top_k_scores
class ExactStringPerCategoryMatchMetric(Metric):
def __init__(self, categories=[], dist_sync_on_step=False, *args, **kwargs):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.categories = set(categories)
self.add_state("correct", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
for category in categories:
self.add_state(f"{category}_total", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state(f"{category}_correct", default=torch.tensor(0), dist_reduce_fx="sum")
def update(self, pred: str, target: str, category: str = None):
if pred == target:
self.correct += 1
self.total += 1
if category is None:
return
if category in self.categories:
val = getattr(self, f"{category}_total")
setattr(self, f"{category}_total", val + 1)
if pred == target:
val = getattr(self, f"{category}_correct")
setattr(self, f"{category}_correct", val + 1)
else:
            logging.warning(f'{category} is not in the pre-defined list')
def compute(self):
results = {}
results['acc'] = self.correct.float() / self.total
for category in self.categories:
results[category] = getattr(self, f"{category}_correct") / getattr(self, f"{category}_total")
for category in self.categories:
results[f"{category}_total"] = getattr(self, f"{category}_total")
return results
class ExactStringMatchMetric(Metric):
def __init__(self, dist_sync_on_step=False, *args, **kwargs):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.add_state("correct", default=torch.tensor(0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
def update(self, pred: str, target: str):
if pred == target:
self.correct += 1
self.total += 1
def compute(self):
return self.correct.float() / self.total
class TokenF1Score(Metric):
"""Taken from the official evaluation script for v1.1 of the SQuAD dataset"""
def __init__(self, dist_sync_on_step=False, *args, **kwargs):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.add_state("correct", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
def update(self, pred: str, target: Union[str, List[str]]):
if isinstance(target, str):
self.correct += self.f1_score(pred, target)
elif isinstance(target, list):
self.correct += max([self.f1_score(pred, tgt) for tgt in target])
self.total += 1
def compute(self):
return self.correct.float() / self.total
def f1_score(self, prediction, ground_truth):
prediction_tokens = self.normalize(prediction).split()
ground_truth_tokens = self.normalize(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0.0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def normalize(self, s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
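# --- Illustrative usage sketch (random data, not part of the original file) ---
# Computes top-1 and top-5 accuracy for a small batch of logits.
_example_metric = TopKClassificationAccuracy(top_k=[1, 5])
_example_logits = torch.randn(8, 10)
_example_labels = torch.randint(0, 10, (8,))
_example_metric.update(_example_logits, _example_labels)
print(_example_metric.compute())  # one score per requested k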
|
NeMo-main
|
nemo/collections/common/metrics/classification_accuracy.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.distributions.categorical import Categorical
from torchmetrics import Metric
__all__ = ['Perplexity']
class Perplexity(Metric):
"""
This class computes mean perplexity of distributions in the last dimension of inputs. It is a wrapper around
:doc:`torch.distributions.Categorical.perplexity<pytorch:distributions>` method. You have to provide either
``probs`` or ``logits`` to the :meth:`update` method. The class computes perplexities for distributions passed to
:meth:`update` method in ``probs`` or ``logits`` arguments and averages the perplexities. Reducing results between
all workers is done via SUM operations.
See `PyTorch Lightning Metrics <https://pytorch-lightning.readthedocs.io/en/stable/ecosystem/metrics.html>`_ for the metric usage instructions.
Args:
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step.
process_group:
Specify the process group on which synchronization is called. default: ``None`` (which selects the entire
world)
validate_args:
            If ``True``, the values passed to :meth:`update` are checked: ``logits`` must not contain NaNs and
            the last dimension of ``probs`` must be a valid probability distribution.
"""
full_state_update = True
def __init__(self, dist_sync_on_step=False, process_group=None, validate_args=True):
super().__init__(dist_sync_on_step=dist_sync_on_step, process_group=process_group)
self.validate_args = validate_args
self.add_state('perplexities_sum', torch.tensor(0.0, dtype=torch.float64), dist_reduce_fx='sum')
# Total number of distributions seen since last reset
self.add_state('num_distributions', torch.tensor(0, dtype=torch.int64), dist_reduce_fx='sum')
def update(self, probs=None, logits=None):
"""
Updates :attr:`perplexities_sum` and :attr:`num_distributions`.
Args:
probs: A ``torch.Tensor`` which innermost dimension is valid probability distribution.
logits: A ``torch.Tensor`` without NaNs.
"""
d = Categorical(
None if probs is None else probs.detach(),
None if logits is None else logits.detach(),
validate_args=self.validate_args,
)
ppl = d.perplexity()
self.num_distributions += ppl.numel()
self.perplexities_sum += ppl.sum()
def compute(self):
"""
Returns perplexity across all workers and resets to 0 :attr:`perplexities_sum` and :attr:`num_distributions`.
"""
if self.num_distributions.eq(0):
return None
return self.perplexities_sum / self.num_distributions
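# --- Illustrative usage sketch (random data, not part of the original file) ---
# Mean perplexity of a batch of categorical distributions given as logits.
_example_ppl = Perplexity()
_example_ppl.update(logits=torch.randn(4, 16, 100))
print(_example_ppl.compute())  # averaged over the 4 * 16 distributions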
|
NeMo-main
|
nemo/collections/common/metrics/perplexity.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.common.metrics.classification_accuracy import TopKClassificationAccuracy
from nemo.collections.common.metrics.global_average_loss_metric import GlobalAverageLossMetric
from nemo.collections.common.metrics.metric_string_to_torchmetric import MetricStringToTorchMetric
from nemo.collections.common.metrics.perplexity import Perplexity
|
NeMo-main
|
nemo/collections/common/metrics/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torchmetrics import Metric
__all__ = ['GlobalAverageLossMetric']
class GlobalAverageLossMetric(Metric):
"""
    This class averages the loss across multiple processes when a distributed backend is used. The true average is
    computed, not a running average. It does not accumulate gradients, so the averaged loss cannot be used for optimization.
If ``take_avg_loss`` is ``True``, the :meth:`update` method ``loss`` argument has to be a mean loss. If
``take_avg_loss`` is ``False`` then the :meth:`update` method ``loss`` argument has to be a sum of losses.
See :doc:`PyTorch Lightning Metrics<pytorch-lightning:metrics>` for the metric usage instruction.
Args:
dist_sync_on_step:
Synchronize metric state across processes at each method :meth:`forward` call before returning the
value at the step
process_group:
Specify the process group on which synchronization is called. default: ``None`` (which selects the entire
world)
        take_avg_loss:
            If ``True``, the ``loss`` argument of :meth:`update` has to be a mean loss. If ``False``, the ``loss``
            argument of :meth:`update` has to be a sum of losses. default: ``True``
"""
full_state_update = True
def __init__(self, dist_sync_on_step=False, process_group=None, take_avg_loss=True):
super().__init__(dist_sync_on_step=dist_sync_on_step, process_group=process_group)
self.add_state("loss_sum", torch.tensor(0.0, dtype=torch.float64), dist_reduce_fx='sum')
self.add_state("num_measurements", torch.tensor(0, dtype=torch.int64), dist_reduce_fx='sum')
self.take_avg_loss = take_avg_loss
def update(self, loss, num_measurements):
"""
Updates :attr:`loss_sum` and :attr:`num_measurements`.
Args:
loss: A float zero dimensional ``torch.Tensor`` which is either sum or average of losses for processed
examples. See ``take_avg_loss`` parameter of :meth:`__init__`.
num_measurements: An integer zero dimensional ``torch.Tensor`` which contains the number of loss measurements.
The sum or mean of the results of these measurements is passed in the ``loss`` parameter.
"""
if self.take_avg_loss:
self.loss_sum += loss.detach() * num_measurements
else:
self.loss_sum += loss.detach()
self.num_measurements += num_measurements
def compute(self):
"""
Returns mean loss.
"""
if self.num_measurements.eq(0):
return torch.tensor(float('nan'))
return self.loss_sum / self.num_measurements
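# --- Editor's sketch (not part of the original file): a minimal single-process usage
# example of GlobalAverageLossMetric. The loss values and batch sizes are illustrative.
if __name__ == "__main__":
    metric = GlobalAverageLossMetric(take_avg_loss=True)
    # each update supplies the mean loss over a batch and the number of measurements in it
    metric.update(loss=torch.tensor(0.7), num_measurements=torch.tensor(32))
    metric.update(loss=torch.tensor(0.5), num_measurements=torch.tensor(64))
    # expected: (0.7 * 32 + 0.5 * 64) / 96
    print(metric.compute())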
|
NeMo-main
|
nemo/collections/common/metrics/global_average_loss_metric.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics import Accuracy, AveragePrecision, F1Score, MatthewsCorrCoef, PearsonCorrCoef, SpearmanCorrCoef
from torchmetrics.text.rouge import ROUGEScore
from nemo.collections.common.metrics.classification_accuracy import ExactStringMatchMetric, TokenF1Score
__all__ = ['MetricStringToTorchMetric']
# Dictionary that maps a metric string name to its corresponding torchmetric class.
MetricStringToTorchMetric = {
'accuracy': Accuracy,
'average_precision': AveragePrecision,
'f1': F1Score,
'token_f1': TokenF1Score,
'pearson_corr_coef': PearsonCorrCoef,
'spearman_corr_coef': SpearmanCorrCoef,
'matthews_corr_coef': MatthewsCorrCoef,
'exact_string_match': ExactStringMatchMetric,
'rouge': ROUGEScore,
}
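# --- Editor's sketch (not part of the original file): resolving a metric class by its string
# name. The constructor arguments below follow the torchmetrics Accuracy API and are an
# assumption of this example, not something defined in this module.
if __name__ == "__main__":
    metric_cls = MetricStringToTorchMetric['accuracy']
    metric = metric_cls(task='multiclass', num_classes=3)
    print(type(metric).__name__)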
|
NeMo-main
|
nemo/collections/common/metrics/metric_string_to_torchmetric.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from nemo.core.classes import Serialization, Typing, typecheck
from nemo.core.neural_types import LabelsType, LogitsType, LogprobsType, LossType, MaskType, NeuralType
from nemo.utils import logging
__all__ = ['CrossEntropyLoss', 'NLLLoss']
class CrossEntropyLoss(nn.CrossEntropyLoss, Serialization, Typing):
"""
CrossEntropyLoss
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"logits": NeuralType(['B'] + ['ANY'] * (self._logits_dim - 1), LogitsType()),
"labels": NeuralType(['B'] + ['ANY'] * (self._logits_dim - 2), LabelsType()),
"loss_mask": NeuralType(['B'] + ['ANY'] * (self._logits_dim - 2), MaskType(), optional=True),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(self, logits_ndim=2, weight=None, reduction='mean', ignore_index=-100):
"""
Args:
logits_ndim (int): number of dimensions (or rank) of the logits tensor
weight (list): list of rescaling weight given to each class
reduction (str): type of the reduction over the batch
"""
if weight is not None and not torch.is_tensor(weight):
weight = torch.FloatTensor(weight)
logging.info(f"Weighted Cross Entropy loss with weight {weight}")
super().__init__(weight=weight, reduction=reduction, ignore_index=ignore_index)
self._logits_dim = logits_ndim
@typecheck()
def forward(self, logits, labels, loss_mask=None):
"""
Args:
logits (float): output of the classifier
labels (long): ground truth labels
loss_mask (bool/float/int): tensor to specify the masking
"""
logits_flatten = torch.flatten(logits, start_dim=0, end_dim=-2)
labels_flatten = torch.flatten(labels, start_dim=0, end_dim=-1)
if loss_mask is not None:
if loss_mask.dtype is not torch.bool:
loss_mask = loss_mask > 0.5
loss_mask_flatten = torch.flatten(loss_mask, start_dim=0, end_dim=-1)
logits_flatten = logits_flatten[loss_mask_flatten]
labels_flatten = labels_flatten[loss_mask_flatten]
if len(labels_flatten) == 0:
return super().forward(logits, torch.argmax(logits, dim=-1))
loss = super().forward(logits_flatten, labels_flatten)
return loss
class NLLLoss(nn.NLLLoss, Serialization, Typing):
"""
NLLLoss
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"log_probs": NeuralType(("B", "T", "D"), LogprobsType()),
"labels": NeuralType(("B", "T"), LabelsType()),
"output_mask": NeuralType(("B", "T"), MaskType(), optional=True),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(self, log_probs_ndim=2, weight=None, reduction='mean', ignore_index=-100):
"""
Args:
log_probs_ndim (int): number of dimensions (or rank) of the logprobs tensor
weight (list): list of rescaling weight given to each class
reduction (str): type of the reduction over the batch
ignore_index (int): mask out loss computation where labels = ignore_index
"""
if weight is not None and not torch.is_tensor(weight):
weight = torch.FloatTensor(weight)
super().__init__(weight=weight, reduction=reduction, ignore_index=ignore_index)
self._log_probs_dim = log_probs_ndim
@typecheck()
def forward(self, log_probs, labels, loss_mask=None):
"""
Args:
log_probs (float): output log probability tensor
labels (long): ground truth labels
loss_mask (bool/float/int): tensor to specify the masking
"""
log_probs_flatten = torch.flatten(log_probs, start_dim=0, end_dim=-2)
labels_flatten = torch.flatten(labels, start_dim=0, end_dim=-1)
if loss_mask is not None:
if loss_mask.dtype is not torch.bool:
loss_mask = loss_mask > 0.5
loss_mask_flatten = torch.flatten(loss_mask, start_dim=0, end_dim=-1)
log_probs_flatten = log_probs_flatten[loss_mask_flatten]
labels_flatten = labels_flatten[loss_mask_flatten]
if len(labels_flatten) == 0:
return super().forward(log_probs, torch.argmax(log_probs, dim=-1))
loss = super().forward(log_probs_flatten, labels_flatten)
return loss
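# --- Editor's sketch (not part of the original file): CrossEntropyLoss on a
# [batch, time, classes] logits tensor with a padding mask. Shapes and values are illustrative.
if __name__ == "__main__":
    loss_fn = CrossEntropyLoss(logits_ndim=3)
    logits = torch.randn(2, 5, 10)            # [B, T, C]
    labels = torch.randint(0, 10, (2, 5))     # [B, T]
    loss_mask = torch.ones(2, 5)
    loss_mask[:, -1] = 0                      # exclude the last (padded) step from the loss
    print(loss_fn(logits=logits, labels=labels, loss_mask=loss_mask))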
|
NeMo-main
|
nemo/collections/common/losses/cross_entropy.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import Tensor, nn
from nemo.core.classes import Serialization, Typing, typecheck
from nemo.core.neural_types import LabelsType, LossType, NeuralType, RegressionValuesType
__all__ = ['MSELoss']
class MSELoss(nn.MSELoss, Serialization, Typing):
"""
MSELoss
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"preds": NeuralType(tuple('B'), RegressionValuesType()),
"labels": NeuralType(tuple('B'), LabelsType()),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(self, reduction: str = 'mean'):
"""
Args:
reduction: type of the reduction over the batch
"""
super().__init__(reduction=reduction)
@typecheck()
def forward(self, preds: Tensor, labels: Tensor) -> Tensor:
"""
Args:
preds: output of the classifier
labels: ground truth labels
"""
return super().forward(preds, labels)
|
NeMo-main
|
nemo/collections/common/losses/mse_loss.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import torch
from nemo.core.classes import Loss, typecheck
from nemo.core.neural_types import LossType, NeuralType
__all__ = ['AggregatorLoss']
class AggregatorLoss(Loss):
"""
Sums several losses into one.
Args:
num_inputs: number of input losses
weights: a list of coefficients for merging losses
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
input_types = {}
for i in range(self._num_losses):
input_types["loss_" + str(i + 1)] = NeuralType(elements_type=LossType())
return input_types
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(self, num_inputs: int = 2, weights: List[float] = None):
super().__init__()
self._num_losses = num_inputs
if weights is not None and len(weights) != num_inputs:
raise ValueError("Length of weights should be equal to the number of inputs (num_inputs)")
self._weights = weights
@typecheck()
def forward(self, **kwargs):
values = [kwargs[x] for x in sorted(kwargs.keys())]
loss = torch.zeros_like(values[0])
for loss_idx, loss_value in enumerate(values):
if self._weights is not None:
loss = loss.add(loss_value, alpha=self._weights[loss_idx])
else:
loss = loss.add(loss_value)
return loss
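# --- Editor's sketch (not part of the original file): combining two scalar losses with
# per-loss weights. The keyword names must be loss_1, loss_2, ... as declared in input_types.
if __name__ == "__main__":
    agg = AggregatorLoss(num_inputs=2, weights=[1.0, 0.5])
    loss_1 = torch.tensor(2.0)
    loss_2 = torch.tensor(4.0)
    # expected: 1.0 * 2.0 + 0.5 * 4.0 = 4.0
    print(agg(loss_1=loss_1, loss_2=loss_2))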
|
NeMo-main
|
nemo/collections/common/losses/aggregator.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from nemo.core.classes import Loss
from nemo.core.classes.common import typecheck
from nemo.core.neural_types import LabelsType, LogitsType, LossType, NeuralType
from nemo.utils import logging
__all__ = ['MultiSimilarityLoss']
class MultiSimilarityLoss(Loss):
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {"logits": NeuralType(('B', 'D'), LogitsType()), "labels": NeuralType(('B'), LabelsType())}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(
self,
scale_pos: Optional[float] = 2.0, # Params found to work best in our experiments
scale_neg: Optional[float] = 40.0,
offset: Optional[float] = 0.5,
margin: Optional[float] = 0.1,
):
super().__init__()
self._scale_pos = scale_pos
self._scale_neg = scale_neg
self._offset = offset
self._margin = margin
self._epsilon = 1e-5
@typecheck()
def forward(self, logits, labels):
cos_sim = torch.matmul(logits, torch.t(logits))
losses = []
for i in range(logits.size(0)):
# mine hard pairs relative to anchor i
positive_sims = cos_sim[i][labels.eq(labels[i])]
positive_sims = positive_sims[positive_sims.lt(1 - self._epsilon)] # omit identical pairs
negative_sims = cos_sim[i][labels.ne(labels[i])]
if len(negative_sims) == 0 or len(positive_sims) == 0:
continue
# negatives that are more similar than the least-similar positive
hard_negatives = negative_sims[negative_sims.gt(min(positive_sims) - self._margin)]
# positives that are less similar than the most-similar negative
hard_positives = positive_sims[positive_sims.lt(max(negative_sims) + self._margin)]
if len(hard_negatives) == 0 or len(hard_positives) == 0:
continue
pos_term = (
1.0
/ self._scale_pos
* torch.log(1 + torch.sum(torch.exp(-self._scale_pos * (hard_positives - self._offset))))
)
neg_term = (
1.0
/ self._scale_neg
* torch.log(1 + torch.sum(torch.exp(self._scale_neg * (hard_negatives - self._offset))))
)
losses.append(pos_term + neg_term)
if len(losses) == 0:
loss = torch.zeros([], requires_grad=True).cuda()
logging.info(f'Encountered zero loss in multisimloss, loss = {loss}. No hard examples found in the batch')
else:
loss = torch.sum(torch.stack(losses)) / logits.size(0)
return loss
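# --- Editor's sketch (not part of the original file): the loss expects L2-normalized
# embeddings (so the matmul above acts as a cosine similarity) and integer class labels.
# The embeddings below are unit-norm and chosen so the batch contains hard pairs; note the
# zero-loss branch calls .cuda(), so a CPU-only run relies on hard pairs being present.
if __name__ == "__main__":
    msl = MultiSimilarityLoss()
    embeddings = torch.tensor([[1.0, 0.0], [0.6, 0.8], [0.8, 0.6], [0.0, 1.0]])
    labels = torch.tensor([0, 0, 1, 1])
    print(msl(logits=embeddings, labels=labels))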
|
NeMo-main
|
nemo/collections/common/losses/multi_similarity_loss.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.common.losses.aggregator import AggregatorLoss
from nemo.collections.common.losses.bce_logits_loss import BCEWithLogitsLoss
from nemo.collections.common.losses.cross_entropy import CrossEntropyLoss, NLLLoss
from nemo.collections.common.losses.mse_loss import MSELoss
from nemo.collections.common.losses.multi_similarity_loss import MultiSimilarityLoss
from nemo.collections.common.losses.smoothed_cross_entropy import SmoothedCrossEntropyLoss, SmoothedNLLLoss
from nemo.collections.common.losses.spanning_loss import SpanningLoss
|
NeMo-main
|
nemo/collections/common/losses/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch import nn
from nemo.core.classes import Loss, typecheck
from nemo.core.neural_types import ChannelType, LogitsType, LossType, NeuralType
__all__ = ['SpanningLoss']
class SpanningLoss(Loss):
"""
Implements the start and end loss of a span, e.g. for Question Answering.
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"logits": NeuralType(('B', 'T', 'D'), LogitsType()),
"start_positions": NeuralType(tuple('B'), ChannelType()),
"end_positions": NeuralType(tuple('B'), ChannelType()),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {
"loss": NeuralType(elements_type=LossType()),
"start_logits": NeuralType(('B', 'T'), LogitsType()),
"end_logits": NeuralType(('B', 'T'), LogitsType()),
}
def __init__(self,):
super().__init__()
@typecheck()
def forward(self, logits, start_positions, end_positions):
"""
Args:
logits: Output of question answering head, which is a token classifier.
start_positions: Ground truth start positions of the answer w.r.t.
input sequence. If question is unanswerable, this will be
pointing to start token, e.g. [CLS], of the input sequence.
end_positions: Ground truth end positions of the answer w.r.t.
input sequence. If question is unanswerable, this will be
pointing to start token, e.g. [CLS], of the input sequence.
"""
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
# If we are on multi-GPU, splitting adds a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss, start_logits, end_logits
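# --- Editor's sketch (not part of the original file): span loss over random QA-head logits.
# The last logits dimension holds the start/end scores; shapes and positions are illustrative.
if __name__ == "__main__":
    import torch
    span_loss = SpanningLoss()
    logits = torch.randn(2, 7, 2)              # [B, T, 2]
    start_positions = torch.tensor([1, 0])
    end_positions = torch.tensor([3, 0])
    loss, start_logits, end_logits = span_loss(
        logits=logits, start_positions=start_positions, end_positions=end_positions
    )
    print(loss, start_logits.shape, end_logits.shape)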
|
NeMo-main
|
nemo/collections/common/losses/spanning_loss.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from nemo.core.classes import Exportable, Loss, NeuralModule, typecheck
from nemo.core.neural_types import LabelsType, LogprobsType, LossType, MaskType, NeuralType
__all__ = ["SmoothedCrossEntropyLoss", "SmoothedNLLLoss"]
class SmoothedCrossEntropyLoss(Loss):
"""
Calculates Cross-entropy loss with label smoothing for a batch of sequences.
SmoothedCrossEntropyLoss:
1) excludes padding tokens from the loss calculation
2) allows the use of label smoothing regularization
3) allows calculating the loss for only the desired number of last tokens
4) per_token_reduction - if False, disables the per-token reduction
Args:
label_smoothing (float): label smoothing regularization coefficient
predict_last_k (int): parameter which sets the number of last tokens to calculate the loss for, for example
0: (default) calculate loss on the entire sequence (e.g., NMT)
1: calculate loss on the last token only (e.g., LM evaluation)
Intermediate values allow controlling the trade-off between eval
time (proportional to the number of batches) and eval performance
(proportional to the number of context tokens)
pad_id (int): padding id
eps (float): a small number to avoid division by zero
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"log_probs": NeuralType(("B", "T", "D"), LogprobsType()),
"labels": NeuralType(("B", "T"), LabelsType()),
"output_mask": NeuralType(("B", "T"), MaskType(), optional=True),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(
self,
pad_id: Optional[int] = None,
label_smoothing: Optional[float] = 0.0,
predict_last_k: Optional[int] = 0,
eps: float = 1e-6,
per_token_reduction: bool = True,
):
super().__init__()
self._pad_id = pad_id
self._eps = eps
self._predict_last_k = predict_last_k
self._label_smoothing = label_smoothing
self._per_token_reduction = per_token_reduction
@typecheck()
def forward(self, log_probs, labels, output_mask=None):
"""
Args:
log_probs: float tensor of shape batch_size x seq_len x vocab_size, values should be log probabilities
labels: int tensor of shape batch_size x seq_len
output_mask: binary tensor of shape batch_size x seq_len
eps: epsilon param to avoid divide by zero in loss calculation
"""
if output_mask is None and self._pad_id is None:
raise ValueError("Both output_mask and pad_id are None")
if output_mask is None and self._pad_id is not None:
output_mask = (labels != self._pad_id).to(log_probs.dtype)
if output_mask.dtype is not log_probs.dtype:
output_mask = output_mask.to(log_probs.dtype)
batch_size, seq_len, vocab_size = log_probs.size()
smoothing = vocab_size * self._label_smoothing / (vocab_size - 1)
target_log_probs = log_probs.gather(2, labels.unsqueeze(2)).squeeze(2)
smoothing_log_probs = log_probs.mean(dim=-1)
neg_log_likelihood = (1.0 - smoothing) * target_log_probs + smoothing * smoothing_log_probs
neg_log_likelihood = neg_log_likelihood[:, -self._predict_last_k :]
output_mask = output_mask[:, -self._predict_last_k :]
# when False avoid per token reduction
if self._per_token_reduction:
neg_log_likelihood = -torch.sum(neg_log_likelihood * output_mask)
neg_log_likelihood = neg_log_likelihood / (output_mask.sum() + self._eps)
else:
neg_log_likelihood = -(neg_log_likelihood * output_mask)
return neg_log_likelihood
class SmoothedNLLLoss(NeuralModule, Exportable):
"""
Calculates the negative log likelihood for sequence input and applies label smoothing (if set).
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"log_probs": NeuralType(("B", "T", "D"), LogprobsType()),
"labels": NeuralType(("B", "T"), LabelsType()),
"output_mask": NeuralType(("B", "T"), MaskType(), optional=True),
"lengths": NeuralType(("B"), LabelsType(), optional=True),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(self, reduction='mean', label_smoothing=0.0, eps=1e-8, **kwargs):
super().__init__()
self.reduction = reduction
self.label_smoothing = label_smoothing
self.nll_loss = torch.nn.NLLLoss(reduction='none', **kwargs)
self.eps = eps # small constant to avoid divide by zero
@typecheck()
def forward(self, log_probs, labels, output_mask=None, lengths=None):
"""
Params:
- log_probs: BxTxC
- labels: BxT
- output_mask: BxT
- lengths: B
"""
if output_mask is None and lengths is None:
output_mask = torch.ones_like(log_probs).float()
elif output_mask is None and lengths is not None:
output_mask = torch.arange(log_probs.size(1), device=log_probs.device)[None, :] < lengths[:, None]
output_mask = output_mask.float()
log_probs = log_probs.transpose(1, 2) # BxTxC -> BxCxT
loss = output_mask * self.nll_loss(log_probs, labels)
batch_size = loss.size(0)
if self.reduction == "mean":
loss = loss.sum() / (torch.sum(output_mask) + self.eps)
elif self.reduction == "batchmean":
loss = loss.sum() / batch_size
elif self.reduction == "batch":
loss = loss.reshape(batch_size, -1).sum(1) / (output_mask.reshape(batch_size, -1).sum(1) + self.eps)
if self.label_smoothing == 0.0:
return loss
else:
# Regularizing Neural Networks by Penalizing Confident Output Distributions.
# https://arxiv.org/abs/1701.06548
loss_reg = torch.mean(log_probs, dim=1) * output_mask
if self.reduction == "mean":
loss_reg = torch.sum(loss_reg) / torch.sum(output_mask)
elif self.reduction == "batchmean":
loss_reg = torch.sum(loss_reg) / labels.shape[0]
elif self.reduction == "batch":
loss_reg = loss_reg.sum(1) / output_mask.sum(1)
return -self.label_smoothing * loss_reg + (1 - self.label_smoothing) * loss
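# --- Editor's sketch (not part of the original file): label-smoothed loss over a batch of
# padded sequences, with the mask derived from pad_id. Vocabulary size and lengths are illustrative.
if __name__ == "__main__":
    loss_fn = SmoothedCrossEntropyLoss(pad_id=0, label_smoothing=0.1)
    log_probs = torch.log_softmax(torch.randn(2, 6, 32), dim=-1)   # [B, T, V]
    labels = torch.randint(1, 32, (2, 6))
    labels[:, -2:] = 0                                             # last two steps are padding
    print(loss_fn(log_probs=log_probs, labels=labels))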
|
NeMo-main
|
nemo/collections/common/losses/smoothed_cross_entropy.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import torch
from torch import nn
from nemo.core.classes import Serialization, Typing, typecheck
from nemo.core.neural_types import LabelsType, LogitsType, LossType, MaskType, NeuralType
__all__ = ["BCEWithLogitsLoss"]
class BCEWithLogitsLoss(nn.BCEWithLogitsLoss, Serialization, Typing):
"""
BCEWithLogitsLoss
https://pytorch.org/docs/1.9.1/generated/torch.nn.BCEWithLogitsLoss.html
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
return {
"logits": NeuralType(["B"] + ["ANY"] * (self._logits_dim - 1), LogitsType()),
"labels": [NeuralType(["B"] + ["ANY"] * (self._logits_dim - 2), LabelsType())],
"loss_mask": NeuralType(["B"] + ["ANY"] * (self._logits_dim - 2), MaskType(), optional=True),
}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(
self,
logits_ndim: int = 2,
weight: torch.Tensor = None,
reduction: str = "mean",
pos_weight: torch.Tensor = None,
):
"""
Args:
logits_ndim: number of dimensions (or rank) of the logits tensor
weight: list of rescaling weight given to each class
reduction: type of the reduction over the batch
pos_weight: weight given to positive samples
"""
if pos_weight is not None and not torch.is_tensor(pos_weight):
pos_weight = torch.FloatTensor(pos_weight)
super().__init__(weight=weight, pos_weight=pos_weight, reduction=reduction)
self._logits_dim = logits_ndim
@typecheck()
def forward(self, logits: float, labels: List[int], loss_mask: torch.Tensor = None):
"""
Args:
logits: output of the classifier
labels: ground truth labels
loss_mask: optional mask; accepted for API compatibility but not used by this loss
"""
labels = torch.stack(labels)
labels = labels.t().float()
return super().forward(logits, labels)
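# --- Editor's sketch (not part of the original file): multi-label loss over [B, C] logits.
# As written, forward() expects `labels` as a list that stacks to [C, B] and is transposed
# to [B, C] internally; the layout below simply matches that expectation.
if __name__ == "__main__":
    bce = BCEWithLogitsLoss(logits_ndim=2)
    logits = torch.randn(4, 3)                                     # [B=4, C=3]
    labels = [torch.randint(0, 2, (4,)).float() for _ in range(3)]  # C tensors of shape [B]
    print(bce(logits=logits, labels=labels))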
|
NeMo-main
|
nemo/collections/common/losses/bce_logits_loss.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from sacremoses import MosesDetokenizer, MosesPunctNormalizer, MosesTokenizer
class IndicProcessor:
"""
Tokenizer, Detokenizer and Normalizer utilities in Indic Languages.
Currently supports: 'hi'
"""
def __init__(self, lang_id: str):
if lang_id != 'hi':
raise NotImplementedError
self.moses_tokenizer = MosesTokenizer(lang=lang_id)
self.moses_detokenizer = MosesDetokenizer(lang=lang_id)
self.normalizer = MosesPunctNormalizer(lang=lang_id)
def detokenize(self, tokens: List[str]) -> str:
"""
Detokenizes a list of tokens
Args:
tokens: list of strings as tokens
Returns:
detokenized string
"""
return self.moses_detokenizer.detokenize(tokens)
def tokenize(self, text: str):
return text
def normalize(self, text: str):
return text
|
NeMo-main
|
nemo/collections/common/tokenizers/indic_tokenizers.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, List, Optional, Union
import numpy as np
import sentencepiece
from nemo.collections.common.parts.utils import if_exist
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.utils import logging
__all__ = ['SentencePieceTokenizer', 'create_spt_model']
class SentencePieceTokenizer(TokenizerSpec):
"""
SentencePieceTokenizer, see https://github.com/google/sentencepiece.
Args:
model_path: path to sentence piece tokenizer model. To create the model use create_spt_model()
special_tokens: either list of special tokens or dictionary of token name to token value
legacy: when set to True, the previous behavior of the SentencePiece wrapper will be restored,
including the possibility to add special tokens inside the wrapper.
"""
def __init__(
self, model_path: str, special_tokens: Optional[Union[Dict[str, str], List[str]]] = None, legacy: bool = False
):
if not model_path or not os.path.exists(model_path):
raise ValueError(f"model_path: {model_path} is invalid")
self.tokenizer = sentencepiece.SentencePieceProcessor()
self.tokenizer.Load(model_path)
self.original_vocab_size = self.tokenizer.get_piece_size()
self.vocab_size = self.tokenizer.get_piece_size()
self.legacy = legacy
self.special_token_to_id = {}
self.id_to_special_token = {}
if special_tokens:
if not self.legacy:
raise ValueError(
"Special tokens must be None when legacy is set to False. Provide special tokens at train time."
)
self.add_special_tokens(special_tokens)
self.space_sensitive = self.text_to_tokens('x y') != self.text_to_tokens('x') + self.text_to_tokens('y')
def text_to_tokens(self, text):
if self.legacy:
tokens = []
idx = 0
last_idx = 0
while 1:
indices = {}
for token in self.special_token_to_id:
try:
indices[token] = text[idx:].index(token)
except ValueError:
continue
if len(indices) == 0:
break
next_token = min(indices, key=indices.get)
next_idx = idx + indices[next_token]
tokens.extend(self.tokenizer.encode_as_pieces(text[idx:next_idx]))
tokens.append(next_token)
idx = next_idx + len(next_token)
tokens.extend(self.tokenizer.encode_as_pieces(text[idx:]))
return tokens
return self.tokenizer.encode_as_pieces(text)
def text_to_ids(self, text):
if self.legacy:
ids = []
idx = 0
last_idx = 0
while 1:
indices = {}
for token in self.special_token_to_id:
try:
indices[token] = text[idx:].index(token)
except ValueError:
continue
if len(indices) == 0:
break
next_token = min(indices, key=indices.get)
next_idx = idx + indices[next_token]
ids.extend(self.tokenizer.encode_as_ids(text[idx:next_idx]))
ids.append(self.special_token_to_id[next_token])
idx = next_idx + len(next_token)
ids.extend(self.tokenizer.encode_as_ids(text[idx:]))
return ids
return self.tokenizer.encode_as_ids(text)
def tokens_to_text(self, tokens):
if isinstance(tokens, np.ndarray):
tokens = tokens.tolist()
return self.tokenizer.decode_pieces(tokens)
def ids_to_text(self, ids):
if isinstance(ids, np.ndarray):
ids = ids.tolist()
if self.legacy:
text = ""
last_i = 0
for i, id in enumerate(ids):
if id in self.id_to_special_token:
text += self.tokenizer.decode_ids(ids[last_i:i]) + " "
text += self.id_to_special_token[id] + " "
last_i = i + 1
text += self.tokenizer.decode_ids(ids[last_i:])
return text.strip()
return self.tokenizer.decode_ids(ids)
def token_to_id(self, token):
if self.legacy and token in self.special_token_to_id:
return self.special_token_to_id[token]
return self.tokenizer.piece_to_id(token)
def ids_to_tokens(self, ids):
tokens = []
for id in ids:
if id >= self.original_vocab_size:
tokens.append(self.id_to_special_token[id])
else:
tokens.append(self.tokenizer.id_to_piece(id))
return tokens
def tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]:
if isinstance(tokens, str):
tokens = [tokens]
ids = []
for token in tokens:
ids.append(self.token_to_id(token))
return ids
def add_special_tokens(self, special_tokens):
if not self.legacy:
raise AttributeError("Special Token addition does not work when legacy is set to False.")
if isinstance(special_tokens, list):
for token in special_tokens:
if (
self.tokenizer.piece_to_id(token) == self.tokenizer.unk_id()
and token not in self.special_token_to_id
):
self.special_token_to_id[token] = self.vocab_size
self.id_to_special_token[self.vocab_size] = token
self.vocab_size += 1
elif isinstance(special_tokens, dict):
for token_name, token in special_tokens.items():
setattr(self, token_name, token)
if (
self.tokenizer.piece_to_id(token) == self.tokenizer.unk_id()
and token not in self.special_token_to_id
):
self.special_token_to_id[token] = self.vocab_size
self.id_to_special_token[self.vocab_size] = token
self.vocab_size += 1
@property
def pad_id(self):
if self.legacy:
pad_id = self.tokens_to_ids([self.pad_token])[0]
else:
pad_id = self.tokenizer.pad_id()
return pad_id
@property
def bos_id(self):
if self.legacy:
bos_id = self.tokens_to_ids([self.bos_token])[0]
else:
bos_id = self.tokenizer.bos_id()
return bos_id
@property
def eos_id(self):
if self.legacy:
eos_id = self.tokens_to_ids([self.eos_token])[0]
else:
eos_id = self.tokenizer.eos_id()
return eos_id
@property
def sep_id(self):
if self.legacy:
return self.tokens_to_ids([self.sep_token])[0]
else:
raise NameError("Use function token_to_id to retrieve special tokens other than unk, pad, bos, and eos.")
@property
def cls_id(self):
if self.legacy:
return self.tokens_to_ids([self.cls_token])[0]
else:
raise NameError("Use function token_to_id to retrieve special tokens other than unk, pad, bos, and eos.")
@property
def mask_id(self):
if self.legacy:
return self.tokens_to_ids([self.mask_token])[0]
else:
raise NameError("Use function token_to_id to retrieve special tokens other than unk, pad, bos, and eos.")
@property
def unk_id(self):
return self.tokenizer.unk_id()
@property
def additional_special_tokens_ids(self):
"""Returns a list of the additional special tokens (excluding bos, eos, pad, unk). Used to return sentinel tokens for e.g. T5."""
special_tokens = set(
[self.bos_token, self.eos_token, self.pad_token, self.mask_token, self.cls_token, self.sep_token]
)
return [v for k, v in self.special_token_to_id.items() if k not in special_tokens]
@property
def vocab(self):
main_vocab = [self.tokenizer.id_to_piece(id) for id in range(self.tokenizer.get_piece_size())]
special_tokens = [
self.id_to_special_token[self.original_vocab_size + i]
for i in range(self.vocab_size - self.original_vocab_size)
]
return main_vocab + special_tokens
def create_spt_model(
data_file: str,
vocab_size: int,
sample_size: int,
do_lower_case: bool,
tokenizer_type: str = 'unigram',
output_dir: Optional[str] = None,
character_coverage: float = 1.0,
train_extremely_large_corpus: bool = False,
max_sentencepiece_length: int = -1,
bos: bool = False,
eos: bool = False,
pad: bool = False,
control_symbols: List[str] = None,
user_defined_symbols: List[str] = None,
byte_fallback: bool = False,
split_digits: bool = False,
split_by_whitespace: bool = True,
split_by_unicode_script: bool = True,
):
"""
Creates sentence piece tokenizer model from data file.
Args:
data_file: data file
vocab_size: vocabulary size
sample_size: maximum number of sentences the trainer loads
do_lower_case: if text should be lower cased before tokenizer model is created
character_coverage: float value between 0 and 1 (as a percentage). For languages with a vast charset,
can be < 1.0, but for all other languages, it should be set as 1.0
output_dir: folder to save created tokenizer model. If not specified will store model at data_file/../spt folder
train_extremely_large_corpus: If training on huge datasets, pass this flag to allow SentencePiece
to build the tokenizer.
max_sentencepiece_length: Limits the maximum length of the SentencePiece subword that can be constructed.
By default, no limit is placed.
bos: when True, bos token "<s>" is added to the vocabulary.
eos: when True, eos token "</s>" is added to the vocabulary.
pad: when True, pad token "<pad>" is added to the vocabulary.
control_symbols: control symbols to add to tokenizer, as defined by sentencepiece.
These tokens get removed at decode time and are not encoded from the text - they can only be added to the input programmatically.
user_defined_symbols: user symbols to add to tokenizer, as defined by sentencepiece.
These tokens remain in the decoded text and are encoded automatically when present in the input text.
byte_fallback: If a piece would map to <unk>, fall back to a byte sequence of the character.
split_digits: If true, digits are split into individual tokens.
split_by_whitespace: Whether to respect white space while creating subwords. If False, will learn merges across whitespace.
split_by_unicode_script: Whether to include multiple Unicode scripts. An example is Arabic diacritics, which are considered part of the letter (عِدَّةُ)
"""
if not data_file or not os.path.exists(data_file):
raise ValueError(f"data_file must be valid file path, but got {data_file}")
data_dir = os.path.dirname(data_file)
vocab = []
special_tokens = ["<s>", "</s>", "<pad>", "<unk>"]
if not output_dir:
output_dir = f'{data_dir}/spt'
if if_exist(output_dir, ['tokenizer.model']):
logging.info(f"tokenizer model {output_dir}/tokenizer.model already exists")
return f'{output_dir}/tokenizer.model', f'{output_dir}/vocab.txt'
logging.info(f'Processing {data_file} and storing at {output_dir}')
os.makedirs(output_dir, exist_ok=True)
cmd = (
f"--input={data_file} --model_prefix={output_dir}/tokenizer "
f"--vocab_size={vocab_size} "
f"--shuffle_input_sentence=true --hard_vocab_limit=false "
f"--model_type={tokenizer_type} "
f"--character_coverage={character_coverage}"
)
pad_id = 3
if not bos:
pad_id -= 1
cmd += " --bos_id=-1"
if not eos:
pad_id -= 1
cmd += " --eos_id=-1"
if pad:
cmd += f" --pad_id={pad_id}"
if control_symbols:
control_string = (",").join(control_symbols)
cmd += f" --control_symbols={control_string}"
special_tokens += control_symbols
if user_defined_symbols:
user_string = (",").join(user_defined_symbols)
cmd += f" --user_defined_symbols={user_string}"
special_tokens += user_defined_symbols
if do_lower_case:
cmd += " --normalization_rule_name=nmt_nfkc_cf"
if sample_size > 0:
cmd += f" --input_sentence_size={sample_size}"
if train_extremely_large_corpus:
cmd += " --train_extremely_large_corpus=true"
if max_sentencepiece_length >= 0:
cmd += f" --max_sentencepiece_length={max_sentencepiece_length}"
if byte_fallback:
cmd += " --byte_fallback=true"
if split_digits:
cmd += " --split_digits=true"
if not split_by_whitespace:
cmd += " --split_by_whitespace=false"
if not split_by_unicode_script:
cmd += " --split_by_unicode_script=false"
sentencepiece.SentencePieceTrainer.Train(cmd)
# Add BERT control symbols
tokens = []
with open(f"{output_dir}/tokenizer.vocab", "r") as f:
# Read tokens from each line and parse for vocab
for line in f:
piece = line.split("\t")[0]
if piece in special_tokens:
# skip special tokens
continue
token = piece[1:] if piece.startswith("▁") else f"##{piece}"
if len(token) > 0:
tokens.append(token)
else:
tokens.append(piece[0])
vocab.extend(tokens)
# Save vocabulary to output file
vocab_file = f'{output_dir}/vocab.txt'
with open(vocab_file, "w") as f:
for token in vocab:
f.write(f"{token}\n")
return f'{output_dir}/tokenizer.model', vocab_file
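# --- Editor's sketch (not part of the original file): train a tiny BPE model on a throwaway
# corpus and round-trip a sentence. Corpus contents and vocabulary size are illustrative.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        corpus = os.path.join(tmp, "corpus.txt")
        with open(corpus, "w") as f:
            f.write("hello world\nthis is a tiny sentencepiece corpus\n" * 50)
        model_path, vocab_file = create_spt_model(
            data_file=corpus,
            vocab_size=64,
            sample_size=-1,
            do_lower_case=True,
            tokenizer_type='bpe',
            output_dir=os.path.join(tmp, 'spt'),
        )
        tokenizer = SentencePieceTokenizer(model_path=model_path)
        ids = tokenizer.text_to_ids("hello world")
        print(ids, tokenizer.ids_to_text(ids))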
|
NeMo-main
|
nemo/collections/common/tokenizers/sentencepiece_tokenizer.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from sacremoses import MosesDetokenizer, MosesPunctNormalizer, MosesTokenizer
class MosesProcessor:
"""
Tokenizer, Detokenizer and Normalizer utilities in Moses
"""
def __init__(self, lang_id: str):
self.moses_tokenizer = MosesTokenizer(lang=lang_id)
self.moses_detokenizer = MosesDetokenizer(lang=lang_id)
self.normalizer = MosesPunctNormalizer(lang=lang_id)
def detokenize(self, tokens: List[str]) -> str:
"""
Detokenizes a list of tokens
Args:
tokens: list of strings as tokens
Returns:
detokenized string
"""
return self.moses_detokenizer.detokenize(tokens)
def tokenize(self, text: str):
"""
Tokenizes text using Moses and returns the tokenized string.
"""
return self.moses_tokenizer.tokenize(text, escape=False, return_str=True)
def normalize(self, text: str):
return self.normalizer.normalize(text)
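# --- Editor's sketch (not part of the original file): normalize, tokenize and detokenize a
# short English sentence. Relies on the sacremoses classes imported at the top of this file.
if __name__ == "__main__":
    processor = MosesProcessor(lang_id='en')
    text = processor.normalize('Hello,   "world" !')
    tokenized = processor.tokenize(text)       # returns a single space-separated string
    print(tokenized)
    print(processor.detokenize(tokenized.split()))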
|
NeMo-main
|
nemo/collections/common/tokenizers/moses_tokenizers.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The MIT License (MIT)
# Copyright (c) 2016 The-Orizon
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The detokenize function is based on : https://github.com/The-Orizon/nlputils/blob/master/detokenizer.py
import re
from typing import List
import jieba
import opencc
from pangu import spacing
class ChineseProcessor:
"""
Tokenizer, Detokenizer and Normalizer utilities for Chinese.
"""
def __init__(self):
self.normalizer = opencc.OpenCC('t2s.json')
def normalize(self, text: str) -> str:
return self.normalizer.convert(text)
def detokenize(self, text: List[str]) -> str:
RE_WS_IN_FW = re.compile(
r'([\u2018\u2019\u201c\u201d\u2e80-\u312f\u3200-\u32ff\u3400-\u4dbf\u4e00-\u9fff\uf900-\ufaff\uff00-\uffef])\s+(?=[\u2018\u2019\u201c\u201d\u2e80-\u312f\u3200-\u32ff\u3400-\u4dbf\u4e00-\u9fff\uf900-\ufaff\uff00-\uffef])'
)
detokenize = lambda s: spacing(RE_WS_IN_FW.sub(r'\1', s)).strip()
return detokenize(' '.join(text))
def tokenize(self, text: str) -> str:
text = jieba.cut(text)
return ' '.join(text)
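# --- Editor's sketch (not part of the original file): convert traditional to simplified
# characters, segment with jieba, then detokenize. The sample sentence is illustrative.
if __name__ == "__main__":
    processor = ChineseProcessor()
    text = processor.normalize('我們喜歡自然語言處理')
    tokenized = processor.tokenize(text)       # space-separated jieba segments
    print(tokenized)
    print(processor.detokenize(tokenized.split()))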
|
NeMo-main
|
nemo/collections/common/tokenizers/chinese_tokenizers.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import youtokentome as yttm
from nemo.collections.common.tokenizers import TokenizerSpec
__all__ = ['YouTokenToMeTokenizer']
class YouTokenToMeTokenizer(TokenizerSpec):
def __init__(self, model_path, bpe_dropout=0.0, legacy=False, r2l=False):
model_path = Path(model_path).expanduser()
self.tokenizer = yttm.BPE(model=str(model_path))
self.vocab_size = len(self.tokenizer.vocab())
self.special_tokens = self.tokens_to_ids(["<PAD>", "<UNK>", "<BOS>", "<EOS>"])
self.bpe_dropout = bpe_dropout
self.legacy = legacy
self.r2l = r2l
def text_to_tokens(self, text):
return self.tokenizer.encode(
text, output_type=yttm.OutputType.SUBWORD, dropout_prob=self.bpe_dropout, reverse=self.r2l
)
def tokens_to_text(self, tokens):
return self.ids_to_text(self.tokens_to_ids(tokens))
def text_to_ids(self, text):
return self.tokenizer.encode(
text, output_type=yttm.OutputType.ID, dropout_prob=self.bpe_dropout, reverse=self.r2l
)
def ids_to_text(self, ids):
ids_ = [id_ for id_ in ids if id_ not in self.special_tokens]
if self.r2l:
ids_ = ids_[::-1]
return self.tokenizer.decode([ids_])[0]
def tokens_to_ids(self, tokens):
return [self.tokenizer.subword_to_id(token) for token in tokens]
def ids_to_tokens(self, ids):
if self.legacy:
ids_ = [id_ for id_ in ids if id_ not in self.special_tokens]
else:
ids_ = ids
return [self.tokenizer.id_to_subword(id_) for id_ in ids_]
@property
def pad_id(self):
return self.tokenizer.subword_to_id("<PAD>")
@property
def bos_id(self):
return self.tokenizer.subword_to_id("<BOS>")
@property
def eos_id(self):
return self.tokenizer.subword_to_id("<EOS>")
@property
def unk_id(self):
return self.tokenizer.subword_to_id("<UNK>")
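# --- Editor's sketch (not part of the original file): train a small YouTokenToMe BPE model
# on a throwaway corpus and round-trip a sentence. The yttm.BPE.train call follows the
# youtokentome API; corpus contents and vocabulary size are illustrative.
if __name__ == "__main__":
    import os
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        corpus = os.path.join(tmp, "corpus.txt")
        with open(corpus, "w") as f:
            f.write("a tiny corpus for byte pair encoding\nanother line of plain text\n" * 100)
        model_file = os.path.join(tmp, "bpe.model")
        yttm.BPE.train(data=corpus, model=model_file, vocab_size=60)
        tokenizer = YouTokenToMeTokenizer(model_path=model_file)
        ids = tokenizer.text_to_ids("a tiny corpus")
        print(ids, tokenizer.ids_to_text(ids))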
|
NeMo-main
|
nemo/collections/common/tokenizers/youtokentome_tokenizer.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from typing import List
import numpy
from nemo.collections.common.tokenizers.column_coder import ColumnCodes
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
__all__ = ['TabularTokenizer']
END_OF_TEXT = '<|endoftext|>'
NEW_LINE = '\n'
def find_index_of(list_input, item):
output = -1
try:
output = list_input.index(item)
except ValueError:
pass
return output
class TabularTokenizer(TokenizerSpec):
def __init__(self, coder, special_tokens=[END_OF_TEXT, NEW_LINE], delimiter=','):
if isinstance(coder, ColumnCodes):
self.code_column: ColumnCodes = coder
else:
with open(coder, 'rb') as handle:
self.code_column: ColumnCodes = pickle.load(handle)
self.num_columns = len(self.code_column.columns)
self.special_tokens = {}
self.special_tokens_decoder = {}
self.add_special_tokens(special_tokens)
self.delimiter = delimiter
self.eod_id = self.special_tokens[END_OF_TEXT]
self.eos_id = self.eod_id
self.bos_id = self.eos_id
def __len__(self):
return self.vocab_size
@property
def vocab_size(self):
return max(self.special_tokens_decoder.keys()) + 1
def text_to_ids(self, text):
return self.encode(text)
def ids_to_text(self, token_ids):
return self.decode(token_ids)
@property
def eod(self):
return self.eod_id
@property
def eor(self):
return self.special_tokens[NEW_LINE]
def add_special_tokens(self, special_tokens):
""" Add a list of additional tokens to the encoder.
The additional tokens are indexed starting from the last
index of the
current vocabulary in the order of the `special_tokens` list.
"""
if not special_tokens:
self.special_tokens = {}
self.special_tokens_decoder = {}
return
new = dict(
(tok, self.code_column.vocab_size + i)
for i, tok in enumerate(special_tokens)
if tok not in self.special_tokens
)
self.special_tokens.update(new)
self.special_tokens_decoder = {v: k for k, v in self.special_tokens.items()}
def text_to_tokens(self, text):
""" Tokenize a string. """
tokens = []
rows = text.split(NEW_LINE)
num_rows = len(rows)
for row_id in range(num_rows):
row = rows[row_id]
if row == '':
continue
fields = row.split(self.delimiter)
for f in fields:
splits = f.split(END_OF_TEXT)
if len(splits) == 1:
tokens.append(f.strip())
elif len(splits) == 2:
if splits[0] != '':
tokens.append(splits[0].strip())
tokens.append(END_OF_TEXT)
if splits[1] != '':
tokens.append(splits[1].strip())
else:
raise ValueError("delimiter error")
if row_id != num_rows - 1:
tokens.append(NEW_LINE)
return tokens
def tokens_to_ids(self, tokens: List[str]):
""" Converts a sequence of tokens into ids using the vocab. """
ids = []
cindex = 0
if NEW_LINE in tokens:
idd = tokens.index(NEW_LINE)
cindex = (self.num_columns - idd) % self.num_columns
for token in tokens:
if token in self.special_tokens:
ids.append(self.special_tokens[token])
else:
index = cindex % self.num_columns
column = self.code_column.columns[index]
ids.extend(self.code_column.encode(column, token))
cindex += 1
return ids
def ids_to_tokens(self, ids, skip_special_tokens=False):
"""Converts a sequence of ids in Tabular tokens using the vocab."""
tokens = []
sizes = self.code_column.sizes
ids_size = sum(sizes)
cindex = 0
eor_pos = find_index_of(ids, self.eor)
eod_pos = find_index_of(ids, self.eod)
if eor_pos >= 0 and eod_pos >= 0:
idd = min(eor_pos, eod_pos)
cindex = (ids_size - idd) % ids_size
elif eor_pos >= 0 and eod_pos < 0:
idd = eor_pos
cindex = (ids_size - idd) % ids_size
elif eod_pos >= 0 and eor_pos < 0:
idd = eod_pos
cindex = (ids_size - idd) % ids_size
cum_sizes = numpy.cumsum(sizes)
old_column_index = -1
token_ids = []
for i in ids:
if i in self.special_tokens_decoder:
if not skip_special_tokens:
tokens.append(self.special_tokens_decoder[i])
else:
index = cindex % ids_size
column_index = numpy.where(index < cum_sizes)[0][0]
column = self.code_column.columns[column_index]
if old_column_index != column_index:
token_ids = [i]
old_column_index = column_index
else:
token_ids.append(i)
if len(token_ids) == sizes[column_index]:
tokens.append(self.code_column.decode(column, token_ids))
cindex += 1
return tokens
def encode(self, text):
return self.tokens_to_ids(self.text_to_tokens(text))
def decode(self, token_ids):
tokens = self.ids_to_tokens(token_ids, skip_special_tokens=False)
return self.tokens_to_text(tokens)
def tokens_to_text(self, tokens):
all_lines = []
line = []
for token in tokens:
if token == END_OF_TEXT or token == NEW_LINE:
if len(line) != 0:
line_text = self.delimiter.join(line)
all_lines.append(line_text)
all_lines.append(token)
line = []
else:
line.append(token)
if len(line) != 0:
# remaining items
line_text = self.delimiter.join(line)
all_lines.append(line_text)
text = "".join(all_lines)
return text
|
NeMo-main
|
nemo/collections/common/tokenizers/tabular_tokenizer.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import List
import ipadic
import MeCab
from pangu import spacing
from sacremoses import MosesDetokenizer, MosesPunctNormalizer, MosesTokenizer
class EnJaProcessor:
"""
Tokenizer, Detokenizer and Normalizer utilities for Japanese & English
Args:
lang_id: One of ['en', 'ja'].
"""
def __init__(self, lang_id: str):
self.lang_id = lang_id
self.moses_tokenizer = MosesTokenizer(lang=lang_id)
self.moses_detokenizer = MosesDetokenizer(lang=lang_id)
self.normalizer = MosesPunctNormalizer(
lang=lang_id, pre_replace_unicode_punct=True, post_remove_control_chars=True
)
def detokenize(self, tokens: List[str]) -> str:
"""
Detokenizes a list of tokens
Args:
tokens: list of strings as tokens
Returns:
detokenized Japanese or English string
"""
return self.moses_detokenizer.detokenize(tokens)
def tokenize(self, text) -> str:
"""
Tokenizes text using Moses. Returns a string of tokens.
"""
tokens = self.moses_tokenizer.tokenize(text)
return ' '.join(tokens)
def normalize(self, text) -> str:
# Normalization doesn't handle Japanese periods correctly;
# '。' becomes '.'.
if self.lang_id == 'en':
return self.normalizer.normalize(text)
else:
return text
class JaMecabProcessor:
"""
Tokenizer, Detokenizer and Normalizer utilities for Japanese MeCab & English
"""
def __init__(self):
self.mecab_tokenizer = MeCab.Tagger(ipadic.MECAB_ARGS + " -Owakati")
def detokenize(self, text: List[str]) -> str:
RE_WS_IN_FW = re.compile(
r'([\u2018\u2019\u201c\u201d\u2e80-\u312f\u3200-\u32ff\u3400-\u4dbf\u4e00-\u9fff\uf900-\ufaff\uff00-\uffef])\s+(?=[\u2018\u2019\u201c\u201d\u2e80-\u312f\u3200-\u32ff\u3400-\u4dbf\u4e00-\u9fff\uf900-\ufaff\uff00-\uffef])'
)
detokenize = lambda s: spacing(RE_WS_IN_FW.sub(r'\1', s)).strip()
return detokenize(' '.join(text))
def tokenize(self, text) -> str:
"""
Tokenizes text using MeCab. Returns a string of tokens.
"""
return self.mecab_tokenizer.parse(text).strip()
def normalize(self, text) -> str:
return text
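# --- Editor's sketch (not part of the original file): English processing with EnJaProcessor
# and Japanese segmentation with JaMecabProcessor. MeCab and the ipadic dictionary must be
# installed (they are imported at the top of this file); the sample sentences are illustrative.
if __name__ == "__main__":
    en = EnJaProcessor(lang_id='en')
    print(en.tokenize(en.normalize("Hello, world!")))
    ja = JaMecabProcessor()
    segmented = ja.tokenize('すもももももももものうち')
    print(segmented)
    print(ja.detokenize(segmented.split()))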
|
NeMo-main
|
nemo/collections/common/tokenizers/en_ja_tokenizers.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
""" Code from
https://github.com/NVIDIA/DeepLearningExamples/blob/
master/PyTorch/Translation/Transformer/fairseq/tokenizer.py
"""
import re
import sys
import unicodedata
from collections import defaultdict
__all__ = ['get_unicode_categories', 'tokenize_en']
def get_unicode_categories():
cats = defaultdict(list)
for c in map(chr, range(sys.maxunicode + 1)):
cats[unicodedata.category(c)].append(c)
return cats
NUMERICS = ''.join(get_unicode_categories()['No'])
def tokenize_en(line):
line = line.strip()
line = ' ' + line + ' '
# remove ASCII junk
line = re.sub(r'\s+', ' ', line)
line = re.sub(r'[\x00-\x1F]', '', line)
# fix whitespaces
line = re.sub(r'\ +', ' ', line)
line = re.sub('^ ', '', line)
line = re.sub(' $', '', line)
# separate other special characters
line = re.sub(r'([^\s\.\'\`\,\-\w]|[_' + NUMERICS + '])', r' \g<1> ', line)
line = re.sub(r'(\w)\-(?=\w)', r'\g<1> @-@ ', line)
# multidots stay together
line = re.sub(r'\.([\.]+)', r' DOTMULTI\g<1>', line)
while re.search(r'DOTMULTI\.', line):
line = re.sub(r'DOTMULTI\.([^\.])', r'DOTDOTMULTI \g<1>', line)
line = re.sub(r'DOTMULTI\.', r'DOTDOTMULTI', line)
# separate out "," except if within numbers (5,300)
line = re.sub(r'([\D])[,]', r'\g<1> , ', line)
line = re.sub(r'[,]([\D])', r' , \g<1>', line)
# separate "," after a number if it's the end of sentence
line = re.sub(r'(\d)[,]$', r'\g<1> ,', line)
# split contractions right
line = re.sub(r'([\W\d])[\']([\W\d])', r'\g<1> \' \g<2>', line)
line = re.sub(r'(\W)[\']([\w\D])', r'\g<1> \' \g<2>', line)
line = re.sub(r'([\w\D])[\']([\W\d])', r'\g<1> \' \g<2>', line)
line = re.sub(r'([\w\D])[\']([\w\D])', r'\g<1> \'\g<2>', line)
# special case for "1990's"
line = re.sub(r'([\W\d])[\']([s])', r'\g<1> \'\g<2>', line)
# apply nonbreaking prefixes
words = line.split()
line = ''
for i in range(len(words)):
word = words[i]
match = re.search(r'^(\S+)\.$', word)
if match:
pre = match.group(1)
if i == len(words) - 1:
"""split last words independently as they are unlikely
to be non-breaking prefixes"""
word = pre + ' .'
else:
word = pre + ' .'
word += ' '
line += word
# clean up extraneous spaces
line = re.sub(' +', ' ', line)
line = re.sub('^ ', '', line)
line = re.sub(' $', '', line)
# .' at end of sentence is missed
line = re.sub(r'\.\' ?$', ' . \' ', line)
# restore multi-dots
while re.search('DOTDOTMULTI', line):
line = re.sub('DOTDOTMULTI', 'DOTMULTI.', line)
line = re.sub('DOTMULTI', '.', line)
# escape special characters
    line = re.sub(r'\&', r'&amp;', line)
    line = re.sub(r'\|', r'&#124;', line)
    line = re.sub(r'\<', r'&lt;', line)
    line = re.sub(r'\>', r'&gt;', line)
    line = re.sub(r'\'', r'&apos;', line)
    line = re.sub(r'\"', r'&quot;', line)
    line = re.sub(r'\[', r'&#91;', line)
    line = re.sub(r'\]', r'&#93;', line)
# ensure final line breaks
# if line[-1] is not '\n':
# line += '\n'
return line
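# A small illustrative sanity check of tokenize_en (sketch only, guarded so the module stays importable).
if __name__ == "__main__":
    # Building NUMERICS above walks the full Unicode range once at import time, so the first
    # call is slow; subsequent calls are cheap.
    sample = "Mr. Smith bought 5,300 shares of hi-tech stock."
    print(tokenize_en(sample))
    # Punctuation is split off, the thousands separator inside 5,300 is kept, and hyphenated
    # words are rewritten with the '@-@' marker (e.g. 'hi @-@ tech').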
|
NeMo-main
|
nemo/collections/common/tokenizers/fairseq_tokenizer.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Union
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
__all__ = ['ByteLevelProcessor', 'ByteLevelTokenizer']
class ByteLevelProcessor:
"""
A very basic tokenization and detokenization class for use with byte-level
tokenization.
"""
def detokenize(self, tokens: List[str]) -> str:
return ' '.join(tokens)
def tokenize(self, text) -> str:
return text
def normalize(self, text) -> str:
return text
class ByteLevelTokenizer(TokenizerSpec):
def __init__(self, special_tokens: Optional[Union[Dict[str, str], List[str]]] = None):
self.vocab_size = 259
self.special_start = 256
self.special_token_to_id = {
self.pad_id: self.pad_id,
self.bos_id: self.bos_id,
self.eos_id: self.eos_id,
}
special_tokens = {} if special_tokens is None else special_tokens
for tok in special_tokens:
self.special_start -= 1
self.special_token_to_id[tok] = self.special_start
self.id_to_special_token = {v: k for k, v in self.special_token_to_id.items()}
# no distinction between tokens and ids.
def text_to_tokens(self, text):
return self.text_to_ids(text)
def tokens_to_text(self, tokens):
return self.ids_to_text(tokens)
def text_to_ids(self, text):
return list(text.encode('utf-8'))
def ids_to_text(self, ids):
# remove special tokens.
ids = [x for x in ids if x < self.special_start]
return bytes(ids).decode('utf-8', errors='ignore').rstrip()
def tokens_to_ids(self, tokens):
if isinstance(tokens, str):
tokens = [tokens]
ids = []
for token in tokens:
ids.append(self.token_to_id(token))
return ids
def ids_to_tokens(self, ids):
if isinstance(ids, int):
ids = [ids]
tokens = []
for id in ids:
tokens.append(self.id_to_token(id))
return tokens
def token_to_id(self, token):
if token in self.special_token_to_id:
return self.special_token_to_id[token]
else:
return token
def id_to_token(self, id):
if id < self.special_start:
return id
else:
return self.id_to_special_token[id]
@property
def pad_id(self):
return 256
@property
def bos_id(self):
return 257
@property
def eos_id(self):
return 258
@property
def unk_id(self):
return 259 # unused
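# A minimal illustrative round trip (sketch only, guarded so the module stays importable).
if __name__ == "__main__":
    tokenizer = ByteLevelTokenizer()
    ids = tokenizer.text_to_ids("héllo")       # ids are simply the UTF-8 bytes of the text
    assert tokenizer.ids_to_text(ids) == "héllo"
    # ids 256-258 are reserved for pad/bos/eos and are filtered out again while decoding.
    print(ids, tokenizer.pad_id, tokenizer.bos_id, tokenizer.eos_id)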
|
NeMo-main
|
nemo/collections/common/tokenizers/bytelevel_tokenizers.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.common.tokenizers.aggregate_tokenizer import AggregateTokenizer
from nemo.collections.common.tokenizers.bytelevel_tokenizers import ByteLevelTokenizer
from nemo.collections.common.tokenizers.char_tokenizer import CharTokenizer
from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer
from nemo.collections.common.tokenizers.regex_tokenizer import RegExTokenizer
from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.common.tokenizers.word_tokenizer import WordTokenizer
from nemo.collections.common.tokenizers.youtokentome_tokenizer import YouTokenToMeTokenizer
|
NeMo-main
|
nemo/collections/common/tokenizers/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import List
__all__ = ['TokenizerSpec']
class TokenizerSpec(ABC):
"""
Inherit this class to implement a new tokenizer.
"""
@abstractmethod
def text_to_tokens(self, text):
pass
@abstractmethod
def tokens_to_text(self, tokens):
pass
@abstractmethod
def tokens_to_ids(self, tokens):
pass
@abstractmethod
def ids_to_tokens(self, ids):
pass
@abstractmethod
def text_to_ids(self, text):
pass
@abstractmethod
def ids_to_text(self, ids):
pass
def add_special_tokens(self, special_tokens: List[str]):
raise NotImplementedError("To be implemented")
@property
def name(self):
return type(self).__name__
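# An illustrative sketch of the minimal surface a concrete TokenizerSpec subclass has to provide;
# the toy whitespace tokenizer below is hypothetical and only meant to show the contract.
class _ToyWhitespaceTokenizer(TokenizerSpec):
    def __init__(self, vocab: List[str]):
        self.vocab = {token: idx for idx, token in enumerate(vocab)}
        self.inv_vocab = {idx: token for token, idx in self.vocab.items()}

    def text_to_tokens(self, text):
        return text.split()

    def tokens_to_text(self, tokens):
        return " ".join(tokens)

    def tokens_to_ids(self, tokens):
        return [self.vocab[token] for token in tokens]

    def ids_to_tokens(self, ids):
        return [self.inv_vocab[idx] for idx in ids]

    def text_to_ids(self, text):
        return self.tokens_to_ids(self.text_to_tokens(text))

    def ids_to_text(self, ids):
        return self.tokens_to_text(self.ids_to_tokens(ids))


if __name__ == "__main__":
    toy = _ToyWhitespaceTokenizer(["hello", "world"])
    assert toy.ids_to_text(toy.text_to_ids("hello world")) == "hello world"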
|
NeMo-main
|
nemo/collections/common/tokenizers/tokenizer_spec.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from nemo.collections.common.tokenizers.char_tokenizer import CharTokenizer
__all__ = ['WordTokenizer']
class WordTokenizer(CharTokenizer):
"Tokenizes at word boundary"
def __init__(
self,
vocab_file: str,
mask_token: Optional[str] = None,
bos_token: Optional[str] = None,
eos_token: Optional[str] = None,
pad_token: Optional[str] = None,
sep_token: Optional[str] = None,
cls_token: Optional[str] = None,
unk_token: Optional[str] = None,
):
"""
Args:
vocab_file: path to file with vocabulary which consists
                of words separated by \n
mask_token: mask token
bos_token: the beginning of sequence token
eos_token: the end of sequence token. Usually equal to sep_token
pad_token: token to use for padding
sep_token: token used for separating sequences
cls_token: class token. Usually equal to bos_token
unk_token: token to use for unknown tokens
"""
super().__init__(
vocab_file=vocab_file,
mask_token=mask_token,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
)
def text_to_tokens(self, text):
token_candidates = text.strip().split()
tokens = []
for token in token_candidates:
if token in self.vocab:
tokens.append(token)
else:
tokens.append(self.unk_token)
return tokens
def ids_to_text(self, ids):
ids_ = [id_ for id_ in ids if id_ not in self.special_tokens]
return " ".join(self.ids_to_tokens(ids_))
|
NeMo-main
|
nemo/collections/common/tokenizers/word_tokenizer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import warnings
from collections import Counter
from enum import Enum
from pathlib import Path
from typing import Dict, List, NewType, Optional, Union
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
__all__ = ['CharTokenizer']
NUMBER_OF_CHARACTERS_READ_BUFFER_SIZE = 10 ** 7
class SpecialTokenString(Enum):
MASK = 'mask'
BOS = 'bos'
EOS = 'eos'
PAD = 'pad'
SEP = 'sep'
CLS = 'cls'
UNK = 'unk'
@classmethod
def has_value(cls, value):
return value in cls._value2member_map_
SpecialTokenStringType = NewType('SpecialTokenString', SpecialTokenString)
class CharTokenizer(TokenizerSpec):
rf"""
Each character is a token.
Args:
vocab_file: path to file with vocabulary for a tokenizer. The file consists of valid Python string literals
separated by the new line character. Such literals must contain 1 character. Examples of valid Python
literals: ``'a'``, ``'\n'``, ``"'"``, ``'ж'``, ``'\u8976'``. Optionally the first line in the file can be a
JSON dictionary of special tokens. The keys of the special tokens dictionary are ``'mask_token'``,
``'bos_token'`` and so on. Some special tokens names can be omitted in the special tokens dictionary line.
A file ``vocab_file`` has to be in ``'utf-8'`` encoding.
mask_token: mask token. The following is applicable to all special tokens. Parameter ``mask_token`` is used
for adding mask token to vocabulary or for modification of mask token present in special tokens dictionary
in the first line of file ``vocab_file``. Parameter ``mask_token`` can be either of type ``bool`` or a
``str`` of length 1.
If ``mask_token`` is ``bool`` it has to be ``False``. If ``mask_token`` is ``True`` an exception is raised.
If ``mask_token`` is ``False`` and ``mask_token`` is present in special tokens dictionary in vocabulary
            file ``vocab_file``, then ``mask_token`` is removed from the special tokens dictionary.
If the parameter ``mask_token`` is a string, then such strings in the input sequence are interpreted as
mask tokens.
bos_token: the beginning of sequence token. See more in ``mask_token`` parameter description.
eos_token: the end of sequence token. Usually equal to sep_token. See more in ``mask_token`` parameter
description.
pad_token: token to use for padding. See more in ``mask_token`` parameter description.
sep_token: token used for separating sequences. See more in ``mask_token`` parameter description.
cls_token: class token. Usually equal to bos_token. See more in ``mask_token`` parameter description.
unk_token: token to use for unknown tokens. If the parameter ``unk_token`` is set and there is a character
            in the input of ``text_to_ids`` or ``text_to_tokens`` methods which is not in the vocabulary, then
such an unknown character is tokenized into ``unk_token``. If the parameter ``unk_token`` is ``False``,
then unknown tokens are discarded. See more in ``mask_token`` parameter description.
        special_token_to_prepend: special token to prepend to the output of ``text_to_ids`` or ``text_to_tokens``
            methods. This option can be used if you decide to add EOS and BOS tokens to the input on the stage of
            tokenization. Possible options are: {[None] + [e.value for e in SpecialTokenString]}.
        special_token_to_append: special token to append to the output of ``text_to_ids`` or ``text_to_tokens``
            methods. See more in the description of ``special_token_to_prepend`` parameter.
        special_tokens_to_remove_while_decoding: which special tokens are removed before detokenization. If this
parameter equals ``'all'``, then all special tokens are removed. The parameter
``special_tokens_to_remove_while_decoding`` can also be a list of values from this set
{set(e.value for e in SpecialTokenString)}.
"""
def __init__(
self,
vocab_file: str,
mask_token: Optional[Union[str, bool]] = None,
bos_token: Optional[Union[str, bool]] = None,
eos_token: Optional[Union[str, bool]] = None,
pad_token: Optional[Union[str, bool]] = None,
sep_token: Optional[Union[str, bool]] = None,
cls_token: Optional[Union[str, bool]] = None,
unk_token: Optional[Union[str, bool]] = None,
special_token_to_prepend: Optional[SpecialTokenStringType] = None,
special_token_to_append: Optional[SpecialTokenStringType] = None,
special_tokens_to_remove_while_decoding: Union[List[SpecialTokenStringType], str] = 'all',
):
vocab_file = Path(vocab_file).expanduser()
with vocab_file.open(encoding='utf-8') as f:
first_line = f.readline()
if first_line[0] == '{':
special_tokens_dict = json.loads(first_line)
self.check_special_tokens_dict_from_file(special_tokens_dict, vocab_file)
vocab_list = f.readlines()
else:
special_tokens_dict = {}
vocab_list = [first_line] + f.readlines()
special_tokens_dict = self.update_special_tokens_dict(
special_tokens_dict, mask_token, bos_token, eos_token, pad_token, sep_token, cls_token, unk_token
)
for e in SpecialTokenString:
name = e.value + '_token'
setattr(self, name, special_tokens_dict[name] if name in special_tokens_dict else None)
for k, v in special_tokens_dict.items():
setattr(self, k, v)
for value, name in [
(special_token_to_prepend, 'special_token_to_prepend'),
(special_token_to_append, 'special_token_to_append'),
]:
self.check_special_token_name(name, value, special_tokens_dict)
setattr(self, name, value + '_token' if isinstance(value, str) else value)
self.vocab = {}
count = 0
for v in special_tokens_dict.values():
self.vocab[v] = count
count += 1
for i, token in enumerate(vocab_list):
token = eval(token.strip())
self.check_token_from_file(token, vocab_file, i)
if token not in self.vocab:
self.vocab[token] = count
count += 1
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.vocab_size = len(self.vocab)
self.check_special_tokens_to_remove_while_decoding(
special_tokens_to_remove_while_decoding, special_tokens_dict
)
self.special_token_ids_to_remove_while_decoding = (
self.tokens_to_ids([v for v in special_tokens_dict.values()])
if special_tokens_to_remove_while_decoding == 'all'
else [getattr(self, e + '_id') for e in special_tokens_to_remove_while_decoding]
)
@classmethod
def check_special_tokens_dict_from_file(cls, special_tokens_dict, vocab_file):
for k, v in special_tokens_dict.items():
if k[-6:] != '_token' or not SpecialTokenString.has_value(k[:-6]):
raise ValueError(
f"Unsupported key {repr(k)} in special tokens dictionary in vocabulary file {vocab_file} "
f"(first line). Supported keys are {[e.value + '_token' for e in SpecialTokenString]}."
)
if not isinstance(v, str):
raise ValueError(
f"Values of special tokens dictionary in vocabulary file {vocab_file} (first line) has to belong "
f"to type `str`, whereas type of item '{k}' value {repr(v)} is `{type(v)}`."
)
elif len(v) == 0:
raise ValueError(
f"Values of special tokens dictionary in vocabulary file {vocab_file} (first line) has to not "
f"empty strings, whereas value of item '{k}' is an empty string."
)
cls.check_special_tokens_dict_for_duplicate_values(
special_tokens_dict, f"Loaded from vocabulary file {vocab_file}"
)
@staticmethod
def check_special_tokens_dict_for_duplicate_values(special_tokens_dict, err_msg_prefix):
if len(special_tokens_dict) != len(set(special_tokens_dict.values())):
tokens_with_equal_values = []
duplicate_values = []
for k, v in list(reversed(list(special_tokens_dict.items())))[:-1]:
tokens = [k]
for kk, vv in special_tokens_dict.items():
if kk == k:
break
if v == vv:
tokens.append(kk)
if len(tokens) > 1:
duplicate_values.append(v)
tokens_with_equal_values.append(tokens)
if duplicate_values:
dup_values_msg = '. '.join(
[f"Tokens {t} have value '{v}'" for t, v in zip(tokens_with_equal_values, duplicate_values)]
)
raise ValueError(
err_msg_prefix + f" special tokens dictionary has duplicate values. " + dup_values_msg
)
@classmethod
def update_special_tokens_dict(
cls,
init_special_tokens_dict: Dict[str, str],
mask_token: Optional[Union[str, bool]] = None,
bos_token: Optional[Union[str, bool]] = None,
eos_token: Optional[Union[str, bool]] = None,
pad_token: Optional[Union[str, bool]] = None,
sep_token: Optional[Union[str, bool]] = None,
cls_token: Optional[Union[str, bool]] = None,
unk_token: Optional[Union[str, bool]] = None,
):
special_tokens_dict = init_special_tokens_dict.copy()
for value, name in zip(
[pad_token, unk_token, bos_token, eos_token, sep_token, mask_token, cls_token],
['pad_token', 'unk_token', 'bos_token', 'eos_token', 'sep_token', 'mask_token', 'cls_token'],
):
if value is not None:
if isinstance(value, bool):
if value:
raise ValueError(
f"If `CharTokenizer` constructor parameter `{name}` is `bool` it has to be `False`"
)
else:
if name in special_tokens_dict:
del special_tokens_dict[name]
else:
warnings.warn(
f"Cannot remove special token `{name}` since it is not in special tokens dictionary "
f"{special_tokens_dict}."
)
elif not isinstance(value, str):
raise ValueError(
f"`CharTokenizer` constructor parameter `{name}` has to be either `False` or belong to type "
f"`str`, whereas type of `{name}` is `{type(value)}`."
)
else:
special_tokens_dict[name] = value
cls.check_special_tokens_dict_for_duplicate_values(
special_tokens_dict,
"After updating special tokens dictionary with tokens passed in `CharTokenizer` constructor parameters",
)
return special_tokens_dict
@staticmethod
def check_token_from_file(token, vocab_file, line_i):
        if not isinstance(token, str) or len(token) != 1:
raise ValueError(
f"Each line in vocabulary have to be a Python string literal containing 1 character. "
f"Encountered {repr(token)} on line {line_i} in file {vocab_file}."
)
@staticmethod
def check_special_token_name(parameter_name, value, special_tokens_dict):
if value is not None:
if not SpecialTokenString.has_value(value):
raise ValueError(
f"Value {repr(value)} of parameter `{parameter_name}` is wrong. Supported values are "
f"{[e.value for e in SpecialTokenString]}."
)
elif value + '_token' not in special_tokens_dict:
raise ValueError(
f"You should provide `{value + '_token'}` parameter to `CharTokenizer` constructor if "
f"you wish to pass token {repr(value)} in parameter `{parameter_name}`."
)
@staticmethod
def check_special_tokens_to_remove_while_decoding(special_tokens_to_remove_while_decoding, special_tokens_dict):
if isinstance(special_tokens_to_remove_while_decoding, list):
for i, value in enumerate(special_tokens_to_remove_while_decoding):
if not SpecialTokenString.has_value(value):
raise ValueError(
f'Wrong element with value {repr(value)} in position {i} of parameter '
f'`special_tokens_to_remove_while_decoding` of `CharTokenizer` constructor. Supported values '
f'are {[e.value for e in SpecialTokenString]}.'
)
elif value + '_token' not in special_tokens_dict:
raise ValueError(
f"You should provide `{value + '_token'}` parameter to `CharTokenizer` constructor if "
f"you wish to pass token {repr(value)} in parameter `special_tokens_to_remove_while_decoding`. "
f"`{value + '_token'}` was detected in position {i} in "
f"`special_tokens_to_remove_while_decoding`."
)
elif (
isinstance(special_tokens_to_remove_while_decoding, str)
and special_tokens_to_remove_while_decoding != 'all'
or not isinstance(special_tokens_to_remove_while_decoding, str)
):
raise ValueError(
f"Parameter `special_tokens_to_remove_while_decoding` of `CharTokenizer` constructor has to be "
f"equal to a string 'all' or be a list of values from set {set(e.value for e in SpecialTokenString)} "
f"whereas `special_tokens_to_remove_while_decoding={repr(special_tokens_to_remove_while_decoding)}`"
)
def text_to_tokens(self, text: str) -> List[str]:
token_candidates = [char for char in text]
tokens = []
if self.special_token_to_prepend is not None:
tokens.append(getattr(self, self.special_token_to_prepend))
for i, token in enumerate(token_candidates):
if token in self.vocab:
tokens.append(token)
elif self.unk_token is not None:
tokens.append(self.unk_token)
else:
warnings.warn(
f"Character {repr(token)} in position {i} is not present in vocabulary and no `<UNK>` token was "
f"set. Character {repr(token)} is discarded."
)
if self.special_token_to_append is not None:
tokens.append(getattr(self, self.special_token_to_append))
return tokens
def tokens_to_text(self, tokens: List[str]) -> str:
return self.ids_to_text(self.tokens_to_ids(tokens))
def text_to_ids(self, text: str) -> List[int]:
ids = [self.vocab[token] for token in self.text_to_tokens(text)]
return ids
def ids_to_text(self, ids: List[int]) -> str:
ids_ = [id_ for id_ in ids if id_ not in self.special_token_ids_to_remove_while_decoding]
return "".join(self.ids_to_tokens(ids_))
def tokens_to_ids(self, tokens: List[str]) -> List[int]:
return [self.vocab[token] for token in tokens]
def token_to_id(self, token: str) -> int:
return self.vocab[token]
def ids_to_tokens(self, ids: List[int]) -> List[str]:
return [self.inv_vocab[id] for id in ids]
@staticmethod
def check_special_token_id_getting(special_token, id_name):
if special_token is None:
token_param = id_name[:-3] + '_token'
raise ValueError(
f"Cannot return `{id_name}` since `{token_param}` is not set. To obtain `{id_name}` you need to pass "
f"parameter `{token_param}` to `CharTokenizer` constructor."
)
@property
def pad_id(self):
self.check_special_token_id_getting(self.pad_token, 'pad_id')
return self.vocab[self.pad_token]
@property
def bos_id(self):
self.check_special_token_id_getting(self.bos_token, 'bos_id')
return self.vocab[self.bos_token]
@property
def eos_id(self):
self.check_special_token_id_getting(self.eos_token, 'eos_id')
return self.vocab[self.eos_token]
@property
def unk_id(self):
self.check_special_token_id_getting(self.unk_token, 'unk_id')
return self.vocab[self.unk_token]
@property
def mask_id(self):
self.check_special_token_id_getting(self.mask_token, 'mask_id')
return self.vocab[self.mask_token]
@property
def sep_id(self):
self.check_special_token_id_getting(self.sep_token, 'sep_id')
return self.vocab[self.sep_token]
@property
def cls_id(self):
self.check_special_token_id_getting(self.cls_token, 'cls_id')
return self.vocab[self.cls_token]
@staticmethod
def create_special_tokens_dict(
mask_token: Optional[str] = None,
bos_token: Optional[str] = None,
eos_token: Optional[str] = None,
pad_token: Optional[str] = None,
sep_token: Optional[str] = None,
cls_token: Optional[str] = None,
unk_token: Optional[str] = None,
):
special_tokens_dict = {}
for value, name in zip(
[pad_token, unk_token, bos_token, eos_token, sep_token, mask_token, cls_token],
['pad_token', 'unk_token', 'bos_token', 'eos_token', 'sep_token', 'mask_token', 'cls_token'],
):
if value is not None:
if not isinstance(value, str):
raise ValueError(
f"The type of parameter `{name}` has to be `None` or `str`, found `{type(value)}`"
)
elif len(value) == 0:
raise ValueError(f"If the parameter `{name}` is `str`, then its length has to be nonzero.")
elif value in special_tokens_dict.values():
other_name = None
for k, v in special_tokens_dict.items():
if v == value:
other_name = k
raise ValueError(
f"The value {repr(value)} of special token `{name}` is the same as the value of special token "
f"`{other_name}`."
)
special_tokens_dict[name] = value
return special_tokens_dict
@staticmethod
def check_characters_to_exclude_from_vocabulary(characters_to_exclude_from_vocabulary):
for i, char in enumerate(characters_to_exclude_from_vocabulary):
if not isinstance(char, str):
raise ValueError(
f"Character to exclude from vocabulary has to `str`, whereas an element in position {i} is of "
f"type `{type(char)}`."
)
elif len(char) != 1:
raise ValueError(
f"A length of an element of `characters_to_exclude_from_vocabulary` parameter has to be 1. "
f"The length of an element in position {i} is {len(char)}."
)
@staticmethod
def check_text_and_text_file_name(text, text_file_name):
if text is None and text_file_name is None:
raise ValueError(
f'Exactly one of parameters `text` and `text_file_name` should be provided whereas both parameters '
f'are `None`.'
)
if text is not None and text_file_name is not None:
raise ValueError(
f"Exactly one of parameters `text` and `text_file_name` has to be provided, whereas both parameters "
f"are not `None`."
)
if text is not None:
if not isinstance(text, str):
raise ValueError(
f"Parameter `text` has to be of type `str`, whereas it belongs to type `{type(text)}`."
)
@classmethod
def build_vocab(
cls,
save_path: Union[str, bytes, os.PathLike],
text: Optional[str] = None,
text_file_name: Optional[Union[str, bytes, os.PathLike]] = None,
characters_to_exclude: Optional[List[str]] = None,
vocab_size: int = None,
mask_token: Optional[str] = None,
bos_token: Optional[str] = None,
eos_token: Optional[str] = None,
pad_token: Optional[str] = None,
sep_token: Optional[str] = None,
cls_token: Optional[str] = None,
unk_token: Optional[str] = None,
):
"""
Creates character vocabulary and saves it to file ``save_path``. You should provide one of parameters ``text``
        and ``text_file_name``. The format of the created character vocabulary file is the following:
```
{['mask_token': "ANY NON EMPTY STRING", ]['bos_token': "ANY NON EMPTY STRING", ] and so on}
' '
'e'
...
```
        The first line is a JSON dictionary which contains special tokens. These special tokens are set using the
        parameters ``mask_token``, ``bos_token``, ``eos_token``, ``pad_token``, ``sep_token``, ``cls_token``, ``unk_token``.
Other lines in created vocabulary file are Python string literals containing one character each.
Args:
save_path: path to the output text file. If ``save_path`` parent directory does not exist it will be created
text: string which characters are used for vocabulary creation.
text_file_name: path to a file which characters are used for vocabulary creation. Use this parameter if
the text in file is too large to be loaded in memory.
characters_to_exclude: a list of characters which will not be added to vocabulary.
vocab_size: vocabulary size. If this parameter is set only most frequent ``vocab_size`` characters are added
to vocabulary.
mask_token: mask token
bos_token: the beginning of sequence token
eos_token: the end of sequence token. Usually equal to sep_token.
pad_token: token to use for padding.
sep_token: token used for separating sequences.
cls_token: class token. Usually equal to bos_token.
unk_token: token to use for unknown tokens. If the parameter ``unk_token`` is set and there is a character
                in the input of ``text_to_ids`` or ``text_to_tokens`` methods which is not in the vocabulary, then
such an unknown character is tokenized into ``unk_token``. If the parameter ``unk_token`` is ``False``,
then unknown tokens are discarded.
"""
special_tokens_dict = cls.create_special_tokens_dict(
mask_token, bos_token, eos_token, pad_token, sep_token, cls_token, unk_token
)
if characters_to_exclude is None:
characters_to_exclude = []
else:
cls.check_characters_to_exclude_from_vocabulary(characters_to_exclude)
cls.check_text_and_text_file_name(text, text_file_name)
if text is not None:
counter = Counter(text)
else:
assert text_file_name is not None
text_file_name = Path(text_file_name).expanduser()
counter = Counter()
with text_file_name.open(encoding='utf-8') as f:
while True:
segment = f.read(NUMBER_OF_CHARACTERS_READ_BUFFER_SIZE)
if not segment:
break
counter.update(segment)
for char in characters_to_exclude:
if char in counter:
del counter[char]
save_path = Path(save_path).expanduser()
save_path.parent.mkdir(exist_ok=True, parents=True)
with save_path.open('w', encoding='utf-8') as f:
f.write(json.dumps(special_tokens_dict) + '\n')
if vocab_size is None:
for c, _ in sorted(counter.items(), key=lambda x: -x[1]):
f.write(repr(c) + '\n')
else:
vocab_size -= len(special_tokens_dict)
for i, (c, _) in enumerate(sorted(counter.items(), key=lambda x: -x[1])):
if i < vocab_size:
f.write(repr(c) + '\n')
else:
break
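# A minimal illustrative end-to-end sketch (guarded so the module stays importable).
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        vocab_path = os.path.join(tmp_dir, "char_vocab.txt")
        # Build a tiny character vocabulary from an inline string, reserving pad/unk tokens.
        CharTokenizer.build_vocab(vocab_path, text="hello world", pad_token='<pad>', unk_token='<unk>')
        tokenizer = CharTokenizer(vocab_path, pad_token='<pad>', unk_token='<unk>')
        ids = tokenizer.text_to_ids("hello")
        assert tokenizer.ids_to_text(ids) == "hello"
        print(tokenizer.vocab_size, ids)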
|
NeMo-main
|
nemo/collections/common/tokenizers/char_tokenizer.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Dict, List, Tuple
import numpy as np
from numpy import ndarray
from sklearn.preprocessing import PowerTransformer, QuantileTransformer, RobustScaler
from nemo.utils import logging
__all__ = ["IntCode", "FloatCode", "CategoryCode", "ColumnCodes"]
class Code(object):
def compute_code(self, data_series: ndarray):
"""
@params:
data_series: an array of input data used to calculate mapping
"""
raise NotImplementedError()
def __init__(self, col_name: str, code_len: int, start_id: int, fillall: bool = True, hasnan: bool = True):
"""
@params:
col_name: name of the column
code_len: number of tokens used to code the column.
start_id: offset for token_id.
            fillall: if True, reserve space for every possible digit value even if that digit value is
                not present in the data_series. Otherwise, only reserve space for the digit values
                observed in the data_series.
hasnan: if True, reserve space for nan
"""
self.name = col_name
self.code_len = code_len
self.start_id = start_id
self.end_id = start_id
self.fillall = fillall
self.hasnan = hasnan
def encode(self, item: str) -> List[int]:
raise NotImplementedError()
def decode(self, ids: List[int]) -> str:
raise NotImplementedError()
@property
def code_range(self) -> List[Tuple[int, int]]:
"""
get the vocab id range for each of the encoded tokens
@returns [(min, max), (min, max), ...]
"""
return [(self.start_id, self.end_id)]
class IntCode(Code):
def __init__(
self, col_name: str, code_len: int, start_id: int, fillall: bool = True, base: int = 100, hasnan: bool = True
):
super().__init__(col_name, code_len, start_id, fillall, hasnan)
self.base = base
self.int_min: int = None
def compute_code(self, data_series: ndarray):
significant_val = self.array_convert_to_int(data_series)
digits_id_to_item = [{} for _ in range(self.code_len)]
digits_item_to_id = [{} for _ in range(self.code_len)]
for i in range(self.code_len):
id_to_item = digits_id_to_item[i]
item_to_id = digits_item_to_id[i]
v = (significant_val // self.base ** i) % self.base
if self.fillall:
uniq_items = range(0, self.base)
else:
uniq_items = sorted(np.unique(v).tolist())
for k in range(len(uniq_items)):
item = str(uniq_items[k])
item_to_id[item] = self.end_id
id_to_item[self.end_id] = item
self.end_id += 1
self.digits_id_to_item = digits_id_to_item
self.digits_item_to_id = digits_item_to_id
self.NA_token = 'nan'
if self.hasnan:
self.end_id += 1 # add the N/A token
codes = []
ranges = self.code_range
for i in ranges:
codes.append(i[1] - 1)
self.NA_token_id = codes
def array_convert_to_int(self, val: ndarray):
val = val.astype(int)
self.int_min = val.min()
return val - self.int_min
def convert_to_int(self, val: float) -> int:
return int(val) - self.int_min
def reverse_convert_to_int(self, val: int) -> int:
return val + self.int_min
@property
def code_range(self) -> List[Tuple[int, int]]:
"""
get the vocab id range for each of the encoded tokens
@returns [(min, max), (min, max), ...]
"""
# first largest digits
outputs = []
c = 0
for i in reversed(range(self.code_len)):
ids = self.digits_id_to_item[i].keys()
if c == 0:
if self.hasnan:
outputs.append((min(ids), max(ids) + 2)) # the first token contains the N/A
else:
outputs.append((min(ids), max(ids) + 1)) # non N/A
else:
outputs.append((min(ids), max(ids) + 1))
c += 1
return outputs
def encode(self, item: str) -> List[int]:
if self.hasnan and item == self.NA_token:
return self.NA_token_id
elif not self.hasnan and item == self.NA_token:
raise ValueError(f"colum {self.name} cannot handle nan, please set hasnan=True")
val = float(item)
val_int = self.convert_to_int(val)
digits = []
for i in range(self.code_len):
digit = (val_int // self.base ** i) % self.base
digits.append(str(digit))
if (val_int // self.base ** self.code_len) != 0:
raise ValueError("not right length")
codes = []
for i in reversed(range(self.code_len)):
digit_str = digits[i]
if digit_str in self.digits_item_to_id[i]:
codes.append(self.digits_item_to_id[i][digit_str])
else:
# find the nearest encode id
allowed_digits = np.array([int(d) for d in self.digits_item_to_id[i].keys()])
near_id = np.argmin(np.abs(allowed_digits - int(digit_str)))
digit_str = str(allowed_digits[near_id])
codes.append(self.digits_item_to_id[i][digit_str])
                logging.warning('out-of-domain digit encountered, using the nearest available code')
return codes
def decode(self, ids: List[int]) -> str:
if self.hasnan and ids[0] == self.NA_token_id[0]:
return self.NA_token
v = 0
for i in reversed(range(self.code_len)):
digit = int(self.digits_id_to_item[i][ids[self.code_len - i - 1]])
v += digit * self.base ** i
v = self.reverse_convert_to_int(v)
return str(v)
class FloatCode(IntCode):
def __init__(
self,
col_name: str,
code_len: int,
start_id: int,
fillall: bool = True,
base: int = 100,
hasnan: bool = True,
transform: str = 'quantile',
):
super().__init__(col_name, code_len, start_id, fillall, base, hasnan)
if transform == 'yeo-johnson':
self.scaler = PowerTransformer(standardize=True)
elif transform == 'quantile':
self.scaler = QuantileTransformer(output_distribution='uniform', n_quantiles=100)
elif transform == 'robust':
self.scaler = RobustScaler()
else:
raise ValueError('Supported data transformations are "yeo-johnson", "quantile", and "robust"')
def convert_to_int(self, val: float) -> int:
val = np.expand_dims(np.array(val), axis=0)
values = self.scaler.transform(val[:, None])[:, 0] - self.mval
values = (values * self.base ** self.extra_digits).astype(int)
output = values[0]
return output
def array_convert_to_int(self, val: ndarray):
values = self.scaler.fit_transform(val[:, None])[:, 0]
self.mval = values.min()
values = values - self.mval
digits = int(math.log(values.max(), self.base)) + 1
# extra digits used for 'float' part of the number
extra_digits = self.code_len - digits
if extra_digits < 0:
raise ValueError("need large length to code the nummber")
self.extra_digits = extra_digits
values = (values * self.base ** self.extra_digits).astype(int)
return values
def reverse_convert_to_int(self, val: int) -> float:
val = val / self.base ** self.extra_digits
val = np.expand_dims(np.array(val), axis=0)
v = self.scaler.inverse_transform(val[:, None] + self.mval)[0, 0]
return v
def decode(self, ids: List[int]) -> str:
if self.hasnan and ids[0] == self.NA_token_id[0]:
return self.NA_token
v = 0
for i in reversed(range(self.code_len)):
digit = int(self.digits_id_to_item[i][ids[self.code_len - i - 1]])
v += digit * self.base ** i
v = self.reverse_convert_to_int(v)
accuracy = max(int(abs(np.log10(0.1 / self.base ** self.extra_digits))), 1)
return f"{v:.{accuracy}f}"
class CategoryCode(Code):
def __init__(self, col_name: str, start_id: int):
super().__init__(col_name, 1, start_id, True, False)
def compute_code(self, data_series: ndarray):
uniq_items = np.unique(data_series).tolist()
id_to_item = {}
item_to_id = {}
for i in range(len(uniq_items)):
item = str(uniq_items[i])
item_to_id[item] = self.end_id
id_to_item[self.end_id] = item
self.end_id += 1
self.id_to_item = id_to_item
self.item_to_id = item_to_id
def encode(self, item) -> List[int]:
return [self.item_to_id[item]]
def decode(self, ids: List[int]) -> str:
return self.id_to_item[ids[0]]
column_map = {"int": IntCode, "float": FloatCode, "category": CategoryCode}
class ColumnCodes(object):
def __init__(self):
self.column_codes: Dict[str, Code] = {}
self.columns = []
self.sizes = []
@property
def vocab_size(self):
return self.column_codes[self.columns[-1]].end_id
def register(self, name: str, ccode: Code):
self.columns.append(name)
self.column_codes[name] = ccode
self.sizes.append(ccode.code_len)
def encode(self, col: str, item: str) -> List[int]:
if col in self.column_codes:
return self.column_codes[col].encode(item)
else:
raise ValueError(f"cannot encode {col} {item}")
def decode(self, col: str, ids: List[int]) -> str:
if col in self.column_codes:
return self.column_codes[col].decode(ids)
else:
raise ValueError("cannot decode")
def get_range(self, column_id: int) -> List[Tuple[int, int]]:
return self.column_codes[self.columns[column_id]].code_range
@classmethod
def get_column_codes(cls, column_configs, example_arrays):
column_codes = cls()
beg = 0
cc = None
for config in column_configs:
col_name = config['name']
coder = column_map[config['code_type']]
args = config.get('args', {})
start_id = beg if cc is None else cc.end_id
args['start_id'] = start_id
args['col_name'] = col_name
cc = coder(**args)
cc.compute_code(example_arrays[col_name])
column_codes.register(col_name, cc)
return column_codes
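# A small illustrative usage sketch; the column names, configs and data below are made up.
if __name__ == "__main__":
    example_arrays = {
        "age": np.array([23, 45, 31, 60]),
        "price": np.array([1.2, 3.4, 2.2, 10.5]),
        "city": np.array(["sf", "nyc", "sf", "la"]),
    }
    column_configs = [
        {"name": "age", "code_type": "int", "args": {"code_len": 2, "base": 10, "hasnan": False}},
        {"name": "price", "code_type": "float", "args": {"code_len": 3, "base": 10, "hasnan": False, "transform": "robust"}},
        {"name": "city", "code_type": "category"},
    ]
    codes = ColumnCodes.get_column_codes(column_configs, example_arrays)
    age_ids = codes.encode("age", "45")
    price_ids = codes.encode("price", "3.4")
    print(age_ids, codes.decode("age", age_ids))        # integer columns round-trip exactly
    print(price_ids, codes.decode("price", price_ids))  # float columns round-trip approximately
    print(codes.encode("city", "nyc"), codes.vocab_size)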
|
NeMo-main
|
nemo/collections/common/tokenizers/column_coder.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from typing import Optional
import pandas as pd
from nemo.collections.common.tokenizers.char_tokenizer import TokenizerSpec
from nemo.utils import logging
__all__ = ['RegExTokenizer']
DEFAULT_MASK_TOKEN = '<MASK>'
DEFAULT_BOS_TOKEN = '^'
DEFAULT_EOS_TOKEN = '&'
DEFAULT_PAD_TOKEN = '<PAD>'
DEFAULT_SEP_TOKEN = '<SEP>'
DEFAULT_UNK_TOKEN = '?'
class RegExTokenizer(TokenizerSpec):
"""
A regular expression-based tokenizer at word boundary.
    This tokenizer defaults to supporting MegaMolBART.
<https://catalog.ngc.nvidia.com/orgs/nvidia/teams/clara/models/megamolbart>
"""
def __init__(
self,
regex: Optional[str] = "",
mask_token: Optional[str] = DEFAULT_MASK_TOKEN,
bos_token: Optional[str] = DEFAULT_BOS_TOKEN,
eos_token: Optional[str] = DEFAULT_EOS_TOKEN,
pad_token: Optional[str] = DEFAULT_PAD_TOKEN,
sep_token: Optional[str] = DEFAULT_SEP_TOKEN,
unk_token: Optional[str] = DEFAULT_UNK_TOKEN,
):
"""
Args:
regex: regular expression that defined tokenization rules
mask_token: mask token
bos_token: the beginning of sequence token
eos_token: the end of sequence token. Usually equal to sep_token
pad_token: token to use for padding
sep_token: token used for separating sequences
unk_token: token to use for unknown tokens
"""
self.regex = regex
self.mask_token = mask_token
self.bos_token = bos_token
self.eos_token = eos_token
self.pad_token = pad_token
self.sep_token = sep_token
self.unk_token = unk_token
# holds names of .model/.vocab files
self.regex_file = None
self.vocab_file = None
# initialize with default vocab
self.vocab = {
self.pad_token: 0, # pad_token
self.unk_token: 1, # unk_token
self.bos_token: 2, # begin_token
self.eos_token: 3, # end_token
self.mask_token: 4, # mask_token
self.sep_token: 5, # sep_token
}
self._update_cache()
# Computed attributes
self._compile_regex()
def _update_cache(self):
# Cache data/attributes required for tokenization
        self._unk_id = self.vocab.get(self.unk_token, 1)  # fall back to the default unk id if unk_token is missing
self._decode_vocab = {i: t for t, i in self.vocab.items()}
def _compile_regex(self):
regex_string = r"("
regex_string += self.regex + r"|"
regex_string += r".)"
self._compiled_regex = re.compile(regex_string)
@property
def vocab_size(self):
return len(self.vocab)
def text_to_tokens(self, text):
tokens = self._compiled_regex.findall(text)
return tokens
def tokens_to_text(self, tokens):
tokens_list = []
for token in tokens:
if token[0] == self.bos_token:
token = token[1:]
# Remove end token and the following values
if self.eos_token in token:
eos_idx = token.index(self.eos_token)
token = token[:eos_idx]
tokens_list.append(token)
text = ["".join(tokens) for tokens in tokens_list]
return text
def token_to_ids(self, tokens):
ids_list = []
for token in tokens:
ids_list.append(self.vocab.get(token, self._unk_id))
return ids_list
def tokens_to_ids(self, token_data):
if isinstance(token_data, str):
token_data = [token_data]
ids_list = []
for tokens in token_data:
ids = self.token_to_ids(tokens)
ids_list.append(ids)
return ids_list
def ids_to_tokens(self, ids_list):
if len(ids_list) and not isinstance(ids_list[0], list):
ids_list = [ids_list]
added_list = True
else:
added_list = False
tokens_list = []
for ids in ids_list:
tokens = []
for token_id in ids:
token = self._decode_vocab.get(token_id)
if token is None:
raise ValueError(f"Token id {token_id} is not recognised")
tokens.append(token)
tokens_list.append(tokens)
if added_list:
return tokens_list[0]
else:
return tokens_list
def text_to_ids(self, text):
tokens = self.text_to_tokens(text)
tokens = [tokens]
return self.tokens_to_ids(tokens)[0]
def ids_to_text(self, ids):
tokens = self.ids_to_tokens(ids)
return self.tokens_to_text(tokens)
@property
def pad_id(self):
return 0
@property
def unk_id(self):
return 1
@property
def bos_id(self):
return 2
@property
def eos_id(self):
return 3
@property
def mask_id(self):
return 4
@property
def sep_id(self):
return 5
def _get_regex_vocab_files(self, regex_file=None, vocab_file=None):
"""
Infers files or update if given.
"""
regex_file = regex_file or self.regex_file
if not regex_file:
raise ValueError(f"regex_file must be specified")
vocab_file = vocab_file or self.vocab_file
# try to infer vocab_file from regex_file
if not vocab_file:
vocab_file = os.path.splitext(regex_file)[0] + '.vocab'
self.regex_file = regex_file
self.vocab_file = vocab_file
return regex_file, vocab_file
def save_tokenizer(self, regex_file=None, vocab_file=None):
"""
Saves tokenizer's regex and vocab files
"""
regex_file, vocab_file = self._get_regex_vocab_files(regex_file=regex_file, vocab_file=vocab_file)
logging.info(f"Saving vocabulary to file = {vocab_file}")
with open(vocab_file, 'w') as fp:
            # write tokens in id order so that load_tokenizer() reassigns the same ids
            for token, _ in sorted(self.vocab.items(), key=lambda k_v: k_v[1]):
                fp.write(f"{token}\n")
logging.info(f"Saving regex to file = {regex_file}")
with open(regex_file, 'w') as f:
f.write(self.regex)
def load_tokenizer(self, regex_file=None, vocab_file=None):
"""
Loads tokenizer's regex and vocab files
"""
regex_file, vocab_file = self._get_regex_vocab_files(regex_file=regex_file, vocab_file=vocab_file)
# load vocab file
# vocab_file: path to file with vocabulary which consists
# of characters separated by \n (None/"" for empty vocab)
logging.info(f"Loading vocabulary from file = {vocab_file}")
if os.path.exists(vocab_file):
vocab = {}
with open(vocab_file, "r") as f:
for line in f:
line = line.strip()
if line:
vocab[line] = len(vocab)
self.vocab = vocab
else:
raise RuntimeError(f"Missing vocab_file = {vocab_file}")
# load regex from a file
if os.path.exists(regex_file):
logging.info(f"Loading regex from file = {regex_file}")
self.regex = open(regex_file, encoding="utf-8").read().strip()
else:
raise RuntimeError(f"Missing regex_file = {regex_file}")
self._update_cache()
self._compile_regex()
return self
def build_vocab_from_csv(self, data_csv_file, col="smiles"):
"""
Learns vocabulary from a CSV file. Can be called multiple times to update vocabulary.
"""
logging.debug(f"Building vocabulary from CSV col = {col} file = {data_csv_file}")
# NOTE this has to be run on each CSV file
if not os.path.exists(data_csv_file):
raise ValueError(f"Data file: {data_csv_file} is missing")
df = pd.read_csv(data_csv_file)
vocab = self.vocab
for d in df[col]:
tokens = self.text_to_tokens(d)
logging.debug(f"Text: {d}, Tokens: {tokens}")
for token in tokens:
if token not in vocab:
vocab[token] = len(vocab)
sorted_vocab = sorted(vocab.items(), key=lambda k_v: k_v[1])
logging.debug(f"Vocab: {sorted_vocab}")
self.vocab = vocab
self._update_cache()
def build_vocab_from_text(self, data_text_file):
"""
Learns vocabulary from a text file. Can be called multiple times to update vocabulary.
"""
logging.debug(f"Building vocabulary from TEXT file = {data_text_file}")
# NOTE this has to be run on each text file
if not os.path.exists(data_text_file):
raise ValueError(f"Data file: {data_text_file} is missing")
vocab = self.vocab
with open(data_text_file, encoding="utf-8") as f:
for d in f.readlines():
d = d.rstrip()
tokens = self.text_to_tokens(d)
logging.debug(f"Text: {d}, Tokens: {d}")
for token in tokens:
if token not in vocab:
vocab[token] = len(vocab)
sorted_vocab = sorted(vocab.items(), key=lambda k_v: k_v[1])
logging.debug(f"Vocab: {sorted_vocab}")
self.vocab = vocab
self._update_cache()
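# A small illustrative usage sketch (guarded so the module stays importable). The regex below is a
# toy assumption, not the MegaMolBART regex: bracketed groups and two-letter halogens stay single
# tokens, everything else is split per character.
if __name__ == "__main__":
    import tempfile

    tokenizer = RegExTokenizer(regex=r"\[[^\]]+\]|Br|Cl")
    with tempfile.TemporaryDirectory() as tmp_dir:
        corpus = os.path.join(tmp_dir, "smiles.txt")
        with open(corpus, "w") as f:
            f.write("C[NH+]Cl\nBrCC\n")
        tokenizer.build_vocab_from_text(corpus)
        print(tokenizer.text_to_tokens("C[NH+]Cl"), tokenizer.text_to_ids("C[NH+]Cl"))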
|
NeMo-main
|
nemo/collections/common/tokenizers/regex_tokenizer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Union
import numpy as np
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.utils import logging
__all__ = ['AggregateTokenizer']
class DummyTokenizer:
def __init__(self, vocab):
self.vocab = vocab
self.vocab_size = len(vocab)
# minimum compatibility
# since all the monolingual tokenizers have a vocab
# additional methods could be added here
def get_vocab(self):
return self.vocab
class AggregateTokenizer(TokenizerSpec):
'''
    AggregateTokenizer, allowing one to combine multiple regular monolingual tokenizers into one tokenizer.
The intuition is that we can use existing tokenizers "as is", without retraining, and associate each tokenizer with a language id
during text processing (language id will be used to route the incoming text sample to the right tokenizer)
as well as a token id range for detokenization (e.g. [0..127] for tokenizer A, [128..255] for tokenizer B) so
    that the original text could be reconstructed. Note that we assume that the incoming dict of langs / tokenizers
is ordered, e.g. the first tokenizer will be assigned a lower interval of token ids
Args:
tokenizers: dict of tokenizers, keys are lang ids, values are actual tokenizers
'''
def __init__(self, tokenizers: Dict):
self.tokenizers_dict = tokenizers
self.vocabulary = []
# the tokenizers should produce non-overlapping, ordered token ids
# keys are language ids
self.token_id_offset = {}
# keys are tokenizer numbers
self.token_id_offset_by_tokenizer_num = {}
offset = 0
i = 0
for lang, tokenizer in self.tokenizers_dict.items():
self.token_id_offset[lang] = offset
self.token_id_offset_by_tokenizer_num[i] = offset
offset += len(tokenizer.vocab)
i += 1
for tokenizer in self.tokenizers_dict.values():
self.vocabulary.extend(tokenizer.vocab)
self.vocab_size = len(self.vocabulary)
logging.info(f'Aggregate vocab size: {self.vocab_size}')
# for compatibility purposes only -- right now only the get_vocab method
# is supported, returning the joint vocab across all tokenizers
self.tokenizer = DummyTokenizer(self.vocabulary)
# lookup tables to speed up token to text operations
# if there are two tokenizers, [0,1], ['en', 'es'], each with 128 tokens, the aggregate tokenizer
# token range will be [0,255]. The below method provides three look up tables:
        # one, to convert the incoming token id -- e.g. 200 -- into its real id within its own tokenizer (200 - 128 = 72)
        # second, to compute the tokenizer id that should process that token (1)
        # third, to compute the lang id for that token ('es')
offset_token_ids_by_token_id, tokenizers_by_token_id, langs_by_token_id = self._calculate_offsets()
self.offset_token_ids_by_token_id = offset_token_ids_by_token_id
self.tokenizers_by_token_id = tokenizers_by_token_id
self.langs_by_token_id = langs_by_token_id
def _calculate_offsets(self):
offsets = {}
tokenizers = {}
langs = {}
cur_num = 0
tot = len(self.tokenizers_dict)
for id in range(len(self.vocabulary)):
off_id = id - list(self.token_id_offset.values())[cur_num]
if cur_num + 1 < tot:
if id >= list(self.token_id_offset.values())[cur_num + 1]:
cur_num += 1
off_id = id - list(self.token_id_offset.values())[cur_num]
offsets[id] = off_id
tokenizers[id] = list(self.tokenizers_dict.values())[cur_num]
langs[id] = list(self.tokenizers_dict.keys())[cur_num]
return offsets, tokenizers, langs
def text_to_tokens(self, text, lang_id):
tokenizer = self.tokenizers_dict[lang_id]
return tokenizer.text_to_tokens(text)
def text_to_ids(self, text, lang_id):
tokenizer = self.tokenizers_dict[lang_id]
token_ids = tokenizer.text_to_ids(text)
token_ids[:] = [t + self.token_id_offset[lang_id] for t in token_ids]
return token_ids
def tokens_to_text(self, tokens, lang_id):
if isinstance(tokens, np.ndarray):
tokens = tokens.tolist()
tokenizer = self.tokenizers_dict[lang_id]
return tokenizer.decode_pieces(tokens)
def ids_to_text(self, ids):
if isinstance(ids, np.ndarray):
ids = ids.tolist()
tokens = []
for id in ids:
offset_id = self.offset_token_ids_by_token_id[id]
tokenizer = self.tokenizers_by_token_id[id]
tokens.extend(tokenizer.ids_to_tokens([offset_id]))
text = ''.join(tokens).replace('▁', ' ')
return text
def token_to_id(self, token, lang_id):
tokenizer = self.tokenizers_dict[lang_id]
return tokenizer.token_to_id(token) + self.token_id_offset[lang_id]
def ids_to_tokens(self, ids):
tokens = []
for id in ids:
offset_id = self.offset_token_ids_by_token_id[id]
tokenizer = self.tokenizers_by_token_id[id]
token = tokenizer.ids_to_tokens([offset_id])[0]
tokens.append(token)
return tokens
def ids_to_text_and_langs(self, ids):
text_and_langs = []
for id in ids:
offset_id = self.offset_token_ids_by_token_id[id]
tokenizer = self.tokenizers_by_token_id[id]
token = tokenizer.ids_to_tokens([offset_id])[0]
text = token.replace('▁', ' ')
text = text.strip() # strip for display purposes
lang = self.langs_by_token_id[id]
text_and_langs.append({'char': text, 'lang': lang})
return text_and_langs
def ids_to_words_and_langs(self, ids):
words_and_langs = []
word_ids = [] # tokens belonging to the current word
for id in ids:
offset_id = self.offset_token_ids_by_token_id[id]
tokenizer = self.tokenizers_by_token_id[id]
token = tokenizer.ids_to_tokens([offset_id])[0]
if token.startswith('▁'):
if len(word_ids) > 0: # if this isn't the first word
word = self.ids_to_text(word_ids)
word = word.strip() # strip for display purposes
lang = self.ids_to_lang(word_ids)
wl = {'word': word, 'lang': lang}
words_and_langs.append(wl)
word_ids = []
word_ids.append(id)
if len(word_ids) > 0: # the last tokens
word = self.ids_to_text(word_ids)
word = word.strip() # strip for display purposes
lang = self.ids_to_lang(word_ids)
wl = {'word': word, 'lang': lang}
words_and_langs.append(wl)
return words_and_langs
def ids_to_lang(self, ids):
lang_cnts = {}
for id in ids:
lang = self.langs_by_token_id[id]
lang_cnt = lang_cnts.get(lang)
if lang_cnt is not None:
lang_cnts[lang] = lang_cnt + 1
else:
lang_cnts[lang] = 1
max_lang = ''
max_lang_cnt = -1
for lang, lang_cnt in lang_cnts.items():
if lang_cnt > max_lang_cnt:
max_lang = lang
max_lang_cnt = lang_cnt
return max_lang
def tokens_to_ids(self, tokens: Union[str, List[str]], langs: Union[str, List[str]]) -> Union[int, List[int]]:
if isinstance(tokens, str):
tokens = [tokens]
if isinstance(langs, str):
langs = [langs]
ids = []
for i, token in enumerate(tokens):
lang_id = langs[i]
ids.append(self.token_to_id(token, lang_id))
return ids
@property
def vocab(self):
return self.vocabulary
@property
def langs(self):
return list(self.tokenizers_dict.keys())
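# An illustrative, commented-out usage sketch: it assumes two already-trained SentencePiece models;
# the model file paths below are hypothetical.
# from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer
# tokenizers = {'en': SentencePieceTokenizer("en.model"), 'es': SentencePieceTokenizer("es.model")}
# agg = AggregateTokenizer(tokenizers)               # 'es' token ids start right after the 'en' vocab
# ids = agg.text_to_ids("hola mundo", 'es')
# print(agg.ids_to_text(ids), agg.ids_to_lang(ids))  # detokenized text and the majority language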
|
NeMo-main
|
nemo/collections/common/tokenizers/aggregate_tokenizer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer
|
NeMo-main
|
nemo/collections/common/tokenizers/huggingface/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from transformers import AutoTokenizer as AUTOTOKENIZER
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.utils import logging
__all__ = [
'AutoTokenizer',
]
class AutoTokenizer(TokenizerSpec):
'''
Wrapper of HuggingFace AutoTokenizer https://huggingface.co/transformers/model_doc/auto.html#autotokenizer.
'''
def __init__(
self,
pretrained_model_name: str,
vocab_file: Optional[str] = None,
merges_file: Optional[str] = None,
mask_token: Optional[str] = None,
bos_token: Optional[str] = None,
eos_token: Optional[str] = None,
pad_token: Optional[str] = None,
sep_token: Optional[str] = None,
cls_token: Optional[str] = None,
unk_token: Optional[str] = None,
use_fast: Optional[bool] = False,
):
"""
Args:
pretrained_model_name: corresponds to HuggingFace-AutoTokenizer's 'pretrained_model_name_or_path' input argument.
For more details please refer to https://huggingface.co/transformers/_modules/transformers/tokenization_auto.html#AutoTokenizer.from_pretrained.
The list of all supported models can be found here: ALL_PRETRAINED_CONFIG_ARCHIVE_MAP
vocab_file: path to file with vocabulary which consists
of characters separated by '\n'.
mask_token: mask token
bos_token: the beginning of sequence token
eos_token: the end of sequence token. Usually equal to sep_token
pad_token: token to use for padding
sep_token: token used for separating sequences
cls_token: class token. Usually equal to bos_token
unk_token: token to use for unknown tokens
use_fast: whether to use fast HuggingFace tokenizer
"""
try:
# this logic deals with different huggingface tokenizers having different positional args
if vocab_file is None:
self.tokenizer = AUTOTOKENIZER.from_pretrained(
pretrained_model_name_or_path=pretrained_model_name, use_fast=use_fast,
)
elif merges_file is None:
self.tokenizer = AUTOTOKENIZER.from_pretrained(
pretrained_model_name_or_path=pretrained_model_name, vocab_file=vocab_file, use_fast=use_fast,
)
else:
self.tokenizer = AUTOTOKENIZER.from_pretrained(
pretrained_model_name_or_path=pretrained_model_name,
vocab_file=vocab_file,
merges_file=merges_file,
use_fast=use_fast,
)
except Exception as e:
raise ValueError(
f'Unable to instantiate HuggingFace AUTOTOKENIZER for {pretrained_model_name}. Exception: {e}'
)
self.original_vocab_size = len(self.tokenizer)
special_tokens_dict = {}
        # setting special tokens; by default the model's special tokens will be preserved
        # unless new values are passed for them
if unk_token is not None:
special_tokens_dict["unk_token"] = unk_token
if mask_token is not None:
special_tokens_dict["mask_token"] = mask_token
if pad_token is not None:
special_tokens_dict["pad_token"] = pad_token
# if the model does not have eos_token but has sep_token,
# set eos_token = sep_token, and vice versa
if sep_token is not None:
special_tokens_dict["sep_token"] = sep_token
elif self.tokenizer.sep_token is None and self.tokenizer.eos_token:
special_tokens_dict["sep_token"] = self.tokenizer.eos_token
if eos_token is not None:
special_tokens_dict["eos_token"] = eos_token
elif self.tokenizer.eos_token is None and self.tokenizer.sep_token:
special_tokens_dict["eos_token"] = self.tokenizer.sep_token
# if the model does not have bos_token but has cls_token,
# set bos_token = cls_token, and vice versa
if bos_token is not None:
special_tokens_dict["bos_token"] = bos_token
elif self.tokenizer.bos_token is None and self.tokenizer.cls_token:
special_tokens_dict["bos_token"] = self.tokenizer.cls_token
if cls_token is not None:
special_tokens_dict["cls_token"] = cls_token
elif self.tokenizer.cls_token is None and self.tokenizer.bos_token:
special_tokens_dict["cls_token"] = self.tokenizer.bos_token
new_tokens_in_vocab = []
for token in [mask_token, bos_token, eos_token, pad_token, sep_token, cls_token, unk_token]:
if token is not None and token not in self.tokenizer.get_vocab():
new_tokens_in_vocab.append(token)
if len(new_tokens_in_vocab) > 0:
"""
Special tokens that were not previously included in the tokenizer's vocabulary file will be added to
the vocabulary and, as a result, the model should be resized, for example:
# define your model
pretrained_model_name = 'roberta-base'
model = nemo_nlp.modules.get_lm_model(pretrained_model_name=pretrained_model_name)
# define pretrained tokenizer
tokenizer_default = nemo_nlp.modules.get_tokenizer(tokenizer_name=pretrained_model_name)
special_tokens = {'bos_token': '<BOS>',
'cls_token': '<CLS>',
'additional_special_tokens': ['<MY_NER_TOKEN>', '<ANOTHER_TOKEN>']}
tokenizer_default.add_special_tokens(special_tokens_dict=special_tokens)
# resize your model so that the embeddings for newly added tokens are updated during training/finetuning
model.resize_token_embeddings(tokenizer_default.vocab_size)
See NLP_Tokenizers.ipynb for more details.
"""
logging.warning(
f'{new_tokens_in_vocab} \n will be added to the vocabulary.\n'
f'Please resize your model accordingly, '
f'see NLP_Tokenizers.ipynb for more details.'
)
self.add_special_tokens(special_tokens_dict)
self.space_sensitive = self.text_to_tokens('x y') != self.text_to_tokens('x') + self.text_to_tokens('y')
@property
def vocab_size(self):
return len(self.tokenizer)
def add_special_tokens(self, special_tokens_dict: dict) -> int:
"""
Adds a dictionary of special tokens (eos, pad, cls...). If special tokens are NOT in the vocabulary, they are added
to it (indexed starting from the last index of the current vocabulary).
Args:
special_tokens_dict: dict of string. Keys should be in the list of predefined special attributes:
[``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``,
``additional_special_tokens``].
Tokens are only added if they are not already in the vocabulary.
Returns:
Number of tokens added to the vocabulary.
"""
num_tokens_added = self.tokenizer.add_special_tokens(special_tokens_dict)
if num_tokens_added > 0:
logging.info(f'{num_tokens_added} special tokens added, resize your model accordingly.')
for k in self.tokenizer.SPECIAL_TOKENS_ATTRIBUTES:
setattr(self, k, getattr(self.tokenizer, k, None))
return num_tokens_added
@property
def additional_special_tokens_ids(self):
"""Returns a list of the additional special tokens (excluding bos, eos, pad, unk). Used to return sentinel tokens for e.g. T5."""
return [self.token_to_id(token) for token in self.additional_special_tokens]
def text_to_tokens(self, text):
tokens = self.tokenizer.tokenize(text)
return tokens
def tokens_to_text(self, tokens):
text = self.tokenizer.convert_tokens_to_string(tokens)
return text
def token_to_id(self, token):
return self.tokens_to_ids([token])[0]
def tokens_to_ids(self, tokens):
ids = self.tokenizer.convert_tokens_to_ids(tokens)
return ids
def ids_to_tokens(self, ids):
tokens = self.tokenizer.convert_ids_to_tokens(ids)
return tokens
def text_to_ids(self, text):
tokens = self.text_to_tokens(text)
ids = self.tokens_to_ids(tokens)
return ids
def ids_to_text(self, ids):
tokens = self.ids_to_tokens(ids)
tokens_clean = [t for t in tokens if t not in self.tokenizer.all_special_tokens]
text = self.tokens_to_text(tokens_clean)
return text
@property
def vocab(self):
id2vocab = {v: k for k, v in self.tokenizer.vocab.items()}
return [id2vocab[i] for i in range(len(id2vocab))]
@property
def pad_id(self):
return self.tokens_to_ids([getattr(self, 'pad_token')])[0]
@property
def bos_id(self):
return self.tokens_to_ids([getattr(self, 'bos_token')])[0]
@property
def eos_id(self):
return self.tokens_to_ids([getattr(self, 'eos_token')])[0]
@property
def sep_id(self):
return self.tokens_to_ids([getattr(self, 'sep_token')])[0]
@property
def cls_id(self):
return self.tokens_to_ids([getattr(self, 'cls_token')])[0]
@property
def unk_id(self):
return self.tokens_to_ids([getattr(self, 'unk_token')])[0]
@property
def mask_id(self):
return self.tokens_to_ids([getattr(self, 'mask_token')])[0]
@property
def name(self):
return type(self.tokenizer).__name__
def save_vocabulary(self, save_directory: str, filename_prefix: str = None):
"""Saves tokenizer's vocabulary and other artifacts to the specified directory"""
return self.tokenizer.save_vocabulary(save_directory=save_directory, filename_prefix=filename_prefix)
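# Usage sketch (illustrative): the model name 'bert-base-uncased' is an assumed
# example and needs to be resolvable by `transformers`; any other HuggingFace
# tokenizer name works the same way.
#
#     tokenizer = AutoTokenizer(pretrained_model_name='bert-base-uncased')
#     ids = tokenizer.text_to_ids('Hello world!')
#     text = tokenizer.ids_to_text(ids)   # special tokens are stripped on decode
#     print(tokenizer.vocab_size, tokenizer.pad_id)
#
# Adding new special tokens grows the vocabulary, so a model sharing this tokenizer
# should be resized afterwards:
#
#     tokenizer.add_special_tokens({'additional_special_tokens': ['<MY_TOKEN>']})
#     # model.resize_token_embeddings(tokenizer.vocab_size)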
|
NeMo-main
|
nemo/collections/common/tokenizers/huggingface/auto_tokenizer.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# fmt: off
SUPPORTED_LOCALES = ["en-US", "de-DE", "es-ES"]
DEFAULT_PUNCTUATION = (
',', '.', '!', '?', '-',
':', ';', '/', '"', '(',
')', '[', ']', '{', '}',
)
VITS_PUNCTUATION = (
',', '.', '!', '?', '-',
':', ';', '"', '«', '»',
'“', '”', '¡', '¿', '—',
'…',
)
GRAPHEME_CHARACTER_SETS = {
"en-US": (
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z'
),
"es-ES": (
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z', 'Á', 'É', 'Í', 'Ñ',
'Ó', 'Ú', 'Ü'
),
# ref: https://en.wikipedia.org/wiki/German_orthography#Alphabet
"de-DE": (
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z', 'Ä', 'Ö', 'Ü', 'ẞ',
),
}
IPA_CHARACTER_SETS = {
"en-US": (
'a', 'b', 'd', 'e', 'f', 'h', 'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'v', 'w',
'x', 'z', 'æ', 'ð', 'ŋ', 'ɐ', 'ɑ', 'ɔ', 'ə', 'ɚ',
'ɛ', 'ɜ', 'ɡ', 'ɪ', 'ɬ', 'ɹ', 'ɾ', 'ʃ', 'ʊ', 'ʌ',
'ʒ', 'ʔ', 'ʲ', '̃', '̩', 'θ', 'ᵻ'
),
"es-ES": (
'a', 'b', 'd', 'e', 'f', 'h', 'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'w', 'x',
'ð', 'ŋ', 'ɛ', 'ɡ', 'ɣ', 'ɪ', 'ɲ', 'ɾ', 'ʃ', 'ʊ',
'ʎ', 'ʒ', 'ʝ', 'β', 'θ'
),
"de-DE": (
'1', 'a', 'b', 'd', 'e', 'f', 'h', 'i', 'j', 'k',
'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z', 'ç', 'ø', 'ŋ', 'œ', 'ɐ', 'ɑ',
'ɒ', 'ɔ', 'ə', 'ɛ', 'ɜ', 'ɡ', 'ɪ', 'ɹ', 'ɾ', 'ʃ',
'ʊ', 'ʌ', 'ʒ', '̃', 'θ'
)
}
GRAPHEME_CHARACTER_CASES = ["upper", "lower", "mixed"]
# fmt: on
def validate_locale(locale):
if locale not in SUPPORTED_LOCALES:
raise ValueError(f"Unsupported locale '{locale}'. " f"Supported locales {SUPPORTED_LOCALES}")
def get_grapheme_character_set(locale: str, case: str = "upper") -> str:
if locale not in GRAPHEME_CHARACTER_SETS:
raise ValueError(
f"Grapheme character set not found for locale '{locale}'. "
f"Supported locales {GRAPHEME_CHARACTER_SETS.keys()}"
)
charset_str_origin = ''.join(GRAPHEME_CHARACTER_SETS[locale])
if case == "upper":
# Calling .upper() directly would convert 'ß' into 'SS' according to https://bugs.python.org/issue30810.
charset_str = charset_str_origin.replace('ß', 'ẞ').upper()
elif case == "lower":
charset_str = charset_str_origin.lower()
elif case == "mixed":
charset_str = charset_str_origin.replace('ß', 'ẞ').upper() + charset_str_origin.lower()
else:
raise ValueError(
f"Grapheme character case not found: '{case}'. Supported cases are {GRAPHEME_CHARACTER_CASES}"
)
return charset_str
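# Note (illustrative): the 'ß' -> 'ẞ' replacement above sidesteps Python's default
# uppercasing, which expands 'ß' to 'SS':
#
#     'straße'.upper()                      # -> 'STRASSE'
#     'straße'.replace('ß', 'ẞ').upper()    # -> 'STRAẞE'
#     get_grapheme_character_set("de-DE")   # upper-case charset keeps the single 'ẞ'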
def get_ipa_character_set(locale):
if locale not in IPA_CHARACTER_SETS:
raise ValueError(
f"IPA character set not found for locale '{locale}'. " f"Supported locales {IPA_CHARACTER_SETS.keys()}"
)
char_set = set(IPA_CHARACTER_SETS[locale])
return char_set
def get_ipa_punctuation_list(locale):
if locale is None:
return sorted(list(DEFAULT_PUNCTUATION))
validate_locale(locale)
punct_set = set(DEFAULT_PUNCTUATION)
# TODO @xueyang: verify potential mismatches with locale-specific punctuation sets used
# in nemo_text_processing.text_normalization.en.taggers.punctuation.py
if locale in ["de-DE", "es-ES"]:
# ref: https://en.wikipedia.org/wiki/Guillemet#Uses
punct_set.update(['«', '»', '‹', '›'])
if locale == "de-DE":
# ref: https://en.wikipedia.org/wiki/German_orthography#Punctuation
punct_set.update(
[
'„', # double low-9 quotation mark, U+201E, decimal 8222
'“', # left double quotation mark, U+201C, decimal 8220
'‚', # single low-9 quotation mark, U+201A, decimal 8218
'‘', # left single quotation mark, U+2018, decimal 8216
'‒', # figure dash, U+2012, decimal 8210
'–', # en dash, U+2013, decimal 8211
'—', # em dash, U+2014, decimal 8212
]
)
elif locale == "es-ES":
# ref: https://en.wikipedia.org/wiki/Spanish_orthography#Punctuation
punct_set.update(['¿', '¡'])
punct_list = sorted(list(punct_set))
return punct_list
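# Usage sketch (illustrative): building the per-locale symbol inventories used by
# TTS tokenizers. The import path assumes this module's location in the repository.
#
#     from nemo.collections.common.tokenizers.text_to_speech.ipa_lexicon import (
#         get_grapheme_character_set, get_ipa_character_set, get_ipa_punctuation_list,
#     )
#
#     get_grapheme_character_set("en-US", case="lower")  # -> 'abcdefghijklmnopqrstuvwxyz'
#     get_ipa_character_set("es-ES")                     # -> set of Spanish IPA symbols
#     get_ipa_punctuation_list("de-DE")                  # DEFAULT_PUNCTUATION plus '„', '“', '–', ...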
|
NeMo-main
|
nemo/collections/common/tokenizers/text_to_speech/ipa_lexicon.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unicodedata
from builtins import str as unicode
from typing import List, Tuple
__all__ = [
"chinese_text_preprocessing",
"english_text_preprocessing",
"any_locale_text_preprocessing",
"spanish_text_preprocessing",
"any_locale_word_tokenize",
"english_word_tokenize",
"LATIN_CHARS_ALL",
"normalize_unicode_text",
]
# Derived from LJSpeech
_synoglyphs = {
"'": ['’'],
'"': ['”', '“'],
}
SYNOGLYPH2ASCII = {g: asc for asc, glyphs in _synoglyphs.items() for g in glyphs}
# Example of parsing by groups via _WORDS_RE_EN.
# Regular expression pattern groups:
# 1st group -- valid English words,
# 2nd group -- any substring from | to | (must not be nested), useful when you want to leave a sequence unchanged,
# 3rd group -- punctuation marks or whitespaces.
# Text (first line) and mask of groups for every char (second line).
# config file must contain |EY1 EY1|, B, C, D, E, F, and G.
# 111111311113111131111111322222222233133133133133133111313
# define char set based on https://en.wikipedia.org/wiki/List_of_Unicode_characters
LATIN_ALPHABET_BASIC = "A-Za-z"
ACCENTED_CHARS = "À-ÖØ-öø-ÿ"
LATIN_CHARS_ALL = f"{LATIN_ALPHABET_BASIC}{ACCENTED_CHARS}"
_WORDS_RE_EN = re.compile(
fr"([{LATIN_ALPHABET_BASIC}]+(?:[{LATIN_ALPHABET_BASIC}\-']*[{LATIN_ALPHABET_BASIC}]+)*)|(\|[^|]*\|)|([^{LATIN_ALPHABET_BASIC}|]+)"
)
_WORDS_RE_ANY_LOCALE = re.compile(
fr"([{LATIN_CHARS_ALL}]+(?:[{LATIN_CHARS_ALL}\-']*[{LATIN_CHARS_ALL}]+)*)|(\|[^|]*\|)|([^{LATIN_CHARS_ALL}|]+)"
)
def english_text_preprocessing(text, lower=True):
text = unicode(text)
text = ''.join(char for char in unicodedata.normalize('NFD', text) if unicodedata.category(char) != 'Mn')
text = ''.join(char if char not in SYNOGLYPH2ASCII else SYNOGLYPH2ASCII[char] for char in text)
if lower:
text = text.lower()
return text
def any_locale_text_preprocessing(text: str) -> str:
"""
Normalize unicode text with "NFC", and convert the right single quotation mark (U+2019, decimal 8217) to an apostrophe.
Args:
text (str): the original input sentence.
Returns: normalized text (str).
"""
res = []
for c in normalize_unicode_text(text):
if c in ['’']: # right single quotation mark (U+2019, decimal 8217) as an apostrophe
res.append("'")
else:
res.append(c)
return ''.join(res)
def normalize_unicode_text(text: str) -> str:
"""
TODO @xueyang: Applying the NFC form may be too aggressive, since accented characters that do not exist in the
predefined German alphabet (nemo.collections.common.tokenizers.text_to_speech.ipa_lexicon.IPA_CHARACTER_SETS),
such as 'é', would simply be ignored. This is not expected. A better solution is to add an extra NFD normalization
to discard the diacritics and treat 'é' and 'e' as producing similar pronunciations.
Note that the tokenizer needs to run `unicodedata.normalize("NFC", x)` before calling `encode` function,
especially for the characters that have diacritics, such as 'ö' in the German alphabet. 'ö' can be encoded as
b'\xc3\xb6' (one char) as well as b'o\xcc\x88' (two chars). Without the normalization of composing two chars
together and without a complete predefined set of diacritics, when the tokenizer reads the input sentence
char-by-char, it would skip the combining diaeresis b'\xcc\x88', resulting in indistinguishable pronunciations
for 'ö' and 'o'.
Args:
text (str): the original input sentence.
Returns:
NFC normalized sentence (str).
"""
# normalize word with NFC form
if not unicodedata.is_normalized("NFC", text):
text = unicodedata.normalize("NFC", text)
return text
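# Illustrative check: NFC composes the decomposed form of 'ö' ('o' + COMBINING DIAERESIS,
# i.e. b'o\xcc\x88') back into a single code point, so both spellings tokenize identically:
#
#     normalize_unicode_text("o\u0308") == normalize_unicode_text("ö") == "ö"   # single code point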
def _word_tokenize(words: List[Tuple[str, str, str]], is_lower: bool = False) -> List[Tuple[List[str], bool]]:
"""
Process a list of words and attach indicators showing if each word is unchangeable or not. Each word representation
can be one of valid word, any substring starting from | to | (unchangeable word), or punctuation marks including
whitespaces. This function will split unchanged strings by whitespaces and return them as `List[str]`. For example,
.. code-block:: python
[
('Hello', '', ''), # valid word
('', '', ' '), # punctuation mark
('World', '', ''), # valid word
('', '', ' '), # punctuation mark
('', '|NVIDIA unchanged|', ''), # unchangeable word
('', '', '!') # punctuation mark
]
will be converted into,
.. code-block:: python
[
(["Hello"], False),
([" "], False),
(["World"], False),
([" "], False),
(["NVIDIA", "unchanged"], True),
(["!"], False)
]
Args:
words (List[Tuple[str, str, str]]): a list of tuples like `(maybe_word, maybe_without_changes, maybe_punct)` where each
element corresponds to a non-overlapping match of either `_WORDS_RE_EN` or `_WORDS_RE_ANY_LOCALE`.
is_lower (bool): a flag that lowercases all words. By default, it is False.
Returns: List[Tuple[List[str], bool]], a list of tuples like `(a list of words, is_unchanged)`.
"""
result = []
for word in words:
maybe_word, maybe_without_changes, maybe_punct = word
without_changes = False
if maybe_word != '':
if is_lower:
token = [maybe_word.lower()]
else:
token = [maybe_word]
elif maybe_punct != '':
token = [maybe_punct]
elif maybe_without_changes != '':
without_changes = True
token = maybe_without_changes[1:-1].split(" ")
else:
raise ValueError(
f"This is not expected. Found empty string: <{word}>. "
f"Please validate your regular expression pattern '_WORDS_RE_EN' or '_WORDS_RE_ANY_LOCALE'."
)
result.append((token, without_changes))
return result
def english_word_tokenize(text: str) -> List[Tuple[List[str], bool]]:
words = _WORDS_RE_EN.findall(text)
return _word_tokenize(words, is_lower=True)
def any_locale_word_tokenize(text: str) -> List[Tuple[List[str], bool]]:
words = _WORDS_RE_ANY_LOCALE.findall(text)
return _word_tokenize(words)
def spanish_text_preprocessing(text: str) -> str:
return text.lower()
def chinese_text_preprocessing(text: str) -> str:
return text
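# Usage sketch (illustrative): preprocessing followed by word tokenization. Substrings
# wrapped in pipes are passed through unchanged, as described in `_word_tokenize`.
#
#     text = any_locale_text_preprocessing("Hello World |NVIDIA unchanged|!")
#     any_locale_word_tokenize(text)
#     # -> [(['Hello'], False), ([' '], False), (['World'], False), ([' '], False),
#     #     (['NVIDIA', 'unchanged'], True), (['!'], False)]
#
#     english_word_tokenize("Don't stop")
#     # -> [(["don't"], False), ([' '], False), (['stop'], False)]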
|
NeMo-main
|
nemo/collections/common/tokenizers/text_to_speech/tokenizer_utils.py
|