# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A convolutional GRU layer for DriveNet gridbox TensorRT models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import backend as K
from nvidia_tao_tf1.core.models.templates.conv_gru_2d import ConvGRU2D
import tensorflow as tf
class ConvGRU2DExport(ConvGRU2D):
"""TRT compatible class containing convolutional GRU operations on sequential tensors."""
def __init__(
self,
model_sequence_length_in_frames,
input_sequence_length_in_frames,
state_scaling,
input_shape,
initial_state_shape,
spatial_kernel_height,
spatial_kernel_width,
kernel_regularizer=None,
bias_regularizer=None,
is_stateful=True,
name="conv_gru_2d_export",
output_type="last",
**kwargs
):
"""Constructor for ConvGRU2D export layer.
Args:
model_sequence_length_in_frames (int): How many steps the GRU will be tracked for
gradient computation (starting from the last frame). That is, a stop_gradient
operation will be applied just before the last model_sequence_length_in_frames
steps of the input sequence.
input_sequence_length_in_frames (int): Length of the sequence that is presented to the
GRU in the minibatch. The GRU might, however, stop the gradient flow in the middle
of the sequence. The gradients will propagate backwards through the last
model_sequence_length_in_frames steps of the input.
state_scaling (float): A constant scaling factor in range [0,1] in order to simulate
an exponential decay.
input_shape (list / tuple): Input tensor shape (N, C, H, W), where
[N: batch_size * sequence_length_in_frames / None,
C: input_channels,
H: input_height,
W: input_width].
initial_state_shape (list / tuple): Shape of the initial state (M, F, H, W), where
[M: batch_size / None,
F: number_out_channels,
H: input_height,
W: input_width].
spatial_kernel_height (int): Height of the convolution kernel within the GRU.
spatial_kernel_width (int): Width of the convolution kernel within the GRU.
kernel_regularizer (keras.regularizers.Regularizer instance):
Regularizer to be applied to convolution kernels.
bias_regularizer (keras.regularizers.Regularizer instance):
Regularizer to be applied to biases.
is_stateful (bool): Whether the GRU keeps track of state from minibatch to minibatch.
name (str): Name of the layer.
output_type (str): Whether to give an output for all frames in the sequences or only
for the last frame. Unused in this layer since input and output consist of only
a single frame.
Raises:
AssertionError: If height and width for input and state shapes are not equal or
if state_scaling is not in range [0, 1] or if input_sequence_length_in_frames is
less than model_sequence_length_in_frames.
"""
super(ConvGRU2DExport, self).__init__(
model_sequence_length_in_frames,
input_sequence_length_in_frames,
state_scaling,
input_shape,
initial_state_shape,
spatial_kernel_height,
spatial_kernel_width,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
is_stateful=is_stateful,
name=name,
**kwargs
)
self.is_export_model = True
self.is_stateful = is_stateful
self.state_output_name = "state_output"
self._set_initial_state()
self._check_export_layer_sanity()
def _check_export_layer_sanity(self):
"""For GRU/TRT export layer must be stateful and sequence lengths must be one.
Raises:
AssertionError: if either of the sequence length parameters differs from 1
or if the layer is not stateful.
"""
assert (
self.input_sequence_length_in_frames == self.model_sequence_length_in_frames
), (
"Input sequence length and model sequence length must be the same for the "
"TRT export layer."
)
if self.is_stateful:
assert (
self.input_sequence_length_in_frames == 1
), "Input sequence length must be 1 for the TRT export layer."
assert (
self.model_sequence_length_in_frames == 1
), "Model sequence length must be 1 for the TRT export layer."
def _set_initial_state(self):
"""Initialize the state for the minibatch.
Note that initial_state refers to the initial state of the minibatch.
For the export model, the initial state is a placeholder to be fed from
outside.
"""
self.state_input_name = "/".join([self.name, "state_placeholder"])
# In the TRT model, initial state needs to be provided externally.
if not hasattr(self, "_initial_state"):
self._initial_state = tf.compat.v1.placeholder(
K.floatx(),
shape=[1] + self.initial_state_shape[1:],
name=self.state_input_name,
)
if not hasattr(self, "_past_features"):
self._past_feature_count = self.input_sequence_length_in_frames - 1
self._past_features = []
self._past_feature_names = []
# Keep them in the order of time stamps, i.e. t-3, t-2, t-1
for i in reversed(range(self._past_feature_count)):
past_feature_name = "/".join([self.name, "past_feature", str(i + 1)])
self._past_features.append(
tf.compat.v1.placeholder(
K.floatx(),
shape=self.past_feature_shape,
name=past_feature_name,
)
)
self._past_feature_names.append(past_feature_name)
def call(self, input_tensor):
"""Call the GRU network.
Args:
input_tensor (tensor): Input to the GRU with shape (N, C, H, W), where
[N: batch_size,
C: number_input_channels,
H: grid_height,
W: grid_width].
Returns:
state: Final state of the GRU after input_sequence_length_in_frames cell operations,
with shape (M, F, H, W), where
[M: batch_size,
F: number_out_channels,
H: input_height,
W: input_width].
"""
if self.is_stateful:
# Set the initial state and the computation.
state = tf.identity(self._initial_state)
state = self._step(input_tensor, state, 0)
else:
# inputs_per_time contains [... past_feature/2, past_feature/1, current_feature]
inputs_per_time = self._past_features + [input_tensor]
state = None
for step, instant_input in enumerate(inputs_per_time):
state = self._step(instant_input, state, step)
return state
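# Illustrative usage sketch (hypothetical shapes and values, not part of the original module).
# The export layer must be stateful with both sequence lengths equal to 1; on construction it
# creates a TF1 placeholder named "<layer_name>/state_placeholder" that has to be fed with the
# previous state at inference time:
#
#     gru_export = ConvGRU2DExport(
#         model_sequence_length_in_frames=1,
#         input_sequence_length_in_frames=1,
#         state_scaling=0.9,
#         input_shape=[None, 64, 30, 60],
#         initial_state_shape=[None, 32, 30, 60],
#         spatial_kernel_height=3,
#         spatial_kernel_width=3,
#     )
#     output = gru_export(input_tensor)  # input_tensor holds a single frame of shape (N, 64, 30, 60).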
# file: nvidia_tao_tf1/core/models/templates/conv_gru_2d_export.py (repo: tao_tensorflow1_backend-main)
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus utilities for model templates."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import re
import tempfile
from nvidia_tao_tf1.core.decorators.arg_scope import add_arg_scope
from nvidia_tao_tf1.core.models.templates.quantized_conv2d import QuantizedConv2D
from nvidia_tao_tf1.core.utils import get_uid
if os.environ.get("TF_KERAS"):
from tensorflow import keras
else:
import keras
logger = logging.getLogger(__name__)
bn_axis_map = {"channels_last": 3, "channels_first": 1}
class SUBBLOCK_IDS(object):
"""A operator to get index of subblock, overload [] operation."""
def __getitem__(self, key):
"""
Generate a subblock ID and return.
Args:
key (int): an index used to generate the subblock ID.
"""
cur = key
subblock_id = ""
while cur >= 0:
ch = chr(ord("a") + cur % 26)
subblock_id = ch + subblock_id
cur = cur // 26 - 1
return subblock_id
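# Illustrative mapping (comment added for clarity, not in the original source): indices map to
# spreadsheet-style letter IDs, e.g. SUBBLOCK_IDS()[0] -> "a", [25] -> "z", [26] -> "aa",
# [27] -> "ab".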
def get_batchnorm_axis(data_format):
"""Convert a data_format string to the correct index in a 4 dimensional tensor.
Args:
data_format (str): either 'channels_last' or 'channels_first'.
Returns:
int: the axis corresponding to the `data_format`.
"""
return bn_axis_map[data_format]
def add_dense_head(model, inputs, nclasses, activation):
"""
Create a model that stacks a dense head on top of another model. The backbone output is flattened first.
Args:
model (Model): the model on top of which the head should be created.
inputs (tensor): the inputs (tensor) to the previously supplied model.
nclasses (int): the number of outputs of the dense layer.
activation (string): activation function to use e.g. 'softmax' or 'linear'.
Returns:
Model: A model with the head stacked on top of the `model` input.
"""
x = model.outputs[0]
head_name = "head_fc%d" % (nclasses)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(nclasses, activation=activation, name=head_name)(x)
model = keras.models.Model(
inputs=inputs, outputs=x, name="%s_fc%d" % (model.name, nclasses)
)
return model
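# Usage sketch (hypothetical names, not part of the original module):
#     classifier = add_dense_head(backbone, inputs, nclasses=10, activation="softmax")
# flattens the backbone output and appends a Dense layer named "head_fc10"; the resulting model
# is named "<backbone_name>_fc10".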
@add_arg_scope
def conv2D_bn_activation(
x,
use_batch_norm,
filters,
kernel_size,
strides=(1, 1),
activation_type="relu",
activation_kwargs=None,
data_format=None,
kernel_regularizer=None,
bias_regularizer=None,
layer_name=None,
use_bias=True,
quantize=False,
bitwidth=8,
):
"""
Add a conv layer, followed by batch normalization and activation.
Args:
x (tensor): the inputs (tensor) to the convolution layer.
use_batch_norm (bool): use batch norm.
filters (int): the number of filters.
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
activation_type (str): activation function name, e.g., 'relu'.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
data_format (str): either 'channels_last' or 'channels_first'.
kernel_regularizer (`regularizer`): regularizer for the kernels.
bias_regularizer (`regularizer`): regularizer for the biases.
layer_name(str): layer name prefix.
use_bias (bool): whether or not to use bias in the convolutional layer.
quantize (bool): A boolean flag to determine whether to use quantized conv2d or not.
bitwidth (integer): quantization bitwidth.
Returns:
x (tensor): the output tensor of the convolution layer.
"""
if layer_name is not None:
layer_name = "%s_m%d" % (layer_name, filters)
if quantize:
x = QuantizedConv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding="same",
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name=layer_name,
use_bias=use_bias,
bitwidth=bitwidth,
)(x)
else:
x = keras.layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding="same",
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name=layer_name,
use_bias=use_bias,
)(x)
if use_batch_norm:
if layer_name is not None:
layer_name += "_bn"
x = keras.layers.BatchNormalization(
axis=get_batchnorm_axis(data_format), name=layer_name
)(x)
if activation_type:
activation_kwargs = activation_kwargs or {}
x = add_activation(activation_type, **activation_kwargs)(x)
return x
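# Usage sketch (hypothetical tensor and names, not part of the original module):
#     y = conv2D_bn_activation(x, use_batch_norm=True, filters=64, kernel_size=(3, 3),
#                              strides=(2, 2), data_format="channels_first",
#                              layer_name="block_1")
# stacks Conv2D ("block_1_m64") -> BatchNormalization ("block_1_m64_bn") -> ReLU on top of x.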
@add_arg_scope
def deconv2D_bn_activation(
x,
use_batch_norm,
filters,
kernel_size,
strides=(1, 1),
activation_type="relu",
activation_kwargs=None,
data_format=None,
kernel_regularizer=None,
bias_regularizer=None,
layer_name=None,
use_bias=True,
):
"""
Add a deconv layer, followed by batch normalization and activation.
Args:
x (tensor): the inputs (tensor) to the convolution layer.
use_batch_norm (bool): use batch norm.
filters (int): the number of filters.
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
activation_type (str): activation function name, e.g., 'relu'.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
data_format (str): either 'channels_last' or 'channels_first'.
kernel_regularizer (`regularizer`): regularizer for the kernels.
bias_regularizer (`regularizer`): regularizer for the biases.
layer_name(str): layer name prefix.
use_bias (bool): whether or not to use bias in the convolutional layer.
Returns:
x (tensor): the output tensor of the convolution layer.
"""
if layer_name is not None:
layer_name = "%s_m%d" % (layer_name, filters)
x = keras.layers.Conv2DTranspose(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding="same",
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name=layer_name,
use_bias=use_bias,
)(x)
if use_batch_norm:
if layer_name is not None:
layer_name += "_bn"
x = keras.layers.BatchNormalization(
axis=get_batchnorm_axis(data_format), name=layer_name
)(x)
if activation_type:
activation_kwargs = activation_kwargs or {}
x = add_activation(activation_type, **activation_kwargs)(x)
return x
def add_conv_layer(
model,
inputs,
use_batch_norm,
filters,
kernel_size,
strides=(1, 1),
activation_type="relu",
activation_kwargs=None,
data_format=None,
kernel_regularizer=None,
bias_regularizer=None,
layer_name=None,
use_bias=True,
quantize=False,
bitwidth=8,
):
"""
Add a conv layer to a model.
Args:
model (Model): the model on top of which the layer should be created.
inputs (tensor): the inputs (tensor) to the previously supplied model.
use_batch_norm (bool): use batch norm.
filters (int): the number of filters.
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
activation_type (str): activation function name, e.g., 'relu'.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
data_format (str): either 'channels_last' or 'channels_first'.
kernel_regularizer (`regularizer`): regularizer for the kernels.
bias_regularizer (`regularizer`): regularizer for the biases.
layer_name(str): layer name prefix.
use_bias (bool): whether or not to use bias in the convolutional layer.
quantize (bool): A boolean flag to determine whether to use quantized conv2d or not.
bitwidth (integer): quantization bitwidth.
Returns:
Model: A model with a conv layer stacked on top of the `model` input.
"""
if data_format is None:
data_format = keras.backend.image_data_format()
x = model.outputs[0]
if layer_name is not None:
layer_name = "%s_m%d" % (layer_name, filters)
x = conv2D_bn_activation(
x,
use_batch_norm=use_batch_norm,
filters=filters,
kernel_size=kernel_size,
strides=strides,
activation_type=activation_type,
activation_kwargs=activation_kwargs,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
layer_name=layer_name,
use_bias=use_bias,
quantize=quantize,
bitwidth=bitwidth,
)
model = keras.models.Model(inputs=inputs, outputs=x, name="%s_conv" % (model.name))
return model
def add_conv_head(
model,
inputs,
nmaps,
kernel_size,
strides,
activation_type="sigmoid",
activation_kwargs=None,
data_format=None,
quantize=False,
bitwidth=8,
):
"""
Create a model that stacks a convolutional head on top of another model.
Args:
model (Model): the model on top of which the head should be created.
inputs (tensor): the inputs (tensor) to the previously supplied model.
nmaps (int): the number of output maps (filters) the convolution should have.
kernel_size (int, int): the size of the kernel for this layer.
strides (int, int): the stride for this layer.
activation_type (str): the activation function after this layer.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
data_format (str): either 'channels_last' or 'channels_first'.
quantize (bool): A boolean flag to determine whether to use quantized conv2d or not.
bitwidth (integer): quantization bitwidth.
Returns:
Model: A model with the head stacked on top of the `model` input.
"""
return add_conv_layer(
model,
inputs,
use_batch_norm=False,
filters=nmaps,
kernel_size=kernel_size,
strides=strides,
activation_type=activation_type,
activation_kwargs=activation_kwargs,
data_format=data_format,
kernel_regularizer=None,
bias_regularizer=None,
layer_name="head_conv",
quantize=quantize,
bitwidth=bitwidth,
)
def add_deconv_layer(
model,
inputs,
use_batch_norm,
filters,
upsampling,
activation_type="relu",
activation_kwargs=None,
data_format=None,
kernel_regularizer=None,
bias_regularizer=None,
layer_name=None,
padding="same",
):
"""
Add a deconv layer.
Args:
model (Model): the model on top of which the layer should be created.
inputs (tensor): the inputs (tensor) to the previously supplied model.
use_batch_norm (bool): use batch norm.
filters (int): the number of filters.
upsampling (int): the amount of upsampling the transpose convolution should do.
activation_type (str): activation function name, e.g., 'relu'.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
data_format (str): either 'channels_last' or 'channels_first'.
kernel_regularizer (`regularizer`): regularizer for the kernels.
bias_regularizer (`regularizer`): regularizer for the biases.
layer_name (str): layer name prefix.
Returns:
Model: A model with a deconv layer stacked on top of the `model` input.
"""
if data_format is None:
data_format = keras.backend.image_data_format()
x = model.outputs[0]
if layer_name is not None:
layer_name = "%s_m%d_d%d" % (layer_name, filters, upsampling)
x = keras.layers.Conv2DTranspose(
filters=filters,
kernel_size=(upsampling, upsampling),
strides=(upsampling, upsampling),
padding=padding,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name=layer_name,
)(x)
if use_batch_norm:
if layer_name is not None:
layer_name += "_bn"
x = keras.layers.BatchNormalization(
axis=get_batchnorm_axis(data_format), name=layer_name
)(x)
if activation_type:
activation_kwargs = activation_kwargs or {}
x = add_activation(activation_type, **activation_kwargs)(x)
model = keras.models.Model(
inputs=inputs, outputs=x, name="%s_d%d" % (model.name, upsampling)
)
return model
def add_deconv_head(
model,
inputs,
nmaps,
upsampling,
activation_type="sigmoid",
activation_kwargs=None,
data_format=None,
padding="same",
):
"""
Create a model that stacks a deconvolutional (transpose conv) head on top of another model.
Args:
model (Model): the model on top of which the head should be created.
inputs (tensor): the inputs (tensor) to the previously supplied model.
nmaps (int): the number of output maps (filters) the transpose convolution should
have.
upsampling (int): the amount of upsampling the transpose convolution should do.
activation_type (str): activation function name, e.g., 'softmax'.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
data_format (str): either 'channels_last' or 'channels_first'.
Returns:
Model: A model with the head stacked on top of the `model` input.
"""
return add_deconv_layer(
model,
inputs,
use_batch_norm=False,
filters=nmaps,
upsampling=upsampling,
activation_type=activation_type,
activation_kwargs=activation_kwargs,
data_format=data_format,
kernel_regularizer=None,
bias_regularizer=None,
layer_name="head_deconv",
padding=padding,
)
def add_activation(activation_type, **kwargs):
"""
Create an activation layer based on activation type and additional arguments.
Note that the needed kwargs depend on the activation type.
Args:
activation_type (str): String to indicate activation type.
kwargs (dict): Additional keyword arguments depending on the activation type.
Returns:
activation_layer (a subclass of keras.layers.Layer): The layer type
depends on activation_type.
"""
if activation_type == "relu-n":
max_value = kwargs["max_value"]
activation_layer = keras.layers.ReLU(max_value=max_value)
elif activation_type == "lrelu":
alpha = kwargs["alpha"]
activation_layer = keras.layers.LeakyReLU(alpha=alpha)
elif activation_type == "elu":
alpha = kwargs["alpha"]
activation_layer = keras.layers.ELU(alpha=alpha)
else:
activation_layer = keras.layers.Activation(activation_type, **kwargs)
return activation_layer
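# Examples of the supported activation types (illustrative, not part of the original module):
#     add_activation("relu")                   -> keras.layers.Activation("relu")
#     add_activation("relu-n", max_value=6.0)  -> keras.layers.ReLU(max_value=6.0)
#     add_activation("lrelu", alpha=0.2)       -> keras.layers.LeakyReLU(alpha=0.2)
#     add_activation("elu", alpha=1.0)         -> keras.layers.ELU(alpha=1.0)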
def count_layers_by_class_name(model, class_names):
"""Count the number of layers in a model (recursively) having any of the given class_names."""
n_layers = 0
for layer in model.layers:
if layer.__class__.__name__ in class_names:
n_layers += 1
if isinstance(layer, keras.models.Model):
# The layer is a model: recurse.
n_layers += count_layers_by_class_name(layer, class_names)
return n_layers
def clone_model(model, inputs=None, copy_weights=False):
"""
Clone a model and optionally replace the inputs.
Args:
model (Model): The model to clone.
inputs (list of tensors): The tensors to apply the new model to. If None, the model will
be returned with placeholders.
copy_weights (bool): Flag that determines whether the old model's weights should be
copied into the new one.
Returns:
new_model (Model): updated model.
"""
if inputs is not None:
# Get all the input placeholders.
input_placeholders = [
i
for i in range(len(model.layers))
if ("is_placeholder" in dir(model.layers[i]))
and (model.layers[i].is_placeholder is True)
]
if len(inputs) != len(input_placeholders):
raise ValueError(
"Number of model inputs does not match number of given inputs."
)
# Rename the input placeholders to avoid name clashes when cloning.
for placeholder in input_placeholders:
model.layers[placeholder].name = "input_placeholder_%d" % placeholder
new_model = keras.models.clone_model(model, inputs)
# Update the node references in the graph.
for placeholder in input_placeholders:
to_remove = [l.name for l in new_model.layers].index(
"input_placeholder_%d" % placeholder
)
to_connect = [
len(n.inbound_layers)
for n in new_model.layers[to_remove]._inbound_nodes
].index(1)
new_model.layers[to_remove + 1]._inbound_nodes = []
new_model.layers[to_remove + 1]._inbound_nodes = [
new_model.layers[to_remove]._inbound_nodes[to_connect]
]
new_model.layers.remove(new_model.layers[to_remove])
else:
new_model = keras.models.clone_model(model)
if copy_weights:
new_model.set_weights(model.get_weights())
return new_model
def update_config(model, config, name_pattern=None, custom_objects=None):
"""
Update the configuration of an existing model.
In order to update the configuration of only certain layers,
a name pattern (regular expression) may be provided.
Args:
model (Model): the model to update the configuration of.
config (dict): dictionary of layer attributes to update.
name_pattern (str): pattern to match layers against. Those that
do not match will not be updated.
custom_objects (dict): dictionary mapping names (strings) to custom
classes or functions to be considered during deserialization.
"""
# Loop through all layers and update those that have a matching config attribute.
for layer in model.layers:
if name_pattern is None or re.match(name_pattern, layer.name):
for name, value in config.items():
if hasattr(layer, name):
setattr(layer, name, value)
with tempfile.NamedTemporaryFile(delete=True) as f:
model.save(f.name)
new_model = keras.models.load_model(
f.name, custom_objects=custom_objects, compile=False
)
return new_model
def update_regularizers(model, kernel_regularizer, bias_regularizer, name_pattern=None):
"""
Update the weight decay regularizers of an existing model.
Note that the input tensors to apply the new model to must be different
from those of the original model. This is because when Keras
clones a model it retains the original input layer and adds an extra one
on top.
In order to update the regularizers of only certain layers,
a name pattern (regular expression) may be provided.
Args:
model (Model): the model to update the regularizers of.
kernel_regularizer (object): regularizer to apply to kernels.
bias_regularizer (object): regularizer to apply to biases.
name_pattern (str): pattern to match layers against. Those that
do not match will not be updated.
"""
config = {
"bias_regularizer": bias_regularizer,
"kernel_regularizer": kernel_regularizer,
}
return update_config(model, config, name_pattern)
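# Usage sketch (hypothetical values, not part of the original module): apply L2 weight decay to
# every layer whose name starts with "block", leaving the rest untouched:
#     new_model = update_regularizers(model,
#                                     kernel_regularizer=keras.regularizers.l2(1.0e-4),
#                                     bias_regularizer=None,
#                                     name_pattern="block.*")
# Note that update_config round-trips the model through a temporary save/load so that the new
# regularizers take effect.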
def performance_test_model(inputs, data_format=None, activation_type="relu"):
"""Construct a model with 1x1 max pooling with stride 16 for performance diagnostics.
Args:
inputs (tensor): The input tensor `x`.
data_format (string): Either 'channels_last' (NHWC) or 'channels_first' (NCHW).
activation_type (string): Activation type to use.
Returns:
Model: the output model after applying 1x1 max pooling with stride 16 to the input `x`.
"""
if data_format is None:
data_format = keras.backend.image_data_format()
# Create HelNet-0 model which does max pooling with stride 16.
x = keras.layers.MaxPooling2D(
pool_size=(1, 1), strides=(16, 16), padding="same", data_format=data_format
)(inputs)
x = keras.layers.Activation(activation_type)(x)
model_name = "helnet0_s16"
model = keras.models.Model(inputs=inputs, outputs=x, name=model_name)
return model
class CNNBlock(object):
"""A functor for creating a block of layers."""
@add_arg_scope
def __init__(
self,
use_batch_norm,
use_shortcuts,
data_format,
kernel_regularizer,
bias_regularizer,
repeat,
stride,
subblocks,
index=None,
activation_type="relu",
activation_kwargs=None,
dilation_rate=(1, 1),
all_projections=False,
use_bias=True,
name_prefix=None,
quantize=False,
bitwidth=8,
):
"""
Initialization of the block functor object.
Args:
use_batch_norm (bool): whether batchnorm should be added after each convolution.
use_shortcuts (bool): whether shortcuts should be used. A typical ResNet by definition
uses shortcuts, but these can be toggled off to use the same ResNet topology without
the shortcuts.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
kernel_regularizer (keras.regularizers.Regularizer): regularizer to apply to kernels.
bias_regularizer (keras.regularizers.Regularizer): regularizer to apply to biases.
repeat (int): repeat number.
stride (int): The filter stride to be applied only to the first subblock (typically used
for downsampling). Strides are set to 1 for all layers beyond the first subblock.
subblocks (list of tuples): A list of tuples defining settings for each consecutive
convolution. Example:
`[(3, 64), (3, 64)]`
The two items in each tuple represent the kernel size and the number of filters in
a convolution, respectively. The convolutions are added in the order of the list.
index (int): the index of the block to be created.
activation_type (str): activation function type.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
dilation_rate (int or (int, int)): An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
all_projections (bool): A boolean flag to determine whether all shortcut connections
should be implemented as projection layers to facilitate full pruning or not.
use_bias (bool): whether the layer uses a bias vector.
name_prefix (str): Prefix the name with this value.
quantize (bool): A boolean flag to determine whether to use quantized conv2d or not.
bitwidth (integer): quantization bitwidth.
"""
self.use_batch_norm = use_batch_norm
self.use_shortcuts = use_shortcuts
self.all_projections = all_projections
self.data_format = data_format
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.activation_type = activation_type
self.activation_kwargs = activation_kwargs or {}
self.dilation_rate = dilation_rate
self.repeat = repeat
self.stride = stride
self.use_bias = use_bias
self.subblocks = subblocks
self.subblock_ids = SUBBLOCK_IDS()
self.quantize = quantize
self.bitwidth = bitwidth
if index is not None:
self.name = "block_%d" % index
else:
self.name = "block_%d" % (get_uid("block") + 1)
if name_prefix is not None:
self.name = name_prefix + "_" + self.name
def __call__(self, x):
"""Build the block.
Args:
x (tensor): input tensor.
Returns:
tensor: the output tensor after applying the block on top of input `x`.
"""
for i in range(self.repeat):
name = "%s%s_" % (self.name, self.subblock_ids[i])
if i == 0:
# Set the stride only on the first layer.
stride = self.stride
dimension_changed = True
else:
stride = 1
dimension_changed = False
x = self._subblocks(x, stride, dimension_changed, name_prefix=name)
return x
def _subblocks(self, x, stride, dimension_changed, name_prefix=None):
"""
Stack several convolutions in a specific sequence given by a list of subblocks.
Args:
x (tensor): the input tensor.
stride (int): The filter stride to be applied only to the first subblock (typically used
for downsampling). Strides are set to 1 for all layers beyond the first subblock.
dimension_changed (bool): This indicates whether the dimension has been changed for this
block. If this is true, then we need to account for the change, or else we will be
unable to re-add the shortcut tensor due to incompatible dimensions. This can be
solved by applying a (1x1) convolution [1]. (The paper also notes the possibility of
zero-padding the shortcut tensor to match any larger output dimension, but this is
not implemented.)
name_prefix (str): name prefix for all the layers created in this function.
Returns:
tensor: the output tensor after applying the ResNet block on top of input `x`.
"""
bn_axis = get_batchnorm_axis(self.data_format)
shortcut = x
nblocks = len(self.subblocks)
for i in range(nblocks):
kernel_size, filters = self.subblocks[i]
if i == 0:
strides = (stride, stride)
else:
strides = (1, 1)
# Keras doesn't support dilation_rate != 1 if stride != 1.
dilation_rate = self.dilation_rate
if strides != (1, 1) and dilation_rate != (1, 1):
dilation_rate = (1, 1)
logger.warning(
"Dilation rate {} is incompatible with stride {}. "
"Setting dilation rate to {} for layer {}conv_{}.".format(
self.dilation_rate, strides, dilation_rate, name_prefix, i + 1
)
)
if self.quantize:
x = QuantizedConv2D(
filters,
(kernel_size, kernel_size),
strides=strides,
padding="same",
dilation_rate=dilation_rate,
data_format=self.data_format,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
bitwidth=self.bitwidth,
name="%sconv_%d" % (name_prefix, i + 1),
)(x)
else:
x = keras.layers.Conv2D(
filters,
(kernel_size, kernel_size),
strides=strides,
padding="same",
dilation_rate=dilation_rate,
data_format=self.data_format,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name="%sconv_%d" % (name_prefix, i + 1),
)(x)
if self.use_batch_norm:
x = keras.layers.BatchNormalization(
axis=bn_axis, name="%sbn_%d" % (name_prefix, i + 1)
)(x)
if i != nblocks - 1: # All except last conv in block.
x = add_activation(self.activation_type, **self.activation_kwargs)(x)
if self.use_shortcuts:
if self.all_projections:
# Implementing shortcut connections as 1x1 projection layers irrespective of
# dimension change.
if self.quantize:
shortcut = QuantizedConv2D(
filters,
(1, 1),
strides=(stride, stride),
data_format=self.data_format,
dilation_rate=self.dilation_rate,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
bitwidth=self.bitwidth,
name="%sconv_shortcut" % name_prefix,
)(shortcut)
else:
shortcut = keras.layers.Conv2D(
filters,
(1, 1),
strides=(stride, stride),
data_format=self.data_format,
dilation_rate=self.dilation_rate,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name="%sconv_shortcut" % name_prefix,
)(shortcut)
if self.use_batch_norm:
shortcut = keras.layers.BatchNormalization(
axis=bn_axis, name="%sbn_shortcut" % name_prefix
)(shortcut)
else:
# Add projection layers to the shortcut only if there is a change in dimension.
if dimension_changed: # Dimension changed.
if self.quantize:
shortcut = QuantizedConv2D(
filters,
(1, 1),
strides=(stride, stride),
data_format=self.data_format,
dilation_rate=self.dilation_rate,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
bitwidth=self.bitwidth,
name="%sconv_shortcut" % name_prefix,
)(shortcut)
else:
shortcut = keras.layers.Conv2D(
filters,
(1, 1),
strides=(stride, stride),
data_format=self.data_format,
dilation_rate=self.dilation_rate,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name="%sconv_shortcut" % name_prefix,
)(shortcut)
if self.use_batch_norm:
shortcut = keras.layers.BatchNormalization(
axis=bn_axis, name="%sbn_shortcut" % name_prefix
)(shortcut)
x = keras.layers.add([x, shortcut])
x = add_activation(self.activation_type, **self.activation_kwargs)(x)
return x
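# Usage sketch (hypothetical, not part of the original module): a ResNet-style block of two
# 3x3/64 convolutions, repeated twice, downsampling by 2 in its first subblock:
#     block = CNNBlock(use_batch_norm=True, use_shortcuts=True, data_format="channels_first",
#                      kernel_regularizer=None, bias_regularizer=None, repeat=2, stride=2,
#                      subblocks=[(3, 64), (3, 64)], index=1)
#     x = block(x)
# This creates layers named "block_1a_conv_1", "block_1a_conv_2", "block_1b_conv_1", etc.; only
# the first repeat applies the stride, and a 1x1 projection shortcut is added where the
# dimensions change (or everywhere, if all_projections=True).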
# file: nvidia_tao_tf1/core/models/templates/utils.py (repo: tao_tensorflow1_backend-main)
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantized DepthwiseConv2D for Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import keras.backend as K
from keras.layers import DepthwiseConv2D
import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import moving_averages
logger = logging.getLogger(__name__)
# @zeyuz: note that Keras 2.2.4 DepthwiseConv2D has no dilation support. The dilation rate is
# here to support future Keras versions. This value should NOT be set to anything other than (1, 1).
class QuantizedDepthwiseConv2D(DepthwiseConv2D):
"""Quantized Depthwise 2D convolution.
Depthwise convolution performs
just the first step of a depthwise spatial convolution
(which acts on each input channel separately).
The `depth_multiplier` argument controls how many
output channels are generated per input channel in the depthwise step.
# Arguments
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution
along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
data_format: A string,
one of `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be 'channels_last'.
activation: Activation function to use
(see [activations](../activations.md)).
If you don't specify anything, no activation is applied
(ie. 'linear' activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: Initializer for the depthwise kernel matrix.
bias_initializer: Initializer for the bias vector.
depthwise_regularizer: Regularizer function applied to
the depthwise kernel matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its 'activation').
depthwise_constraint: Constraint function applied to
the depthwise kernel matrix.
bias_constraint: Constraint function applied to the bias vector
quantize: Quantize the input in addition to weights.
bitwidth: Quantization precision.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)`
if `data_format` is `"channels_first"`
or 4D tensor with shape:
`(batch, rows, cols, channels)`
if `data_format` is `"channels_last"`.
# Output shape
4D tensor with shape:
`(batch, channels * depth_multiplier, new_rows, new_cols)`
if `data_format` is `"channels_first"`
or 4D tensor with shape:
`(batch, new_rows, new_cols, channels * depth_multiplier)`
if `data_format` is `"channels_last"`.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
kernel_size,
strides=(1, 1),
padding='valid',
depth_multiplier=1,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
bias_constraint=None,
quantize=True,
bitwidth=8,
**kwargs):
"""Init function."""
super(QuantizedDepthwiseConv2D, self).__init__(
kernel_size=kernel_size,
strides=strides,
padding=padding,
depth_multiplier=depth_multiplier,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
depthwise_initializer=depthwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
bias_constraint=bias_constraint,
**kwargs)
self.quantize_input = quantize
self.bitwidth = bitwidth
def build(self, input_shape):
"""Keras layer build."""
# The parent class build function should be called first so that the quantization scaling factor is weights[-1].
super(QuantizedDepthwiseConv2D, self).build(input_shape)
if self.quantize_input:
self.scaling_factor = self.add_weight(
shape=[],
initializer=init_ops.constant_initializer(6.0),
name="scaling_factor",
trainable=False,
)
else:
self.scaling_factor = None
def call(self, inputs):
"""Call function to apply QAT."""
if self.quantize_input:
assert (
self.scaling_factor is not None
), "Quantization enabled but scaling factor parameter not defined."
# Quantize the input.
keras_learning_phase = K.learning_phase()
if tf.is_tensor(keras_learning_phase):
keras_learning_phase = 0
logger.warning(
"QuantizedDepthwiseConv2D: Keras learning_phase not set. Assuming evaluation."
)
if keras_learning_phase:
batch_min = math_ops.reduce_min(inputs, name="BatchMin")
batch_min = math_ops.minimum(batch_min, 0.0)
batch_max = math_ops.reduce_max(inputs, name="BatchMax")
batch_max = math_ops.maximum(batch_max, 0.0)
abs_max = math_ops.maximum(
math_ops.abs(batch_min), math_ops.abs(batch_max), name="tensor_scale"
)
assign_max = moving_averages.assign_moving_average(
self.scaling_factor, abs_max, 0.999, name="AssignMaxEma"
)
else:
assign_max = self.scaling_factor
assign_min = math_ops.negative(assign_max)
assert assign_min.get_shape() == [], "Unexpected shape for tensor minimum."
assert assign_max.get_shape() == [], "Unexpected shape for tensor maximum."
inputs = tf.quantization.quantize_and_dequantize(
input=inputs,
input_min=assign_min,
input_max=assign_max,
range_given=True,
signed_input=True,
num_bits=self.bitwidth,
)
# Quantizing the weights.
kernel = tf.quantization.quantize_and_dequantize(
input=self.depthwise_kernel,
input_min=0.0,
input_max=0.0,
range_given=False,
signed_input=True,
num_bits=self.bitwidth,
)
outputs = K.depthwise_conv2d(
inputs,
kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate,
data_format=self.data_format)
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def get_config(self):
"""get config function."""
config = super(QuantizedDepthwiseConv2D, self).get_config()
config["quantize"] = self.quantize_input
config["bitwidth"] = self.bitwidth
return config
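# Usage sketch (hypothetical, not part of the original module): a drop-in replacement for
# keras.layers.DepthwiseConv2D that fake-quantizes the depthwise kernel and, when quantize=True,
# also the input tensor, using an EMA of the per-batch absolute maximum as the dynamic range
# during training:
#     x = QuantizedDepthwiseConv2D(kernel_size=(3, 3), strides=(1, 1), padding="same",
#                                  data_format="channels_first", quantize=True, bitwidth=8)(x)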
# file: nvidia_tao_tf1/core/models/templates/quantized_depthwiseconv2d.py (repo: tao_tensorflow1_backend-main)
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional GRU Layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.core.models.templates.rnn_conv2d_base import RNNConv2dBase
import tensorflow as tf
class GRUConv2d(RNNConv2dBase):
"""Convolutional GRU Module."""
TYPE_NAME = "GRU"
# Variable names of this layer, grouped according to their functions.
INPUT_PROJECTION_VARIABLES_NAMES = ["W_z", "W_r", "W_h"]
STATE_VARIABLES_NAMES = ["U_z", "U_r", "U_h"]
BIAS_VARIABLES_NAMES = ["b_z", "b_r", "b_h"]
def build(self, input_shapes):
"""Initializes internal parameters given the shape of the inputs."""
input_shape = input_shapes[0]
n_input_shape = self._get_normalized_size(input_shape)
self.n_input_shape = n_input_shape
kernel_height, kernel_width = self.kernel_size
# Create variables here.
for var_name in self.INPUT_PROJECTION_VARIABLES_NAMES:
tmp_var = self.add_weight(
name=var_name,
shape=[kernel_height, kernel_width, n_input_shape[1], self.filters],
initializer="glorot_uniform",
trainable=True,
regularizer=self.kernel_regularizer,
)
setattr(self, var_name, tmp_var)
for var_name in self.STATE_VARIABLES_NAMES:
tmp_var = self.add_weight(
name=var_name,
shape=self._get_hidden_shape(),
initializer="glorot_uniform",
trainable=True,
regularizer=self.kernel_regularizer,
)
setattr(self, var_name, tmp_var)
for var_name in self.BIAS_VARIABLES_NAMES:
tmp_var = self.add_weight(
name=var_name,
shape=self._cvt_to_df([1, self.filters, 1, 1]),
initializer="zeros",
trainable=True,
regularizer=self.bias_regularizer,
)
setattr(self, var_name, tmp_var)
super(GRUConv2d, self).build(input_shapes)
def iteration(self, x, state):
"""
Implements the recurrent activation on a single timestep.
Args:
x (tf.Tensor): The input tensor for the current timestep.
state (tf.Tensor): The state of the recurrent module, up to the current timestep.
Returns:
state (tf.Tensor): The state of the recurrent module after processing this timestep.
"""
# Scale the state down to simulate the necessary leak.
state = state * self.state_scaling
# Convolutional GRU operations
z = self._conv2d(x, self.W_z) + self._conv2d(state, self.U_z)
z = self._bias_add(z, self.b_z)
z = tf.sigmoid(z)
r = self._conv2d(x, self.W_r) + self._conv2d(state, self.U_r)
r = self._bias_add(r, self.b_r)
r = tf.sigmoid(r)
h = self._conv2d(x, self.W_h) + self._conv2d(tf.multiply(state, r), self.U_h)
h = self._bias_add(h, self.b_h)
h = tf.tanh(h)
out_name = "state_output" if self.is_export_mode else None
state = tf.subtract(
tf.multiply(z, h), tf.multiply((z - 1.0), state), name=out_name
)
return state
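# For reference (comment added for clarity, not in the original source), iteration() implements
# the standard convolutional GRU update, with '*' denoting 2D convolution and 'o' the
# elementwise product:
#     z_t  = sigmoid(W_z * x_t + U_z * h_{t-1} + b_z)
#     r_t  = sigmoid(W_r * x_t + U_r * h_{t-1} + b_r)
#     h~_t = tanh(W_h * x_t + U_h * (r_t o h_{t-1}) + b_h)
#     h_t  = z_t o h~_t + (1 - z_t) o h_{t-1}
# where h_{t-1} has first been multiplied by state_scaling to simulate an exponential decay.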
# file: nvidia_tao_tf1/core/models/templates/gru_conv2d.py (repo: tao_tensorflow1_backend-main)
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input layer for temporal models that receive a temporal input."""
import keras
class TemporalInput(keras.layers.Layer):
"""
Temporal input module.
This model is compatible with the modulus export facility that
deals with the difference in model architecture between training
and inference.
"""
def __init__(self, is_export_mode=False, **kwargs):
"""
Initialization.
Args:
is_export_mode (bool): Whether or not this layer should behave in single-frame
inference mode (e.g. DriveWorks).
"""
super(TemporalInput, self).__init__(**kwargs)
self.is_export_mode = is_export_mode
def call(self, x):
"""
Call function.
Composes this part of the graph. If this layer is in export mode, then it will
simply forward the input. If it's in training mode, then all but the final 3
tensor dimensions will be flattened.
Args:
x (tf.Tensor): The input tensor.
"""
if self.is_export_mode:
return x
shape = x.shape
return keras.backend.reshape(x, (-1, shape[2], shape[3], shape[4]))
def compute_output_shape(self, input_shape):
"""
Computes the output shape given the specified input shape.
The behavior changes between training and export mode.
Args:
input_shape (tf.TensorShape): The shape of the input tensor.
"""
shape_type = type(input_shape)
input_shape = list(input_shape)
if self.is_export_mode:
output_shape = shape_type([input_shape[0]] + input_shape[-3:])
else:
batch_dim = input_shape[0]
temporal_dim = input_shape[1]
if batch_dim is None or temporal_dim is None:
batch_output = None
else:
batch_output = batch_dim * temporal_dim
output_shape = shape_type([batch_output] + input_shape[2:])
return output_shape
def prepare_for_export(self):
"""Configures this layer for export mode if this didn't already happen in the init."""
self.is_export_mode = True
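# Shape behaviour (illustrative, not part of the original source), assuming a channels-first
# 5D training input:
#     training mode: (batch, time, C, H, W) --reshape--> (batch * time, C, H, W)
#     export mode:   the single-frame input is forwarded unchanged.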
# file: nvidia_tao_tf1/core/models/templates/temporal_input.py (repo: tao_tensorflow1_backend-main)
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test convolutional gated recurrent unit (GRU) custom Keras layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
import numpy as np
from nvidia_tao_tf1.core.models.templates.conv_gru_2d import ConvGRU2D
from parameterized import parameterized
import tensorflow as tf
class TestConvGRU2D(tf.test.TestCase):
"""Test convolutional gated recurrent unit (GRU) custom Keras layer."""
def setUp(self):
"""Set up the test fixture: construct a ConvGRU2D object."""
super(TestConvGRU2D, self).setUp()
# Shape: [None, channels, height, width].
self.GRU_INPUT_SHAPE = [None, 3, 1, 2]
self.MODEL_SEQUENCE_LENGTH_IN_FRAMES = 2
self.INPUT_SEQUENCE_LENGTH_IN_FRAMES = 2
self.STATE_SCALING = 1.0
self.IS_STATEFUL = False
# Set the initial state shape.
# Shape: [None, number_channels in a state, height, width].
self.INITIAL_STATE_SHAPE = [None, 4, 1, 2]
self.SPATIAL_KERNEL_HEIGHT = 1
self.SPATIAL_KERNEL_WIDTH = 1
self.KERNEL_REGULARIZER = {
"class_name": "L1L2",
"config": {"l2": 0.1, "l1": 0.3},
}
self.BIAS_REGULARIZER = {"class_name": "L1L2", "config": {"l2": 0.0, "l1": 0.1}}
self.convgru2d = ConvGRU2D(
model_sequence_length_in_frames=self.MODEL_SEQUENCE_LENGTH_IN_FRAMES,
input_sequence_length_in_frames=self.INPUT_SEQUENCE_LENGTH_IN_FRAMES,
state_scaling=self.STATE_SCALING,
input_shape=self.GRU_INPUT_SHAPE,
initial_state_shape=self.INITIAL_STATE_SHAPE,
spatial_kernel_height=self.SPATIAL_KERNEL_HEIGHT,
spatial_kernel_width=self.SPATIAL_KERNEL_WIDTH,
kernel_regularizer=self.KERNEL_REGULARIZER,
bias_regularizer=self.BIAS_REGULARIZER,
)
def test_convgru2d(self):
"""Test if convgru2d layer parameters are initialized as expected values."""
input_shape_expected = self.GRU_INPUT_SHAPE
model_sequence_length_in_frames_expected = self.MODEL_SEQUENCE_LENGTH_IN_FRAMES
input_sequence_length_in_frames_expected = self.INPUT_SEQUENCE_LENGTH_IN_FRAMES
state_scaling_expected = self.STATE_SCALING
is_stateful_expected = self.IS_STATEFUL
# Set the initial state shape.
# Shape: [None, number_channels in a state, height, width].
initial_state_shape_expected = self.INITIAL_STATE_SHAPE
spatial_kernel_height_expected = self.SPATIAL_KERNEL_HEIGHT
spatial_kernel_width_expected = self.SPATIAL_KERNEL_WIDTH
kernel_regularizer = self.KERNEL_REGULARIZER
bias_regularizer = self.BIAS_REGULARIZER
assert input_shape_expected == self.convgru2d.rnn_input_shape
assert (
list(initial_state_shape_expected[1:])
== self.convgru2d.initial_state_shape[1:]
)
assert spatial_kernel_height_expected == self.convgru2d.spatial_kernel_height
assert spatial_kernel_width_expected == self.convgru2d.spatial_kernel_width
assert (
model_sequence_length_in_frames_expected
== self.convgru2d.model_sequence_length_in_frames
)
assert (
input_sequence_length_in_frames_expected
== self.convgru2d.input_sequence_length_in_frames
)
assert np.isclose(state_scaling_expected, self.convgru2d.state_scaling)
assert is_stateful_expected == self.convgru2d.is_stateful
assert (
kernel_regularizer["config"]["l1"] == self.convgru2d.kernel_regularizer.l1
)
assert (
kernel_regularizer["config"]["l2"] == self.convgru2d.kernel_regularizer.l2
)
assert bias_regularizer["config"]["l1"] == self.convgru2d.bias_regularizer.l1
assert bias_regularizer["config"]["l2"] == self.convgru2d.bias_regularizer.l2
def test_convgru2d_variables(self):
"""Test the trainable variables of the ConvGRU2D Layer.
1) Test if the trainable variables are built with the correct shapes and names.
2) Test if the number of trainable variables is correct.
3) Test if the output shape is set correctly upon calling the layer.
"""
variable_names_expected = set(
[
"conv_gru_2d/W_z:0",
"conv_gru_2d/W_r:0",
"conv_gru_2d/W_h:0",
"conv_gru_2d/U_z:0",
"conv_gru_2d/U_r:0",
"conv_gru_2d/U_h:0",
"conv_gru_2d/b_z:0",
"conv_gru_2d/b_r:0",
"conv_gru_2d/b_h:0",
]
)
inputs = keras.Input(
shape=(
self.GRU_INPUT_SHAPE[1],
self.GRU_INPUT_SHAPE[2],
self.GRU_INPUT_SHAPE[3],
)
)
output = self.convgru2d(inputs)
# Check variable names.
weights_actual = self.convgru2d.weights
variable_names_actual = {weight.name for weight in weights_actual}
assert variable_names_expected == variable_names_actual
# Check the number of trainable variables. This should be 9: 3 x W_, 3 x U_ and 3 x bias.
assert len(self.convgru2d.trainable_weights) == 9
# Check the shapes of input to state projections.
# Shape [height, width, in_channels, out_channels].
var_shape_expected = [
self.SPATIAL_KERNEL_HEIGHT,
self.SPATIAL_KERNEL_WIDTH,
self.GRU_INPUT_SHAPE[1],
self.INITIAL_STATE_SHAPE[1],
]
for local_var_name in ["W_z", "W_r", "W_h"]:
var_shape_actual = getattr(self.convgru2d, local_var_name).shape.as_list()
assert var_shape_expected == var_shape_actual
# Check the shapes of state to state projections.
# Shape [height, width, in_channels, out_channels].
var_shape_expected = [
self.SPATIAL_KERNEL_HEIGHT,
self.SPATIAL_KERNEL_WIDTH,
self.INITIAL_STATE_SHAPE[1],
self.INITIAL_STATE_SHAPE[1],
]
for local_var_name in ["U_z", "U_r", "U_h"]:
var_shape_actual = getattr(self.convgru2d, local_var_name).shape.as_list()
assert var_shape_expected == var_shape_actual
# Check the shapes of bias variables.
# Shape [out_channels].
var_shape_expected = [self.INITIAL_STATE_SHAPE[1]]
for local_var_name in ["b_z", "b_r", "b_h"]:
var_shape_actual = getattr(self.convgru2d, local_var_name).shape.as_list()
assert var_shape_expected == var_shape_actual
# Check the output shape for the dimensions that can be inferred.
# Shape [batch_size, num_filters, grid_height, grid_width].
output_shape_expected = [
None,
self.INITIAL_STATE_SHAPE[1],
None,
self.INITIAL_STATE_SHAPE[3],
]
assert output_shape_expected == output.shape.as_list()
@parameterized.expand([["all"], ["last"]])
def test_convgru2d_output_values_single_step(self, output_type):
"""Test the value of the output tensor after calling a single step ConvGRU2D Layer."""
with self.test_session() as sess:
# A close-to-minimum sized GRU.
# This GRU will implement 1x1 operations on a 2x2 grid; in-channels: 1, out-channels: 1.
convgru2d_minimal = ConvGRU2D(
model_sequence_length_in_frames=1,
input_sequence_length_in_frames=1,
state_scaling=1.0,
input_shape=[None, 1, 2, 2],
initial_state_shape=[None, 1, 2, 2],
spatial_kernel_height=1,
spatial_kernel_width=1,
output_type=output_type,
)
inputs = keras.Input(shape=(1, 2, 2))
convgru2d_minimal(inputs)
# Manually set the weights to specific values.
value_for_projections_variables = np.ones([1, 1, 1, 1], dtype=np.float32)
# Set the bias variable to zero first.
value_for_bias_variables = np.zeros([1], dtype=np.float32)
# Weights have 6 projection variables and 3 bias variables.
convgru2d_minimal.set_weights(
6 * [value_for_projections_variables] + 3 * [value_for_bias_variables]
)
# Present a ones input to the network.
input_tensor = tf.constant(np.ones([1, 1, 2, 2], dtype=np.float32))
# The session run below should compute the value of
# the GRU output after one time step.
# z will be 1x1x2x2 tensor, each element will equal sigmoid(1).
# r will be 1x1x2x2 tensor, each element will equal sigmoid(1).
# state_update_input will be 1x1x2x2 tensor, each element will equal tanh(1).
# Then the output will be z * state_update_input,
# a 1x1x2x2 tensor, each element will equal sigmoid(1) * tanh(1) ~ 0.55677 .
output_tensor = convgru2d_minimal(input_tensor)
output_value = sess.run(output_tensor)
np.testing.assert_array_almost_equal(
0.55677 * np.ones((1, 1, 2, 2)), output_value
)
# Now everything is the same as previous test, but the bias values will be 1.
value_for_bias_variables = np.ones([1], dtype=np.float32)
# Weights have 6 projection variables and 3 bias variables.
convgru2d_minimal.set_weights(
6 * [value_for_projections_variables] + 3 * [value_for_bias_variables]
)
# The session run below should compute the value of
# the GRU output after one time step.
# z will be 1x1x2x2 tensor, each element will equal sigmoid(1+1).
# r will be 1x1x2x2 tensor, each element will equal sigmoid(1+1).
# state_update_input will be 1x1x2x2 tensor, each element will equal tanh(1+1).
# Then the output will be z * state_update_input,
# a 1x1x2x2 tensor, each element will equal sigmoid(2) * tanh(2) ~ 0.849112675 .
output_value = sess.run(output_tensor)
np.testing.assert_array_almost_equal(
0.849112675 * np.ones((1, 1, 2, 2)), output_value
)
@parameterized.expand([["all"], ["last"]])
def test_convgru2d_output_values_2_steps(self, output_type):
"""Test the value of the output tensor after calling a 2-step ConvGRU2D Layer."""
with self.test_session() as sess:
# Now test the GRU, for 2 time steps inference.
# This GRU will implement 1x1 operations on a 2x2 grid; in-channels: 1, out-channels: 1.
convgru2d_2steps = ConvGRU2D(
model_sequence_length_in_frames=2,
input_sequence_length_in_frames=2,
state_scaling=1.0,
input_shape=[None, 1, 2, 2],
initial_state_shape=[None, 1, 2, 2],
spatial_kernel_height=1,
spatial_kernel_width=1,
output_type=output_type,
)
inputs = keras.Input(shape=(1, 2, 2))
convgru2d_2steps(inputs)
# Manually set the weights to specific values.
value_for_projections_variables = np.ones([1, 1, 1, 1], dtype=np.float32)
# Set the bias variable to zero first.
value_for_bias_variables = np.zeros([1], dtype=np.float32)
# Weights have 6 projection variables and 3 bias variables.
convgru2d_2steps.set_weights(
6 * [value_for_projections_variables] + 3 * [value_for_bias_variables]
)
# Present a [ones, zeros] input to the network.
# Note that the number of input tensors equals 2*sequence_length_in_frames.
# First input sequence in the batch consists of tensors of ones.
# Second input sequence in the batch consists of zero tensors.
# The second input is included in order to check that the ordering of the
# output states is correct.
input_tensor = tf.constant(
np.concatenate((np.ones([2, 1, 2, 2]), np.zeros([2, 1, 2, 2])), axis=0),
dtype=tf.float32,
)
# The sub-session below should compute the value of
# the GRU operations' output after each time step.
# z_0 will be 1x1x2x2 tensor, each element will equal sigmoid(1).
# r_0 will be 1x1x2x2 tensor, each element will equal sigmoid(1).
# state_update_input_0 will be 1x1x2x2 tensor, each element will equal tanh(1).
# Then the state_1 for the first sequence will be z_0 * state_update_input_0,
# a 1x1x2x2 tensor, where each element will equal sigmoid(1) * tanh(1) ~ 0.55677.
# state_1 for the second sequence will be zero.
expected_state_1_seq_1 = 0.55677 * np.ones((1, 1, 2, 2))
expected_state_1_seq_2 = np.zeros((1, 1, 2, 2))
# z_1 will be 1x1x2x2 tensor, each element will be
# sigmoid(1 + 0.55677) ~ 0.825889 .
# r_1 will be 1x1x2x2 tensor, each element will be
# sigmoid(1 + 0.55677) ~ 0.825889.
# state_update_input_1 will be 1x1x2x2 tensor, each element will equal
# tanh(0.55677 * 0.825889 + 1) ~ 0.897619618.
# state_2 for the first sequence will be z_1 * state_update_input_1 + (1-z_1) * state_1,
# a 1x1x2x2 tensor, each element will equal
# 0.825889 * 0.897619618 + (1.-0.825889) * 0.55677 ~ 0.8382739.
# state_2 for the second sequence will be zero.
expected_state_2_seq_1 = 0.8382739 * np.ones((1, 1, 2, 2))
expected_state_2_seq_2 = np.zeros((1, 1, 2, 2))
output_tensor = convgru2d_2steps(input_tensor)
output_value = sess.run(output_tensor)
if output_type == "last":
np.testing.assert_array_almost_equal(
np.concatenate(
(expected_state_2_seq_1, expected_state_2_seq_2), axis=0
),
output_value,
)
elif output_type == "all":
np.testing.assert_array_almost_equal(
np.concatenate(
(
expected_state_1_seq_1,
expected_state_2_seq_1,
expected_state_1_seq_2,
expected_state_2_seq_2,
),
axis=0,
),
output_value,
)
@parameterized.expand([["all"], ["last"]])
def test_convgru2d_output_values_2_times_stateful(self, output_type):
"""Test the value of the output tensor after calling a 1-step ConvGRU2D Layer 2 times."""
with self.test_session() as sess:
# Now test the GRU for 2 time steps of stateful inference with 2 single steps.
# This GRU will implement 1x1 operations on 2x2 grid. in-channels:1, out-channels:1.
convgru2d_2steps = ConvGRU2D(
model_sequence_length_in_frames=1,
input_sequence_length_in_frames=1,
state_scaling=1.0,
input_shape=[None, 1, 2, 2],
initial_state_shape=[None, 1, 2, 2],
spatial_kernel_height=1,
spatial_kernel_width=1,
is_stateful=True,
output_type=output_type,
)
inputs = keras.Input(shape=(1, 2, 2))
convgru2d_2steps(inputs)
# Manually set the weights to specific values.
value_for_projections_variables = np.ones([1, 1, 1, 1], dtype=np.float32)
# Set the bias variable to zero first.
value_for_bias_variables = np.zeros([1], dtype=np.float32)
# Weights have 6 projection variables and 3 bias variables.
convgru2d_2steps.set_weights(
6 * [value_for_projections_variables] + 3 * [value_for_bias_variables]
)
# Present a [ones, zeros] input to the network.
# Note that the number of input tensors equals 2.
# First input sequence in the batch consists of tensors of ones.
# Second input sequence in the batch consists of zero tensors.
input_tensor = tf.constant(
np.concatenate((np.ones([1, 1, 2, 2]), np.zeros([1, 1, 2, 2])), axis=0),
dtype=tf.float32,
)
# The sub-session below should compute the value of
# the GRU operations' output after each time step.
# z_0 will be 1x1x2x2 tensor, each element will equal sigmoid(1).
# r_0 will be 1x1x2x2 tensor, each element will equal sigmoid(1).
# state_update_input_0 will be 1x1x2x2 tensor, each element will equal tanh(1).
# Then the state_1 for the first sequence will be z_0 * state_update_input_0,
# a 1x1x2x2 tensor, where each element will equal sigmoid(1) * tanh(1) ~ 0.55677.
# state_1 for the second sequence will be zero.
expected_state_1_seq_1 = 0.55677 * np.ones((1, 1, 2, 2))
expected_state_1_seq_2 = np.zeros((1, 1, 2, 2))
# z_1 will be 1x1x2x2 tensor, each element will be
# sigmoid(1 + 0.55677) ~ 0.825889 .
# r_1 will be 1x1x2x2 tensor, each element will be
# sigmoid(1 + 0.55677) ~ 0.825889.
# state_update_input_1 will be 1x1x2x2 tensor, each element will equal
# tanh(0.55677 * 0.825889 + 1) ~ 0.897619618.
# state_2 for the first sequence will be z_1 * state_update_input_1 + (1-z_1) * state_1,
# a 1x1x2x2 tensor, each element will equal
# 0.825889 * 0.897619618 + (1.-0.825889) * 0.55677 ~ 0.8382739.
# state_2 for the second sequence will be zero.
expected_state_2_seq_1 = 0.8382739 * np.ones((1, 1, 2, 2))
expected_state_2_seq_2 = np.zeros((1, 1, 2, 2))
# Call the layer 2 times to check that the hidden state is correctly
# saved after the first call.
output_tensor_1 = convgru2d_2steps(input_tensor)
output_value_1 = sess.run(output_tensor_1)
output_value_2 = sess.run(output_tensor_1)
np.testing.assert_array_almost_equal(
np.concatenate(
(expected_state_1_seq_1, expected_state_1_seq_2), axis=0
),
output_value_1,
)
np.testing.assert_array_almost_equal(
np.concatenate(
(expected_state_2_seq_1, expected_state_2_seq_2), axis=0
),
output_value_2,
)
@parameterized.expand([["last"], ["all"]])
def test_convgru2d_stateful_gradients(self, output_type):
"""Test the value of the output tensor after calling a 2-step ConvGRU2D Layer."""
with self.test_session() as sess:
# Test the GRU for stateful training gradients.
convgru2d_2steps = ConvGRU2D(
model_sequence_length_in_frames=2,
input_sequence_length_in_frames=2,
state_scaling=1.0,
input_shape=[None, 1, 2, 2],
initial_state_shape=[2, 1, 2, 2],
spatial_kernel_height=1,
spatial_kernel_width=1,
is_stateful=True,
output_type=output_type,
)
inputs = keras.Input(shape=(1, 2, 2))
convgru2d_2steps(inputs)
# Manually set the weights to specific values.
value_for_projections_variables = np.ones([1, 1, 1, 1], dtype=np.float32)
# Set the bias variable to zero first.
value_for_bias_variables = np.zeros([1], dtype=np.float32)
# Weights have 6 projection variables and 3 bias variables.
convgru2d_2steps.set_weights(
6 * [value_for_projections_variables] + 3 * [value_for_bias_variables]
)
# Note that the batch dimension equals batch_size * input_sequence_length_in_frames.
input_tensor = tf.constant(np.ones([4, 1, 2, 2], dtype=np.float32))
output_tensor = convgru2d_2steps(input_tensor)
gradients = keras.backend.gradients(
output_tensor, convgru2d_2steps.trainable_weights
)
# Take one forward pass, which assigns the state variables.
sess.run(output_tensor)
# Obtain the gradients. This breaks if the above state assignment changed the state
# variable shape. Since we set the batch size to the correct value upon construction,
# the state variable shape remains the same, and no error is raised.
gradient_values = sess.run(gradients)
# The gradient values should be nonzero for all weights.
assert all([float(g) != 0.0 for g in gradient_values])
@parameterized.expand(
[["all", False], ["all", True], ["last", False], ["last", True]]
)
def test_convgru2d_variable_input_size(self, output_type, is_stateful):
"""Test the ConvGRU2D Layer with variable input size.
Test that the ConvGRU2D layer accepts an input tensor whose height and width
dimensions differ from the dimensions given when the layer was created,
and that it outputs a tensor of the correct size.
"""
convgru2d_variable_input = ConvGRU2D(
model_sequence_length_in_frames=self.MODEL_SEQUENCE_LENGTH_IN_FRAMES,
input_sequence_length_in_frames=self.INPUT_SEQUENCE_LENGTH_IN_FRAMES,
state_scaling=self.STATE_SCALING,
input_shape=self.GRU_INPUT_SHAPE,
initial_state_shape=self.INITIAL_STATE_SHAPE,
spatial_kernel_height=self.SPATIAL_KERNEL_HEIGHT,
spatial_kernel_width=self.SPATIAL_KERNEL_WIDTH,
kernel_regularizer=self.KERNEL_REGULARIZER,
bias_regularizer=self.BIAS_REGULARIZER,
is_stateful=is_stateful,
output_type=output_type,
)
inputs = keras.Input(
shape=(
self.GRU_INPUT_SHAPE[1],
self.GRU_INPUT_SHAPE[2],
self.GRU_INPUT_SHAPE[3],
)
)
convgru2d_variable_input(inputs)
batch_size = 4
if output_type == "last":
expected_batch_size = batch_size // self.INPUT_SEQUENCE_LENGTH_IN_FRAMES
elif output_type == "all":
expected_batch_size = batch_size
inputs_shape = inputs.shape.as_list()
input_tensor = tf.ones(
[batch_size, inputs_shape[1], inputs_shape[2] + 2, inputs_shape[3] + 3]
)
output_tensor = convgru2d_variable_input(input_tensor)
expected_output_shape = (
expected_batch_size,
self.INITIAL_STATE_SHAPE[1],
) + tuple(input_tensor.shape.as_list()[2:])
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output_value = sess.run(output_tensor)
assert output_value.shape == expected_output_shape
# Test also compute_output_shape function.
assert (
convgru2d_variable_input.compute_output_shape(input_tensor.shape.as_list())
== expected_output_shape
)
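# --- Illustrative sketch (added for exposition; not part of the original tests) ---
# The hand-computed constants asserted above (0.55677 and 0.8382739) follow from the
# GRU update equations with all-ones weights, zero biases, and a ones input. A minimal
# NumPy reproduction, assuming only that NumPy is available:
def _expected_gru_values_sketch():
    """Recompute the expected single-step and two-step GRU outputs."""
    def sigmoid(v):
        return 1.0 / (1.0 + np.exp(-v))
    # Step 1: zero initial state, ones input, all weights 1, zero bias.
    z0 = sigmoid(1.0)                          # update gate
    h0 = np.tanh(1.0)                          # candidate state
    state_1 = z0 * h0                          # ~0.55677
    # Step 2: same ones input, previous state state_1 (r1 equals z1 here).
    z1 = sigmoid(1.0 + state_1)                # ~0.825889
    h1 = np.tanh(1.0 + z1 * state_1)           # ~0.897620
    state_2 = z1 * h1 + (1.0 - z1) * state_1   # ~0.838274
    return state_1, state_2
# -----------------------------------------------------------------------------------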
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/models/templates/test_conv_gru_2d.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantized Conv2D for Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import keras.backend as K
from keras.backend import image_data_format
from keras.backend.tensorflow_backend import _preprocess_padding
from keras.layers import Conv2D
from keras.layers import InputSpec
import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import moving_averages
logger = logging.getLogger(__name__)
DATA_FORMAT_MAP = {"channels_first": "NCHW", "channels_last": "NHWC"}
def _conv2d(
x,
kernel,
strides=(1, 1),
padding="valid",
data_format=None,
dilation_rate=(1, 1),
quantize_input=False,
bitwidth=8,
scaling_factor=None,
training=None,
):
"""2D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of 2 integers.
quantize_input: boolean, quantize both the weights and the inputs.
bitwidth: number of bits to use for quantization.
scaling_factor: variable holding the moving average of absolute max of input tensor.
training: boolean or integer determining the training or alternative phase.
Returns:
A tensor, result of 2D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor `channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in DATA_FORMAT_MAP:
raise ValueError("Unknown data_format " + str(data_format))
tf_data_format = DATA_FORMAT_MAP[data_format]
# Avoid TensorFlow's implicit asymmetric padding by applying explicit symmetric padding.
# See https://stackoverflow.com/questions/42924324/tensorflows-asymmetric-padding-assumptions
if padding == "same":
filter_shape = kernel.get_shape()
height_padding = ((filter_shape[0].value - 1) * dilation_rate[0] + 1) // 2
width_padding = ((filter_shape[1].value - 1) * dilation_rate[1] + 1) // 2
if tf_data_format == "NCHW":
padding_pattern = [
[0, 0],
[0, 0],
[height_padding, height_padding],
[width_padding, width_padding],
]
else:  # 'NHWC'
padding_pattern = [
[0, 0],
[height_padding, height_padding],
[width_padding, width_padding],
[0, 0],
]
x = tf.pad(x, padding_pattern, mode="CONSTANT")
padding = "valid"
nhwc_roundtrip = not K._has_nchw_support() and tf_data_format == "NCHW"
if nhwc_roundtrip:
tf_data_format = "NHWC"
x = tf.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC
padding = _preprocess_padding(padding)
if quantize_input:
assert (
scaling_factor is not None
), "Quantization enabled but scaling factor parameter not defined."
# Quantize the input.
keras_learning_phase = K.learning_phase()
if tf.is_tensor(keras_learning_phase):
keras_learning_phase = 0
logger.warning(
"QuantizedConv2D: Keras learning_phase was not set. Assuming evaluation phase."
)
if keras_learning_phase:
batch_min = math_ops.reduce_min(x, name="BatchMin")
batch_min = math_ops.minimum(batch_min, 0.0)
batch_max = math_ops.reduce_max(x, name="BatchMax")
batch_max = math_ops.maximum(batch_max, 0.0)
abs_max = math_ops.maximum(
math_ops.abs(batch_min), math_ops.abs(batch_max), name="tensor_scale"
)
assign_max = moving_averages.assign_moving_average(
scaling_factor, abs_max, 0.999, name="AssignMaxEma"
)
else:
assign_max = scaling_factor
assign_min = math_ops.negative(assign_max)
assert assign_min.get_shape() == [], "Unexpected shape for tensor minimum."
assert assign_max.get_shape() == [], "Unexpected shape for tensor maximum."
x = tf.quantization.quantize_and_dequantize(
input=x,
input_min=assign_min,
input_max=assign_max,
range_given=True,
signed_input=True,
num_bits=bitwidth,
)
# Quantizing the weights.
kernel = tf.quantization.quantize_and_dequantize(
input=kernel,
input_min=0.0,
input_max=0.0,
range_given=False,
signed_input=True,
num_bits=bitwidth,
)
x = tf.nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format,
)
if nhwc_roundtrip:
x = tf.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
return x
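# --- Illustrative sketch (added for exposition; not part of this module) -----------
# A rough NumPy approximation of the symmetric fake quantization that
# tf.quantization.quantize_and_dequantize performs above when range_given=True and
# signed_input=True: clip to [-abs_max, abs_max], snap to a signed grid with
# 2**bitwidth levels, then map back to floats. The exact TensorFlow scaling and
# rounding rules may differ slightly; this only illustrates the idea.
def _fake_quantize_sketch(x, abs_max, bitwidth=8):
    """Clip, quantize to a signed grid, and dequantize (illustration only)."""
    import numpy as np  # assumption: NumPy available in the environment
    scale = abs_max / (2.0 ** (bitwidth - 1) - 1)  # step size of the signed grid
    x = np.clip(x, -abs_max, abs_max)              # emulate range_given=True
    return np.round(x / scale) * scale             # quantize, then dequantize
# -----------------------------------------------------------------------------------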
class QuantizedConv2D(Conv2D):
"""Quantized 2D convolution layer (e.g. spatial convolution over images).
This layer creates a quantized convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
# Arguments
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution
along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
Note that `"same"` is slightly inconsistent across backends with
`strides` != 1, as described
[here](https://github.com/keras-team/keras/pull/9473#issuecomment-372166860)
data_format: A string,
one of `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(i.e. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix
bias_initializer: Initializer for the bias vector
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
bias_regularizer: Regularizer function applied to the bias vector
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix
bias_constraint: Constraint function applied to the bias vector
quantize: Quantize the input in addition to weights.
bitwidth: Quantization precision.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)`
if `data_format` is `"channels_first"`
or 4D tensor with shape:
`(batch, rows, cols, channels)`
if `data_format` is `"channels_last"`.
# Output shape
4D tensor with shape:
`(batch, filters, new_rows, new_cols)`
if `data_format` is `"channels_first"`
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)`
if `data_format` is `"channels_last"`.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(
self,
filters,
kernel_size,
strides=(1, 1),
padding="valid",
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
quantize=True,
bitwidth=8,
**kwargs
):
"""Construct the QuantizedConv2D layer."""
super(QuantizedConv2D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs
)
self.quantize_input = quantize
self.bitwidth = bitwidth
def call(self, inputs, training=None):
"""Keras layer call."""
outputs = _conv2d(
inputs,
self.kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate,
quantize_input=self.quantize_input,
bitwidth=self.bitwidth,
scaling_factor=self.scaling_factor,
training=training,
)
if self.use_bias:
outputs = K.bias_add(outputs, self.bias, data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def build(self, input_shape):
"""Keras layer build."""
if self.data_format == "channels_first":
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError(
"The channel dimension of the inputs "
"should be defined. Found `None`."
)
input_dim = input_shape[channel_axis]
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.kernel = self.add_weight(
shape=kernel_shape,
initializer=self.kernel_initializer,
name="kernel",
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.filters,),
initializer=self.bias_initializer,
name="bias",
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
else:
self.bias = None
if self.quantize_input:
self.scaling_factor = self.add_weight(
shape=[],
initializer=init_ops.constant_initializer(6.0),
name="scaling_factor",
trainable=False,
)
else:
self.scaling_factor = None
# Set input spec.
self.input_spec = InputSpec(ndim=self.rank + 2, axes={channel_axis: input_dim})
self.built = True
def get_config(self):
"""Get the layer configuration for QuantizedConv2D layer."""
config = super(QuantizedConv2D, self).get_config()
config["quantize"] = self.quantize_input
config["bitwidth"] = self.bitwidth
return config
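# --- Usage sketch (added for exposition; not part of this module) -------------------
# A minimal example of dropping QuantizedConv2D into a Keras model, based only on the
# constructor arguments documented above. The input shape and layer names are made up
# for illustration.
if __name__ == "__main__":
    from keras.layers import Input
    from keras.models import Model
    image = Input(shape=(3, 64, 64), name="image")  # NCHW-style input
    features = QuantizedConv2D(
        filters=16,
        kernel_size=(3, 3),
        padding="same",
        data_format="channels_first",
        quantize=True,  # fake-quantize the inputs as well as the weights
        bitwidth=8,     # 8-bit quantization
        name="quantized_conv",
    )(image)
    Model(inputs=image, outputs=features).summary()
# -------------------------------------------------------------------------------------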
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/models/templates/quantized_conv2d.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A convolutional GRU layer for DriveNet gridbox models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import backend as K
from keras import regularizers
from keras.engine.topology import Layer
import tensorflow as tf
def conv2d(x, W):
"""Convenience wrapper around 2d convolution."""
return K.conv2d(x, W, strides=(1, 1), padding="same", data_format="channels_first")
class ConvGRU2D(Layer):
"""Class containing convolutional GRU operations on sequential tensors."""
# Variable names of this layer, grouped according to their functions.
INPUT_PROJECTION_VARIABLES_NAMES = ["W_z", "W_r", "W_h"]
STATE_VARIABLES_NAMES = ["U_z", "U_r", "U_h"]
BIAS_VARIABLES_NAMES = ["b_z", "b_r", "b_h"]
def __init__(
self,
model_sequence_length_in_frames,
input_sequence_length_in_frames,
state_scaling,
input_shape,
initial_state_shape,
spatial_kernel_height,
spatial_kernel_width,
kernel_regularizer=None,
bias_regularizer=None,
is_stateful=False,
name="conv_gru_2d",
output_type="last",
**kwargs
):
"""Constructor for ConvGRU2D layer.
Args:
model_sequence_length_in_frames (int): How many steps the GRU will be tracked for
gradient computation (starting from the last frame). That is, a stop_gradient
operation will be applied just before the last model_sequence_length_in_frames
steps of the input sequence.
input_sequence_length_in_frames (int): Length of the sequence that is presented to the
GRU in the minibatch. The GRU might, however, stop the gradient flow in the middle
of the sequence. The gradients will propagate backwards through the last
model_sequence_length_in_frames steps of the input.
state_scaling (float): A constant scaling factor in range [0,1] in order to simulate
an exponential decay.
input_shape (list / tuple): Input tensor shape (N, C, H, W), where
[N: batch_size * sequence_length_in_frames / None,
C: input_channels,
H: input_height,
W: input_width].
Batch_size needs to be specified only for stateful training.
initial_state_shape (list / tuple): Shape of the initial state (M, F, H, W), where
[M: batch_size or None,
F: number_out_channels,
H: input_height,
W: input_width].
Batch size needs to be specified for stateful training.
spatial_kernel_height (int): Height of the convolution kernel within the GRU.
spatial_kernel_width (int): Width of the convolution kernel within the GRU.
kernel_regularizer (keras.regularizers.Regularizer instance or keras.regularizers config
dict): Regularizer to be applied to convolution kernels.
bias_regularizer (keras.regularizers.Regularizer instance or keras.regularizers config
dict): Regularizer to be applied to biases.
is_stateful (bool): Whether the GRU keeps track of state from minibatch to minibatch.
name (str): Name of the layer.
output_type (str): Whether to give an output for all frames in the sequences or only
for the last frame. Possible values: 'all' or 'last' (default).
Raises:
AssertionError: If height and width for input and state shapes are not equal or
if state_scaling is not in range [0, 1] or if input_sequence_length_in_frames is
less than model_sequence_length_in_frames.
"""
super(ConvGRU2D, self).__init__(**kwargs)
self.rnn_input_shape = input_shape
self.model_sequence_length_in_frames = model_sequence_length_in_frames
self.input_sequence_length_in_frames = input_sequence_length_in_frames
assert (
input_sequence_length_in_frames >= model_sequence_length_in_frames
), "The input sequence must be at least as long as the \
sequence with gradient flow."
self.is_stateful = is_stateful
self.state_scaling = state_scaling
assert 0 <= state_scaling <= 1, "state_scaling must lie in range [0, 1]."
# Set the initial state shape.
initial_state_shape = list(initial_state_shape)
if initial_state_shape[0] is None:
# Singleton batch dimension works for forward pass (stateful inference),
# as the variable shape can be changed in assign op.
initial_state_shape[0] = 1
self.initial_state_shape = initial_state_shape
assert tuple(input_shape[2:4]) == tuple(
initial_state_shape[2:4]
), "Both height and width for input and state shapes must be equal."
self.state_depth = initial_state_shape[1]
self.spatial_kernel_height = spatial_kernel_height
self.spatial_kernel_width = spatial_kernel_width
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.name = name
self.state_output_name = "_".join(["state_output", self.name])
self.output_type = output_type
self.past_feature_shape = input_shape
self._set_initial_state()
def _set_initial_state(self):
"""Initialize the state for the minibatch.
Note that initial_state refers to the initial state of the minibatch.
For the first minibatch this will be zeros (assuming batch size 1).
The initial state for the next minibatches will be set as the final state
of the previous minibatch. This will also update the initial_state dimension
according to the batch size of the training/evaluation.
"""
if self.is_stateful:
self._initial_state = tf.Variable(
tf.zeros([1], dtype=K.floatx()), trainable=False
)
else:
self._initial_state = tf.zeros([], dtype=K.floatx())
def build(self, input_shape):
"""Create layer variables.
Args:
input_shape: Tensor shape as a list/tuple (N, C, H, W), where
[N: batch_size * input_sequence_length_in_frames / None,
C: input_channels,
H: input_height,
W: input_width].
"""
number_input_channels = input_shape[1]
# Create variables here.
# Input projection variables' shape is: [H, W, in_channels, out_channels].
for var_name in self.INPUT_PROJECTION_VARIABLES_NAMES:
temp_var = self.add_weight(
name=var_name,
shape=[
self.spatial_kernel_height,
self.spatial_kernel_width,
number_input_channels,
self.state_depth,
],
initializer="glorot_uniform",
trainable=True,
regularizer=self.kernel_regularizer,
)
setattr(self, var_name, temp_var)
# State variables' shape is: [H, W, in_channels, out_channels].
# Note that in_channels = out_channels = state_depth for those variables.
for var_name in self.STATE_VARIABLES_NAMES:
temp_var = self.add_weight(
name=var_name,
shape=[
self.spatial_kernel_height,
self.spatial_kernel_width,
self.state_depth,
self.state_depth,
],
initializer="glorot_uniform",
trainable=True,
regularizer=self.kernel_regularizer,
)
setattr(self, var_name, temp_var)
# Bias variables' shape is: [out_channels].
for var_name in self.BIAS_VARIABLES_NAMES:
temp_var = self.add_weight(
name=var_name,
shape=[self.state_depth],
initializer="zeros",
trainable=True,
regularizer=self.bias_regularizer,
)
setattr(self, var_name, temp_var)
super(ConvGRU2D, self).build(input_shape)
def compute_output_shape(self, input_shape):
"""Compute layer's output shape.
Args:
input_shape: Tensor shape as a list/tuple (N, C, H, W), where
[N: batch_size * input_sequence_length_in_frames / None,
C: input_channels,
H: input_height,
W: input_width].
Returns:
Output tensor shape as a tuple (M, F, H, W), where
[M: batch_size / None,
F: number_out_channels,
H: input_height,
W: input_width].
"""
if self.output_type == "last":
batch_size = (
input_shape[0] // self.input_sequence_length_in_frames
if input_shape[0] is not None
else None
)
elif self.output_type == "all":
batch_size = input_shape[0]
else:
raise ValueError("output_type = {} not supported".format(self.output_type))
return (batch_size, self.state_depth) + tuple(input_shape[2:])
def _split_inputs_per_time(self, input_tensor):
"""Splits the input tensor into a per time list of tensors.
Args:
input_tensor (N, C, H, W), where
[N: batch_size * input_sequence_length_in_frames,
C: number_input_channels,
H: grid_height,
W: grid_width]): input to the GRU.
Returns:
inputs_per_time: List of tensors each,
with shape (M, F, H, W), where
[M: batch_size,
F: number_out_channels,
H: input_height,
W: input_width].
"""
inputs_per_time = [
input_tensor[offset :: self.input_sequence_length_in_frames]
for offset in range(self.input_sequence_length_in_frames)
]
return inputs_per_time
def call(self, input_tensor):
"""Call the GRU network.
Args:
input_tensor (N, C, H, W), where
[N: batch_size * input_sequence_length_in_frames,
C: number_input_channels,
H: grid_height,
W: grid_width]): input to the GRU.
Returns:
state: Final state of the GRU after input_sequence_length_in_frames cell operations,
with shape (M, F, H, W), where
[M: batch_size * output_sequence_length,
F: number_out_channels,
H: input_height,
W: input_width],
where output_sequence_length == 1 if self.output_type == 'last' and
output_sequence_length == input_sequence_length if self.output_type = 'all'.
"""
# model_sequence_length_in_frames is the number of final steps in the unrolled GRU
# through which the gradients still back-propagate.
model_sequence_length_in_frames = self.model_sequence_length_in_frames
# state_depth is both the number of input channels and the number of
# output filters for the state convolutions within the GRU.
state_depth = self.state_depth
# inputs_per_time is a list with a length of sequence_length_in_frames,
# where each item is a tensor with shape [ batch_size,
# rnn_input_channels,
# height,
# width ].
inputs_per_time = self._split_inputs_per_time(input_tensor)
number_frames_per_sequence = len(inputs_per_time)
# Compute the state shape dynamically from the input_tensor shape.
input_shape = tf.shape(input=input_tensor)
state_shape = tf.stack(
[
input_shape[0] // self.input_sequence_length_in_frames,
state_depth,
input_shape[2],
input_shape[3],
]
)
if self.is_stateful:
# Create a correctly sized tensor of zeros so that the initial state is
# broadcast to the correct shape if its shape is [1].
zero_state = tf.fill(state_shape, tf.constant(0.0, dtype=K.floatx()))
state = zero_state + tf.identity(self._initial_state)
else:
state = tf.fill(state_shape, tf.constant(0.0, dtype=K.floatx()))
output = []
for step, instant_input in enumerate(inputs_per_time):
state = self._step(instant_input, state, step)
# Update the initial state for the next mini-batch.
if step == model_sequence_length_in_frames - 1:
state = self._update_network_state(state)
if self.output_type == "all":
output.append(state)
# Stop the gradients before the extent of the model memory,
# so that the gradients are used only for the last model_sequence_length_in_frames frames.
if step == number_frames_per_sequence - model_sequence_length_in_frames - 1:
state = tf.stop_gradient(state)
if self.output_type == "all":
# Stack the outputs to a 4D tensor with shape
# [M: batch_size * input_sequence_length_in_frames,
# F: number_out_channels,
# H: input_height,
# W: input_width]
# such that in the first dimension first input_sequence_length_in_frames
# values belong to the first sequence, the next input_sequence_length_in_frames
# values belong to the second sequence, and so on.
output_tensor = tf.stack(output, axis=0)
output_tensor = tf.transpose(a=output_tensor, perm=(1, 0, 2, 3, 4))
output_tensor = tf.reshape(
output_tensor,
(input_shape[0], state_depth, input_shape[2], input_shape[3]),
)
return output_tensor
return state
def _step(self, input_tensor, state, step):
"""Single time step update.
Args:
input_tensor (N, C, H, W), where
[N: batch_size,
C: number_input_channels,
H: grid_height,
W: grid_width]): input to the GRU.
state (N, F, H, W), where
[N: batch_size,
F: number_out_channels,
H: grid_height,
W: grid_width]): GRU state tensor.
For stateless GRU layer, the initial state is zeros. Because zeros tensor
(or tf.fill) is not supported in TensorRT, a `None` state is used to represent
zero state. In this case, this function will simply not use the input variable
`state`.
step (int): Time step index.
"""
# Scale the state down to simulate the necessary leak.
if state is not None:
state = state * self.state_scaling
# Convolutional GRU operations.
z = conv2d(input_tensor, self.W_z)
if state is not None:
z += conv2d(state, self.U_z)
z = K.bias_add(z, self.b_z, data_format="channels_first")
z = K.sigmoid(z)
r = conv2d(input_tensor, self.W_r)
if state is not None:
r += conv2d(state, self.U_r)
r = K.bias_add(r, self.b_r, data_format="channels_first")
r = K.sigmoid(r)
h = conv2d(input_tensor, self.W_h)
if state is not None:
h += conv2d(tf.multiply(state, r), self.U_h)
h = K.bias_add(h, self.b_h, data_format="channels_first")
h = K.tanh(h)
# Name the output node operation to ease the TRT export.
state_name = (
self.state_output_name
if step == self.input_sequence_length_in_frames - 1
else None
)
if state is not None:
state = tf.subtract(
tf.multiply(z, h), tf.multiply((z - 1.0), state), name=state_name
)
else:
state = tf.multiply(z, h, name=None)
return state
def _update_network_state(self, state):
"""Updates the state of the GRU depending on the statefulness.
Args:
state: Tensor with shape (M, F, H, W), where
[M: batch_size,
F: number_out_channels,
H: input_height,
W: input_width].
Returns:
Updated state with the same shape.
"""
# If the layer is stateful, update the initial state for the next mini-batch.
if self.is_stateful:
# Tf.assign does not propagate gradients, so do the assign op as a control dependency.
# See https://github.com/tensorflow/tensorflow/issues/17735
updated_state = tf.compat.v1.assign(
ref=self._initial_state, value=state, validate_shape=False
)
with tf.control_dependencies([updated_state]):
state = tf.identity(state)
return state
def get_config(self):
"""Get layer's configuration.
Returns:
The dict of configuration.
"""
config = {
"model_sequence_length_in_frames": self.model_sequence_length_in_frames,
"input_sequence_length_in_frames": self.input_sequence_length_in_frames,
"state_scaling": self.state_scaling,
"input_shape": self.input_shape,
"initial_state_shape": self.initial_state_shape,
"kernel_regularizer": self.kernel_regularizer,
"bias_regularizer": self.bias_regularizer,
"spatial_kernel_height": self.spatial_kernel_height,
"spatial_kernel_width": self.spatial_kernel_width,
"is_stateful": self.is_stateful,
"output_type": self.output_type,
}
base_config = super(ConvGRU2D, self).get_config()
base_config.update(config)
return base_config
@classmethod
def from_config(cls, config):
"""Create convolutional GRU layer from its config.
Args:
config (dict) Layer config.
Returns:
A convolutional GRU layer.
"""
# Handle backward compatibility.
if "sequence_length_in_frames" in config:
config.update(
{
"model_sequence_length_in_frames": config[
"sequence_length_in_frames"
],
"input_sequence_length_in_frames": config[
"sequence_length_in_frames"
],
"state_scaling": 1.0,
"is_stateful": False,
}
)
config.pop("sequence_length_in_frames")
return super(ConvGRU2D, cls).from_config(config)
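# --- Usage sketch (added for exposition; not part of this module) -------------------
# Minimal example of building and calling the layer, mirroring how the unit tests
# elsewhere in this repository construct it. Shapes are NCHW; at call time the batch
# dimension carries batch_size * input_sequence_length_in_frames frames. The sizes
# below are made up for illustration.
if __name__ == "__main__":
    import keras
    gru = ConvGRU2D(
        model_sequence_length_in_frames=2,
        input_sequence_length_in_frames=2,
        state_scaling=1.0,
        input_shape=[None, 4, 16, 32],          # (N, C, H, W)
        initial_state_shape=[None, 8, 16, 32],  # (M, F, H, W)
        spatial_kernel_height=3,
        spatial_kernel_width=3,
        output_type="last",
    )
    inputs = keras.Input(shape=(4, 16, 32))
    outputs = gru(inputs)  # (batch_size, 8, 16, 32) when output_type == "last"
    keras.models.Model(inputs=inputs, outputs=outputs).summary()
# -------------------------------------------------------------------------------------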
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/models/templates/conv_gru_2d.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parse Example Proto Processor."""
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.processors import Processor
class ParseExampleProto(Processor):
"""Parse and deserialize an example proto to a dictionary with tensor values.
Args:
features (dict): a dictionary with strings as keys and feature-tensors as values.
The keys should relate to those in the proto, and the values should be of type
`tf.VarLenFeature` or `tf.FixedLenFeature`.
single (bool): indicates whether we're parsing a single example, or a batch of examples.
kwargs (dict): keyword arguments passed to parent class.
"""
@save_args
def __init__(self, features, single, **kwargs):
""""__init__ method."""
self.features = features
self.single = single
super(ParseExampleProto, self).__init__(**kwargs)
def call(self, serialized):
"""call method.
Note: only `values` will be extracted from `tf.VarLenFeature` outputs. Therefore this
method might not be ideally compatible with purely sparse tensors.
Args:
serialized (tensor): a serialized example proto.
Returns:
dict: a dict of tensors with the same keys as the `features` dict, and dense tensor
values as extracted from the example proto's relating key value.
"""
if self.single:
example = tf.io.parse_single_example(
serialized=serialized, features=self.features
)
else:
example = tf.io.parse_example(serialized=serialized, features=self.features)
for key, value in example.items():
if isinstance(value, tf.SparseTensor):
default_value = "" if value.dtype == tf.string else 0
example[key] = tf.sparse.to_dense(value, default_value=default_value)
if not self.single:
# If known, retain the shape of the batch.
shape = example[key].get_shape().as_list()
shape[0] = serialized.get_shape()[0]
example[key].set_shape(shape)
return example
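# --- Usage sketch (added for exposition; not part of this module) -------------------
# Minimal example of how this processor could be wired up. The feature keys and the
# TFRecord source are hypothetical and only for illustration.
if __name__ == "__main__":
    example_features = {
        "image/encoded": tf.io.FixedLenFeature([], dtype=tf.string),
        "image/labels": tf.io.VarLenFeature(dtype=tf.int64),
    }
    parser = ParseExampleProto(features=example_features, single=True)
    # `serialized_example` would be a scalar string tensor read from a TFRecord
    # dataset; the sparse "image/labels" feature is densified by call() above.
    # parsed = parser(serialized_example)
# -------------------------------------------------------------------------------------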
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/parse_example_proto.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying a Modulus Transform to input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core.processors import ColorTransform, SpatialTransform
from nvidia_tao_tf1.core.processors.processors import Processor
from nvidia_tao_tf1.core.types import data_format as modulus_data_format, Transform
class ColorTransformer(Processor):
"""Processor for applying a Modulus color transform to input."""
def __init__(
self, transform, min_clip=0.0, max_clip=255.0, data_format=None, **kwargs
):
"""Construct processor that uses a Transform instance to transform input tensors.
Args:
transform (Transform): Input Transform instance that defines a set of transformations
to be applied to an input tensor.
min_clip (float): Value to clip all minimum numbers to.
max_clip (float): Value to clip all maximum numbers to.
data_format (string): A string representing the dimension ordering of the input
images, must be one of 'channels_last' (NHWC) or 'channels_first' (NCHW).
kwargs (dict): keyword arguments passed to parent class.
"""
super(ColorTransformer, self).__init__(**kwargs)
if not isinstance(transform, Transform):
raise TypeError(
"Expecting an argument of type 'Transform', "
"given: {}.".format(type(transform).__name__)
)
self._data_format = (
data_format if data_format is not None else modulus_data_format()
)
self._transform = transform
self._transform_processor = ColorTransform(
min_clip=min_clip, max_clip=max_clip, data_format=self._data_format
)
def call(self, applicant):
"""Process tensors by applying transforms according to their data types.
Args:
applicant (Tensor): Input tensor to be transformed.
Returns:
Tensor: Transformed result tensor.
"""
input_shape = tf.shape(input=applicant)
batch_size = input_shape[0]
ctms = tf.tile(
tf.expand_dims(self._transform.color_transform_matrix, axis=0),
[batch_size, 1, 1],
)
return self._transform_processor(applicant, ctms=ctms)
class SpatialTransformer(Processor):
"""Processor for applying a Modulus spatial transform to input."""
def __init__(
self,
transform,
method="bilinear",
background_value=0.5,
verbose=False,
data_format=None,
**kwargs
):
"""Construct processor that uses a Transform instance to transform input tensors.
Args:
transform (Transform): Input Transform instance that defines a set of transformations
to be applied to an input tensor.
method (string): Sampling method used. Can be 'bilinear' or 'bicubic'.
background_value (float): The value the background canvas should have.
verbose (bool): Toggle verbose output during processing.
data_format (string): A string representing the dimension ordering of the input
images, must be one of 'channels_last' (NHWC) or 'channels_first' (NCHW).
kwargs (dict): keyword arguments passed to parent class.
"""
super(SpatialTransformer, self).__init__(**kwargs)
if not isinstance(transform, Transform):
raise TypeError(
"Expecting an argument of type 'Transform', "
"given: {}.".format(type(transform).__name__)
)
self._data_format = (
data_format if data_format is not None else modulus_data_format()
)
self._transform = transform
self._spatial_transform = SpatialTransform(
method=method,
background_value=background_value,
data_format=self._data_format,
verbose=verbose,
)
def call(self, applicant):
"""Process tensors by applying transforms according to their data types.
Args:
applicant (Tensor): Input tensor to be transformed.
Returns:
Tensor: Transformed result tensor.
"""
input_shape = tf.shape(input=applicant)
batch_size = input_shape[0]
stms = tf.tile(
tf.expand_dims(self._transform.spatial_transform_matrix, axis=0),
[batch_size, 1, 1],
)
output = self._spatial_transform(
applicant,
stms=stms,
shape=(
int(self._transform.canvas_shape.height),
int(self._transform.canvas_shape.width),
),
)
return output
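# --- Usage sketch (added for exposition; not part of this module) -------------------
# Rough example of applying SpatialTransformer with an identity transform. It assumes
# that Transform is constructible with the canvas_shape, color_transform_matrix and
# spatial_transform_matrix fields used above, and that a Canvas2D type with height and
# width fields exists in nvidia_tao_tf1.core.types; check that module for the exact API.
if __name__ == "__main__":
    from nvidia_tao_tf1.core.types import Canvas2D  # assumption: exported here
    identity = Transform(
        canvas_shape=Canvas2D(height=64, width=128),
        color_transform_matrix=tf.eye(4),    # 4x4 ctm, as tiled by ColorTransformer
        spatial_transform_matrix=tf.eye(3),  # 3x3 stm, as tiled by SpatialTransformer
    )
    spatial_op = SpatialTransformer(transform=identity, method="bilinear")
    images = tf.zeros([2, 3, 64, 128])  # NCHW batch matching the canvas above
    warped = spatial_op(images)         # identity warp; same spatial size
# -------------------------------------------------------------------------------------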
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/transformers.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the polygon rasterizer processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict, namedtuple
import errno
import os
import numpy as np
from PIL import Image
import pytest
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object
from nvidia_tao_tf1.core.processors import PolygonRasterizer
from nvidia_tao_tf1.core.processors import PolygonTransform
from nvidia_tao_tf1.core.processors import SparsePolygonRasterizer
from nvidia_tao_tf1.core.types import DataFormat
from nvidia_tao_tf1.core.utils import test_session
# Debug mode for saving test_polygon_rasterizer generated images to disk.
# This can be used for visually comparing test_polygon_rasterizer generated images to references,
# and regenerating the reference images in case the test or the rasterizer changes. In the latter
# case, run the test, visually check the images in test_polygon_rasterizer folder, and if ok, copy
# them to test_polygon_rasterizer_ref folder and commit to git.
debug_save_images = False
def _connected_components(edges):
"""
Given an iterable of edges, generate the connected components of the graph as sets of nodes.
Time complexity is linear with respect to the number of edges.
Adapted from: https://stackoverflow.com/questions/12321899
"""
neighbors = defaultdict(set)
for a, b in edges:
neighbors[a].add(b)
neighbors[b].add(a)
seen = set()
def _component(
node, neighbors=neighbors, seen=seen, see=seen.add
): # pylint: disable=W0102
unseen = set([node])
next_unseen = unseen.pop
while unseen:
node = next_unseen()
see(node)
unseen |= neighbors[node] - seen
yield node
return (set(_component(node)) for node in neighbors if node not in seen)
def _matching_pixels(image, test):
"""Generate all pixel coordinates where pixel satisfies test."""
width, height = image.size
pixels = image.load()
for x in range(width):
for y in range(height):
if test(pixels[x, y]):
yield x, y
def _make_edges(coordinates):
"""Generate all pairs of neighboring pixel coordinates."""
coordinates = set(coordinates)
for x, y in coordinates:
if (x - 1, y - 1) in coordinates:
yield (x, y), (x - 1, y - 1)
if (x, y - 1) in coordinates:
yield (x, y), (x, y - 1)
if (x + 1, y - 1) in coordinates:
yield (x, y), (x + 1, y - 1)
if (x - 1, y) in coordinates:
yield (x, y), (x - 1, y)
yield (x, y), (x, y)
def _boundingbox(coordinates):
"""Return the bounding box of all coordinates."""
xs, ys = zip(*coordinates)
return min(xs), min(ys), max(xs), max(ys)
def _is_black_enough(pixel):
return pixel > 0
def disjoint_areas(image, test=_is_black_enough):
"""Return the bounding boxes of all non-consecutive areas who's pixels satisfy test."""
for each in _connected_components(_make_edges(_matching_pixels(image, test))):
yield _boundingbox(each)
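# --- Illustrative helper (added for exposition; not one of the original tests) ------
# Tiny usage sketch of the connected-component helpers above: two isolated bright
# pixels on a blank grayscale PIL image yield one bounding box per blob.
def _disjoint_areas_example():
    img = Image.new("L", (8, 8), color=0)  # blank 8x8 grayscale canvas
    img.putpixel((1, 1), 255)              # first isolated blob
    img.putpixel((6, 5), 255)              # second isolated blob
    return sorted(disjoint_areas(img))     # [(1, 1, 1, 1), (6, 5, 6, 5)]
# -------------------------------------------------------------------------------------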
Polygon = namedtuple(
"Polygon",
["polygons", "vertices_per_polygon", "class_ids_per_polygon", "polygons_per_image"],
)
def _make_triangle(width, height, class_id=0):
"""Make a triangle."""
polygons = tf.constant(
[[width / 2.0, 0.0], [0.0, height], [width, height]], dtype=tf.float32
)
vertices_per_polygon = tf.constant([3], dtype=tf.int32)
class_ids_per_polygon = tf.constant([class_id], dtype=tf.int32)
polygons_per_image = tf.constant([1], dtype=tf.int32)
return Polygon(
polygons, vertices_per_polygon, class_ids_per_polygon, polygons_per_image
)
def _make_line(length, thickness, offset_y, offset_x, angle=0.0):
"""Draw a line (thin rectangle) at an angle."""
width = length
height = thickness
# Draw a line (rectangle) around the origin
x = np.array(
[-width / 2.0, width / 2.0, width / 2.0, -width / 2.0], dtype=np.float32
)
y = np.array(
[-height / 2.0, -height / 2.0, height / 2.0, height / 2.0], dtype=np.float32
)
# Rotate the rectangle, and zip the polygons to a tensor
xr = x * np.cos(angle) - y * np.sin(angle)
yr = y * np.cos(angle) + x * np.sin(angle)
polygons = tf.stack([xr, yr], axis=1)
# Translate the polygon to the center of the image
polygons = polygons + tf.constant([offset_y, offset_x], dtype=tf.float32)
vertices_per_polygon = tf.constant([4], dtype=tf.int32)
class_ids_per_polygon = tf.constant([0], dtype=tf.int32)
return polygons, vertices_per_polygon, class_ids_per_polygon
@pytest.fixture
def _triangle_polygon():
polygons = tf.constant(
[
[0.1, 0.3],
[0.7, 0.3],
[0.5, 0.4],
[0.2, 0.3],
[0.7, 0.3],
[0.5, 0.7],
[0.3, 0.3],
[0.7, 0.3],
[0.5, 0.7],
[0.4, 0.3],
[0.7, 0.3],
[0.5, 0.7],
[0.5, 0.3],
[0.7, 0.3],
[0.5, 0.7],
],
dtype=tf.float32,
)
vertices_per_polygon = tf.constant([3, 3, 3, 3, 3], dtype=tf.int32)
class_ids_per_polygon = tf.constant([1, 0, 0, 0, 1], dtype=tf.int32)
polygons_per_image = tf.constant([5], dtype=tf.int32)
return Polygon(
polygons, vertices_per_polygon, class_ids_per_polygon, polygons_per_image
)
@pytest.fixture
def _lanenet_polygon():
"""A set of polygons that spells out "L A N E N E T"."""
polygons = (
tf.constant(
[
[5.183, 9.818],
[3.298, 92.005],
[24.787, 92.005],
[28.086, 77.019],
[17.907, 69.29],
[29.5, 92.005],
[34.306, 9.818],
[43.731, 92.005],
[46.465, 92.005],
[43.731, 10.667],
[57.021, 83.428],
[56.078, 11.892],
[60.603, 12.174],
[61.262, 93.041],
[50.706, 87.669],
[64.467, 86.256],
[64.467, 93.607],
[74.834, 93.607],
[74.08, 85.689],
[67.671, 82.392],
[67.954, 60.996],
[73.42, 59.865],
[73.608, 53.268],
[68.802, 51.477],
[68.236, 27.443],
[71.818, 24.521],
[72.007, 11.986],
[64.467, 11.986],
[76.907, 93.041],
[75.776, 11.986],
[80.583, 11.986],
[84.824, 85.973],
[84.824, 11.986],
[87.746, 11.986],
[87.746, 93.041],
[80.395, 89.365],
[89.632, 93.607],
[94.438, 93.607],
[94.438, 85.407],
[92.035, 84.842],
[92.035, 53.174],
[94.438, 52.514],
[94.438, 44.597],
[92.035, 41.769],
[92.459, 20.28],
[95.192, 20.28],
[94.627, 12.457],
[90.386, 11.986],
[96.041, 11.986],
[96.041, 15.567],
[97.549, 15.567],
[96.041, 93.607],
[98.585, 93.607],
[98.585, 15.85],
[99.527, 15.85],
[99.527, 11.986],
],
dtype=tf.float32,
)
* 0.01
)
vertices_per_polygon = tf.constant([5, 3, 7, 13, 8, 12, 8], dtype=tf.int32)
class_ids_per_polygon = tf.constant([0, 1, 2, 0, 1, 2, 0], dtype=tf.int32)
polygons_per_image = tf.constant([7], dtype=tf.int32)
return Polygon(
polygons, vertices_per_polygon, class_ids_per_polygon, polygons_per_image
)
def test_one_hot_binarize_fail():
"""Test incompatibility of non-one_hot and non-binary output."""
with pytest.raises(ValueError):
PolygonRasterizer(
width=256,
height=128,
nclasses=3,
one_hot=False,
binarize=False,
data_format=DataFormat.CHANNELS_FIRST,
)
@pytest.mark.parametrize(
"data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize("one_hot", [True, False])
def test_single_input(one_hot, data_format, width=60, height=30, nclasses=2):
"""Test the input of a single image, as opposed to a batched input."""
sess = test_session()
polygons = tf.zeros([0, 2], dtype=tf.float32)
vertices_per_polygon = tf.constant([], dtype=tf.int32)
class_ids_per_polygon = tf.constant([], dtype=tf.int32)
op = PolygonRasterizer(
width=width,
height=height,
nclasses=nclasses,
one_hot=one_hot,
data_format=data_format,
)
fetches = op(polygons, vertices_per_polygon, class_ids_per_polygon)
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run(fetches)
if data_format == DataFormat.CHANNELS_LAST:
out = np.transpose(out, [2, 0, 1])
expected_shape = (nclasses + 1, height, width) if one_hot else (1, height, width)
np.testing.assert_array_equal(out.shape, expected_shape)
dim_tests = [
# One image
(1, 1, 60, 30, True, (1, 2, 30, 60)),
# Three images
(3, 1, 60, 30, True, (3, 2, 30, 60)),
# Three images, three classes
(3, 3, 60, 30, True, (3, 4, 30, 60)),
# Without one_hot
# Three images, one class
(3, 1, 60, 30, False, (3, 1, 30, 60)),
# Three images, three classes
(3, 3, 60, 30, False, (3, 1, 30, 60)),
]
@pytest.mark.parametrize(
"nimages,nclasses,width,height,one_hot,expected_shape", dim_tests
)
def test_shapes_dims_no_polygons(
nimages, nclasses, width, height, one_hot, expected_shape
):
"""
Test various dimension inputs, but without any polygons. Checks that the shapes are as expected
and that the contents of the output tensor are entirely blank.
"""
sess = test_session()
polygons = tf.zeros([0, 2], dtype=tf.float32)
vertices_per_polygon = tf.constant([], dtype=tf.int32)
class_ids_per_polygon = tf.constant([], dtype=tf.int32)
polygons_per_image = tf.zeros([nimages], dtype=tf.int32)
op = PolygonRasterizer(
width=width,
height=height,
nclasses=nclasses,
one_hot=one_hot,
data_format=DataFormat.CHANNELS_FIRST,
)
fetches = op(
polygons, vertices_per_polygon, class_ids_per_polygon, polygons_per_image
)
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run(fetches)
# Check shape
assert out.shape == expected_shape
if one_hot:
expected = np.concatenate(
[
np.ones([nimages, 1, height, width]), # background
np.zeros([nimages, nclasses, height, width]),
], # class map
axis=1,
)
np.testing.assert_array_equal(out, expected)
else:
np.testing.assert_array_equal(out, np.zeros([nimages, 1, height, width]))
dim_tests = [
# One image
(1, 1, 60, 30, True, True, (1, 2, 30, 60)),
# Three images, exclude background.
(3, 1, 60, 30, True, False, (3, 1, 30, 60)),
# Without one_hot
# Three images, one class, exclude background
(3, 1, 60, 30, False, False, (3, 1, 30, 60)),
# Three images, three classes
(3, 3, 60, 30, False, True, (3, 1, 30, 60)),
]
@pytest.mark.parametrize(
"nimages,nclasses,width,height,one_hot,include_background,expected_shape", dim_tests
)
def test_shapes_dims_include_background(
nimages, nclasses, width, height, one_hot, include_background, expected_shape
):
"""
Test various dimension inputs with and without the background channel, but without any
polygons. Checks that the shapes are as expected and that the output tensor is entirely blank.
"""
sess = test_session()
polygons = tf.zeros([0, 2], dtype=tf.float32)
vertices_per_polygon = tf.constant([], dtype=tf.int32)
class_ids_per_polygon = tf.constant([], dtype=tf.int32)
polygons_per_image = tf.zeros([nimages], dtype=tf.int32)
op = PolygonRasterizer(
width=width,
height=height,
nclasses=nclasses,
one_hot=one_hot,
data_format=DataFormat.CHANNELS_FIRST,
include_background=include_background,
)
fetches = op(
polygons, vertices_per_polygon, class_ids_per_polygon, polygons_per_image
)
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run(fetches)
# Check shape
assert out.shape == expected_shape
if one_hot:
if include_background:
expected = np.concatenate(
[
np.ones([nimages, 1, height, width]), # background
np.zeros([nimages, nclasses, height, width]),
], # class map
axis=1,
)
else:
expected = np.zeros([nimages, nclasses, height, width])
np.testing.assert_array_equal(out, expected)
else:
np.testing.assert_array_equal(out, np.zeros([nimages, 1, height, width]))
@pytest.mark.parametrize("class_id,nclasses", [(0, -1), (1, 1), (0, 0)])
def test_class_id_fails(class_id, nclasses, width=64, height=32):
"""
Test fail cases where the class_id is out of bounds with respect to the total amount of
classes.
"""
op = PolygonRasterizer(
width=width,
height=height,
nclasses=nclasses,
one_hot=True,
data_format=DataFormat.CHANNELS_FIRST,
)
inputs = _make_triangle(width=width, height=height, class_id=class_id)
fetches = op(*inputs)
sess = test_session()
sess.run(tf.compat.v1.global_variables_initializer())
with pytest.raises(Exception):
sess.run(fetches)
@pytest.mark.parametrize(
"data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize(
"one_hot,binarize", [(True, True), (True, False), (False, True)]
)
def test_triangle_coverage(one_hot, binarize, data_format):
"""Draw a triangle in the center of the image, and test if it covers half of the image.
When `one_hot` is `True`, we also check that the background map contains the inverse
coverage.
"""
height, width, nclasses = 128, 256, 1
op = PolygonRasterizer(
width=width,
height=height,
nclasses=nclasses,
one_hot=one_hot,
binarize=binarize,
data_format=data_format,
)
inputs = _make_triangle(width=width, height=height, class_id=0)
fetches = op(*inputs)
sess = test_session()
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run(fetches)
# Cancel the transpose from output for 'channels_last' format to continue testing.
if data_format == DataFormat.CHANNELS_LAST:
out = np.transpose(out, [0, 3, 1, 2])
mean_per_class_map = np.mean(out, axis=(0, 2, 3))
expected_mean, rtol = 0.5, 1e-2
if one_hot:
np.testing.assert_allclose(
mean_per_class_map[0],
expected_mean,
rtol=rtol,
err_msg="unexpected background coverage.",
)
np.testing.assert_allclose(
mean_per_class_map[1],
expected_mean,
rtol=rtol,
err_msg="unexpected foreground coverage.",
)
if binarize:
# Test of an exact result where each pixel's sum over the class map results in 1
# Combine the background and foreground, and check each pixel adds up to exactly 1
combined_coverage = np.sum(out, axis=1, keepdims=True)
np.testing.assert_array_equal(
combined_coverage,
np.ones([1, 1, height, width]),
err_msg="not every pixel sums up to a value of exactly 1.",
)
else:
np.testing.assert_allclose(
mean_per_class_map,
expected_mean,
rtol=rtol,
err_msg="unexpected coverage area while drawing a triangle.",
)
@pytest.mark.parametrize(
"data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize("one_hot", (True, False))
def test_draw_outside_canvas(one_hot, data_format, width=64, height=32, nclasses=1):
"""Test drawing outside of the canvas visible range, and assert the output is blank."""
polygons = tf.constant(
[
[0.0, 0.0],
[-1.0, 0.0],
[0.0, -1.0], # < (0, 0)
[width, height],
[2 * width, height],
[width, 2 * height],
], # > (width, height)
dtype=tf.float32,
)
vertices_per_polygon = tf.constant([3, 3], dtype=tf.int32)
class_ids_per_polygon = tf.constant([0, 0], dtype=tf.int32)
polygons_per_image = tf.constant([2], dtype=tf.int32)
op = PolygonRasterizer(
width=width,
height=height,
nclasses=nclasses,
one_hot=one_hot,
data_format=data_format,
)
fetches = op(
polygons, vertices_per_polygon, class_ids_per_polygon, polygons_per_image
)
sess = test_session()
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run(fetches)
# Cancel the transpose from output for 'channels_last' format to continue testing.
if data_format == DataFormat.CHANNELS_LAST:
out = np.transpose(out, [0, 3, 1, 2])
if one_hot:
expected = np.concatenate(
[
np.ones([1, 1, height, width]), # background
np.zeros([1, nclasses, height, width]),
], # class map
axis=1,
)
np.testing.assert_array_equal(out, expected)
else:
np.testing.assert_array_equal(out, np.zeros([1, nclasses, height, width]))
@pytest.mark.parametrize(
"data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize(
"one_hot, binarize", [(True, True), (True, False), (False, True)]
)
def test_draw_repeated_lanenet_text(
one_hot,
binarize,
data_format,
_lanenet_polygon,
nclasses=3,
width=128,
height=64,
nrepeats=5,
):
"""
Test a complicated set of polygons that spells out "L A N E N E T" in for a few option
combinations and loop those a few times, and compare the output to the previous one.
This is run in a new session each time.
Also tests the binarize setting for actually result in binary outputs, and visa versa.
"""
polygons_abs = tf.matmul(
_lanenet_polygon.polygons,
tf.constant([[width, 0], [0, height]], dtype=tf.float32),
)
vertices_per_polygon = _lanenet_polygon.vertices_per_polygon
class_ids_per_polygon = _lanenet_polygon.class_ids_per_polygon
polygons_per_image = _lanenet_polygon.polygons_per_image
op = PolygonRasterizer(
width=width,
height=height,
nclasses=nclasses,
one_hot=one_hot,
binarize=binarize,
data_format=data_format,
)
fetches = op(
polygons_abs, vertices_per_polygon, class_ids_per_polygon, polygons_per_image
)
last_output = None
for _ in range(nrepeats):
sess = test_session()
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run(fetches)
# Cancel the transpose from output for 'channels_last' format to continue testing.
if data_format == DataFormat.CHANNELS_LAST:
out = np.transpose(out, [0, 3, 1, 2])
if one_hot:
if binarize:
np.testing.assert_array_equal(
np.unique(out),
[0.0, 1.0],
"binarize and one_hot did not result in exclusively 0 and 1 values.",
)
else:
assert (
len(np.unique(out)) > 2
), "one_hot and non-binarized output resulted in too few unique values."
# Check each class contains something
np.testing.assert_array_equal(
np.amax(out, axis=(0, 2, 3)),
np.ones(1 + nclasses),
"not every class map contains output while using one_hot.",
)
else: # not one hot
expected_unique_range = np.arange(nclasses + 1)
np.testing.assert_array_equal(
np.unique(out),
expected_unique_range,
"binarize and not one_hot did not result in exclusively values: %s."
% expected_unique_range,
)
if last_output is not None:
np.testing.assert_array_equal(
out,
last_output,
"drawing of the same polygon set repeatedly results in a non-deterministic "
"rasterized map",
)
last_output = out
@pytest.mark.parametrize(
"data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize("device", ["/gpu:0", "/cpu"])
@pytest.mark.parametrize("batch_size", [1, 2, 4, 8])
def test_draw_batched_copies_are_identical(
batch_size, device, data_format, _lanenet_polygon, width=128, height=64
):
"""Test drawing batched copies of the same polygon produce identical results"""
with tf.device(device):
op = PolygonRasterizer(
width=width,
height=height,
nclasses=3,
one_hot=True,
binarize=True,
verbose=False,
data_format=data_format,
)
polygons_abs = tf.matmul(
_lanenet_polygon.polygons,
tf.constant([[width, 0], [0, height]], dtype=tf.float32),
)
vertices_per_polygon = _lanenet_polygon.vertices_per_polygon
class_ids_per_polygon = _lanenet_polygon.class_ids_per_polygon
polygons_per_image = _lanenet_polygon.polygons_per_image
single_rasterized = op(
polygons_abs,
vertices_per_polygon,
class_ids_per_polygon,
polygons_per_image,
)
polygons_abs = tf.tile(
tf.matmul(
_lanenet_polygon.polygons,
tf.constant([[width, 0], [0, height]], dtype=tf.float32),
),
[batch_size, 1],
)
vertices_per_polygon = tf.tile(
_lanenet_polygon.vertices_per_polygon, [batch_size]
)
class_ids_per_polygon = tf.tile(
_lanenet_polygon.class_ids_per_polygon, [batch_size]
)
polygons_per_image = tf.tile(_lanenet_polygon.polygons_per_image, [batch_size])
batch_rasterized = op(
polygons_abs,
vertices_per_polygon,
class_ids_per_polygon,
polygons_per_image,
)
sess = test_session()
sess.run(tf.compat.v1.global_variables_initializer())
single_out = sess.run(single_rasterized)
batch_out = sess.run(batch_rasterized)
# Cancel the transpose from output for 'channels_last' format to continue testing.
if data_format == DataFormat.CHANNELS_LAST:
single_out = np.transpose(single_out, [0, 3, 1, 2])
batch_out = np.transpose(batch_out, [0, 3, 1, 2])
for raster_index in range(batch_size):
np.testing.assert_array_equal(single_out[0], batch_out[raster_index])
@pytest.mark.parametrize(
"data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize("thickness,expect_fragmented", [(0.01, True), (0.26, False)])
def test_line_non_fragmentation(thickness, expect_fragmented, data_format):
"""
Rasterize subpixel-thick lines at different angles, and test the lines are continuous
Note that polygon thicknesses above 0.25 should not fragment because of the subsampler in the
rasterizer.
Args:
thickness (float): the absolute thickness of the line we will draw.
expect_fragmented (bool): Whether or not we expect fragmentation
"""
width, height = 256, 128
op = PolygonRasterizer(
width=width,
height=height,
nclasses=1,
one_hot=False,
binarize=True,
data_format=data_format,
)
ps, vs, cs = [], [], []
nlines = 16
angles = np.linspace(0.0, 3.14, nlines, endpoint=False)
for angle in angles:
p, v, c = _make_line(
length=width,
thickness=thickness,
offset_y=width / 2,
offset_x=height / 2,
angle=angle,
)
ps.append(p)
vs.append(v)
cs.append(c)
ps = tf.concat(ps, axis=0)
vs = tf.concat(vs, axis=0)
cs = tf.concat(cs, axis=0)
fetches = op(ps, vs, cs)
sess = test_session()
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run(fetches)
# Cancel the transpose from output for 'channels_last' format to continue testing.
if data_format == DataFormat.CHANNELS_LAST:
out = np.transpose(out, [2, 0, 1])
image = Image.fromarray(np.uint8(out[0] * 255))
n_fragments = len(list(disjoint_areas(image)))
is_fragmented = n_fragments > 1
assert is_fragmented == expect_fragmented
def test_identity_polygon_transform():
"""Test an identity polygon transform leaves the polygon unchanged."""
polygons_in = _make_triangle(width=64, height=32, class_id=0)[0]
# Define a spatial transformation matrix.
stm = tf.eye(3, dtype=tf.float32)
# Set up the polygon transformer, and transform the polygons.
polygon_transform_op = PolygonTransform()
polygons_out = polygon_transform_op(polygons_in, stm)
# Set up the rasterizer and run the graph.
sess = test_session()
sess.run(tf.compat.v1.global_variables_initializer())
p_in_np, p_out_np = sess.run([polygons_in, polygons_out])
np.testing.assert_array_equal(p_out_np, p_in_np)
def _draw_dense(
num_classes,
one_hot,
binarize,
num_samples,
data_format,
polygons_per_image,
vertices_per_polygon,
class_ids_per_polygon,
vertices,
):
"""Execute PolygonRasterizer op."""
sess = test_session()
op = PolygonRasterizer(
width=100,
height=90,
nclasses=num_classes,
one_hot=one_hot,
binarize=binarize,
verbose=True,
data_format=data_format,
num_samples=num_samples,
)
fetches = op(
polygon_vertices=tf.constant(vertices, dtype=tf.float32),
vertex_counts_per_polygon=vertices_per_polygon,
class_ids_per_polygon=class_ids_per_polygon,
polygons_per_image=polygons_per_image,
)
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(fetches)
# Check shape inference.
assert fetches.shape == output.shape
return output
def _draw_sparse(
num_classes,
one_hot,
binarize,
num_samples,
data_format,
polygons,
class_ids_per_polygon,
):
"""Execute SparsePolygonRasterizer op."""
sess = test_session()
op = SparsePolygonRasterizer(
width=100,
height=90,
nclasses=num_classes,
one_hot=one_hot,
binarize=binarize,
verbose=True,
data_format=data_format,
num_samples=num_samples,
)
fetches = op(polygons=polygons, class_ids_per_polygon=class_ids_per_polygon)
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(fetches)
# Check shape inference.
assert fetches.shape == output.shape
return output
def _run_sparse_rasterizer_error_check(data):
"""Run sparse rasterizer error check."""
with pytest.raises((tf.errors.InvalidArgumentError, ValueError)):
polygons = tf.SparseTensor(
values=tf.constant(data["vertices"], dtype=tf.float32),
indices=tf.constant(data["indices"], dtype=tf.int64),
dense_shape=data["dense_shape"],
)
class_ids_per_polygon = tf.SparseTensor(
values=tf.constant(data["class_ids_per_polygon"], dtype=tf.int64),
indices=tf.constant(data["class_indices"], dtype=tf.int64),
dense_shape=data["dense_shape"],
)
_draw_sparse(
num_classes=2,
one_hot=False,
binarize=True,
num_samples=1,
data_format=DataFormat.CHANNELS_FIRST,
polygons=polygons,
class_ids_per_polygon=class_ids_per_polygon,
)
@pytest.fixture()
def sparse_rasterizer_error_check_data():
"""Return data dict for sparse rasterizer error checks."""
vertices = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
indices = [
[0, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 0, 1, 1],
[0, 0, 2, 0],
[0, 0, 2, 1],
[0, 1, 0, 0],
[0, 1, 0, 1],
[0, 1, 1, 0],
[0, 1, 1, 1],
[0, 1, 2, 0],
[0, 1, 2, 1],
]
class_ids_per_polygon = [0, 1]
class_indices = [[0, 0, 0, 0], [0, 1, 0, 0]]
dense_shape = [1, 2, 6, 2]
class_ids_dense_shape = [1, 2, 6]
return {
"vertices": vertices,
"indices": indices,
"dense_shape": dense_shape,
"class_ids_per_polygon": class_ids_per_polygon,
"class_indices": class_indices,
"class_ids_dense_shape": class_ids_dense_shape,
}
@pytest.mark.parametrize(
"dense_shape",
[
[[1], [2]], # Dense shape is not 1D.
[1, 2], # Dense shape does not have 3 or 4 elements.
[0, 2, 6, 2], # Dense shape batch size is <= 0.
[1, -1, 6, 2], # Dense shape polygons dimension size < 0.
[1, 2, -1, 2], # Dense shape vertices dimension size < 0.
[1, 2, 6, 1], # Dense shape coordinates dimension size != 2.
],
)
def test_sparse_rasterizer_error_check_dense_shape(
sparse_rasterizer_error_check_data, dense_shape
):
"""Test sparse rasterizer dense shape error checks."""
sparse_rasterizer_error_check_data["dense_shape"] = dense_shape
sparse_rasterizer_error_check_data["class_ids_dense_shape"] = dense_shape[:-1]
_run_sparse_rasterizer_error_check(sparse_rasterizer_error_check_data)
@pytest.mark.parametrize(
"vertices",
[
[[0.0], [0.0]], # Vertices is not 1D.
[0.0, 0.0, 0.0], # Vertices size is not even.
[0.0, 0.0], # Vertices size does not match indices size.
],
)
def test_sparse_rasterizer_error_check_vertices(
sparse_rasterizer_error_check_data, vertices
):
"""Test sparse rasterizer vertices array error checks."""
sparse_rasterizer_error_check_data["vertices"] = vertices
sparse_rasterizer_error_check_data["indices"] = [
[0, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
]
_run_sparse_rasterizer_error_check(sparse_rasterizer_error_check_data)
@pytest.mark.parametrize(
"indices",
[
[0, 0, 0, 0], # Indices is not 2D.
[[0, 0, 0, 0]], # Indices dim0 size != values size.
[[0, 0, 0], [0, 0, 1]], # Indices dim1 size != dense_shape num elements.
[[2, 0, 0, 0], [2, 0, 0, 1]], # Image index >= batch_size or < 0.
[
[0, 2, 0, 0],
[0, 2, 0, 1],
], # Polygon index < 0 or >= dense_shape polygons dimension size.
[
[0, 0, 2, 0],
[0, 0, 2, 1],
], # Vertex index < 0 or >= dense_shape vertices dimension size.
[[0, 0, 0, 2], [0, 0, 0, 3]], # Coordinate index not 0 or 1.
[[1, 0, 0, 0], [0, 0, 0, 1]], # Image index not in order.
[[0, 1, 0, 0], [0, 0, 0, 1]], # Polygon index not in order.
[[0, 0, 1, 0], [0, 0, 0, 1]], # Vertex index not in order.
[[0, 0, 0, 1], [0, 0, 0, 0]],  # Coordinate index not in order.
],
)
def test_sparse_rasterizer_error_check_indices(
sparse_rasterizer_error_check_data, indices
):
"""Test sparse rasterizer indices error checks."""
sparse_rasterizer_error_check_data["dense_shape"] = [2, 2, 2, 2]
sparse_rasterizer_error_check_data["vertices"] = [0.0, 0.0]
sparse_rasterizer_error_check_data["indices"] = indices
_run_sparse_rasterizer_error_check(sparse_rasterizer_error_check_data)
@pytest.mark.parametrize(
"class_ids",
[
[0], # Class IDs size does not match the number of polygons.
[2, 2], # Class ID >= num classes.
],
)
def test_sparse_rasterizer_error_check_class_ids(
sparse_rasterizer_error_check_data, class_ids
):
"""Test sparse rasterizer class ids error checks."""
sparse_rasterizer_error_check_data["class_ids_per_polygon"] = class_ids
_run_sparse_rasterizer_error_check(sparse_rasterizer_error_check_data)
@pytest.mark.parametrize(
"values, indices, dense_shape, class_ids_per_polygon, class_indices, class_dense_shape, "
"expected_shape",
[
# No vertices, single image.
(None, None, [0, 0, 2], None, None, [0, 0], [1, 90, 100]),
# No vertices, batch size 1.
(None, None, [1, 0, 0, 2], None, None, [1, 0, 0], [1, 1, 90, 100]),
# 1 vertex, batch size 1.
(
[0.0, 0.0],
[[0, 0, 0, 0], [0, 0, 0, 1]],
[1, 1, 1, 2],
[0],
[[0, 0, 0]],
[1, 1, 1],
[1, 1, 90, 100],
),
# 2 vertices, batch size 1.
(
[0.0, 0.0, 1.0, 1.0],
[[0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 1, 1]],
[1, 1, 2, 2],
[0],
[[0, 0, 0]],
[1, 1, 2],
[1, 1, 90, 100],
),
# 3 images, 1 polygon per image, 1 vertex per polygon. batch size 3.
(
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[
[0, 0, 0, 0],
[0, 0, 0, 1],
[1, 0, 0, 0],
[1, 0, 0, 1],
[2, 0, 0, 0],
[2, 0, 0, 1],
],
[3, 1, 1, 2],
[0, 0, 0],
[[0, 0, 0], [1, 0, 0], [2, 0, 0]],
[3, 1, 1],
[3, 1, 90, 100],
),
# 2 polygons, batch size 3, the first image is empty.
(
[1.0, 1.0, 1.0, 1.0],
[[1, 0, 0, 0], [1, 0, 0, 1], [2, 0, 0, 0], [2, 0, 0, 1]],
[3, 1, 1, 2],
[0, 0],
[[1, 0, 0], [2, 0, 0]],
[3, 1, 1],
[3, 1, 90, 100],
),
# 2 polygons, batch size 3, the second image is empty.
(
[1.0, 1.0, 1.0, 1.0],
[[0, 0, 0, 0], [0, 0, 0, 1], [2, 0, 0, 0], [2, 0, 0, 1]],
[3, 1, 1, 2],
[0, 0],
[[0, 0, 0], [2, 0, 0]],
[3, 1, 1],
[3, 1, 90, 100],
),
# 2 polygons, batch size 3, the third image is empty.
(
[1.0, 1.0, 1.0, 1.0],
[[0, 0, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [1, 0, 0, 1]],
[3, 1, 1, 2],
[0, 0],
[[0, 0, 0], [1, 0, 0]],
[3, 1, 1],
[3, 1, 90, 100],
),
],
)
def test_sparse_polygon_rasterizer_degenerate_input(
values,
indices,
dense_shape,
class_ids_per_polygon,
class_indices,
class_dense_shape,
expected_shape,
):
"""Test valid but degenerate input."""
if values is None:
values = tf.zeros(shape=[0], dtype=tf.float32)
if indices is None:
indices = tf.zeros(shape=[0, len(dense_shape)], dtype=tf.int64)
if class_ids_per_polygon is None:
class_ids_per_polygon = tf.zeros(shape=[0], dtype=tf.int64)
if class_indices is None:
class_indices = tf.zeros(shape=[0, len(class_dense_shape)], dtype=tf.int64)
polygons = tf.SparseTensor(values=values, indices=indices, dense_shape=dense_shape)
class_ids_per_polygon = tf.SparseTensor(
values=class_ids_per_polygon,
indices=class_indices,
dense_shape=class_dense_shape,
)
output = _draw_sparse(
num_classes=1,
one_hot=False,
binarize=True,
num_samples=1,
data_format=DataFormat.CHANNELS_FIRST,
polygons=polygons,
class_ids_per_polygon=class_ids_per_polygon,
)
np.testing.assert_array_equal(
output, np.zeros(expected_shape), err_msg="output is not all zeros."
)
def _check_sparse_shape_inference(
dense_shape, height, width, one_hot, data_format, expected_shape
):
"""Run shape inference test for sparse rasterizer."""
vertices = tf.compat.v1.placeholder(dtype=tf.float32)
class_ids_per_polygon = tf.SparseTensor(
values=tf.compat.v1.placeholder(dtype=tf.int64),
indices=tf.compat.v1.placeholder(dtype=tf.int64),
dense_shape=dense_shape[:-1],
)
indices = tf.compat.v1.placeholder(dtype=tf.int64)
polygons = tf.SparseTensor(
values=vertices, indices=indices, dense_shape=dense_shape
)
op = SparsePolygonRasterizer(
width=width,
height=height,
nclasses=1,
one_hot=one_hot,
binarize=True,
verbose=True,
data_format=data_format,
)
output = op(polygons=polygons, class_ids_per_polygon=class_ids_per_polygon)
assert expected_shape == output.shape.as_list()
@pytest.mark.parametrize(
"dense_shape,single_image,expected_batch_size",
[
([1, 1, 2], True, None),
([1, 1, 1, 2], False, 1),
([-1, 1, 1, 2], False, None),
# Shape is unknown, the op assumes no batch dimension.
(tf.compat.v1.placeholder(dtype=tf.int64), True, None),
(tf.compat.v1.placeholder(shape=[3], dtype=tf.int64), True, None),
(tf.compat.v1.placeholder(shape=[4], dtype=tf.int64), False, None),
(tf.constant([-1, 2, 6, 2], dtype=tf.int64), False, None),
(tf.Variable([-1, 2, 6, 2], dtype=tf.int64), False, None),
# Variable value is unavailable for shape inference.
(tf.Variable([1, 2, 6, 2], dtype=tf.int64), False, None),
],
)
def test_sparse_polygon_rasterizer_shape_inference_batch_size(
dense_shape, single_image, expected_batch_size
):
"""Test different ways of specifying batch size."""
if single_image:
expected_shape = [1, 90, 100]
else:
expected_shape = [expected_batch_size, 1, 90, 100]
_check_sparse_shape_inference(
dense_shape,
height=90,
width=100,
one_hot=False,
data_format=DataFormat.CHANNELS_FIRST,
expected_shape=expected_shape,
)
@pytest.mark.parametrize("one_hot,expected_nclasses", [(False, 1), (True, 2)])
def test_sparse_polygon_rasterizer_shape_inference_classes(one_hot, expected_nclasses):
"""Test shape inference for the number of classes."""
dense_shape = [2, 1, 1, 2]
expected_shape = [2, expected_nclasses, 90, 100]
_check_sparse_shape_inference(
dense_shape,
height=90,
width=100,
one_hot=one_hot,
data_format=DataFormat.CHANNELS_FIRST,
expected_shape=expected_shape,
)
@pytest.mark.parametrize(
"width, height, expected_width, expected_height",
[
(100, 90, 100, 90),
(
tf.compat.v1.placeholder(dtype=tf.int32),
tf.compat.v1.placeholder(dtype=tf.int32),
None,
None,
),
(
tf.Variable(100, dtype=tf.int32),
tf.Variable(100, dtype=tf.int32),
None,
None,
),
],
)
def test_sparse_polygon_rasterizer_shape_inference_width_height(
width, height, expected_width, expected_height
):
"""Test shape inference for the different ways of specifying width and height."""
dense_shape = [1, 1, 1, 2]
expected_shape = [1, 1, expected_height, expected_width]
_check_sparse_shape_inference(
dense_shape,
height=height,
width=width,
one_hot=False,
data_format=DataFormat.CHANNELS_FIRST,
expected_shape=expected_shape,
)
def _check_dense_shape_inference(
vertices_per_polygon,
polygons_per_image,
height,
width,
one_hot,
data_format,
expected_shape,
):
"""Run shape inference test for dense rasterizer."""
vertices = tf.compat.v1.placeholder(dtype=tf.float32)
class_ids_per_polygon = tf.compat.v1.placeholder(dtype=tf.int64)
op = PolygonRasterizer(
width=width,
height=height,
nclasses=1,
one_hot=one_hot,
binarize=True,
verbose=True,
data_format=data_format,
)
output = op(
polygon_vertices=vertices,
vertex_counts_per_polygon=vertices_per_polygon,
class_ids_per_polygon=class_ids_per_polygon,
polygons_per_image=polygons_per_image,
)
assert expected_shape == output.shape.as_list()
@pytest.mark.parametrize(
"vertices_per_polygon,polygons_per_image,single_image,\
expected_batch_size",
[
# Polygons_per_image == None implies single image.
([1], None, True, None),
(tf.compat.v1.placeholder(dtype=tf.int64), None, True, None),
# Empty polygons_per_image implies single image.
([], [], True, None),
# Polygons_per_image of size 1 implies 1 image.
([1], [3], False, 1),
# Polygons_per_image placeholder of unknown shape implies batch dimension of unknown size.
(
tf.compat.v1.placeholder(dtype=tf.int64),
tf.compat.v1.placeholder(dtype=tf.int64),
False,
None,
),
# When the placeholder size is specified, the batch dimension size is known.
(
tf.compat.v1.placeholder(shape=[1], dtype=tf.int64),
tf.compat.v1.placeholder(shape=[4], dtype=tf.int64),
False,
4,
),
# Batch dimension size is computed from variable shape.
(
tf.compat.v1.placeholder(shape=[1], dtype=tf.int64),
tf.Variable([1, 2], dtype=tf.int64),
False,
2,
),
],
)
def test_dense_polygon_rasterizer_shape_inference_batch_size(
vertices_per_polygon, polygons_per_image, single_image, expected_batch_size
):
"""Test different ways of specifying batch size."""
if single_image:
expected_shape = [1, 90, 100]
else:
expected_shape = [expected_batch_size, 1, 90, 100]
_check_dense_shape_inference(
vertices_per_polygon,
polygons_per_image,
height=90,
width=100,
one_hot=False,
data_format=DataFormat.CHANNELS_FIRST,
expected_shape=expected_shape,
)
@pytest.mark.parametrize("one_hot,expected_nclasses", [(False, 1), (True, 2)])
def test_dense_polygon_rasterizer_shape_inference_classes(one_hot, expected_nclasses):
"""Test shape inference for the number of classes."""
vertices_per_polygon = [1]
polygons_per_image = [1]
expected_shape = [1, expected_nclasses, 90, 100]
_check_dense_shape_inference(
vertices_per_polygon,
polygons_per_image,
height=90,
width=100,
one_hot=one_hot,
data_format=DataFormat.CHANNELS_FIRST,
expected_shape=expected_shape,
)
@pytest.mark.parametrize(
"width,height,expected_width,expected_height",
[
(100, 90, 100, 90),
(
tf.compat.v1.placeholder(dtype=tf.int32),
tf.compat.v1.placeholder(dtype=tf.int32),
None,
None,
),
(
tf.Variable(100, dtype=tf.int32),
tf.Variable(100, dtype=tf.int32),
None,
None,
),
],
)
def test_dense_polygon_rasterizer_shape_inference_width_height(
width, height, expected_width, expected_height
):
"""Test shape inference for the different ways of specifying width and height."""
vertices_per_polygon = [1]
polygons_per_image = [1]
expected_shape = [1, 1, expected_height, expected_width]
_check_dense_shape_inference(
vertices_per_polygon,
polygons_per_image,
height=height,
width=width,
one_hot=False,
data_format=DataFormat.CHANNELS_FIRST,
expected_shape=expected_shape,
)
@pytest.mark.parametrize("sparse", [False, True])
@pytest.mark.parametrize("one_hot", [False, True])
@pytest.mark.parametrize("binarize", [False, True])
@pytest.mark.parametrize("num_samples", [1, 5])
@pytest.mark.parametrize("single_image", [False, True])
@pytest.mark.parametrize(
"data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize("cpu", [False, True])
def test_polygon_rasterizer(
sparse, one_hot, binarize, num_samples, single_image, data_format, cpu
):
"""Test polygon rasterizer output."""
class_id_sparse = True
test_name = "sparse" if sparse else "dense"
test_name += "_onehot" if one_hot else "_noonehot"
test_name += "_bin" if binarize else "_nobin"
test_name += "_onesample" if num_samples == 1 else "_multisample"
test_name += "_single" if single_image else "_nosingle"
test_name += (
"_channelslast" if data_format == DataFormat.CHANNELS_LAST else "_channelsfirst"
)
test_name += "_cpu" if cpu else "_gpu"
device = "cpu:0" if cpu else "gpu:0"
should_raise = binarize is False and one_hot is False
vertices = [
# L
[5.183, 9.818],
[3.298, 92.005],
[24.787, 92.005],
[28.086, 77.019],
[17.907, 69.29],
# A
[29.5, 92.005],
[34.306, 9.818],
[43.731, 92.005],
# N
[46.465, 92.005],
[43.731, 10.667],
[57.021, 83.428],
[56.078, 11.892],
[60.603, 12.174],
[61.262, 93.041],
[50.706, 87.669],
# E
[64.467, 86.256],
[64.467, 93.607],
[74.834, 93.607],
[74.08, 85.689],
[67.671, 82.392],
[67.954, 60.996],
[73.42, 59.865],
[73.608, 53.268],
[68.802, 51.477],
[68.236, 27.443],
[71.818, 24.521],
[72.007, 11.986],
[64.467, 11.986],
# N
[76.907, 93.041],
[75.776, 11.986],
[80.583, 11.986],
[84.824, 85.973],
[84.824, 11.986],
[87.746, 11.986],
[87.746, 93.041],
[80.395, 89.365],
# E
[89.632, 93.607],
[94.438, 93.607],
[94.438, 85.407],
[92.035, 84.842],
[92.035, 53.174],
[94.438, 52.514],
[94.438, 44.597],
[92.035, 41.769],
[92.459, 20.28],
[95.192, 20.28],
[94.627, 12.457],
[90.386, 11.986],
# T
[96.041, 11.986],
[96.041, 15.567],
[97.549, 15.567],
[96.041, 93.607],
[98.585, 93.607],
[98.585, 15.85],
[99.527, 15.85],
[99.527, 11.986],
# triangle
[0.0, 30.0],
[50.0, 40.0],
[100.0, 20.0],
]
vertices_per_polygon = [5, 3, 7, 13, 8, 12, 8, 3]
class_ids_per_polygon = [0, 1, 2, 0, 1, 2, 0, 1]
if single_image:
polygons_per_image = [8]
else:
polygons_per_image = [0, 3, 0, 5, 0]
num_classes = max(class_ids_per_polygon) + 1
batch_size = len(polygons_per_image)
with tf.device(device):
if sparse is False:
if single_image:
polygons_per_image = None
try:
output = _draw_dense(
num_classes,
one_hot,
binarize,
num_samples,
data_format,
polygons_per_image,
vertices_per_polygon,
class_ids_per_polygon,
vertices,
)
except ValueError:
assert should_raise
return
else:
curr_polygon = 0
max_polygons_per_image = 0
max_vertices_per_polygon = 0
indices = []
class_id_indices = []
for b in range(batch_size):
num_polygons = polygons_per_image[b]
max_polygons_per_image = max(num_polygons, max_polygons_per_image)
for p in range(num_polygons):
num_vertices = vertices_per_polygon[curr_polygon]
if single_image:
class_id_indices.append([p, 0])
else:
class_id_indices.append([b, p, 0])
curr_polygon += 1
max_vertices_per_polygon = max(
num_vertices, max_vertices_per_polygon
)
for v in range(num_vertices):
if single_image:
indices.append([p, v, 0])
indices.append([p, v, 1])
else:
indices.append([b, p, v, 0])
indices.append([b, p, v, 1])
dense_shape = [max_polygons_per_image, max_vertices_per_polygon, 2]
if single_image is False:
dense_shape = [batch_size] + dense_shape
flat_vertices = tf.reshape(
tf.constant(vertices, dtype=tf.float32), shape=[len(indices)]
)
polygons = tf.SparseTensor(
values=flat_vertices,
indices=tf.constant(indices, dtype=tf.int64),
dense_shape=dense_shape,
)
class_ids = tf.constant(class_ids_per_polygon, dtype=tf.int64)
if class_id_sparse:
class_id_dense_shape = [max_polygons_per_image, 1]
if single_image is False:
class_id_dense_shape = [batch_size] + class_id_dense_shape
class_ids = tf.SparseTensor(
values=class_ids,
indices=tf.constant(class_id_indices, dtype=tf.int64),
dense_shape=class_id_dense_shape,
)
try:
output = _draw_sparse(
num_classes,
one_hot,
binarize,
num_samples,
data_format,
polygons,
class_ids,
)
except ValueError:
assert should_raise
return
# Check output image shape.
num_output_channels = num_classes + 1 if one_hot else 1
if data_format == DataFormat.CHANNELS_LAST:
expected_shape = (90, 100, num_output_channels)
else:
expected_shape = (num_output_channels, 90, 100)
if single_image is False:
expected_shape = (batch_size,) + expected_shape
assert output.shape == expected_shape
# Cancel the transpose effect, convert the op's final output from NHWC to NCHW for testing
if data_format == DataFormat.CHANNELS_LAST:
if single_image:
output = np.transpose(output, [2, 0, 1])
else:
output = np.transpose(output, [0, 3, 1, 2])
if single_image:
# Tile classes horizontally.
output = np.transpose(output, [1, 0, 2])
output = output.reshape(
(output.shape[0], output.shape[1] * output.shape[2])
)
else:
# Tile images vertically, classes horizontally.
output = np.transpose(output, [0, 2, 1, 3])
output = output.reshape(
(output.shape[0] * output.shape[1], output.shape[2] * output.shape[3])
)
test_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "test_polygon_rasterizer"
)
if debug_save_images:
try:
os.mkdir(test_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
if one_hot:
# Scale 1.0 to full 8b white.
output *= 255.0
else:
# Map class ids to gray scale.
output *= 255.0 / num_classes
# Construct an image out of each output slice.
channel = output.astype(np.uint8)
image = np.stack([channel, channel, channel], axis=-1)
# Optionally save test images to disk for visual comparison.
if debug_save_images:
debug_im = Image.fromarray(image)
debug_im.save("%s/test_%s.png" % (test_dir, test_name))
# Load reference image.
ref_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "test_polygon_rasterizer_ref"
)
ref_image = Image.open("%s/test_%s.png" % (ref_dir, test_name))
ref_image = np.array(ref_image).astype(np.float32)
# Compare and assert that test images match reference.
# Note that there might be slight differences depending on whether the code
# is run on CPU or GPU, or between different GPUs, CUDA versions, TF versions,
# etc. We may need to change this assertion to allow some tolerance. Before
# doing that, please check the generated images to distinguish bugs from
# small variations.
squared_diff = np.square(np.subtract(ref_image, image.astype(np.float32)))
assert np.sum(squared_diff) < 0.0001
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
op = PolygonRasterizer(
width=3,
height=3,
nclasses=1,
one_hot=True,
binarize=True,
verbose=True,
data_format=DataFormat.CHANNELS_FIRST,
)
op_dict = op.serialize()
deserialized_op = deserialize_tao_object(op_dict)
assert op.width == deserialized_op.width
assert op.height == deserialized_op.height
assert op.nclasses == deserialized_op.nclasses
assert op.one_hot == deserialized_op.one_hot
assert op.binarize == deserialized_op.binarize
assert op.verbose == deserialized_op.verbose
assert op.data_format == deserialized_op.data_format
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/test_polygon_rasterizer.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the bbox rasterizer processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import errno
import math
import os
import numpy as np
from PIL import Image
import pytest
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import deserialize_maglev_object
from nvidia_tao_tf1.core.processors import BboxRasterizer
from nvidia_tao_tf1.core.types import DataFormat
# Shorten these for convenience.
ELLIPSE = BboxRasterizer.DRAW_MODE_ELLIPSE
RECTANGLE = BboxRasterizer.DRAW_MODE_RECTANGLE
PASS = BboxRasterizer.GRADIENT_MODE_PASSTHROUGH
COV = BboxRasterizer.GRADIENT_MODE_MULTIPLY_BY_COVERAGE
# Debug mode for saving generated images to disk.
debug_save_shape_images = False
# Special case 3x3 matrix multiply where the third column of inputs and output is [0,0,1].
def mul3x2(ml, mr):
return [
[
ml[0][0] * mr[0][0] + ml[0][1] * mr[1][0],
ml[0][0] * mr[0][1] + ml[0][1] * mr[1][1],
0.0,
],
[
ml[1][0] * mr[0][0] + ml[1][1] * mr[1][0],
ml[1][0] * mr[0][1] + ml[1][1] * mr[1][1],
0.0,
],
[
ml[2][0] * mr[0][0] + ml[2][1] * mr[1][0] + mr[2][0],
ml[2][0] * mr[0][1] + ml[2][1] * mr[1][1] + mr[2][1],
1.0,
],
]
# Special case 3x3 matrix inverse where the third column of input and output is [0,0,1].
def inv3x2(mat):
det = mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]
oodet = 1.0 / det
return [
[mat[1][1] * oodet, -mat[0][1] * oodet, 0.0],
[-mat[1][0] * oodet, mat[0][0] * oodet, 0.0],
[
-(mat[2][0] * mat[1][1] + mat[2][1] * -mat[1][0]) * oodet,
-(mat[2][0] * -mat[0][1] + mat[2][1] * mat[0][0]) * oodet,
1.0,
],
]
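# Illustrative sanity check added for clarity (the helper name below is hypothetical and,
# since it does not start with "test", is not collected by pytest): composing a matrix
# with its inverse via the helpers above should give the identity transform.
def _check_inv3x2_roundtrip():
    mat = [[2.0, 0.5, 0.0], [0.25, 3.0, 0.0], [10.0, -4.0, 1.0]]
    roundtrip = mul3x2(mat, inv3x2(mat))
    identity = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    np.testing.assert_allclose(roundtrip, identity, atol=1e-6)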
def matrix_from_bbox(xmin, ymin, xmax, ymax):
# Compute a matrix that transforms bbox from canonical [-1,1] space to image coordinates.
half_width = (xmax - xmin) * 0.5
half_height = (ymax - ymin) * 0.5
smat = [[half_width, 0.0, 0.0], [0.0, half_height, 0.0], [0.0, 0.0, 1.0]]
tmat = [
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[half_width + xmin, half_height + ymin, 1.0],
]
mat = mul3x2(smat, tmat)
# [-1,-1] -> [-hw+hw+xmin, -hh+hh+ymin] = [xmin, ymin]
# [1,1] -> [hw+hw+xmin, hh+hh+ymin] = [xmax-xmin+xmin, ymax-ymin+ymin] = [xmax, ymax]
# Inverse matrix transforms from image coordinates to canonical space.
return inv3x2(mat)
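# Illustrative example added for clarity (the helper below is hypothetical and not part of
# the original tests): the matrix returned by matrix_from_bbox() maps image-space bbox
# corners back to the canonical [-1, 1] space, using the row-vector convention assumed by
# mul3x2/inv3x2, i.e. [px, py, 1] * M.
def _check_matrix_from_bbox_corners():
    mat = matrix_from_bbox(10.0, 20.0, 110.0, 80.0)

    def transform(px, py):
        return [
            px * mat[0][0] + py * mat[1][0] + mat[2][0],
            px * mat[0][1] + py * mat[1][1] + mat[2][1],
        ]

    np.testing.assert_allclose(transform(10.0, 20.0), [-1.0, -1.0], atol=1e-6)
    np.testing.assert_allclose(transform(110.0, 80.0), [1.0, 1.0], atol=1e-6)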
def matrix_from_center(centerx, centery, half_width, half_height, angle):
# Compute a matrix that transforms bbox from canonical [-1,1] space to image coordinates.
smat = [[half_width, 0.0, 0.0], [0.0, half_height, 0.0], [0.0, 0.0, 1.0]]
tmat = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [centerx, centery, 1.0]]
a = angle * math.pi / 180.0
rot = [[math.cos(a), math.sin(a)], [-math.sin(a), math.cos(a)], [0.0, 0.0]]
mat = mul3x2(smat, rot)
mat = mul3x2(mat, tmat)
# Inverse matrix transforms from image coordinates to canonical space.
# c * smat * rot * tmat = p
# c = p * tmat^-1 * rot^-1 * smat^-1
return inv3x2(mat)
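# Illustrative check added for clarity (hypothetical helper, not collected by pytest): the
# matrix returned by matrix_from_center() maps the bbox center in image space back to the
# canonical-space origin, for any rotation angle.
def _check_matrix_from_center_origin():
    mat = matrix_from_center(120.0, 90.0, 30.0, 22.5, 30.0)
    canonical = [
        120.0 * mat[0][0] + 90.0 * mat[1][0] + mat[2][0],
        120.0 * mat[0][1] + 90.0 * mat[1][1] + mat[2][1],
    ]
    np.testing.assert_allclose(canonical, [0.0, 0.0], atol=1e-6)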
def gradient_from_endpoints(sx, sy, svalue, ex, ey, evalue):
# edge = [ex - sx, ey - sy]
# p = [px - sx, py - sy]
# ratio = dot(p, edge) / |edge|^2
# value = (1-ratio) * svalue + ratio * evalue
# ->
# l = 1 / |edge|^2
# ratio = ((ex - sx) * (px - sx) + (ey - sy) * (py - sy)) * l
# ->
# dvalue = (evalue - svalue), dx = (ex - sx), dy = (ey - sy)
# value = dvalue * dx * l * px +
# dvalue * dy * l * py +
# svalue - dvalue * dx * l * sx - dvalue * dy * l * sy
# ->
# A = dvalue * dx * l
# B = dvalue * dy * l
# C = svalue - dvalue * dx * l * sx - dvalue * dy * l * sy
dx = ex - sx
dy = ey - sy
le = 0.0
if dx != 0.0 or dy != 0.0:
le = 1.0 / (dx * dx + dy * dy)
dvalue = (evalue - svalue) * le
dvx = dvalue * dx
dvy = dvalue * dy
offset = svalue - (dvx * sx + dvy * sy)
vec = [dvx, dvy, offset]
return vec
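# Illustrative check added for clarity (hypothetical helper, not collected by pytest):
# evaluating the plane A*px + B*py + C returned by gradient_from_endpoints() at the start
# and end points should reproduce svalue and evalue, respectively.
def _check_gradient_from_endpoints():
    sx, sy, svalue = 20.0, 15.0, 32.0
    ex, ey, evalue = 140.0, 105.0, 224.0
    vec = gradient_from_endpoints(sx, sy, svalue, ex, ey, evalue)
    start = vec[0] * sx + vec[1] * sy + vec[2]
    end = vec[0] * ex + vec[1] * ey + vec[2]
    np.testing.assert_allclose([start, end], [svalue, evalue], atol=1e-6)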
bbox_tests = [
# Test multiple images and classes.
(
"0", # Test name
4, # num_images
2, # num_classes
1, # num_gradients
[2, 2, 0, 1], # num bboxes per image
[0, 1, 1, 0, 1], # bbox class IDs
[
matrix_from_bbox(0.0, 0.0, 80.0, 60.0), # bbox matrices (3D)
matrix_from_bbox(80.0, 0.0, 160.0, 60.0),
matrix_from_bbox(80.0, 60.0, 160.0, 120.0),
matrix_from_bbox(0.0, 60.0, 80.0, 120.0),
matrix_from_bbox(40.0, 30.0, 120.0, 90.0),
],
[
[[0.0, 0.0, 32.0]],
[[0.0, 0.0, 64.0]],
[[0.0, 0.0, 128.0]],
[[0.0, 0.0, 192.0]],
[[0.0, 0.0, 224.0]],
], # bbox gradients (3D)
[
[1.0, 1.0],
[1.0, 1.0],
[1.0, 1.0],
[1.0, 1.0],
[1.0, 1.0],
], # bbox coverage radii
[ELLIPSE, RECTANGLE, ELLIPSE, ELLIPSE, ELLIPSE], # bbox flags
[0.5, 1.5, 0.5, 1.5, 1.5], # bbox sort values
[COV],
), # gradient flags
# Test empty images.
(
"1", # Test name
6, # num_images
1, # num_classes
1, # num_gradients
[0, 1, 0, 0, 1, 0], # num bboxes per image
[0, 0], # bbox class IDs
[
matrix_from_bbox(0.0, 0.0, 160.0, 120.0), # bbox matrices (3D)
matrix_from_bbox(0.0, 0.0, 160.0, 120.0),
],
[[[0.0, 0.0, 32.0]], [[0.0, 0.0, 64.0]]], # bbox gradients (3D)
[[1.0, 1.0], [1.0, 1.0]], # bbox coverage radii
[ELLIPSE, RECTANGLE], # bbox flags
[0.0, 0.0], # bbox sort values
[COV],
), # gradient flags
# Test basic shapes with only one constant gradient.
(
"2",
1,
1,
1,
[3],
[0, 0, 0],
[
matrix_from_bbox(0.0, 0.0, 80.0, 60.0),
matrix_from_bbox(40.0, 30.0, 120.0, 90.0),
matrix_from_bbox(80.0, 60.0, 160.0, 120.0),
],
[[[0.0, 0.0, 255.0]], [[0.0, 0.0, 255.0]], [[0.0, 0.0, 255.0]]],
[[0.8, 0.6], [0.8, 0.9], [0.5, 0.9]],
[RECTANGLE, ELLIPSE, RECTANGLE],
[1.0, 2.0, 3.0],
[COV],
),
# Reverse sort order.
(
"3",
1,
1,
1,
[3],
[0, 0, 0],
[
matrix_from_bbox(0.0, 0.0, 80.0, 60.0),
matrix_from_bbox(40.0, 30.0, 120.0, 90.0),
matrix_from_bbox(80.0, 60.0, 160.0, 120.0),
],
[[[0.0, 0.0, 255.0]], [[0.0, 0.0, 255.0]], [[0.0, 0.0, 255.0]]],
[[0.8, 0.6], [0.8, 0.9], [0.5, 0.9]],
[RECTANGLE, ELLIPSE, RECTANGLE],
[3.0, 2.0, 1.0],
[COV],
),
# Zero sort values should draw in the order bboxes are specified.
(
"4",
1,
1,
1,
[3],
[0, 0, 0],
[
matrix_from_bbox(40.0, 30.0, 160.0, 90.0),
matrix_from_bbox(0.0, 0.0, 120.0, 90.0),
matrix_from_bbox(90.0, 60.0, 160.0, 120.0),
],
[[[0.0, 0.0, 255.0]], [[0.0, 0.0, 255.0]], [[0.0, 0.0, 255.0]]],
[[0.9, 0.9], [0.9, 0.9], [0.9, 0.9]],
[RECTANGLE, ELLIPSE, RECTANGLE],
[0.0, 0.0, 0.0],
[COV],
),
# Test affine transformations.
(
"5",
1,
1,
1,
[3],
[0, 0, 0],
[
matrix_from_center(40.0, 30.0, 30.0, 22.5, 15.0),
matrix_from_center(80.0, 60.0, 30.0, 22.5, 22.5),
matrix_from_center(120.0, 90.0, 30.0, 22.5, 30.0),
],
[[[0.0, 0.0, 255.0]], [[0.0, 0.0, 255.0]], [[0.0, 0.0, 255.0]]],
[[0.8, 0.6], [0.8, 0.9], [0.5, 0.9]],
[RECTANGLE, ELLIPSE, RECTANGLE],
[1.0, 2.0, 3.0],
[COV],
),
# Test one constant and one interpolated gradient.
(
"6",
1,
1,
2,
[1],
[0],
[matrix_from_bbox(20, 15, 140, 105)],
[[[0.0, 0.0, 255.0], gradient_from_endpoints(20, 15, 32, 140, 105, 224)]],
[[1.0, 1.0]],
[RECTANGLE],
[1.0],
[COV, PASS],
),
# Test one constant and two interpolated gradients.
(
"7",
1,
1,
3,
[1],
[0],
[matrix_from_bbox(20, 15, 140, 105)],
[
[
[0.0, 0.0, 255.0],
gradient_from_endpoints(20, 15, 0, 140, 15, 255),
gradient_from_endpoints(20, 15, 64, 20, 105, 192),
]
],
[[1.0, 1.0]],
[RECTANGLE],
[2.0],
[COV, PASS, PASS],
),
# Empty image.
("8", 1, 1, 1, [0], [], [], [], [], [], [], [COV]),
]
@pytest.mark.parametrize(
"data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize(
"test_name,num_images,num_classes,num_gradients,bboxes_per_image,\
bbox_class_ids,bbox_matrices,bbox_gradients,bbox_coverage_radii,\
bbox_flags, bbox_sort_values, gradient_flags",
bbox_tests,
)
@pytest.mark.parametrize("cpu", [False, True])
def test_bbox_rasterizer(
test_name,
num_images,
num_classes,
num_gradients,
bboxes_per_image,
bbox_class_ids,
bbox_matrices,
bbox_gradients,
bbox_coverage_radii,
bbox_flags,
bbox_sort_values,
gradient_flags,
data_format,
cpu,
):
"""Test the ground-truth generator for different shapes, sizes and deadzones."""
device = "cpu:0" if cpu else "gpu:0"
data_format_string = (
"chlast" if data_format is DataFormat.CHANNELS_LAST else "chfirst"
)
device_string = "cpu" if cpu is True else "gpu"
file_name = "test_%s_%s_%s.png" % (test_name, data_format_string, device_string)
image_height = 120
image_width = 160
with tf.device(device):
sess = tf.compat.v1.Session()
op = BboxRasterizer(verbose=True, data_format=data_format)
fetches = op(
num_images=num_images,
num_classes=num_classes,
num_gradients=num_gradients,
image_height=image_height,
image_width=image_width,
bboxes_per_image=bboxes_per_image,
bbox_class_ids=bbox_class_ids,
bbox_matrices=bbox_matrices,
bbox_gradients=bbox_gradients,
bbox_coverage_radii=bbox_coverage_radii,
bbox_flags=bbox_flags,
bbox_sort_values=bbox_sort_values,
gradient_flags=gradient_flags,
)
sess.run(tf.compat.v1.global_variables_initializer())
output = sess.run(fetches)
# Cancel the transpose effect, convert the op's final output from NHWCG to NCGHW for
# testing.
if data_format == DataFormat.CHANNELS_LAST:
output = np.transpose(output, [0, 3, 4, 1, 2])
ref_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "test_bbox_rasterizer_ref"
)
test_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "test_bbox_rasterizer"
)
channel = np.reshape(
output,
[num_images * num_classes * num_gradients * image_height, image_width],
).astype(np.uint8)
image = np.stack([channel, channel, channel, channel], axis=-1)
if debug_save_shape_images:
try:
os.mkdir(test_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
debug_im = Image.fromarray(image)
debug_im.save("%s/%s" % (test_dir, file_name))
# Load reference image.
ref_image = Image.open("%s/%s" % (ref_dir, file_name))
ref_image = np.array(ref_image).astype(np.float32)
# Compare and assert that test images match reference.
# Note that there might be slight differences depending on whether the code
# is run on CPU or GPU, or between different GPUs, CUDA versions, TF versions,
# etc. We may need to change this assertion to allow some tolerance. Before
# doing that, please check the generated images to distinguish bugs from
# small variations.
squared_diff = np.square(np.subtract(ref_image, image.astype(np.float32)))
assert np.sum(squared_diff) < 0.0001
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
op = BboxRasterizer(verbose=True, data_format=DataFormat.CHANNELS_LAST)
op_dict = op.serialize()
deserialized_op = deserialize_maglev_object(op_dict)
assert op.verbose == deserialized_op.verbose
assert op.data_format == deserialized_op.data_format
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/test_bbox_rasterizer.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decode Image Processor."""
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.processors import Processor
from nvidia_tao_tf1.core.types import data_format as modulus_data_format, DataFormat
class DecodeImage(Processor):
"""Processor for decoding and reshaping an input to an image tensor.
Args:
encoding (str): The expected encoding ('fp16', 'jpg', 'png').
shape (tuple): An optional explicit reshape after loading. Especially relevant for loading
from raw.
data_format (str): A string representing the dimension ordering of the input data.
Must be one of 'channels_last' or 'channels_first'. If ``None`` (default), the
modulus global default will be used.
channels (int): Amount of channels of the image. Only relevant for compressed formats.
normalize (float): An optional normalization factor for color/pixel values. When set, all
pixels will be divided by this value.
uint8_precision (bool): Whether or not to cast the input image so that the precision of the
output image is uint8 while the data type remains fp32. This is used when training images
consist of both .png/.jpeg and .fp16 images.
kwargs (dict): keyword arguments passed to parent class.
Raises:
NotImplementedError: if ``data_format`` is not in ['channels_first', 'channels_last'], or
if ``encoding`` is not one of ['fp16', 'jpg', 'jpeg', 'png'].
"""
@save_args
def __init__(
self,
encoding,
shape=None,
data_format=None,
channels=3,
normalize=None,
uint8_precision=False,
**kwargs
):
"""__init__ method."""
self.encoding = encoding.lower()
self.shape = shape
self.data_format = (
data_format if data_format is not None else modulus_data_format()
)
self.channels = channels
self.normalize = normalize
self.uint8_precision = uint8_precision
if self.data_format not in [
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_LAST,
]:
raise NotImplementedError(
"Data format not supported, must be 'channels_first' or "
"'channels_last', given {}.".format(self.data_format)
)
if self.encoding not in ["fp16", "jpg", "jpeg", "png"]:
raise NotImplementedError(
"Encoding not supported, must be one of ['fp16', 'jpg', 'jpeg', 'png'], "
"given {}.".format(self.encoding)
)
super(DecodeImage, self).__init__(**kwargs)
def call(self, data):
"""call method.
Args:
data (tensor): Tensor to be loaded (and decoded).
Returns:
tensor: Decoded image.
"""
if self.encoding == "fp16":
img = tf.io.decode_raw(data, tf.float16, little_endian=None)
if self.uint8_precision: # change float16 images' precision to uint8
img *= 255
img = tf.cast(img, tf.uint8)
self.normalize = 255.0
elif self.encoding in ["jpg", "jpeg"]:
img = tf.image.decode_jpeg(
data, channels=self.channels
) # (H, W, C) [C:RGB]
elif self.encoding == "png":
img = tf.image.decode_png(data, channels=self.channels) # (H, W, C) [C:RGB]
img = tf.cast(img, tf.float32)
if self.shape:
img = tf.reshape(img, self.shape)
if self.data_format == DataFormat.CHANNELS_FIRST and self.encoding != "fp16":
img = DataFormat.convert(
img, DataFormat.CHANNELS_LAST, DataFormat.CHANNELS_FIRST
)
if self.normalize:
img /= self.normalize
return img
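# Usage sketch added for illustration (not part of the original module; the file path is
# hypothetical). Assuming a PNG byte string, decoding could look roughly like:
#
#     decoder = DecodeImage(
#         encoding="png",
#         channels=3,
#         normalize=255.0,
#         data_format=DataFormat.CHANNELS_FIRST,
#     )
#     image_bytes = tf.io.read_file("/path/to/image.png")
#     image = decoder(image_bytes)  # float32 tensor shaped (C, H, W), values in [0, 1].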
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/decode_image.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus processors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(xiangbok): Split processors into their own separate files.
from nvidia_tao_tf1.core.processors import augment
from nvidia_tao_tf1.core.processors.augment.color import ColorTransform
from nvidia_tao_tf1.core.processors.augment.crop import Crop
from nvidia_tao_tf1.core.processors.augment.random_brightness import RandomBrightness
from nvidia_tao_tf1.core.processors.augment.random_contrast import RandomContrast
from nvidia_tao_tf1.core.processors.augment.random_flip import RandomFlip
from nvidia_tao_tf1.core.processors.augment.random_glimpse import RandomGlimpse
from nvidia_tao_tf1.core.processors.augment.random_hue_saturation import RandomHueSaturation
from nvidia_tao_tf1.core.processors.augment.random_rotation import RandomRotation
from nvidia_tao_tf1.core.processors.augment.random_shear import RandomShear
from nvidia_tao_tf1.core.processors.augment.random_translation import RandomTranslation
from nvidia_tao_tf1.core.processors.augment.random_zoom import RandomZoom
from nvidia_tao_tf1.core.processors.augment.scale import Scale
from nvidia_tao_tf1.core.processors.augment.spatial import PolygonTransform
from nvidia_tao_tf1.core.processors.augment.spatial import SpatialTransform
from nvidia_tao_tf1.core.processors.bbox_rasterizer import BboxRasterizer
# from nvidia_tao_tf1.core.processors.binary_to_distance import BinaryToDistance
# from nvidia_tao_tf1.core.processors.buffers import NamedTupleStagingArea
# from nvidia_tao_tf1.core.processors.buffers import TensorflowBuffer
from nvidia_tao_tf1.core.processors.clip_polygon import ClipPolygon
# from nvidia_tao_tf1.core.processors.cluster_one_sweep import ClusterOneSweep
# from nvidia_tao_tf1.core.processors.compute_pr_from_computed_dist import ComputePRFromDecodedDist
# from nvidia_tao_tf1.core.processors.dataset import GroupByWindowKeyDataset
# from nvidia_tao_tf1.core.processors.dataset import SqlDatasetV2
# from nvidia_tao_tf1.core.processors.dataset import VariableBatchDataset
# from nvidia_tao_tf1.core.processors.decode_dist import DecodeDist
from nvidia_tao_tf1.core.processors.decode_image import DecodeImage
# from nvidia_tao_tf1.core.processors.dense_map_summary import DenseMapSummary
# from nvidia_tao_tf1.core.processors.draw_polygon_outlines import DrawPolygonOutlines
# from nvidia_tao_tf1.core.processors.generate_dist_from_bezier import GenerateDistFromBezier
# from nvidia_tao_tf1.core.processors.generate_dist_from_lineseg import GenerateDistFromLineseg
# from nvidia_tao_tf1.core.processors.generate_lineseg_from_polygon import GenerateLinesegFromPolygon
# from nvidia_tao_tf1.core.processors.image_loader import ImageLoader
from nvidia_tao_tf1.core.processors.load_file import LoadFile
from nvidia_tao_tf1.core.processors.lookup_table import LookupTable
# from nvidia_tao_tf1.core.processors.merge_polylines import MergePolylines
# from nvidia_tao_tf1.core.processors.mix_up import MixUp
from nvidia_tao_tf1.core.processors.parse_example_proto import ParseExampleProto
# from nvidia_tao_tf1.core.processors.path_generator import PathGenerator
# from nvidia_tao_tf1.core.processors.pipeline import Pipeline
from nvidia_tao_tf1.core.processors.polygon_rasterizer import PolygonRasterizer
from nvidia_tao_tf1.core.processors.polygon_rasterizer import SparsePolygonRasterizer
from nvidia_tao_tf1.core.processors.processors import boolean_mask_sparse_tensor
from nvidia_tao_tf1.core.processors.processors import dense_to_sparse
from nvidia_tao_tf1.core.processors.processors import json_arrays_to_tensor
from nvidia_tao_tf1.core.processors.processors import Processor
from nvidia_tao_tf1.core.processors.processors import remove_empty_rows_from_sparse_tensor
from nvidia_tao_tf1.core.processors.processors import (
sparse_coordinate_feature_to_vertices_and_counts,
)
from nvidia_tao_tf1.core.processors.processors import string_lower, string_upper
from nvidia_tao_tf1.core.processors.processors import to_dense_if_sparse_tensor_is_fully_dense
from nvidia_tao_tf1.core.processors.processors import values_and_count_to_sparse_tensor
from nvidia_tao_tf1.core.processors.tfrecords_iterator import TFRecordsIterator
from nvidia_tao_tf1.core.processors.transformers import ColorTransformer
from nvidia_tao_tf1.core.processors.transformers import SpatialTransformer
__all__ = (
"augment",
"ColorTransform",
"PolygonTransform",
"SpatialTransform",
# "NamedTupleStagingArea",
# "TensorflowBuffer",
# "GroupByWindowKeyDataset",
# "VariableBatchDataset",
# "SqlDatasetV2",
"BboxRasterizer",
# "BinaryToDistance",
"boolean_mask_sparse_tensor",
"ClipPolygon",
# "ClusterOneSweep",
"ColorTransformer",
# "ComputePRFromDecodedDist",
"Crop",
# "DecodeDist",
"DecodeImage",
"dense_to_sparse",
# "DenseMapSummary",
# "DrawPolygonOutlines",
# "GenerateDistFromBezier",
# "GenerateDistFromLineseg",
# "GenerateLinesegFromPolygon",
"json_arrays_to_tensor",
# "ImageLoader",
"LoadFile",
"LookupTable",
# "MergePolylines",
# "MixUp",
"ParseExampleProto",
# "PathGenerator",
# "Pipeline",
"PolygonRasterizer",
"Processor",
"remove_empty_rows_from_sparse_tensor",
"RandomBrightness",
"RandomContrast",
"RandomFlip",
"RandomGlimpse",
"RandomHueSaturation",
"RandomRotation",
"RandomTranslation",
"RandomShear",
"RandomZoom",
"Scale",
"SpatialTransformer",
"sparse_coordinate_feature_to_vertices_and_counts",
"SparsePolygonRasterizer",
"string_lower",
"string_upper",
"TFRecordsIterator",
"to_dense_if_sparse_tensor_is_fully_dense",
"values_and_count_to_sparse_tensor",
)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the tf records iterator in processors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pytest
import tensorflow as tf
import nvidia_tao_tf1.core
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object
@pytest.fixture
def dummy_data():
dataset_size = 10
dummy_data = list(range(dataset_size))
return dummy_data
@pytest.fixture
def get_tf_records_iterator_tensors(dummy_data, tmpdir):
def _get_tf_records_iterator_tensors(
shuffle_buffer_size, batch_size, shuffle, repeat, sequence_length, batch_as_list
):
# Set up dummy values
dummy_file_name = "dummy.tfrecords"
dummy_file_path = os.path.join(str(tmpdir), dummy_file_name)
# Write TFRecords file
writer = tf.io.TFRecordWriter(dummy_file_path)
for data in dummy_data:
example = tf.train.Example(
features=tf.train.Features(
feature={
"value": tf.train.Feature(
int64_list=tf.train.Int64List(value=[data])
)
}
)
)
writer.write(example.SerializeToString())
writer.close()
# Open TFRecords file
iterator = nvidia_tao_tf1.core.processors.TFRecordsIterator(
dummy_file_path,
shuffle_buffer_size=shuffle_buffer_size,
batch_size=batch_size,
shuffle=shuffle,
repeat=repeat,
sequence_length=sequence_length,
batch_as_list=batch_as_list,
)
# Parse example
example = iterator()
features = {"value": tf.io.FixedLenFeature([], dtype=tf.int64)}
if batch_as_list:
value = [
nvidia_tao_tf1.core.processors.ParseExampleProto(features=features, single=True)(
record
)["value"]
for record in example
]
return value, iterator
value = nvidia_tao_tf1.core.processors.ParseExampleProto(features=features, single=False)(
example
)
return value["value"], iterator
return _get_tf_records_iterator_tensors
@pytest.mark.parametrize("shuffle_buffer_size", [1, 10, 100])
def test_tf_records_iterator_shuffle_buffer_size(
shuffle_buffer_size, get_tf_records_iterator_tensors, dummy_data
):
"""Test that different buffer sizes work as expected with TFRecordsIterator."""
# Get the value tensors from TFRecords
value, _ = get_tf_records_iterator_tensors(
shuffle_buffer_size,
batch_size=10,
shuffle=True,
repeat=True,
sequence_length=0,
batch_as_list=False,
)
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.get_collection("iterator_init"))
# Get data for 10 epochs
per_epoch_values = []
for _ in range(10):
per_epoch_values.append(list(sess.run(value)))
# In case of shuffle_buffer_size being 1, we test that every epoch has the same data order
# which is the same as the order of original data.
if shuffle_buffer_size == 1:
assert all(
epoch_values == dummy_data for epoch_values in per_epoch_values
), "Parsed examples do not match original data."
# Else we test that in 10 epochs we get all the values in the original dataset
else:
all_values = [val for epoch_values in per_epoch_values for val in epoch_values]
assert set(all_values) == set(
dummy_data
), "Parsed examples do not match original data."
@pytest.mark.parametrize("batch_size", [1, 5, 100])
def test_tf_records_iterator_batch_size(batch_size, get_tf_records_iterator_tensors):
"""Test that different batch sizes work as expected with TFRecordsIterator."""
# Get the value tensors from TFRecords
value, _ = get_tf_records_iterator_tensors(
shuffle_buffer_size=1,
batch_size=batch_size,
shuffle=True,
repeat=True,
sequence_length=0,
batch_as_list=False,
)
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.get_collection("iterator_init"))
# Get 100 batches of data
for _ in range(100):
values = sess.run(value)
assert values.shape[0] == batch_size, "Parsed examples aren't batch size long."
@pytest.mark.parametrize("shuffle", [True, False])
def test_tf_records_iterator_shuffliness(
shuffle, get_tf_records_iterator_tensors, dummy_data
):
"""Test that shuffle option works as expected with TFRecordsIterator."""
# Get the value tensors from TFRecords
shuffle_buffer_size = 10 if shuffle else 0
value, _ = get_tf_records_iterator_tensors(
shuffle_buffer_size=shuffle_buffer_size,
batch_size=10,
shuffle=shuffle,
repeat=True,
sequence_length=0,
batch_as_list=False,
)
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.get_collection("iterator_init"))
# Get data for 10 epochs
per_epoch_values = []
for _ in range(10):
per_epoch_values.append(list(sess.run(value)))
# Check that all samples are unique in each epoch.
assert all(
len(e) == len(set(e)) for e in per_epoch_values
), "Epochs contain duplicate samples."
# In case of shuffle being True, we check that there are at least two epochs
# with a different order of data.
if shuffle:
assert any(
e1 != e2 for e1 in per_epoch_values for e2 in per_epoch_values
), "All the epochs use the same data in the same order even though shuffle is True."
# Else we test that all the epochs have exactly the same data, that is the original data
else:
assert all(
epoch_values == dummy_data for epoch_values in per_epoch_values
), "Some epoch use different data even though shuffle option is False."
@pytest.mark.parametrize("repeat", [True, False])
def test_tf_records_iterator_repeat(
repeat, get_tf_records_iterator_tensors, dummy_data
):
"""Test that repeat option works as expected with TFRecordsIterator."""
# Get the value tensors from TFRecords
value, _ = get_tf_records_iterator_tensors(
shuffle_buffer_size=0,
batch_size=10,
shuffle=False,
repeat=repeat,
sequence_length=0,
batch_as_list=False,
)
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.get_collection("iterator_init"))
# Get data for 10 epochs if repeat, else only a single epoch is possible
no_epochs = 10 if repeat else 1
all_values = []
for _ in range(no_epochs):
all_values.append(list(sess.run(value)))
# Check that the same dummy values are repeated
assert all(
value == dummy_data for value in all_values
), "Not all of original data has been observed when pulling data from TFRecordsIterator."
def test_tf_records_iterator_reset(get_tf_records_iterator_tensors, dummy_data):
"""Test that reset option works as expected with TFRecordsIterator."""
# Get the value tensors from TFRecords
value, iterator = get_tf_records_iterator_tensors(
shuffle_buffer_size=0,
batch_size=10,
shuffle=False,
repeat=False,
sequence_length=0,
batch_as_list=False,
)
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.get_collection("iterator_init"))
# Get data 10 times, but reset the iterator after each iteration,
# it should work even with repeat=False
for _ in range(10):
values = list(sess.run(value))
# Check that the same dummy values are used after every reset
assert (
values == dummy_data
), "Not all of original data has been observed when pulling data\
from TFRecordsIterator."
iterator.reset(sess)
@pytest.mark.parametrize("sequence_length", [2, 5])
def test_tf_records_iterator_block_shuffle(
sequence_length, get_tf_records_iterator_tensors, dummy_data
):
"""Test that block shuffling works as expected with TFRecordsIterator"""
# Get the value tensors from TFRecords
value, _ = get_tf_records_iterator_tensors(
shuffle_buffer_size=10,
batch_size=10,
shuffle=True,
repeat=True,
sequence_length=sequence_length,
batch_as_list=False,
)
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.get_collection("iterator_init"))
all_values = sess.run(value)
# The batch contains (batch_size * sequence_length) elements,
# arranged in blocks of sequence_length consecutive elements,
# e.g. [0,1,4,5,2,3,8,9,...] for sequence_length=2.
for i in range(0, len(all_values), sequence_length):
assert all(
all_values[j] + 1 == all_values[j + 1]
for j in range(i, i + sequence_length - 1)
), "Some elements in a block are shuffled"
@pytest.mark.parametrize("batch_as_list", [True, False])
def test_tf_records_iterator_batch_as_list(
batch_as_list, get_tf_records_iterator_tensors, dummy_data
):
"""Test that batch_as_list returns same output when True and False with TFRecordsIterator"""
# Get the value tensors from TFRecords
value, _ = get_tf_records_iterator_tensors(
shuffle_buffer_size=0,
batch_size=10,
shuffle=False,
repeat=True,
sequence_length=0,
batch_as_list=batch_as_list,
)
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.get_collection("iterator_init"))
assert (
list(sess.run(value)) == dummy_data
), "batch_as_list = {0} doesn't perform as expected".format(batch_as_list)
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
iterator = nvidia_tao_tf1.core.processors.TFRecordsIterator(
"dummy.tfrecords",
shuffle_buffer_size=8,
batch_size=8,
shuffle=True,
repeat=False,
sequence_length=0,
batch_as_list=False,
)
iterator_dict = iterator.serialize()
deserialized_iterator = deserialize_tao_object(iterator_dict)
assert iterator.file_list == deserialized_iterator.file_list
assert iterator.batch_size == deserialized_iterator.batch_size
assert iterator.shuffle_buffer_size == deserialized_iterator.shuffle_buffer_size
assert iterator.shuffle == deserialized_iterator.shuffle
assert iterator.repeat == deserialized_iterator.repeat
assert iterator.batch_as_list == deserialized_iterator.batch_as_list
assert iterator.sequence_length == deserialized_iterator.sequence_length
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/test_tf_records_iterator.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test polygon clipping."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
import nvidia_tao_tf1.core
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object
# Relative and absolute tolerances for comparing polygon coordinates.
_RTOL = 1e-6
_ATOL = 1e-6
def _flatten_polygon_list(polygons):
"""Convert from a list of polygons (list of (x, y) tuples) to a dense list of coordinates."""
if polygons == []:
return np.ndarray(shape=(0, 2), dtype=np.float32), []
polygons_flattened = np.concatenate(polygons)
points_per_polygon = np.array([len(a) for a in polygons])
return polygons_flattened, points_per_polygon
def _to_polygon_list(polygons, points_per_polygon):
"""Convert from a dense list of coordinates to a list of polygons (list of (x, y) tuples)."""
polygon_list = []
start_index = 0
for npoints in points_per_polygon:
polygon_list.append(polygons[start_index : start_index + npoints, :])
start_index += npoints
return polygon_list
helper_tests = [
# Test some random polygons.
(
[
np.array([(1.0, 2.0), (3.0, 4.0)]),
np.ndarray(shape=(0, 2), dtype=np.float32),
np.array([(5.0, 6.0)]),
np.array([(7.0, 8.0), (9.0, 10.0)]),
],
np.array([2, 0, 1, 2]),
),
# Test empty polygons.
(
[
np.ndarray(shape=(0, 2), dtype=np.float32),
np.ndarray(shape=(0, 2), dtype=np.float32),
],
np.array([0, 0]),
),
]
@pytest.mark.parametrize("polygons,ppp_expected", helper_tests)
def test_polygon_test_helpers(polygons, ppp_expected):
"""Test the polygon helper functions that convert between a dense and seperate polygons."""
pf, ppp = _flatten_polygon_list(polygons)
np.testing.assert_array_equal(ppp, ppp_expected)
polygons_roundtrip = _to_polygon_list(pf, ppp)
for p, p_expected in zip(polygons, polygons_roundtrip):
np.testing.assert_array_equal(p, p_expected)
def _clip_and_test(
polygon_list,
polygon_mask,
expected_polygon_list,
expected_polygon_index_mapping,
closed,
):
"""Run the numpy arrays through our TensorFlow Op and compare."""
polygons, points_per_polygon = _flatten_polygon_list(polygon_list)
# expected_polygons, expected_points_per_polygon = _flatten_polygon_list(expected_polygon_list)
clipper = nvidia_tao_tf1.core.processors.ClipPolygon(closed=closed)
clipped_polygons, clipped_points_per_polygon, clipped_polygon_index_mapping = clipper(
polygons=polygons,
points_per_polygon=points_per_polygon,
polygon_mask=polygon_mask,
)
sess = nvidia_tao_tf1.core.utils.test_session()
np_clipped_polygons, np_clipped_points_per_polygon, np_clipped_polygon_index_mapping = sess.run(
[clipped_polygons, clipped_points_per_polygon, clipped_polygon_index_mapping]
)
clipped_polygon_list = _to_polygon_list(
np_clipped_polygons, np_clipped_points_per_polygon
)
np.testing.assert_array_equal(
np_clipped_polygon_index_mapping, expected_polygon_index_mapping
)
assert len(expected_polygon_list) == len(clipped_polygon_list)
for p, p_clipped in zip(expected_polygon_list, clipped_polygon_list):
assert _polygon_equal(p, p_clipped) is True
@pytest.mark.parametrize("closed", (True, False))
def test_no_polygons(closed):
"""Test that no polygons as input returns no input."""
_clip_and_test(
polygon_list=[],
polygon_mask=np.array([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)]),
expected_polygon_list=[],
expected_polygon_index_mapping=[],
closed=closed,
)
@pytest.mark.parametrize("closed", (True, False))
def test_equal_coordinate_polygons(closed):
"""Test that polygons with identical coordinates are filtered out."""
_clip_and_test(
polygon_list=[np.array([(0.5, 0.5), (0.5, 0.5), (0.5, 0.5), (0.5, 0.5)])],
polygon_mask=np.array([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)]),
expected_polygon_list=[],
expected_polygon_index_mapping=[],
closed=closed,
)
@pytest.mark.parametrize("closed", (True, False))
def test_polygon_outside_mask(closed):
"""Test that polygons outside the mask are not drawn at all."""
_clip_and_test(
polygon_list=[
np.array([(-2.0, -2.0), (-2.0, -1.0), (-1.0, -1.0), (-1.0, -2.0)])
],
polygon_mask=np.array([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)]),
expected_polygon_list=[],
expected_polygon_index_mapping=[],
closed=closed,
)
@pytest.mark.parametrize(
"polygon_mask", (np.array([]), np.array([(0.0, 0.0), (0.0, 1.0)]))
)
@pytest.mark.parametrize("closed", (True, False))
def test_invalid_mask(closed, polygon_mask):
"""Test that an empty or non-polygonal mask raises an error."""
with pytest.raises(tf.errors.InvalidArgumentError):
_clip_and_test(
polygon_list=[],
polygon_mask=polygon_mask,
expected_polygon_list=[],
expected_polygon_index_mapping=[],
closed=closed,
)
def _polygon_equal(p1, p2):
"""Compare two polygons or polylines.
Note the coordinate sequence (list) is allowed to be inverted and shifted.
"""
np.testing.assert_array_equal(p1.shape, p2.shape)
nvertices = p1.shape[0]
p2p2 = np.concatenate((p2, p2))
rp2p2 = np.flip(p2p2, axis=0)
# Slide p1 over p2p2
for i in range(nvertices):
if np.allclose(p1, p2p2[i : i + nvertices, :], rtol=_RTOL, atol=_ATOL):
return True
if np.allclose(p1, rp2p2[i : i + nvertices, :], rtol=_RTOL, atol=_ATOL):
return True
return False
polygon_equal_tests = [
# Exactly equal.
(
np.array([(1.0, 2.0), (3.0, 4.0), (5.0, 6.0)]),
np.array([(1.0, 2.0), (3.0, 4.0), (5.0, 6.0)]),
),
# Offset.
(
np.array([(1.0, 2.0), (3.0, 4.0), (5.0, 6.0)]),
np.array([(3.0, 4.0), (5.0, 6.0), (1.0, 2.0)]),
),
# Inverted and offset.
(
np.array([(1.0, 2.0), (3.0, 4.0), (5.0, 6.0)]),
np.array([(5.0, 6.0), (3.0, 4.0), (1.0, 2.0)]),
),
]
@pytest.mark.parametrize("p1, p2", polygon_equal_tests)
def test_polygon_equal(p1, p2):
"""Test for the polygon equality helper function."""
assert _polygon_equal(p1, p2) is True
encompass_tests = [
(np.array([(1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0)])),
(np.array([(1.0e6, 1.0e6), (0.0, 1.0e6), (0.0, 0.0), (1.0e6, 0.0)])),
]
@pytest.mark.parametrize("polygon_mask", encompass_tests)
def test_polygon_encompass_mask(polygon_mask):
"""Test that polygons encompassing the mask entirely yield the same as the mask."""
polygon = polygon_mask * 2 # Simply inflate the polygon.
_clip_and_test(
polygon_list=[polygon],
polygon_mask=polygon_mask,
expected_polygon_list=[polygon_mask],
expected_polygon_index_mapping=[0],
closed=True,
)
@pytest.mark.parametrize("polygon_mask", encompass_tests)
def test_polyline_encompass_mask(polygon_mask):
"""Test that polylines circumscribing the mask are entirely removed."""
polyline = polygon_mask * 2 # Simply inflate the polygon.
_clip_and_test(
polygon_list=[polyline],
polygon_mask=polygon_mask,
expected_polygon_list=[],
expected_polygon_index_mapping=[],
closed=False,
)
@pytest.mark.parametrize("polygon", encompass_tests)
@pytest.mark.parametrize("closed", (True, False))
def test_mask_encompass_polygon(polygon, closed):
"""Test that a mask encompassing the polygons will leave the polygons as-is."""
polygon_mask = polygon * 2 # Simply inflate the polygon.
_clip_and_test(
polygon_list=[polygon],
polygon_mask=polygon_mask,
expected_polygon_list=[polygon],
expected_polygon_index_mapping=[0],
closed=closed,
)
@pytest.mark.parametrize("xy_range", (1.0, 1.0e6))
def test_many_polylines_in_encompassing_mask(
xy_range, npolylines=1000, max_nvertices=100
):
"""Test that many polygons inside one encompassing mask stay the same."""
polygon_mask = np.array(
[
(-xy_range, -xy_range),
(xy_range, -xy_range),
(xy_range, xy_range),
(-xy_range, xy_range),
]
)
# Create random polygons
polylines = []
np.random.seed(42)
for _ in range(npolylines):
# Create random vertices centered around the axis origin, to stay within mask bounds.
nvertices = np.random.randint(low=3, high=max_nvertices)
polylines.append((np.random.rand(nvertices, 2) - 0.5) * xy_range)
_clip_and_test(
polygon_list=polylines,
polygon_mask=polygon_mask,
expected_polygon_list=polylines,
expected_polygon_index_mapping=list(range(npolylines)),
closed=False,
)
def test_self_intersecting_polygon():
r"""Draw an intersecting polygon (8-shape) that is encompassed by the mask.
1mmm2 +---+
m\ /m \1/
m X m -> X
m/ \m /2\
3mmm4 +---+
Note: The polygon clipper will reduce self-intersecting polygons into multiple
non-intersecting polygons.
"""
polygon_list = [np.array([(-2.0, -2.0), (2.0, 2.0), (2.0, -2.0), (-2.0, 2.0)])]
polygon_mask = np.array([(2.0, 2.0), (-2.0, 2.0), (-2.0, -2.0), (2.0, -2.0)])
expected_polygon_list = [
np.array([(-2.0, -2.0), (0.0, 0.0), (-2.0, 2.0)]),
np.array([(2.0, 2.0), (0.0, 0.0), (2.0, -2.0)]),
]
expected_polygon_index_mapping = [0, 0]
_clip_and_test(
polygon_list=polygon_list,
polygon_mask=polygon_mask,
expected_polygon_list=expected_polygon_list,
expected_polygon_index_mapping=expected_polygon_index_mapping,
closed=True,
)
def test_self_intersecting_polygon_clipping():
r"""Draw an intersecting polygon (8-shape) that is cropped by a smaller mask.
1-------2
\ /
mmmmm +---+
m\ /m \1/
m x m -> X
m/ \m /2\
mmmmm +---+
/ \
3-------4
Note that the polygon clipper will reduce self-intersecting polygons into multiple
non-intersecting polygons.
"""
polygon_list = [np.array([(-2.0, -2.0), (2.0, 2.0), (2.0, -2.0), (-2.0, 2.0)])]
polygon_mask = np.array([(1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0)])
expected_polygon_list = [
np.array([(-1.0, -1.0), (0.0, 0.0), (-1.0, 1.0)]),
np.array([(1.0, 1.0), (0.0, 0.0), (1.0, -1.0)]),
]
expected_polygon_index_mapping = [0, 0]
_clip_and_test(
polygon_list=polygon_list,
polygon_mask=polygon_mask,
expected_polygon_list=expected_polygon_list,
expected_polygon_index_mapping=expected_polygon_index_mapping,
closed=True,
)
def test_convex_polygon_clipping():
"""Test simple convex polygon clipping.
+-+
mm|m|mm +-+
m | | m -> |1|
mm|m|mm +-+
+-+
"""
polygon_list = [np.array([(1.0, 0.0), (2.0, 0.0), (2.0, 3.0), (1.0, 3.0)])]
polygon_mask = np.array([(0.0, 1.0), (3.0, 1.0), (3.0, 2.0), (0.0, 2.0)])
expected_polygon_list = [np.array([(1.0, 1.0), (1, 2.0), (2.0, 2.0), (2.0, 1.0)])]
expected_polygon_index_mapping = [0]
_clip_and_test(
polygon_list=polygon_list,
polygon_mask=polygon_mask,
expected_polygon_list=expected_polygon_list,
expected_polygon_index_mapping=expected_polygon_index_mapping,
closed=True,
)
def test_polyline_clipping():
"""Test simple 'convex' polyline clipping.
2-3 1 2
mm|m|mm + +
m | | m -> | |
mm|m|mm + +
1 4
"""
polyline_list = [np.array([(1.0, 0.0), (1.0, 3.0), (2.0, 3.0), (2.0, 0.0)])]
polygon_mask = np.array([(0.0, 1.0), (3.0, 1.0), (3.0, 2.0), (0.0, 2.0)])
expected_polyline_list = [
np.array([(1.0, 1.0), (1.0, 2.0)]),
np.array([(2.0, 1.0), (2.0, 2.0)]),
]
expected_polyline_index_mapping = [0, 0]
_clip_and_test(
polygon_list=polyline_list,
polygon_mask=polygon_mask,
expected_polygon_list=expected_polyline_list,
expected_polygon_index_mapping=expected_polyline_index_mapping,
closed=False,
)
def test_concave_polygon_clipping():
r"""Test that drawing a 'V' masked with a thin horizontal mask results in two polygons.
_ _
1\ /3
mm\m\mmm/m/mm _ _
m \ \2/ / m -> \1\ /2/
mmmm\mmm/mmmm \_\ /_/
\4/
"""
polygon_list = [np.array([(-3.0, 3.0), (0.0, 0.0), (3.0, 3.0), (0.0, -3.0)])]
polygon_mask = np.array([(-3.0, 2.0), (-3.0, 1.0), (3.0, 1.0), (3.0, 2.0)])
expected_polygon_list = [
np.array([(-2.0, 2.0), (-2.5, 2.0), (-2.0, 1.0), (-1.0, 1.0)]),
np.array([(2.5, 2.0), (2.0, 2.0), (1.0, 1.0), (2.0, 1.0)]),
]
expected_polygon_index_mapping = [0, 0]
_clip_and_test(
polygon_list=polygon_list,
polygon_mask=polygon_mask,
expected_polygon_list=expected_polygon_list,
expected_polygon_index_mapping=expected_polygon_index_mapping,
closed=True,
)
@pytest.mark.parametrize("swap_polygon_with_mask", (True, False))
def test_corner_polygon_clipping(swap_polygon_with_mask):
r"""Test that drawing a 'V' masked with a thin horizontal mask results in two polygons.
mmmmm
m m
m 1-m-2
m | m |
mmmmm |
| |
4---3
"""
polygons = np.array([(1.0, 1.0), (3.0, 1.0), (3.0, 3.0), (1.0, 3.0)])
polygon_mask = np.array([(0.0, 0), (0.0, 2.0), (2.0, 2.0), (2.0, 0.0)])
if swap_polygon_with_mask:
polygon_mask, polygons = polygons, polygon_mask
expected_polygon_list = [np.array([(1.0, 1.0), (2.0, 1.0), (2.0, 2.0), (1.0, 2.0)])]
expected_polygon_index_mapping = [0]
_clip_and_test(
polygon_list=[polygons],
polygon_mask=polygon_mask,
expected_polygon_list=expected_polygon_list,
expected_polygon_index_mapping=expected_polygon_index_mapping,
closed=True,
)
@pytest.mark.parametrize("swap_polygon_with_mask", (True, False))
def test_side_polygon_clipping(swap_polygon_with_mask):
r"""Test that drawing a 'V' masked with a thin horizontal mask results in two polygons.
Args:
swap_polygon_with_mask: swaps the polygon with the mask. Should give the same result.
mmmmm
m 1-m-2
m | m |
m 3-m-4
mmmmm
"""
polygons = np.array([(1.0, 1.0), (4.0, 1.0), (4.0, 2.0), (1.0, 2.0)])
polygon_mask = np.array([(0.0, 0), (3.0, 0.0), (3.0, 3.0), (0.0, 3.0)])
if swap_polygon_with_mask:
polygon_mask, polygons = polygons, polygon_mask
expected_polygon_list = [np.array([(1.0, 1.0), (3.0, 1.0), (3.0, 2.0), (1.0, 2.0)])]
expected_polygon_index_mapping = [0]
_clip_and_test(
polygon_list=[polygons],
polygon_mask=polygon_mask,
expected_polygon_list=expected_polygon_list,
expected_polygon_index_mapping=expected_polygon_index_mapping,
closed=True,
)
polyline_corner_tests = [
(
np.array([(1.0, 1.0), (3.0, 1.0), (3.0, 3.0), (1.0, 3.0)]),
np.array([(1.0, 1.0), (2.0, 1.0)]),
),
(
np.array([(3.0, 1.0), (3.0, 3.0), (1.0, 3.0), (1.0, 1.0)]),
np.array([(1.0, 2.0), (1.0, 1.0)]),
),
(
np.array([(3.0, 3.0), (1.0, 3.0), (1.0, 1.0), (3.0, 1.0)]),
np.array([(1.0, 2.0), (1.0, 1.0), (2.0, 1.0)]),
),
(
np.array([(1.0, 3.0), (1.0, 1.0), (3.0, 1.0), (3.0, 3.0)]),
np.array([(1.0, 2.0), (1.0, 1.0), (2.0, 1.0)]),
),
]
@pytest.mark.parametrize("reverse_path", (True, False))
@pytest.mark.parametrize("polylines,expected_polylines", polyline_corner_tests)
def test_corner_polyline_clipping(polylines, expected_polylines, reverse_path):
r"""Test corner polyline cropping for differently organized paths.
Args:
reverse_path: reversing the path should yield the same result.
mmmmm mmmmm mmmmm mmmmm
m m m m m m m m
m 1-m-2 m 4 m 1 m 3-m-4 m 2-m-3
m m | m | m | m | m m | m |
mmmmm | mmmmm | mmmmm mmmmm |
| | | | | |
4---3 3---2 2---1 1 4
"""
if reverse_path:
polylines = np.flip(polylines, axis=0)
polygon_mask = np.array([(0.0, 0), (0.0, 2.0), (2.0, 2.0), (2.0, 0.0)])
expected_polyline_index_mapping = [0]
_clip_and_test(
polygon_list=[polylines],
polygon_mask=polygon_mask,
expected_polygon_list=[expected_polylines],
expected_polygon_index_mapping=expected_polyline_index_mapping,
closed=False,
)
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
clipper = nvidia_tao_tf1.core.processors.ClipPolygon(closed=False)
clipper_dict = clipper.serialize()
deserialized_clipper = deserialize_tao_object(clipper_dict)
assert clipper.closed == deserialized_clipper.closed
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/test_clip_polygon.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test conversion of values and counts to sparse tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from parameterized import parameterized
import pytest
from six import binary_type
import tensorflow as tf
from nvidia_tao_tf1.core.processors import values_and_count_to_sparse_tensor
class ValuesAndCountToSparseTensorTest(tf.test.TestCase):
def assertSparseTensorShapeEqual(self, st, indices, values, dense_shape):
"""Assert the shapes of the tensors in a SparseTensor conform to the expected shapes."""
self.assertAllEqual(indices, st.indices.get_shape().as_list())
self.assertAllEqual(values, st.values.get_shape().as_list())
self.assertAllEqual(dense_shape, st.dense_shape.get_shape().as_list())
def assertSparseTensorValueEqual(self, st, values, indices, dense_shape):
"""Assert two SparseTensor values are the same."""
self.assertAllEqual(indices, st.indices)
self.assertAllEqual(values, st.values)
self.assertAllEqual(dense_shape, st.dense_shape)
@parameterized.expand(
[
(np.array([1, 1, 1], dtype=np.int32),),
(np.array([1, 1, 1, 1, 1], dtype=np.int32),),
(np.array([0], dtype=np.int32),),
(np.array([3], dtype=np.int32),),
(np.array([5], dtype=np.int32),),
]
)
def test_mismatch_counts(self, counts):
with pytest.raises(tf.errors.InvalidArgumentError):
with self.test_session():
values = np.array([0, 1, 2, 3], dtype=np.int32)
values_and_count_to_sparse_tensor(values, counts).eval()
@parameterized.expand(
[
(np.array([1, 1, 1], dtype=np.int32),),
(np.array([1, 1, 1, 1, 1], dtype=np.int32),),
(np.array([0], dtype=np.int32),),
(np.array([3], dtype=np.int32),),
(np.array([5], dtype=np.int32),),
]
)
def test_mismatch_counts_of_counts(self, counts_of_counts):
with pytest.raises(tf.errors.InvalidArgumentError):
with self.test_session():
values = np.array([[1, 1], [2, 2], [3, 3], [4, 4]], dtype=np.int32)
counts = np.array([1, 1, 1, 1], dtype=np.int32)
values_and_count_to_sparse_tensor(
values, counts, counts_of_counts
).eval()
def test_2d_empty_tensor_only_counts(self,):
values = np.zeros([0, 2], dtype=np.float32)
counts = [0, 0]
st = values_and_count_to_sparse_tensor(values, counts)
self.assertSparseTensorShapeEqual(
st, indices=[None, 3], values=[None], dense_shape=[3]
)
with self.test_session():
self.assertSparseTensorValueEqual(
st=st.eval(),
indices=np.zeros([0, 3]),
values=np.zeros([0]),
dense_shape=[2, 0, 2],
)
def test_2d_empty_tensor(self,):
values = np.zeros([0, 2], dtype=np.float32)
counts = [0, 0]
counts_of_counts = [0, 2, 0]
st = values_and_count_to_sparse_tensor(
values, counts, counts_of_counts=counts_of_counts
)
self.assertSparseTensorShapeEqual(
st, indices=[None, 4], values=[None], dense_shape=[4]
)
with self.test_session():
self.assertSparseTensorValueEqual(
st=st.eval(),
indices=np.zeros([0, 4]),
values=np.zeros([0]),
dense_shape=[3, 2, 0, 2],
)
@parameterized.expand(
[(np.int32,), (np.int64,), (np.float32,), (np.float64,), (binary_type,)]
)
def test_1d_only_counts_all_in_one(self, dtype):
values = np.array([0, 1, 2], dtype=dtype)
counts = [0, 3]
st = values_and_count_to_sparse_tensor(values, counts)
self.assertSparseTensorShapeEqual(
st, indices=[None, 2], values=[None], dense_shape=[2]
)
with self.test_session():
self.assertSparseTensorValueEqual(
st=st.eval(),
indices=[[1, 0], [1, 1], [1, 2]],
values=values.flatten(),
dense_shape=[2, 3],
)
@parameterized.expand(
[(np.int32,), (np.int64,), (np.float32,), (np.float64,), (binary_type,)]
)
def test_1d_only_counts_distributed(self, dtype):
values = np.array([0, 1, 2], dtype=dtype)
counts = [1, 0, 1, 0, 1]
st = values_and_count_to_sparse_tensor(values, counts)
self.assertSparseTensorShapeEqual(
st, indices=[None, 2], values=[None], dense_shape=[2]
)
with self.test_session():
self.assertSparseTensorValueEqual(
st=st.eval(),
indices=[[0, 0], [2, 0], [4, 0]],
values=values.flatten(),
dense_shape=[5, 1],
)
@parameterized.expand(
[(np.int32,), (np.int64,), (np.float32,), (np.float64,), (binary_type,)]
)
def test_2d_only_counts_all_in_one(self, dtype):
values = np.array([[0, 1], [2, 3], [4, 5]], dtype=dtype)
counts = [0, 3]
st = values_and_count_to_sparse_tensor(values, counts)
self.assertSparseTensorShapeEqual(
st, indices=[None, 3], values=[None], dense_shape=[3]
)
with self.test_session():
self.assertSparseTensorValueEqual(
st=st.eval(),
indices=[
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
[1, 2, 0],
[1, 2, 1],
],
values=values.flatten(),
dense_shape=[2, 3, 2],
)
@parameterized.expand(
[(np.int32,), (np.int64,), (np.float32,), (np.float64,), (binary_type,)]
)
def test_2d_only_counts_distributed(self, dtype):
values = np.array([[0, 1], [2, 3], [4, 5]], dtype=dtype)
counts = [1, 0, 1, 0, 1]
st = values_and_count_to_sparse_tensor(values, counts)
self.assertSparseTensorShapeEqual(
st, indices=[None, 3], values=[None], dense_shape=[3]
)
with self.test_session():
self.assertSparseTensorValueEqual(
st=st.eval(),
indices=[
[0, 0, 0],
[0, 0, 1],
[2, 0, 0],
[2, 0, 1],
[4, 0, 0],
[4, 0, 1],
],
values=values.flatten(),
dense_shape=[5, 1, 2],
)
@parameterized.expand(
[(np.int32,), (np.int64,), (np.float32,), (np.float64,), (binary_type,)]
)
def test_2d_distributed(self, dtype):
values = np.array([[0, 1], [2, 3], [4, 5]], dtype=dtype)
counts = [1, 0, 1, 0, 1]
counts_of_counts = [3, 2]
st = values_and_count_to_sparse_tensor(values, counts, counts_of_counts)
self.assertSparseTensorShapeEqual(
st, indices=[None, 4], values=[None], dense_shape=[4]
)
with self.test_session():
self.assertSparseTensorValueEqual(
st=st.eval(),
indices=[
[0, 0, 0, 0],
[0, 0, 0, 1],
[0, 2, 0, 0],
[0, 2, 0, 1],
[1, 1, 0, 0],
[1, 1, 0, 1],
],
values=values.flatten(),
dense_shape=[2, 3, 1, 2],
)
@parameterized.expand(
[(np.int32,), (np.int64,), (np.float32,), (np.float64,), (binary_type,)]
)
def test_multi_dim_only_counts_all_in_one(self, dtype):
values = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [0, 1]]], dtype=dtype)
counts = [0, 3]
st = values_and_count_to_sparse_tensor(values, counts)
self.assertSparseTensorShapeEqual(
st, indices=[None, 4], values=[None], dense_shape=[4]
)
with self.test_session():
self.assertSparseTensorValueEqual(
st=st.eval(),
indices=[
[1, 0, 0, 0],
[1, 0, 0, 1],
[1, 0, 1, 0],
[1, 0, 1, 1],
[1, 1, 0, 0],
[1, 1, 0, 1],
[1, 1, 1, 0],
[1, 1, 1, 1],
[1, 2, 0, 0],
[1, 2, 0, 1],
[1, 2, 1, 0],
[1, 2, 1, 1],
],
values=values.flatten(),
dense_shape=[2, 3, 2, 2],
)
@parameterized.expand(
[(np.int32,), (np.int64,), (np.float32,), (np.float64,), (binary_type,)]
)
def test_multi_dim_only_counts_distributed(self, dtype):
values = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [0, 1]]], dtype=dtype)
counts = [1, 0, 1, 0, 1]
st = values_and_count_to_sparse_tensor(values, counts)
self.assertSparseTensorShapeEqual(
st, indices=[None, 4], values=[None], dense_shape=[4]
)
with self.test_session():
self.assertSparseTensorValueEqual(
st=st.eval(),
indices=[
[0, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 0, 1, 1],
[2, 0, 0, 0],
[2, 0, 0, 1],
[2, 0, 1, 0],
[2, 0, 1, 1],
[4, 0, 0, 0],
[4, 0, 0, 1],
[4, 0, 1, 0],
[4, 0, 1, 1],
],
values=values.flatten(),
dense_shape=[5, 1, 2, 2],
)
@parameterized.expand(
[(np.int32,), (np.int64,), (np.float32,), (np.float64,), (binary_type,)]
)
def test_multi_dim_distributed(self, dtype):
values = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [0, 1]]], dtype=dtype)
counts = [1, 0, 1, 0, 1]
counts_of_counts = [3, 2]
st = values_and_count_to_sparse_tensor(values, counts, counts_of_counts)
self.assertSparseTensorShapeEqual(
st, indices=[None, 5], values=[None], dense_shape=[5]
)
with self.test_session():
self.assertSparseTensorValueEqual(
st=st.eval(),
indices=[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 1],
[0, 2, 0, 0, 0],
[0, 2, 0, 0, 1],
[0, 2, 0, 1, 0],
[0, 2, 0, 1, 1],
[1, 1, 0, 0, 0],
[1, 1, 0, 0, 1],
[1, 1, 0, 1, 0],
[1, 1, 0, 1, 1],
],
values=values.flatten(),
dense_shape=[2, 3, 1, 2, 2],
)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/test_values_and_count_to_sparse_tensor.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Polygon Rasterier Processor."""
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.processors import is_sparse, load_custom_tf_op, Processor
from nvidia_tao_tf1.core.types import data_format as modulus_data_format, DataFormat
class PolygonRasterizer(Processor):
"""Processor that draws polygons from coordinate/class lists into (rasterized) maps.
Regardless of settings, the background will always be rendered as zeros. Any class with a value
of -1 will be ignored (and not drawn). The class index of each polygon has an impact on the
value in the rasterized maps, depending on the ``one_hot`` and ``binarize`` arguments.
Args:
width (int): width of the output map.
height (int): height of the output map.
nclasses (int or None): The number of classes. This value must be specified if ``one_hot``
is ``True``, because ``one_hot`` creates a fixed output map for each class, but can be
``None`` (the default) if ``one_hot`` is ``False``.
binarize (bool): Defaults to ``True``, but can be set to ``False`` if ``one_hot`` is
``True``. When ``one_hot`` is ``True``, the polygons of each class will be rendered in
their own map, so it's OK that output values in each map aren't binary. (For example,
a polygon intersecting a pixel over its diagonal could result in a value of 0.5.).
Binarize=True is more expensive than binarizing by setting num_samples=1, so it should
be used only when more accurate pixel coverage is required.
one_hot (bool): Defaults to ``True``, meaning that one output map is created for each class.
The background class is the first map at index 0. This means that a class index '0' will
be drawn at map (channel) index '1'.
If set to ``False`` instead, the rasterizer will produce only a single output map
containing the polygons from all classes, with each class represented by a different
discrete integer value. (The current implementation is that each class_id will be
rendered with an integer value of ``class_id+1``.)
verbose (bool): If ``True``, shows verbose output from the backend implementation.
data_format (str): A string representing the dimension ordering of the input data.
Must be one of 'channels_last' or 'channels_first'. If ``None`` (default), the
modulus global default will be used.
num_samples (int): number of samples per box filter dimension. For each pixel in the
output image, polygon coverage is evaluated by sampling with a pixel sized box
filter. The total number of samples taken is num_samples * num_samples. Must be
between 1 and 5. Note that 1 gives a binary result. Also note that setting
num_samples > 1 and binarize=True results in fatter polygons compared to num_samples=1
since the former setting approximates a one pixel wide box filter while the latter
uses point sampling.
include_background (bool): If set to ``True``, the rasterized output also includes
the background channel at channel index 0. This parameter only takes effect when
``one_hot`` is set to ``True``. Default ``True``.
kwargs (dict): keyword arguments passed to parent class.
Raises:
NotImplementedError: if ``data_format`` is not in ['channels_first', 'channels_last'].
ValueError: if ``one_hot`` is set with ``nclasses`` unspecified, or if ``one_hot`` and
``binarize`` are both set to False.
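Example:
A minimal sketch of rasterizing a single polygon. The sizes and coordinates below are
hypothetical, and the channel layout assumes the default data format resolves to
'channels_first'.
    rasterizer = PolygonRasterizer(width=4, height=4, nclasses=2)
    # One triangle of class 0, given as a flat list of (x, y) vertices.
    vertices = tf.constant([[0.5, 0.5], [3.5, 0.5], [2.0, 3.5]], tf.float32)
    cov = rasterizer(
        polygon_vertices=vertices,
        vertex_counts_per_polygon=tf.constant([3], tf.int32),
        class_ids_per_polygon=tf.constant([0], tf.int32))
    # 'cov' has shape (1 + nclasses, height, width): the background map at channel 0,
    # followed by one map per class.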
"""
@save_args
def __init__(
self,
width,
height,
nclasses=None,
binarize=True,
one_hot=True,
verbose=False,
data_format=None,
num_samples=5,
include_background=True,
**kwargs
):
"""__init__ method."""
self.width = width
self.height = height
self.nclasses = nclasses
self.binarize = binarize
self.one_hot = one_hot
self.verbose = verbose
self.include_background = include_background
self.data_format = (
data_format if data_format is not None else modulus_data_format()
)
self.num_samples = num_samples
# TODO(xiangbok): add an attribute that sets the class_id and the value that class should be
# rendered at, if ``one_hot`` is ``False``. Currently, we're rendering those values as
# ``class_id+1``. This is done to avoid ``class_id`` 0 to be drawn as 0 (background).
if self.data_format not in [
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_LAST,
]:
raise NotImplementedError(
"Data format not supported, must be 'channels_first' or "
"'channels_last', given {}.".format(self.data_format)
)
if one_hot and nclasses is None:
raise ValueError("Using `one_hot` requires `nclasses` to be defined.")
if one_hot is False and binarize is False:
raise ValueError(
"Setting `one_hot` False is incompatible with `binarize` as False."
)
if nclasses is None:
self.nclasses = 1
super(PolygonRasterizer, self).__init__(**kwargs)
def _post_process(self, cov):
"""Post process the output tensor.
Args:
cov (Tensor): Tensor output by the custom op.
Returns:
Tensor with post processing operations applied.
"""
if self.one_hot and self.include_background:
# Add the background as a separate map. With our current implementation this is not
# straightforward to add in the kernel (because the kernel loops over classes
# individually).
cov_sum = tf.reduce_sum(
input_tensor=cov, axis=-3, keepdims=True
) # NCHW->N1HW or CHW->1HW.
cov_background = tf.cast(
tf.equal(cov_sum, 0), dtype=tf.float32
) # N1HW or 1HW.
cov = tf.concat(
[cov_background, cov], axis=-3
) # (N, 1+C, H, W) or (1+C, H, W).
# TODO(xiangbok): Add native channels_last support.
if self.data_format == DataFormat.CHANNELS_LAST:
cov = DataFormat.convert(
cov,
from_format=DataFormat.CHANNELS_FIRST,
to_format=DataFormat.CHANNELS_LAST,
)
return cov
def call(
self,
polygon_vertices,
vertex_counts_per_polygon,
class_ids_per_polygon,
polygons_per_image=None,
force_cpu=False
):
"""call method.
Actually runs the polygon rasterizer op with given inputs, and returns the rasterized
map(s).
Args:
polygon_vertices: a tensor in the form of a list of lists. The top-level list contains
sub-lists with 2 elements each; each sub-list contains the x/y coordinates (in that
order) of a single vertex of a single polygon for a single image (= raster map). The
length of the top-level list is therefore equal to the total number of vertices over
all polygons that we are drawing over all raster maps.
vertex_counts_per_polygon: a tensor in the form of a flat list. The elements of the list
are the vertex counts for each polygon that we will draw during rasterization. Thus,
the length of this list is equal to the number of polygons we will draw, and if we
were to sum all the values in this list, the sum should equal the length of the
``polygon_vertices`` list above.
class_ids_per_polygon: a tensor in the form of a flat list having the same shape as the
``vertex_counts_per_polygon`` list above. Each list element is an ID representing
the class to which each polygon belongs.
polygons_per_image: if `None` (the default), we assume only one single image (i.e. this
call will output only a single raster map). Otherwise, this should be a tensor in
the form of a flat list, where each list element is the number of polygons to be
drawn for that image (raster). In this case, the sum of the list values should equal
the length of the ``vertex_counts_per_polygon`` list above.
Returns:
cov: a fp32 tensor (`NCHW`) containing the output map if 'data_format' is set to
'channels_first', or a fp32 tensor of shape (NHWC) if 'data_format' is set to
'channels_last'. When ``one_hot`` is used, the number of channels `C` is equal
to ``nclasses``, and when it is not used, it is equal to 1.
"""
polygon_vertices = tf.cast(polygon_vertices, dtype=tf.float32)
vertex_counts_per_polygon = tf.cast(vertex_counts_per_polygon, dtype=tf.int32)
class_ids_per_polygon = tf.cast(class_ids_per_polygon, dtype=tf.int32)
# If polygons_per_image is None, use an empty tensor to signal that we want
# 3D output. In that case the number of polygons is inferred from
# vertex_counts_per_polygon.
polygons_per_image = (
tf.constant([], dtype=tf.int32)
if polygons_per_image is None
else tf.cast(polygons_per_image, dtype=tf.int32)
)
op = load_custom_tf_op("op_rasterize_polygon.so")
if force_cpu:
with tf.device('CPU:0'):
cov = op.rasterize_polygon(
polygon_vertices=polygon_vertices,
vertex_counts_per_polygon=vertex_counts_per_polygon,
class_ids_per_polygon=class_ids_per_polygon,
polygons_per_image=polygons_per_image,
width=self.width,
height=self.height,
num_samples=tf.cast(self.num_samples, dtype=tf.int32),
nclasses=self.nclasses,
binarize=self.binarize,
one_hot=self.one_hot,
verbose=self.verbose,
)
else:
cov = op.rasterize_polygon(
polygon_vertices=polygon_vertices,
vertex_counts_per_polygon=vertex_counts_per_polygon,
class_ids_per_polygon=class_ids_per_polygon,
polygons_per_image=polygons_per_image,
width=self.width,
height=self.height,
num_samples=tf.cast(self.num_samples, dtype=tf.int32),
nclasses=self.nclasses,
binarize=self.binarize,
one_hot=self.one_hot,
verbose=self.verbose,
)
return self._post_process(cov)
class SparsePolygonRasterizer(PolygonRasterizer):
"""Polygon rasterizer for sparse polygon input.
See ``PolygonRasterizer`` documentation.
"""
def call(self, polygons, class_ids_per_polygon, force_cpu=False):
"""call method.
Args:
polygons (``tf.SparseTensor``): the polygons and its vertices wrapped in a sparse
tensor. Polygons.dense_shape must be either 3D (PVC) or 4D (NPVC), where N is batch
dimension, P is polygons, V vertices, and C coordinate index (0 or 1). In the
3D case the op returns a 3D tensor (CHW or HWC). In the 4D case the first
dimension of dense_shape specifies batch size, and the op returns a 4D tensor
(NCHW or NHWC). Polygons.values is a flat fp32 list of interleaved vertex x
and y coordinates. Polygons.indices is a 2D tensor with dimension 0 the size of
the polygons.values tensor, and dimension 1 either 3D (PVC) or 4D (NPVC).
class_ids_per_polygon: the class ids wrapped in a sparse tensor with indices
corresponding to those of the polygons. class_ids_per_polygon.dense_shape must be
either 2D (SC) for the 3D polygon case or 3D (NSC) for the 4D polygon case, where N
is the batch dimension, S is the shape dimension and C is the class dimension.
Each value is an ID representing the class to which each polygon belongs. If a class
id is associated with a polygon id that does not exist in the polygon sparse tensor,
the class id will be skipped from processing. If there exists a polygon that does
not have a corresponding class id, the operation will result in an error.
Returns:
cov: an fp32 tensor (CHW or NCHW) containing the output map if 'data_format' is
set to 'channels_first', or a fp32 tensor of shape (HWC or NHWC) if 'data_format'
is set to 'channels_last'. When ``one_hot`` is used, the number of channels `C` is
equal to ``nclasses``, and when it is not used, it is equal to 1.
"""
assert is_sparse(polygons)
indices = tf.cast(polygons.indices, dtype=tf.int32)
values = tf.cast(polygons.values, dtype=tf.float32)
dense_shape = tf.cast(polygons.dense_shape, dtype=tf.int32)
class_ids_per_polygon_indices = tf.cast(
class_ids_per_polygon.indices, dtype=tf.int32
)
class_ids_per_polygon_values = tf.cast(
class_ids_per_polygon.values, dtype=tf.int32
)
class_dense_shape = tf.convert_to_tensor(
value=class_ids_per_polygon.dense_shape, dtype=tf.int64
)
class_ids_per_polygon_dense_shape = tf.cast(class_dense_shape, dtype=tf.int32)
op = load_custom_tf_op("op_rasterize_polygon.so")
if force_cpu:
with tf.device('CPU:0'):
cov = op.rasterize_sparse_polygon(
polygon_indices=indices,
polygon_dense_shape=dense_shape,
polygon_values=values,
class_ids_per_polygon_indices=class_ids_per_polygon_indices,
class_ids_per_polygon_values=class_ids_per_polygon_values,
class_ids_per_polygon_dense_shape=class_ids_per_polygon_dense_shape,
width=self.width,
height=self.height,
num_samples=tf.cast(self.num_samples, dtype=tf.int32),
nclasses=self.nclasses,
binarize=self.binarize,
one_hot=self.one_hot,
verbose=self.verbose,
)
else:
cov = op.rasterize_sparse_polygon(
polygon_indices=indices,
polygon_dense_shape=dense_shape,
polygon_values=values,
class_ids_per_polygon_indices=class_ids_per_polygon_indices,
class_ids_per_polygon_values=class_ids_per_polygon_values,
class_ids_per_polygon_dense_shape=class_ids_per_polygon_dense_shape,
width=self.width,
height=self.height,
num_samples=tf.cast(self.num_samples, dtype=tf.int32),
nclasses=self.nclasses,
binarize=self.binarize,
one_hot=self.one_hot,
verbose=self.verbose,
)
return self._post_process(cov)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/polygon_rasterizer.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lookup Table Processor."""
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.processors import Processor
class LookupTable(Processor):
"""Create a lookup table (LUT) to relate keys to values, or uses a default value.
Args:
keys (list): list of keys, with the same length as ``values``.
values (list): list of values, with the same length as ``keys``.
default_value: the default value to be used when a key is not present in the ``keys`` list.
kwargs (dict): keyword arguments passed to parent class.
Raises:
TypeError: if ``keys`` or ``values`` is not of type ``list``.
ValueError: if the length of ``keys`` does not match the length of ``values``.
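Example:
A minimal sketch; the keys, values and input below are hypothetical.
    lut = LookupTable(keys=["car", "person"], values=[0, 1], default_value=-1)
    class_ids = lut(tf.constant(["person", "bicycle"]))
    # 'class_ids' evaluates to [1, -1]; "bicycle" is not a known key, so the
    # default value is used.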
"""
@save_args
def __init__(self, keys, values, default_value, **kwargs):
"""__init__ method."""
self.keys = keys
self.values = values
self.default_value = default_value
if type(keys) != list:
raise TypeError('"keys" is not of type "list"')
if type(values) != list:
raise TypeError('"values" is not of type "list"')
nkeys, nvalues = len(keys), len(values)
if nkeys != nvalues:
raise ValueError(
'keys/values list discrepancy: received %d "keys" and %d "values".'
% (nkeys, nvalues)
)
# The only possible LUT key types are string and int64. A problem occurs when a list
# of Python ints is passed in, because it is converted to a list of tf.int32 by default.
# We therefore need to specify the key dtype explicitly.
self.key_dtype = None
if type(keys[0]) is str:
self.key_dtype = tf.string
elif type(keys[0]) is int:
self.key_dtype = tf.int64
super(LookupTable, self).__init__(**kwargs)
def _build(self, *args, **kwargs):
"""Build and initialize the LUT."""
self._table = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(
self.keys, self.values, key_dtype=self.key_dtype
),
self.default_value,
)
def call(self, key):
"""call method.
Args:
key (tensor): input key to be related to a value in ``values`` through the LUT.
Returns:
tensor: mapped tensor as `x` relates to a `value` in the LUT, or uses the
``default_value``.
"""
return self._table.lookup(key)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/lookup_table.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Clip Polygon Processor."""
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.processors import load_custom_tf_op, Processor
class ClipPolygon(Processor):
"""
Processor to clip (crop) polygons.
Op to clip polygons or polylines with an input polygon mask.
Clipped polygons do not give any intra-polygon coordinate ordering guarantees. This is
typically not a problem as lines or polygons are agnostic to direction.
Polygons are assumed to be cyclical, and can therefore 'shift' indices in the array, and
can even be inverted in direction. Polylines (`closed` is False) are not cyclical and can
therefore only revert in direction, but can never be shifted.
Self-intersecting polygons will be split into multiple non-intersecting polygons. This
means that the number of output polygons can increase or decrease. This does not apply to
polylines (`closed` is False). Similarly, the number of output polygons and polylines can
decrease if they are clipped entirely.
Args:
closed (bool): if the polygon is closed or open (also known as polyline).
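Example:
A minimal sketch based on the accompanying unit tests: a tall rectangle is clipped by a
horizontal band.
    clipper = ClipPolygon(closed=True)
    polygons = tf.constant([(1.0, 0.0), (2.0, 0.0), (2.0, 3.0), (1.0, 3.0)], tf.float32)
    points_per_polygon = tf.constant([4], tf.int32)
    mask = tf.constant([(0.0, 1.0), (3.0, 1.0), (3.0, 2.0), (0.0, 2.0)], tf.float32)
    clipped, clipped_counts, index_map = clipper(
        polygons=polygons, points_per_polygon=points_per_polygon, polygon_mask=mask)
    # 'index_map' maps every output polygon back to the index of the input polygon
    # it originated from.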
"""
@save_args
def __init__(self, closed, **kwargs):
"""__init__ method."""
self.closed = closed
super(ClipPolygon, self).__init__(**kwargs)
def call(self, polygons, points_per_polygon, polygon_mask):
"""call method.
Args:
polygons: a (n, 2) fp32 tensor containing a long list of vertices of all the polygons.
The top-level list contains sub-lists with 2 elements each; each sub-list contains
the x/y coordinates (in that order) of a single vertex of a single polygon for a
single image (= raster map). The length of the top-level list is therefore equal to
the total number of vertices over all polygons that we are drawing over all raster
maps.
points_per_polygon: a 1D int32 tensor. The elements of the list are the vertex counts
for each polygon that we will draw during rasterization. Thus, the length of this
list is equal to the number of polygons we will draw, and if we were to sum all the
values in this list, the sum should equal the length of the ``polygons`` list above.
polygon_mask: a (n,2) fp32 tensor containing a single polygon used as the clipping mask.
Returns:
Three tensors:
clipped_polygons: same shape and type as `polygons` input, but clipped.
clipped_points_per_polygon: same shape and type as `points_per_polygon` input,
but clipped.
clipped_polygon_index_map: a 1D int32 tensor with the same length as
`clipped_points_per_polygon`. This contains a per-polygon map to the original
polygon index that was used as input. This is done because the amount of
input polygons might decrease or even increase. If those input polygons have
had metadata associated with them, we can map the output polygons to the
correct metadata using this tensor.
"""
points_per_polygon = tf.cast(points_per_polygon, tf.int32)
op = load_custom_tf_op("op_clip_polygon.so")
clipped_polygons, clipped_points_per_polygon, clipped_polygon_index_map = op.clip_polygon(
polygons=polygons,
points_per_polygon=points_per_polygon,
polygon_mask=polygon_mask,
closed=self.closed,
)
return (clipped_polygons, clipped_points_per_polygon, clipped_polygon_index_map)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/clip_polygon.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Load File Processor."""
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.processors import Processor
class LoadFile(Processor):
"""Load and read a file from an input string.
Args:
prefix (str): Optional prefix to be added to the input filename.
suffix (str): Optional suffix to be added to the input filename.
kwargs (dict): keyword arguments passed to parent class.
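Example:
A minimal sketch; the paths below are hypothetical.
    loader = LoadFile(prefix="/data/images/", suffix=".jpg")
    contents = loader(tf.constant("000001"))
    # Reads "/data/images/000001.jpg" and returns its contents as a scalar string tensor.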
"""
@save_args
def __init__(self, prefix=None, suffix=None, **kwargs):
"""__init__ method."""
self.prefix = prefix
self.suffix = suffix
super(LoadFile, self).__init__(**kwargs)
def call(self, filename):
"""call method.
Args:
filename (tensor): Tensor (string) containing the filename to be loaded. The filename
can be joined with an optional `prefix` and `suffix` as supplied with this layers
creation.
Returns:
tensor: File contents as loaded from `filename`.
"""
if self.prefix and self.suffix:
filename = tf.strings.join([self.prefix, filename, self.suffix])
elif self.prefix:
filename = tf.strings.join([self.prefix, filename])
elif self.suffix:
filename = tf.strings.join([filename, self.suffix])
return tf.io.read_file(filename)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/load_file.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus processors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
from functools import lru_cache
import os
import tensorflow as tf
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.framework.sparse_tensor import is_sparse
from tensorflow.python.ops import string_ops
from nvidia_tao_tf1.core.coreobject import AbstractTAOObject
from nvidia_tao_tf1.core.utils import get_uid_name
def is_tensor(x):
"""Determines if input is a TensorFlow tensor."""
return isinstance(x, tf_ops._TensorLike) or tf_ops.is_dense_tensor_like(x)
def dense_to_sparse(dense_tensor):
"""Convert a dense tensor to a sparse tensor."""
if is_sparse(dense_tensor):
return dense_tensor
indices = tf.compat.v1.where(condition=tf.ones_like(dense_tensor, dtype=tf.bool))
values = tf.gather_nd(params=dense_tensor, indices=indices)
shape = tf.shape(input=dense_tensor, out_type=tf.int64)
return tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
def boolean_mask_sparse_tensor(st, mask, axis=0):
"""Update a sparse tensor with a a mask.
NOTICE: This function only can only mask out values that are already absent in the sparse
tensor. That means it can only ever update sparse tensor ``indices`` and ``dense_shape``.
TODO(xiangbok): Add support to fully mask sparse tensors, including actual values.
The tensor will be masked with a boolean tensor, but assumes the content is already removed
and so is not present in the values or indices. However, the sparse tensor's indices and
shape will still reflect the empty rows. We want to mask out these indices and the shape.
A sparse tensor consists of three dense tensors named ``indices``, ``values`` and
``dense_shape``. This operation will not change the shape of these three tensors, and will never
change the content of ``values``.
The ``dense_shape`` is subtracted by the amount of masking that is applied on the
given ``axis``.
The ``indices`` on dimension of input ``axis`` are masked out. This means practically that the
``indices`` on the given ``axis`` are subtracted by the amount of masking that has been applied
to previous indices.
For example, a sparse tensor with ``indices = [0, 2, 2, 4]`` and ``dense_shape = [5]`` and
``mask = [True, False, True, True, True]``, then the output ``dense_shape = [4]`` and
the output ``indices = [0, 1, 1, 3]``.
Args:
st (``tf.SparseTensor``): The input tensor to be masked.
mask (``tf.Tensor``): The dense tensor to be used as a mask over the dimension indicated by
``axis``. When mask values are ``True``, it indicates the values to keep.
axis (int): The axis over which the mask should be applied.
Returns:
A masked ``tf.SparseTensor``.
Raises:
ValueError: if ``axis`` value is not supported (non-zero).
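Example:
A sketch of the 1-D case described above.
    st = tf.SparseTensor(indices=[[0], [2], [2], [4]], values=[1.0, 2.0, 3.0, 4.0], dense_shape=[5])
    mask = tf.constant([True, False, True, True, True])
    masked = boolean_mask_sparse_tensor(st, mask)
    # masked.dense_shape evaluates to [4] and masked.indices to [[0], [1], [1], [3]];
    # the values themselves are left untouched.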
"""
if axis != 0:
raise ValueError("Only axis=0 supported, got `{}`.".format(axis))
assert_op = tf.compat.v1.assert_equal(
tf.cast(st.dense_shape[axis], tf.int32), tf.shape(input=mask)[0]
)
# Compute the dense shape. The dense shape is simply subtracted over the correct axis
# by counting the amount elements where the mask is ``False``.
with tf.control_dependencies([assert_op]):
count_zero = tf.reduce_sum(
input_tensor=tf.cast(tf.equal(mask, False), tf.int64)
)
dense_shape = tf.concat(
[[st.dense_shape[axis] - count_zero], st.dense_shape[1:]], axis=axis
)
# Split out the axis row from the other rows. We will use the other rows to concat back later.
dim0, dims = tf.split(st.indices, num_or_size_splits=[1, -1], axis=1)
# Calculate how much we need to subtract:
# Example: ``mask = [True, False, True, True, True]`` -> ``subtraction = [0, -1, -1, -1, -1]``.
subtraction = tf.scan(
lambda a, x: a - tf.cast(tf.equal(x, 1), tf.int64),
tf.cast(~mask, tf.int64),
initializer=tf.constant(0, tf.int64),
parallel_iterations=1,
back_prop=False,
)
# These previous subtractions relate to a continuous index range. Our sparse tensor might
# not be continuous, so we have to gather the updates to the correct indices.
# Example: ``dim0 = [0, 3]`` and ``subtraction = [0, -1, -1, -1, -1]`` then
# ``subtraction_sparse = [0, -1]``.
subtraction_sparse = tf.gather(subtraction, dim0)
# We apply the subtraction here and concatenate the new axis indices together with the other
# unaffected indices dimensions.
indices_dim0 = dim0 + subtraction_sparse
indices = tf.concat([indices_dim0, dims], axis=1)
return tf.SparseTensor(indices=indices, values=st.values, dense_shape=dense_shape)
def remove_empty_rows_from_sparse_tensor(st, axis=0):
"""Remove empty rows from a sparse tensor over a given axis.
Removes empty elements over one axis. For example, if the sparse tensor contains indices
only over rows ``[2, 4, 5]``; it means ``[0, 1, 3]`` are empty. This function will reduce
the indices so that ``[2, 4, 5]`` now cleanly map to ``[0, 1, 2]``. The sparse tensor's
``dense_shape`` will also be changed accordingly. This function never changes actual values.
Args:
st (`tf.SparseTensor`): the tensor to be reduced.
axis (int): the axis over which the reduction will take place.
Returns:
tf.SparseTensor with empty rows removed from the input ``st`` tensor.
Raises:
NotImplementedError: if ``axis`` value is not supported (non-zero).
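Example:
A sketch of the reduction described above.
    st = tf.SparseTensor(indices=[[2, 0], [4, 0], [5, 0]], values=[1.0, 2.0, 3.0], dense_shape=[6, 1])
    reduced = remove_empty_rows_from_sparse_tensor(st)
    # reduced.indices evaluates to [[0, 0], [1, 0], [2, 0]] and reduced.dense_shape
    # to [3, 1]; the empty rows 0, 1 and 3 are removed.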
"""
if axis != 0:
raise NotImplementedError("Only pruning of sparse tensor axis 0 implemented")
# Indices.
indices = st.indices[:, 0]
dim0, dims = tf.split(st.indices, num_or_size_splits=[1, -1], axis=1)
uniques, indices = tf.unique(dim0[:, 0])
indices = tf.expand_dims(indices, 1)
indices = tf.concat([tf.cast(indices, tf.int64), dims], 1)
# Compute the new dense shape.
dim_count = tf.cast(tf.shape(input=uniques)[0], tf.int64)
dense_shape = tf.concat([[dim_count], st.dense_shape[1:]], axis=0)
return tf.SparseTensor(indices=indices, values=st.values, dense_shape=dense_shape)
def to_dense_if_sparse_tensor_is_fully_dense(st, axis=0):
"""Convert a tf.SparseTensor to a dense Tensor if it's not sparse over given axis.
Args:
st (``tf.SparseTensor``): input tensor.
axis (int): the dimension over which the density check is performed.
Returns:
The input sparse tensor converted to a dense type as ``tf.Tensor``.
Raises:
NotImplementedError: if ``axis`` value is not supported (non-zero).
ValueError: if input tensor ``st`` is not a sparse tensor.
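Example:
A minimal sketch using ``dense_to_sparse`` from this module.
    st = dense_to_sparse(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
    dense = to_dense_if_sparse_tensor_is_fully_dense(st)
    # 'dense' is the original 2x2 tensor; the embedded assertion fails at run time
    # if the sparse tensor does not cover every element of its dense shape.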
"""
if axis != 0:
raise NotImplementedError("Axis {} not supported.".format(axis))
if not is_sparse(st):
raise ValueError("Input tensor ({}) should be a tf.SparseTensor.".format(st))
assert_op = tf.compat.v1.assert_equal(
tf.shape(input=st.values)[0],
tf.cast(tf.reduce_prod(input_tensor=st.dense_shape), tf.int32),
)
with tf.control_dependencies([assert_op]):
return tf.reshape(st.values, st.dense_shape)
@lru_cache()
def load_custom_tf_op(filename, python_module_path=__file__):
"""Load a custom tf op library from a file.
Loads a custom tf op library given the specified filename and a path to the caller. The path
of the caller (`python_module_path`) should usually be obtained by the `__file__` global
variable. Given `python_module_path`, this function will search a sibling `lib` directory
for the specified `filename`.
Example:
> print(__file__)
/foo/bar/baz.py
> load_custom_tf_op('my_custom_baz_op.so', __file__)
# Library Path: /foo/lib/my_custom_baz_op.so
# Calls tf.load_op_library('/foo/lib/my_custom_baz_op.so')
Args:
filename (str): The name of the library file (e.g. foo_bar.so).
python_module_path (str): The path of the python module calling this function.
"""
abs_path = os.path.join(os.path.dirname(python_module_path), "..", "lib", filename)
return tf.load_op_library(abs_path)
class Processor(AbstractTAOObject):
"""Processor (non-differentiable Layer) base class.
    This object is very similar to a `keras.Layer`, with some minor differences. The methods
    that a subclass should override are the same, but their inputs and outputs are more
    flexible: they may be arbitrary (keyword) arguments and dictionaries, or no input
    arguments at all.
Args:
kwargs (dict): keyword arguments.
"""
def __init__(self, **kwargs):
"""__init__ method."""
name = kwargs.get("name", get_uid_name(self.__class__.__name__))
self.name = name
self._built = False
def _build(self, *args, **kwargs):
"""Anything that needs to be created once for the op is done here.
        For example: weight creation, op creation, or anything else that needs initializing.
"""
pass
def build(self, *args, **kwargs):
"""Passthrough for the build method and set the member 'built' to True."""
self._build(*args, **kwargs)
self._built = True
@abstractmethod
def call(self, *args, **kwargs):
"""The layers logic should be implemented in this method."""
raise NotImplementedError("Override me.")
def __call__(self, *args, **kwargs):
"""The entrypoint for calling the logic of a layer, after its creation.
        If the layer has not been built yet, `build()` is called first.
"""
with tf.compat.v1.name_scope(self.name):
if not self._built:
self.build(*args, **kwargs)
return self.call(*args, **kwargs)
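# Illustrative sketch (not part of the original module): a minimal Processor subclass.
# It assumes AbstractTAOObject imposes no extra constructor requirements; `call` holds
# the per-call logic and `build` (here the inherited no-op) runs once on the first call.
class _ExampleScaleProcessor(Processor):
    """Multiply the input tensor by a constant factor."""

    def __init__(self, factor, **kwargs):
        super(_ExampleScaleProcessor, self).__init__(**kwargs)
        self.factor = factor

    def call(self, x):
        return x * self.factor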
def json_arrays_to_tensor(value, dtype, add_brackets=False):
"""Convert a json-encoded values (that may be nested in lists) to a sparse tensor.
Arguments:
        value (tf.string): A valid json-encoded string. The values may be nested inside
            lists and must all be of compatible datatypes (corresponding to the ``dtype``
            argument). For example, floats and integers are compatible (with potential data
            loss due to casting).
dtype (tf.dtype): Supported datatype (tf.int32, tf.int64, tf.float32, tf.string), the
output values will be in this ``dtype``.
        add_brackets (bool): Whether to wrap the input in brackets before decoding, for
            example `[..],[..]` -> `[[..],[..]]`. Defaults to False. This is a convenience
            option that fuses the string concatenation into this operation for performance.
Returns:
A ``tf.SparseTensor`` containing (by definition) the ``indices`` (``tf.int64``),
``values`` (``dtype``) and ``dense_shape`` (``tf.int64``) of the decoded json.
Raises:
``tf.errors.InvalidArgumentError`` If the input is not a valid json, or there is mixing of
incompatible datatypes within the json.
``TypeError`` If the input is not a scalar tf.string.
"""
if add_brackets:
value = tf.strings.join(["[", value, "]"])
op = load_custom_tf_op("op_json_arrays_to_tensor.so")
indices, values, dense_shape = op.json_arrays_to_tensor(value=value, dtype=dtype)
return tf.SparseTensor(indices=indices, values=values, dense_shape=dense_shape)
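# Illustrative usage sketch (not part of the original module): decoding a nested JSON
# string into a sparse tensor. Requires the custom op library shipped with the package.
def _example_json_arrays_to_tensor():
    """Decode '[[1, 2], [3]]' into a SparseTensor with values [1, 2, 3]."""
    value = tf.constant("[[1, 2], [3]]")
    return json_arrays_to_tensor(value, dtype=tf.int32)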
def sparse_coordinate_feature_to_vertices_and_counts(st, vertex_dims=2):
"""Convert a sparse tensor containing coordinate vertices to a vertex and count tensor.
Args:
st (``tf.SparseTensor``): A sparse tensor containing features in the outer dimension,
then vertices, and in the innermost dimension a list of floats.
The inner dimension has the shape of ``vertex_dims``.
vertex_dims (int): The dimension of the vertices used.
Returns:
vertices (tensor): A tensor of shape (vertex count, ``vertex_dims``), containing all
concatented vertices. The feature to which each vertex belongs can be distinguished by
using the ``vertex_count_per_feature`` output.
vertex_count_per_feature (tensor): The tensor of shape (feature count,) containing vertex
count per feature. The sum of this tensor is equal to the total vertex count.
"""
# Calculate the vertex count per feature.
feature_count = tf.cast(st.dense_shape[0], tf.int32)
vertex_count_per_feature = (
tf.math.bincount(tf.cast(st.indices[:, 0], tf.int32), minlength=feature_count)
// vertex_dims
)
# Reshape the vertices simply to (n, vertex_dims).
vertices = tf.reshape(st.values, [-1, vertex_dims])
return vertices, vertex_count_per_feature
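# Illustrative usage sketch (not part of the original module): two polygons with 2 and 1
# two-dimensional vertices, stored as a flat sparse coordinate feature.
def _example_vertices_and_counts():
    """Return vertices of shape (3, 2) and vertex_count_per_feature equal to [2, 1]."""
    st = tf.SparseTensor(
        indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1]],
        values=[0.0, 0.0, 1.0, 1.0, 5.0, 5.0],
        dense_shape=[2, 4],
    )
    return sparse_coordinate_feature_to_vertices_and_counts(st, vertex_dims=2)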
def values_and_count_to_sparse_tensor(values, counts, counts_of_counts=None):
"""Converts values and its counts into a sparse tensor.
Args:
        values: tensor containing a flat list of all the values. The length of this list
            equals the total number of values that will be put into the sparse tensor.
        counts: a 1D int32 tensor. Each element gives the number of values belonging to one
            row of the sparse tensor. The sum of this list should equal the length of the
            ``values`` list above.
        counts_of_counts: an optional 1D int32 tensor. Each element gives the number of
            ``counts`` entries belonging to one image. The sum of this list should equal the
            length of the ``counts`` list above. If this parameter is not specified, the
            output will be of a rank that is lower by 1.
Returns:
A ``tf.SparseTensor`` containing (by definition) the ``indices`` (``tf.int64``),
``values`` (``dtype``) and ``dense_shape`` (``tf.int64``) of the values.
"""
if counts_of_counts is None:
counts_of_counts = tf.zeros([], dtype=tf.int32)
op = load_custom_tf_op("op_values_and_count_to_sparse_tensor.so")
indices, output_values, dense_shape = op.values_and_count_to_sparse_tensor(
values=values, counts=counts, counts_of_counts=counts_of_counts
)
return tf.SparseTensor(
indices=indices, values=output_values, dense_shape=dense_shape
)
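# Illustrative usage sketch (not part of the original module): packing a flat value list
# plus per-row counts into a sparse tensor. Requires the custom op library to be present.
def _example_values_and_count_to_sparse_tensor():
    """Build a 2-row sparse tensor whose rows hold [1., 2.] and [3.] respectively."""
    values = tf.constant([1.0, 2.0, 3.0])
    counts = tf.constant([2, 1], dtype=tf.int32)
    return values_and_count_to_sparse_tensor(values, counts)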
def string_lower(x):
"""Convert any tensor of type tf.string to lowercase."""
return string_ops.string_lower(x)
def string_upper(x):
"""Convert any tensor of type tf.string to uppercase."""
return string_ops.string_upper(x)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/processors.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFRecords Iterator Processor."""
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.processors import Processor
class TFRecordsIterator(Processor):
"""Processor that sets up a TFRecordsDataset and yields values from that input.
This uses TF Dataset API with initializable iterator. Note that when used for evaluation,
``repeat`` option should be set to ``False``.
Args:
file_list (string): File paths to tf records files, possibly containing wildcards.
All matching files will be iterated over each epoch. If this is `None`, you need to
            pass in a ``tf.data.Dataset`` object to the ``build`` method.
batch_size (int): How many records to return at a time.
shuffle_buffer_size (int): The maximum number of records the buffer will contain.
If more than 0, ``shuffle`` needs to be ``True``.
shuffle (bool): Toggle shuffling. If ``True``, ``shuffle_buffer_size`` needs to be
more than 0.
        repeat (bool): Toggle repeating the tfrecords. If this is False, it will only output
            tensors for one full cycle through all the data in the tfrecords files, and the
            last batch of that cycle can be smaller than ``batch_size``. If ``True``, the data
            wraps around indefinitely and every batch has exactly ``batch_size`` elements.
batch_as_list (bool): Whether a batch should be returned as a list (i.e. split into single
elements, rather than as a single large tensor with first dimension = ``batch_size``).
False by default.
sequence_length (int): Length of the sequence for sequence batching. A value of 0 means
disabled and is the default value. The output of the iterator is flattened to a
batch size of ``sequence_length * batch_size``. The sequence is obtained before
shuffling.
prefetch_buffer_size (int): How many batches should be prefetched (buffered). If this value
is 0, no buffering or prefetching will occur.
cache (bool): If you want to cache the entire dataset in memory.
Raises:
ValueError: if ``batch_as_list`` is set while ``repeat`` is False,
or if ``shuffle_buffer_size`` is greater than zero when ``shuffle`` is False,
or if ``shuffle`` is set with ``shuffle_buffer_size`` less than 1.
"""
ITERATOR_INIT_OP_NAME = "iterator_init"
@save_args
def __init__(
self,
file_list,
batch_size,
shuffle_buffer_size=0,
shuffle=False,
repeat=False,
batch_as_list=False,
sequence_length=0,
prefetch_buffer_size=0,
cache=False,
**kwargs
):
"""__init__ method."""
self.file_list = file_list
self.batch_size = batch_size
self.shuffle_buffer_size = shuffle_buffer_size
self.shuffle = shuffle
self.repeat = repeat
self.batch_as_list = batch_as_list
self.sequence_length = sequence_length
        self.prefetch_buffer_size = prefetch_buffer_size  # TODO(xiangbok): set default to >0
self.cache = cache
if self.repeat is False and self.batch_as_list:
raise ValueError(
"`batch_as_list` cannot be True if `repeat` is False because the "
"split dimension (batch size) is not fixed before run-time. Because "
"when repeat is False, the last batch can have a truncated size."
)
if self.shuffle is False and self.shuffle_buffer_size > 0:
raise ValueError(
"'shuffle' is False while 'shuffle_buffer_size' is %d."
% shuffle_buffer_size
)
if self.shuffle is True and self.shuffle_buffer_size < 1:
raise ValueError(
"'shuffle' is True while 'shuffle_buffer_size' is %d."
% self.shuffle_buffer_size
)
super(TFRecordsIterator, self).__init__(**kwargs)
def _build(self, dataset=None, *args, **kwargs): # pylint: disable=W1113
"""Build the record input.
Args:
dataset (TFRecordDataset): Optionally pass in a dataset object that's already been
prepared.
Raises:
ValueError: if no ``file_list`` was specified in ``init`` and ``dataset`` is also None.
"""
if dataset is None:
if self.file_list is None:
raise ValueError(
"If no `file_list` has been provided, a `dataset` needs to be "
"provided to the `build` method."
)
dataset = tf.data.TFRecordDataset(self.file_list)
if self.cache:
dataset = dataset.cache()
if self.prefetch_buffer_size:
dataset = dataset.prefetch(self.prefetch_buffer_size)
if self.sequence_length:
dataset = dataset.batch(self.sequence_length)
if self.shuffle:
dataset = dataset.shuffle(
buffer_size=self.shuffle_buffer_size, reshuffle_each_iteration=True
)
if self.repeat:
dataset = dataset.repeat()
dataset = dataset.batch(self.batch_size)
self.iterator = tf.compat.v1.data.Iterator.from_structure(
dataset.output_types, dataset.output_shapes
)
self.iterator_init_op = self.iterator.make_initializer(dataset)
# Add the iterator to our custom collection to easily retrieve later.
tf.compat.v1.add_to_collection(
self.ITERATOR_INIT_OP_NAME, self.iterator_init_op
)
def initialize(self, sess):
"""Initialize the iterator."""
sess.run(self.iterator_init_op)
def reset(self, sess):
"""Reset the iterator, as if no data has been pulled.
Note that resetting is the same operation as initialization.
"""
sess.run(self.iterator_init_op)
def process_records(self, records):
"""Process records helper function."""
if self.repeat:
# Only if repeat is True, our batch size is fixed and we can perform reshaping.
if self.sequence_length:
records = tf.reshape(records, [self.batch_size * self.sequence_length])
else:
records = tf.reshape(records, [self.batch_size])
if self.batch_as_list:
records = tf.split(records, int(records.get_shape()[0]), 0)
records = [tf.reshape(record, []) for record in records]
return records
def call(self):
"""call method.
Returns:
            records: a list or dense tensor (depending on the value of ``batch_as_list``) containing
                the next batch as yielded from the `TFRecordsDataset`. Each new call pulls a
                fresh batch of samples. When ``repeat`` is True, the records wrap around to the
                next epoch as required; when ``repeat`` is False and the dataset is exhausted,
                the iterator must be reinitialized before further batches can be pulled.
"""
records = self.iterator.get_next()
return self.process_records(records)
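# Illustrative usage sketch (not part of the original module): typical TF1-style use of
# the iterator. The file path below is hypothetical and only serves as an example.
def _example_tfrecords_iterator(sess):
    """Pull one batch of serialized records from a hypothetical tfrecords file."""
    iterator = TFRecordsIterator(
        file_list=["/path/to/data.tfrecords"],  # hypothetical path
        batch_size=4,
        shuffle=True,
        shuffle_buffer_size=64,
        repeat=True,
    )
    records = iterator()  # Builds the dataset and returns the next-batch tensor.
    iterator.initialize(sess)  # Runs the iterator initializer op.
    return sess.run(records)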
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/tfrecords_iterator.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying a Modulus Transform to input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.core.processors import ColorTransform, ColorTransformer
from nvidia_tao_tf1.core.processors import SpatialTransform, SpatialTransformer
from nvidia_tao_tf1.core.types import Canvas2D, DataFormat, Transform
@pytest.mark.parametrize(
"data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize("max_clip", [10.0, 20.0])
@pytest.mark.parametrize("min_clip", [0.0, 3.0])
@pytest.mark.parametrize("batch_size", [5, 10])
def test_color_transformer_call(batch_size, min_clip, max_clip, data_format):
"""Test color transformer call function."""
applicant = (
tf.ones([batch_size, 3, 5, 5])
if data_format == DataFormat.CHANNELS_FIRST
else tf.ones([batch_size, 5, 5, 3])
)
ctm = tf.random.uniform((4, 4), minval=1, maxval=5)
batched_ctms = tf.tile(tf.expand_dims(ctm, axis=0), [batch_size, 1, 1])
transform = Transform(
canvas_shape=Canvas2D(5, 5),
color_transform_matrix=ctm,
spatial_transform_matrix=tf.eye(3),
)
transformer = ColorTransformer(
transform, min_clip=min_clip, max_clip=max_clip, data_format=data_format
)
output = transformer(applicant)
expected_output = ColorTransform(
data_format=data_format, min_clip=min_clip, max_clip=max_clip
)(applicant, batched_ctms)
with tf.compat.v1.Session() as sess:
output, expected_output = sess.run([output, expected_output])
np.testing.assert_allclose(output, expected_output)
@pytest.mark.parametrize(
"data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize("method", ["bilinear", "bicubic"])
@pytest.mark.parametrize("background_value", [0.0, 0.5, 1.0])
@pytest.mark.parametrize("batch_size", [5, 10])
def test_spatial_transformer_call(batch_size, background_value, method, data_format):
"""Test color transformer call function."""
applicant = (
tf.ones([batch_size, 3, 5, 5])
if data_format == DataFormat.CHANNELS_FIRST
else tf.ones([batch_size, 5, 5, 3])
)
stm = tf.random.uniform((3, 3), minval=1, maxval=5)
batched_stms = tf.tile(tf.expand_dims(stm, axis=0), [batch_size, 1, 1])
transform = Transform(
canvas_shape=Canvas2D(5, 5),
color_transform_matrix=tf.eye(4),
spatial_transform_matrix=stm,
)
transformer = SpatialTransformer(
transform,
data_format=data_format,
method=method,
background_value=background_value,
)
output = transformer(applicant)
expected_output = SpatialTransform(
data_format=data_format, method=method, background_value=background_value
)(applicant, batched_stms)
expected_shape = (
[batch_size, 3, 5, 5]
if data_format == DataFormat.CHANNELS_FIRST
else [batch_size, 5, 5, 3]
)
with tf.compat.v1.Session() as sess:
assert output.get_shape().is_fully_defined()
assert expected_shape == output.get_shape().as_list()
output, expected_output = sess.run([output, expected_output])
np.testing.assert_allclose(output, expected_output)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/test_transformers.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bbox Rasterizer Processor."""
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.processors import load_custom_tf_op, Processor
from nvidia_tao_tf1.core.types import data_format as modulus_data_format, DataFormat
class BboxRasterizer(Processor):
"""Processor that rasterizes rectangles and ellipses into images.
Output of the rasterization operation is a 5D tensor with shape (N, C, G, H, W),
where N is the number of images (=batch size), C is the number of classes, G is the
number of gradient buffers (described later), H is image height, and W is image width.
The processor supports drawing both rectangles and ellipses. These shapes are
collectively referred to as bboxes. This is because an ellipse can be thought of as
being embedded within its bounding box.
The bboxes are input as a number of tensors specifying their attributes. Each bbox
is described by a 3x3 matrix giving its size and location, a class ID, a number
of gradient coefficients, horizontal and vertical coverage radii, and flags bitfield.
Bboxes are drawn in the order they are specified, ie. a later bbox is drawn on top of
an earlier one.
The reason for using a matrix to describe the geometry is that it is a compact way to
specify all affine transformations of the unit square/circle in a plane, ie. it allows
drawing arbitrarily rotated, scaled, and sheared boxes and ellipses. We're restricting
ourselves to matrices where the third column is always [0,0,1]^T as this processor does
not support perspective projections. The matrix maps an output image plane pixel
location P onto a location P' in rasterization space, where the bbox is the unit square
(and the embedded ellipse is the unit circle). In the rasterization space determining
whether a pixel is covered by the bbox is simply a matter of checking whether P' is within
the unit square (for rectangles) or the unit circle (for ellipses). One intuitive way of
forming the matrix is to construct a mapping that transforms the unit square to the
desired shape, then inverting the resulting matrix. Higher level classes are
expected to provide user friendly helper functions for constructing the matrix depending
on application's needs.
For each pixel and bbox, the rasterizer computes a coverage value by supersampling, ie.
checking at 4 by 4 grid of locations within the pixel whether the location is inside the
bbox. Coverage is defined as the fraction of locations inside the bbox, thus 0.0 means no
part of the pixel is covered, while 1.0 means the whole pixel is covered. Fractional
coverage occurs at object edges.
Sometimes we want to make a foreground object stand out better by masking out background
objects around it. For this, the rasterizer supports deadzone, which is defined using
the coverage radii inputs that define the actual object's size within the bbox, while
the rest is used for the deadzone. A coverage radius of 1.0 means the actual object
covers the whole bbox, and thus there's no deadzone. A coverage radius of 0.5 means
the central half of the bbox is used for the actual object, while the surrounding half is
used for the deadzone.
The third dimension of the output tensor contains a number of buffers with user specified
linearly interpolated values (=gradients) that are optionally multiplied by each pixel's
computed coverage value. Note that the term gradient comes from computer graphics and
does not have anything to do with network gradients. Use cases include an object coverage
buffer where a constant gradient of 1.0 is multiplied by pixel coverage, and a set of bbox
distance buffers where four interpolated gradients are used for computing each pixel's
distance to each of the four bbox edges. Note that in the latter case multiplying the
interpolated value by coverage does not make sense.
A gradient is defined by the formula g = A*px + B*py + C, where A, B, and C are user
defined coefficients and px and py are pixel coordinates. It's easy to see that A describes
the change in gradient value when moving one pixel right, and B the same when moving one
pixel down. C is a constant offset. For a constant gradient, simply set C to the desired
value and set A = B = 0. Higher level code is expected to provide user friendly helper
functions for constructing the gradient coefficients depending on application's needs.
The reason for using gradients is that it allows specifying all linear 1D functions on a
2D plane, including constant values. This is a general way for supporting variable number
of output buffers with application defined meanings. If a nonlinearity is needed on top of
a linear function, it should be done in a postprocessing pass. Note that nonlinearly
interpolated values such as radial functions cannot be done in a postprocess and instead
require modifications to this op.
Unlike coverage, a gradient's value is computed once per pixel. This has the effect that
if a gradient is chosen to be multiplied by coverage, the result has smooth edges. If a
gradient is not multiplied by coverage, the edges might appear rough.
For each pixel, the bboxes with the same class ID are composited in back to front order.
Compositing differs from
standard computer graphics compositing modes in that we keep track of maximum coverage
value seen so far, and only replace a pixel if the new bbox fragment has larger
coverage. This has been shown to improve detection performance with small objects.
If the pixel falls within the deadzone of a bbox and the bbox's coverage is larger than the
maximum seen so far, we clear the background and set maximum coverage to zero, which has
the effect of masking out the background objects.
"""
# Supported bbox_flags
DRAW_MODE_RECTANGLE = 0
DRAW_MODE_ELLIPSE = 1
# Supported gradient flags
GRADIENT_MODE_PASSTHROUGH = 0
GRADIENT_MODE_MULTIPLY_BY_COVERAGE = 1
@save_args
def __init__(self, verbose=False, data_format=None, **kwargs):
"""__init__ method.
        Args:
            verbose (bool): Whether to enable verbose output from the rasterization op.
            data_format (str): A string representing the dimension ordering of the input data.
                Must be one of 'channels_last' or 'channels_first'. If ``None`` (default), the
                modulus global default will be used.
Raises:
NotImplementedError: if ``data_format`` is not in ['channels_first', 'channels_last'].
"""
super(BboxRasterizer, self).__init__(**kwargs)
self.verbose = verbose
self.data_format = (
data_format if data_format is not None else modulus_data_format()
)
if self.data_format not in [
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_LAST,
]:
raise NotImplementedError(
"Data format not supported, must be 'channels_first' or "
"'channels_last', given {}.".format(self.data_format)
)
def call(
self,
num_images,
num_classes,
num_gradients,
image_height,
image_width,
bboxes_per_image,
bbox_class_ids,
bbox_matrices,
bbox_gradients,
bbox_coverage_radii,
bbox_flags,
gradient_flags,
bbox_sort_values=None,
force_cpu=False,
):
"""Generate image tensors by rasterization in native Tensorflow.
Args:
num_images: 1D tensor with length of 1 that describes the number of output images
(= batch size N). The value must be >= 1.
num_classes: integer constant that describes the number of output classes C. The value
must be >= 1.
num_gradients: 1D tensor with length of 1 that describes the number of output gradients
G. The value must be >= 1.
image_height: 1D tensor with length of 1 that describes the height H of output images.
The value must be >= 1.
image_width: 1D tensor with length of 1 that describes the width W of output images. The
value must be >= 1.
bboxes_per_image: 1D int32 tensor of length N. Specifies the number of bboxes in each
image.
bbox_class_ids: 1D int32 tensor of length B (=total number of bboxes to draw). Contains
a class ID for each bbox. Class ID must be a monotonically increasing value within
each image.
bbox_matrices: 3D float32 tensor of shape (B,3,3). Contains a 3x3 row major matrix that
specifies the shape of each bbox to be drawn. The third column of the matrix is
implicitly taken to be [0,0,1] (ie. the actual values in that column are ignored).
In rectangle drawing mode, pixel coordinates form a row vector P=[px,py,1] that is
multiplied by the matrix M from the right: Q = P M. The resulting coordinates Q that
end up within the unit square around the origin (ie. Q is within [-1,1] range) are
considered to be inside deadzone, and a Q that satisfy |Q.x| < coverage_radii.x AND
|Q.y| < coverage_radii.y are considered to be inside coverage zone. Pixels inside
coverage zone receive coverage value 1.0, and pixels outside coverage zone but
inside deadzone receive coverage value 0.0. Since coverage value is computed using
supersampling, pixels that cross zone edge receive coverage value between 0 and 1.
In ellipse mode, the unit square is replaced by the unit circle. Pixels inside
coverage zone satisfy (Q.x/coverage_radii.x)^2 + (Q.y/coverage_radii.y)^2 < 1.
bbox_gradients: 3D float32 tensor of shape (B,G,3). Contains three gradient coefficients
A, B, and C for each bbox and gradient. Used for computing a gradient value based on
pixel coordinates using the gradient function g = A*px+B*py+C.
Gradient values are written as is to all pixels within the actual object (ie. not
deadzone), to output tensor location [im, cl, g, px, py], optionally multiplied
by pixel's coverage value.
bbox_coverage_radii: 2D float32 tensor of shape (B, 2). Sensible coverage radius values
are between 0.0 and 1.0.
bbox_flags: 1D uint8 tensor of length B. Contains per bbox flags. Currently the only
supported flag chooses between rectangle mode and ellipse mode.
gradient_flags: 1D uint8 tensor of length G. Contains per gradient flags. Currently the
only supported flag chooses whether a particular gradient value should be multiplied
by coverage value or not.
            bbox_sort_values: 1D float32 tensor of length B. Contains optional bbox sort values that
                define bbox drawing order within each image and class (the order is ascending:
                the bbox with the smallest sort value is drawn first). This input can be None, in
                which case bboxes are drawn in the input order.
            force_cpu: bool. If True, the rasterization op is explicitly placed on the CPU device.
Returns:
output_image: 5D tensor with shape (N, C, G, H, W) or (N, H, W, C, G).
"""
# TODO(xiangbok): many of the inputs here are probably attributes, should be moved
# from call() to __init__().
bbox_sort_values = (
tf.zeros_like(bbox_class_ids, dtype=tf.float32)
if bbox_sort_values is None
else bbox_sort_values
)
op = load_custom_tf_op("op_rasterize_bbox.so")
        op_kwargs = dict(
            num_images=num_images,
            num_classes=num_classes,
            num_gradients=num_gradients,
            image_height=image_height,
            image_width=image_width,
            bboxes_per_image=bboxes_per_image,
            bbox_class_ids=bbox_class_ids,
            bbox_matrices=bbox_matrices,
            bbox_gradients=bbox_gradients,
            bbox_coverage_radii=bbox_coverage_radii,
            bbox_flags=bbox_flags,
            bbox_sort_values=bbox_sort_values,
            gradient_flags=gradient_flags,
            verbose=self.verbose,
        )
        if force_cpu:
            # Explicitly place the rasterization op on the CPU when requested.
            with tf.device("CPU:0"):
                output_image = op.rasterize_bbox(**op_kwargs)
        else:
            output_image = op.rasterize_bbox(**op_kwargs)
# Op returns NCGHW natively, need to do a transpose to get NHWCG.
if self.data_format == DataFormat.CHANNELS_LAST:
output_image = tf.transpose(a=output_image, perm=[0, 3, 4, 1, 2])
return output_image
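# Illustrative usage sketch (not part of the original module): rasterizing one axis-aligned
# rectangle with a single constant coverage gradient. The matrix maps output pixels into the
# bbox's unit-square space as described in the class docstring: for a box spanning
# x in [x0, x1] and y in [y0, y1], the diagonal holds 2/width and 2/height and the last row
# translates the box center to the origin. Requires the custom op library to be present.
def _example_rasterize_single_box():
    x0, y0, x1, y1 = 10.0, 20.0, 50.0, 60.0
    sx, sy = 2.0 / (x1 - x0), 2.0 / (y1 - y0)
    matrices = [[[sx, 0.0, 0.0],
                 [0.0, sy, 0.0],
                 [-sx * (x0 + x1) / 2.0, -sy * (y0 + y1) / 2.0, 1.0]]]
    rasterizer = BboxRasterizer()
    return rasterizer(
        num_images=[1],
        num_classes=1,
        num_gradients=[1],
        image_height=[64],
        image_width=[64],
        bboxes_per_image=tf.constant([1], dtype=tf.int32),
        bbox_class_ids=tf.constant([0], dtype=tf.int32),
        bbox_matrices=matrices,
        bbox_gradients=[[[0.0, 0.0, 1.0]]],  # Constant gradient of 1.0.
        bbox_coverage_radii=[[1.0, 1.0]],
        bbox_flags=tf.constant([BboxRasterizer.DRAW_MODE_RECTANGLE], dtype=tf.uint8),
        gradient_flags=tf.constant(
            [BboxRasterizer.GRADIENT_MODE_MULTIPLY_BY_COVERAGE], dtype=tf.uint8
        ),
    )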
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/bbox_rasterizer.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying random glimpse transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.augment import spatial
from nvidia_tao_tf1.core.processors.processors import Processor
from nvidia_tao_tf1.core.types import Canvas2D, Transform
class RandomGlimpse(Processor):
"""Processor for extracting random glimpses of images and labels."""
# Always crop the center region.
CENTER = "center"
# Crop at random location keeping the cropped region within original image bounds.
RANDOM = "random"
CROP_LOCATIONS = [CENTER, RANDOM]
@save_args
def __init__(self, height, width, crop_location, crop_probability, **kwargs):
"""Construct a RandomGlimpse processor.
Args:
height (int): New height to which contents will be either cropped or scaled down to.
            width (int): New width to which contents will be either cropped or scaled down to.
crop_location (str): Enumeration specifying how the crop location is selected.
crop_probability (float): Probability at which a crop is performed.
            kwargs (dict): keyword arguments passed to parent class.
        Raises:
            ValueError: if ``crop_location`` is not a supported value, ``crop_probability`` is
                outside the range [0, 1], or ``height`` / ``width`` is not positive.
        """
super(RandomGlimpse, self).__init__(**kwargs)
if crop_location not in RandomGlimpse.CROP_LOCATIONS:
raise ValueError(
"RandomGlimpse.crop_location '{}' is not supported. Valid options: {}.".format(
crop_location, ", ".join(RandomGlimpse.CROP_LOCATIONS)
)
)
if crop_probability < 0.0 or crop_probability > 1.0:
raise ValueError(
"RandomGlimpse.crop_probability ({}) is not within the range [0, 1].".format(
crop_probability
)
)
if height <= 0:
raise ValueError(
"RandomGlimpse.height ({}) is not positive.".format(height)
)
if width <= 0:
raise ValueError("RandomGlimpse.width ({}) is not positive.".format(width))
self._height = height
self._width = width
self._crop_location = crop_location
self._crop_probability = crop_probability
def __repr__(self):
"""Return a string representation of the processor."""
return "RandomGlimpse(height={}, width={}, crop_location={}, crop_probability={})".format(
self._height, self._width, self._crop_location, self._crop_probability
)
def call(self, transform):
"""Return a Transform that either crops or scales, always producing same sized output.
Args:
transform (Transform): An input Transform instance to be processed.
Returns:
Transform: Final Transform instance with either cropping or scaling applied.
"""
if not isinstance(transform, Transform):
raise TypeError(
"Expecting an argument of type 'Transform', "
"given: {}.".format(type(transform).__name__)
)
input_height = transform.canvas_shape.height
input_width = transform.canvas_shape.width
batch_size = None
batch_shape = []
if transform.spatial_transform_matrix.shape.ndims == 3:
batch_size = tf.shape(input=transform.spatial_transform_matrix)[0]
batch_shape = [batch_size]
crop_probability = tf.random.uniform(batch_shape, minval=0.0, maxval=1.0)
should_crop = tf.less_equal(crop_probability, self._crop_probability)
glimpse_stm = tf.compat.v1.where(
should_crop,
self._crop(
input_height=input_height,
input_width=input_width,
batch_size=batch_size,
),
self._scale(
input_height=input_height,
input_width=input_width,
batch_size=batch_size,
),
)
processed_stm = tf.matmul(glimpse_stm, transform.spatial_transform_matrix)
return Transform(
canvas_shape=Canvas2D(height=self._height, width=self._width),
color_transform_matrix=transform.color_transform_matrix,
spatial_transform_matrix=processed_stm,
)
def _scale(self, input_width, input_height, batch_size):
"""Return a spatial transform matrix for scaling inputs to requested height and width."""
horizontal_ratio = input_width / self._width
vertical_ratio = input_height / self._height
stm = spatial.zoom_matrix(ratio=(horizontal_ratio, vertical_ratio))
if batch_size is not None:
stm = tf.broadcast_to(stm, [batch_size, 3, 3])
return stm
def _crop(self, input_height, input_width, batch_size):
"""Return a spatial transform matrix that crops a section of desired height and width."""
if self._crop_location == RandomGlimpse.RANDOM:
return self._random_crop(
input_height=input_height,
input_width=input_width,
batch_size=batch_size,
)
if self._crop_location == RandomGlimpse.CENTER:
return self._center_crop(
input_height=input_height,
input_width=input_width,
batch_size=batch_size,
)
raise ValueError("Unhandled crop location: '{}'.".format(self._crop_location))
def _random_crop(self, input_height, input_width, batch_size):
"""Return a STM that crops a random location contained within the input canvas."""
min_left_x = 0
max_left_x = input_width - self._width
if max_left_x < 0.0:
raise ValueError(
"Attempted to extract random crop ({}) wider than input width ({}).".format(
self._width, input_width
)
)
min_top_y = 0
max_top_y = input_height - self._height
if max_top_y < 0.0:
raise ValueError(
"Attempted to extract random crop ({}) taller than input height ({}).".format(
self._height, input_height
)
)
batch_shape = [] if batch_size is None else [batch_size]
left_x = tf.random.uniform(batch_shape, minval=min_left_x, maxval=max_left_x)
top_y = tf.random.uniform(batch_shape, minval=min_top_y, maxval=max_top_y)
return spatial.translation_matrix(x=left_x, y=top_y)
def _center_crop(self, input_height, input_width, batch_size):
"""Return a STM that crops a vertically and horizontally centered section of the canvas."""
horizontal_space = input_width - self._width
if horizontal_space < 0.0:
raise ValueError(
"Attempted to extract center crop ({}) wider than input width ({}).".format(
self._width, input_width
)
)
vertical_space = input_height - self._height
if vertical_space < 0.0:
raise ValueError(
"Attempted to extract center crop ({}) taller than input height ({}).".format(
self._height, input_height
)
)
left_x = horizontal_space // 2
top_y = vertical_space // 2
stm = spatial.translation_matrix(x=left_x, y=top_y)
if batch_size is not None:
stm = tf.broadcast_to(stm, [batch_size, 3, 3])
return stm
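# Illustrative usage sketch (not part of the original module): chaining RandomGlimpse onto
# an identity Transform to obtain a 100x100 crop-or-scale spatial transform matrix.
def _example_random_glimpse():
    transform = Transform(
        canvas_shape=Canvas2D(height=256, width=256),
        color_transform_matrix=tf.eye(4),
        spatial_transform_matrix=tf.eye(3),
    )
    glimpse = RandomGlimpse(
        height=100,
        width=100,
        crop_location=RandomGlimpse.RANDOM,
        crop_probability=0.5,
    )
    return glimpse(transform)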
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/random_glimpse.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RandomZoom processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from six.moves import mock
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object
from nvidia_tao_tf1.core.processors import RandomZoom
from nvidia_tao_tf1.core.processors.augment.spatial import translation_matrix, zoom_matrix
from nvidia_tao_tf1.core.types import Canvas2D, Transform
@pytest.mark.parametrize(
"probability, message",
[
(2, "RandomZoom.probability (2) is not within the range [0.0, 1.0]."),
(-1, "RandomZoom.probability (-1) is not within the range [0.0, 1.0]."),
],
)
def test_invalid_zoom_probability(probability, message):
"""Test RandomZoom processor constructor error handling on invalid probability."""
with pytest.raises(ValueError) as exc:
RandomZoom(0, 0, probability=probability)
assert str(exc.value) == message
@mock.patch("nvidia_tao_tf1.core.processors.augment.random_zoom.spatial.random_zoom_matrix")
def test_random_zoom_call(mocked_random_zoom_matrix):
"""Test RandomZoom processor call."""
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4),
spatial_transform_matrix=tf.eye(3),
)
mocked_random_zoom_matrix.return_value = tf.eye(3)
processor = RandomZoom(ratio_min=0.4, ratio_max=1.2, probability=1)
processor(transform)
mocked_random_zoom_matrix.assert_called_with(
ratio_min=0.4, ratio_max=1.2, width=10, height=12, batch_size=None
)
def test_random_zoom_call_with_invalid_input():
"""Test RandomZoom processor call error handling on invalid input types."""
# Calling RandomZoom with str should throw a TypeError.
with pytest.raises(TypeError):
RandomZoom(0, 0, 0)("Transform")
@mock.patch("nvidia_tao_tf1.core.processors.augment.spatial.tf.random.uniform")
@pytest.mark.parametrize(
"batch_size", [None, 5, tf.compat.v1.placeholder(dtype=tf.int32)]
)
def test_random_zoom(mocked_random_uniform, batch_size):
"""Test RandomZoom processor."""
batch_shape = [] if batch_size is None else [batch_size]
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4, batch_shape=batch_shape, dtype=tf.float32),
spatial_transform_matrix=tf.eye(3, batch_shape=batch_shape, dtype=tf.float32),
)
feed_dict = {}
if type(batch_size) == tf.Tensor:
feed_dict = {batch_size: 7}
rnd_prob = 0.0
rnd_ratio = 0.8
rnd_x = 15.0
rnd_y = 12.0
expected_ratio = rnd_ratio
expected_x = rnd_x
expected_y = rnd_y
if batch_size is not None:
# Generate a sequence of probabilities [0., 1., 0., 1., ...] so that every second
        # sample gets randomly transformed.
float_batch_size = tf.cast(batch_size, tf.float32)
rnd_prob = tf.math.floormod(
tf.linspace(0.0, float_batch_size - 1.0, batch_size), 2.0
)
# Generate a linearly interpolated sequences of ratio, and x and y translation values.
rnd_ratio = tf.linspace(0.4, 1.2, batch_size)
rnd_x = tf.linspace(1.0, 15.0, batch_size)
rnd_y = tf.linspace(-15.0, 20.0, batch_size)
# Zero out the samples that don't get transformed.
mask = 1.0 - rnd_prob
expected_x = rnd_x * mask
expected_y = rnd_y * mask
# Use ratio = 1.0 for the samples that don't get transformed.
expected_ratio = rnd_ratio * mask + rnd_prob
    # The first tf.random_uniform call is for deciding whether the zoom is applied,
# the second is for ratio, the third for x translation, the fourth for y translation.
mocked_random_uniform.side_effect = [rnd_prob, rnd_ratio, rnd_x, rnd_y]
processor = RandomZoom(ratio_min=0.4, ratio_max=1.2, probability=0.25)
stm = processor(transform)
scale_stm = zoom_matrix(ratio=expected_ratio)
translate_stm = translation_matrix(x=-expected_x, y=-expected_y)
expected_stm = tf.matmul(translate_stm, scale_stm)
if batch_size is None:
assert expected_stm.shape.ndims == 2
else:
assert expected_stm.shape.ndims == 3
stm, expected_stm = tf.compat.v1.Session().run(
[stm.spatial_transform_matrix, expected_stm], feed_dict=feed_dict
)
np.testing.assert_equal(stm, expected_stm)
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
processor = RandomZoom(ratio_min=0.4, ratio_max=1.2, probability=0.5)
processor_dict = processor.serialize()
deserialized_processor = deserialize_tao_object(processor_dict)
assert processor._ratio_min == deserialized_processor._ratio_min
assert processor._ratio_max == deserialized_processor._ratio_max
assert processor._probability == deserialized_processor._probability
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_random_zoom.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.core.processors.augment.spatial_matrices_3D import flip_matrix_3D
from nvidia_tao_tf1.core.processors.augment.spatial_matrices_3D import rotation_matrix_3D
from nvidia_tao_tf1.core.processors.augment.spatial_matrices_3D import scaling_matrix_3D
from nvidia_tao_tf1.core.processors.augment.spatial_matrices_3D import translation_matrix_3D
@pytest.mark.parametrize("rotations", [1, 2, 4, 9])
@pytest.mark.parametrize("order", ["X", "Y", "Z"])
def test_rotation_matrix_3D_single_axis(rotations, order):
"""Perform a full rotation (2*pi) in a few steps, and check it yields the identity matrix."""
x = np.pi * 2 / rotations
y = np.pi * 2 / rotations
z = np.pi * 2 / rotations
m = rotation_matrix_3D(x=x, y=y, z=z, order=order)
out = tf.eye(4, dtype=tf.float32)
for _ in range(rotations):
out = tf.matmul(out, m)
out_np, m_np = tf.compat.v1.Session().run([out, m])
# Check that our single-rotation matrix is different than the output.
if rotations > 1:
np.testing.assert_equal(np.any(np.not_equal(m_np, out_np)), True)
# Check that our full rotation yields the identity matrix.
expected = np.eye(4, dtype=np.float32)
np.testing.assert_allclose(
expected,
out_np,
atol=1e-4,
err_msg="Full rotation through "
"multiple steps did not result in the identity matrix.",
)
@pytest.mark.parametrize("x", [1.2523])
@pytest.mark.parametrize("y", [0.7452])
@pytest.mark.parametrize("z", [-2.156])
@pytest.mark.parametrize("order", ["XY", "YZ", "XZ", "ZYX", "YXZ"])
def test_rotation_matrix_3D_multiple_axes(x, y, z, order):
"""Rotate first in the given order and then in the opposite order with negated angles.
The result should be an identity matrix."""
m = rotation_matrix_3D(x=x, y=y, z=z, order=order)
out = tf.eye(4, dtype=tf.float32)
out = tf.matmul(out, m)
m2 = rotation_matrix_3D(x=-x, y=-y, z=-z, order=order[::-1])
out = tf.matmul(out, m2)
out_np, m_np = tf.compat.v1.Session().run([out, m])
# Check that our single-rotation matrix is different than the output.
np.testing.assert_equal(np.any(np.not_equal(m_np, out_np)), True)
# Check that our full rotation yields the identity matrix.
expected = np.eye(4, dtype=np.float32)
np.testing.assert_allclose(
expected,
out_np,
atol=1e-4,
err_msg="Full rotation through "
"two opposite steps did not result in the identity matrix.",
)
@pytest.mark.parametrize("x", [1.2523])
@pytest.mark.parametrize("y", [0.7452])
@pytest.mark.parametrize("z", [-2.156])
@pytest.mark.parametrize("order", ["XYZ", "ZYX", "YXZ"])
def test_rotation_matrix_3D_order(x, y, z, order):
"""Check that order of multiplication is correct by comparing to single step rotations."""
# Rotation matrix.
m = rotation_matrix_3D(x=x, y=y, z=z, order=order)
out = tf.eye(4, dtype=tf.float32)
out = tf.matmul(out, m)
# Rotation matrix with multiple single step rotations.
out2 = tf.eye(4, dtype=tf.float32)
for ax in order:
m2 = rotation_matrix_3D(x=x, y=y, z=z, order=ax)
out2 = tf.matmul(out2, m2)
out_np, out2_np = tf.compat.v1.Session().run([out, out2])
# Check that the two rotation matrices are identical.
np.testing.assert_allclose(
out_np,
out2_np,
atol=1e-4,
err_msg="Rotation matrix defined by "
"order is not same as the multiplication of individual matrices.",
)
@pytest.mark.parametrize("x", [-5.0, 3.0, -4.0])
@pytest.mark.parametrize("y", [-5.0, 3.0, -4.0])
@pytest.mark.parametrize("z", [-5.0, 3.0, -4.0])
def test_translation_matrix_3D(x, y, z):
"""Test translation by translating and inversely translating, to yield an identity matrix."""
m = translation_matrix_3D(x=x, y=y, z=z)
m_inv = translation_matrix_3D(x=-x, y=-y, z=-z)
out = tf.matmul(m, m_inv)
out_np, m_np, m_inv_np = tf.compat.v1.Session().run([out, m, m_inv])
# Check that our translation and its inverse translation are different
np.testing.assert_equal(np.any(np.not_equal(m_np, m_inv_np)), True)
# Check that our roundtrip yields the identity matrix.
expected = np.eye(4, dtype=np.float32)
np.testing.assert_array_equal(
expected,
out_np,
err_msg="Flip roundtrip did not result in the " "identity matrix.",
)
@pytest.mark.parametrize("x", [0.666, 1.0, 1.337])
@pytest.mark.parametrize("y", [0.666, 1.0, 1.337])
@pytest.mark.parametrize("z", [0.666, 1.0, 1.337])
def test_scaling_matrix_3D(x, y, z):
"""Test scaling and applying the inverse scaling to yield the identity matrix."""
m = scaling_matrix_3D(x=x, y=y, z=z)
m_inv = scaling_matrix_3D(x=1.0 / x, y=1.0 / y, z=1.0 / z)
out = tf.matmul(m, m_inv)
out_np, m_pos_np, m_neg_np = tf.compat.v1.Session().run([out, m, m_inv])
# Check that our translation and its inverse translation are different.
if x != 1.0 or y != 1.0 or z != 1.0:
np.testing.assert_equal(np.any(np.not_equal(m_pos_np, m_neg_np)), True)
# Check that our roundtrip yields the identity matrix.
expected = np.eye(4, dtype=np.float32)
np.testing.assert_allclose(
expected,
out_np,
atol=1e-5,
err_msg="Flip roundtrip did not result " "in the identity matrix.",
)
@pytest.mark.parametrize(
"x, y, z",
[
(True, False, False),
(False, True, False),
(False, False, True),
(True, True, False),
(False, True, True),
(True, False, True),
(True, True, True),
],
)
def test_flip_matrix_3D(x, y, z):
"""Test a double flip with the same matrix, as it should return the identity matrix."""
m = flip_matrix_3D(x=x, y=y, z=z)
out = tf.matmul(m, m)
out_np, m_np = tf.compat.v1.Session().run([out, m])
# Check that our single-flip matrix is different than the output.
np.testing.assert_equal(np.any(np.not_equal(m_np, out_np)), True)
# Check that our roundtrip yields the identity matrix.
expected = np.eye(4, dtype=np.float32)
np.testing.assert_array_equal(
expected,
out_np,
err_msg="Flip roundtrip did not result in the " "identity matrix.",
)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_spatial_matrices_3D.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RandomRotation processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from six.moves import mock
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object
from nvidia_tao_tf1.core.processors import RandomShear
from nvidia_tao_tf1.core.processors.augment.spatial import shear_matrix
from nvidia_tao_tf1.core.types import Canvas2D, Transform
@pytest.mark.parametrize(
"probability, message",
[
(-0.1, "RandomShear.probability (-0.1) is not within the range [0.0, 1.0]."),
(1.1, "RandomShear.probability (1.1) is not within the range [0.0, 1.0]."),
],
)
def test_raises_on_invalid_probability(probability, message):
with pytest.raises(ValueError) as exc:
RandomShear(max_ratio_x=0.1, max_ratio_y=0.1, probability=probability)
assert str(exc.value) == message
@pytest.mark.parametrize(
"max_ratio_x, message", [(-1, "RandomShear.max_ratio_x (-1) is less than 0.")]
)
def test_raises_on_invalid_max_ratio_x(max_ratio_x, message):
with pytest.raises(ValueError) as exc:
RandomShear(max_ratio_x=max_ratio_x, max_ratio_y=0.0, probability=0.5)
assert str(exc.value) == message
@pytest.mark.parametrize(
"max_ratio_y, message", [(-1, "RandomShear.max_ratio_y (-1) is less than 0.")]
)
def test_raises_on_invalid_max_ratio_y(max_ratio_y, message):
with pytest.raises(ValueError) as exc:
RandomShear(max_ratio_x=0.0, max_ratio_y=max_ratio_y, probability=0.5)
assert str(exc.value) == message
@mock.patch("nvidia_tao_tf1.core.processors.augment.random_translation.spatial.random_shear_matrix")
def test_random_shear_call(mocked_random_shear_matrix):
"""Test RandomShear processor call."""
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4),
spatial_transform_matrix=tf.eye(3),
)
mocked_random_shear_matrix.return_value = tf.eye(3)
processor = RandomShear(max_ratio_x=0.5, max_ratio_y=0.25, probability=1.0)
processor(transform)
mocked_random_shear_matrix.assert_called_with(
max_ratio_x=0.5, max_ratio_y=0.25, height=12, width=10, batch_size=None
)
def test_random_shear_call_with_invalid_input():
"""Test RandomShear processor call error handling on invalid input types."""
# Calling RandomShear with str should throw a TypeError.
with pytest.raises(TypeError):
RandomShear(0, 0, 0)("Transform")
@mock.patch("nvidia_tao_tf1.core.processors.augment.spatial.tf.random.uniform")
@pytest.mark.parametrize(
"batch_size", [None, 5, tf.compat.v1.placeholder(dtype=tf.int32)]
)
def test_random_shear(mocked_random_uniform, batch_size):
"""Test RandomShear processor."""
batch_shape = [] if batch_size is None else [batch_size]
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4, batch_shape=batch_shape, dtype=tf.float32),
spatial_transform_matrix=tf.eye(3, batch_shape=batch_shape, dtype=tf.float32),
)
feed_dict = {}
if type(batch_size) == tf.Tensor:
feed_dict = {batch_size: 7}
rnd_prob = 0.0
rnd_x = 0.5
rnd_y = 0.25
expected_x = rnd_x
expected_y = rnd_y
if batch_size is not None:
# Generate a sequence of probabilities [0., 1., 0., 1., ...] so that every second
        # sample gets randomly transformed.
float_batch_size = tf.cast(batch_size, tf.float32)
rnd_prob = tf.math.floormod(
tf.linspace(0.0, float_batch_size - 1.0, batch_size), 2.0
)
# Generate a linearly interpolated sequences of x and y translation values.
rnd_x = tf.linspace(-0.5, 0.5, batch_size)
rnd_y = tf.linspace(1.0, -1.0, batch_size)
# Zero out the samples that don't get transformed.
mask = 1.0 - rnd_prob
expected_x = rnd_x * mask
expected_y = rnd_y * mask
# The first tf.random_uniform call is for deciding whether shear is applied,
# the second is for x shear ratio, the third is for y shear ratio.
mocked_random_uniform.side_effect = [rnd_prob, rnd_x, rnd_y]
processor = RandomShear(max_ratio_x=1.0, max_ratio_y=1.0, probability=0.5)
stm = processor(transform)
expected_stm = shear_matrix(expected_x, expected_y, 10, 12)
if batch_size is None:
assert expected_stm.shape.ndims == 2
else:
assert expected_stm.shape.ndims == 3
stm, expected_stm = tf.compat.v1.Session().run(
[stm.spatial_transform_matrix, expected_stm], feed_dict=feed_dict
)
np.testing.assert_equal(stm, expected_stm)
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
processor = RandomShear(max_ratio_x=1.0, max_ratio_y=1.0, probability=0.5)
processor_dict = processor.serialize()
deserialized_processor = deserialize_tao_object(processor_dict)
assert processor._max_ratio_x == deserialized_processor._max_ratio_x
assert processor._max_ratio_y == deserialized_processor._max_ratio_y
assert processor._probability == deserialized_processor._probability
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_random_shear.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import errno
import os
import numpy as np
from PIL import Image
import pytest
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object
from nvidia_tao_tf1.core.processors import SpatialTransform
from nvidia_tao_tf1.core.processors.augment.spatial import PolygonTransform
from nvidia_tao_tf1.core.types import DataFormat
# Debug mode for saving generated images to disk.
debug_save_shape_images = False
device_list = ["/cpu", "/gpu"]
def _get_image_batch(batch_size, data_format=DataFormat.CHANNELS_LAST):
"""
Generate batch of real images.
The image used is a real image, because spatial translations (mainly rotations) are not
lossless, especially for pixels with random values. Real images are quite smooth and therefore
do not suffer much from this loss at all.
"""
test_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "pekka_256.png"
)
image = Image.open(test_file)
images_np = np.array(image).astype(np.float32) / 255.0
images_np = np.repeat([images_np], batch_size, axis=0)
if data_format == DataFormat.CHANNELS_FIRST:
images_np = np.transpose(images_np, (0, 3, 1, 2))
return images_np
def _apply_stm_on_images(
device,
images,
stms,
method,
shape=None,
background_value=0.0,
input_data_format=DataFormat.CHANNELS_LAST,
output_data_format=DataFormat.CHANNELS_LAST,
input_dtype=np.float32,
output_dtype=None,
):
with tf.device(device):
stm_op = SpatialTransform(
method=method,
background_value=background_value,
input_data_format=input_data_format,
output_data_format=output_data_format,
)
# Note: placeholders are needed to make TensorFlow respect device placement.
placeholder_images = tf.compat.v1.placeholder(dtype=input_dtype)
placeholder_stms = tf.compat.v1.placeholder(dtype=tf.float32)
fetches = stm_op(placeholder_images, placeholder_stms, shape)
sess = tf.compat.v1.Session()
return sess.run(
fetches, feed_dict={placeholder_images: images, placeholder_stms: stms}
)
def _apply_stm_on_default_image(device, stms, method, background_value=0.0):
input_np = _get_image_batch(stms.shape[0])
return _apply_stm_on_images(
device=device,
images=input_np,
stms=stms,
method=method,
background_value=background_value,
)
def _empty_canvas_checker(device, stms, background_value=0.42):
output_np = _apply_stm_on_default_image(
device=device, stms=stms, method="bilinear", background_value=background_value
)
blank_canvas = np.full(
output_np.shape, fill_value=background_value, dtype=np.float32
)
np.testing.assert_array_equal(
blank_canvas,
output_np,
err_msg="input array changed after "
"application of an identity spatial transformation matrix.",
)
@pytest.mark.parametrize("device", device_list)
@pytest.mark.parametrize(
"input_data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize(
"output_data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize("dtype", [np.uint8, np.float16, np.float32])
@pytest.mark.parametrize("method", ["nearest", "bilinear", "bicubic"])
def test_identity_spatial_transform(
device, method, input_data_format, output_data_format, dtype, batch_size=2
):
"""Test an identity spatial transformation, which should not significantly augment the image."""
input_np = _get_image_batch(batch_size, data_format=input_data_format)
# For uint8, convert to [0,255] range.
if dtype == np.uint8:
input_np *= 255.0
stms = np.repeat([np.eye(3)], batch_size, axis=0)
output_np = _apply_stm_on_images(
device=device,
images=input_np,
stms=stms,
method=method,
input_data_format=input_data_format,
output_data_format=output_data_format,
output_dtype=dtype,
)
if input_data_format != output_data_format:
if input_data_format == DataFormat.CHANNELS_FIRST:
axes = [0, 2, 3, 1]
else:
axes = [0, 3, 1, 2]
input_np = np.transpose(input_np, axes=axes)
input_np = input_np.astype(dtype=dtype)
if method in ("nearest", "bilinear"):
# Nearest and bilinear should result in exact values when input and output are fp32.
if dtype == np.float32:
np.testing.assert_array_equal(
input_np,
output_np,
err_msg="input array changed after "
"application of an identity spatial transformation matrix.",
)
else:
# When input and output dtypes don't match, allow more tolerance.
np.testing.assert_almost_equal(
input_np,
output_np,
decimal=3,
err_msg="input array changed after "
"application of an identity spatial transformation matrix.",
)
elif method == "bicubic":
# Bicubic does not result in exact values upon identity transform (expected).
mean_abs_diff = np.mean(np.abs(output_np - input_np))
if dtype == np.uint8:
mean_abs_diff /= 255.0
assert (
mean_abs_diff < 0.02
), "input array changed significantly after identity transform"
else:
raise ValueError("Unknown method %s" % method)
@pytest.mark.parametrize("device", device_list)
@pytest.mark.parametrize(
"input_data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize(
"output_data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize("dtype", [np.uint8, np.float16, np.float32])
@pytest.mark.parametrize("method", ["nearest", "bilinear", "bicubic"])
@pytest.mark.parametrize("zoom", ["1x", "2x"])
def test_zoom_transform(
device, method, input_data_format, output_data_format, dtype, zoom
):
"""Test zoom transformation against reference images."""
batch_size = 1
input_np = _get_image_batch(batch_size, data_format=input_data_format)
input_np *= 255.0
if zoom == "1x":
scale = 1.0
elif zoom == "2x":
scale = (
0.5
) # The matrix specifies an inverse mapping from output to input image.
stms = np.repeat(
[[[scale, 0.0, 0.0], [0.0, scale, 0.0], [0.0, 0.0, 1.0]]], batch_size, axis=0
)
image = _apply_stm_on_images(
device=device,
images=input_np,
stms=stms,
method=method,
input_data_format=input_data_format,
output_data_format=output_data_format,
output_dtype=dtype,
)
# Drop batch dimension.
image = image[0]
# Convert to HWC.
if output_data_format == DataFormat.CHANNELS_FIRST:
image = np.transpose(image, [1, 2, 0])
ref_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "test_spatial_transform_ref"
)
test_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "test_spatial_transform"
)
if debug_save_shape_images:
try:
os.mkdir(test_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
file_name = (
"test_zoom_transform_"
+ str(input_data_format)
+ "_"
+ str(output_data_format)
)
file_name += "_cpu" if device == "/cpu" else "_gpu"
if dtype == np.uint8:
file_name += "_uint8_"
elif dtype == np.float16:
file_name += "_fp16_"
else:
file_name += "_fp32_"
file_name += zoom + "_" + method + ".png"
debug_im = Image.fromarray(image.astype(np.uint8))
debug_im.save("%s/%s" % (test_dir, file_name))
# Load reference image.
file_name = "test_zoom_transform_" + zoom + "_" + method + ".png"
ref_image = Image.open("%s/%s" % (ref_dir, file_name))
ref_image = np.array(ref_image)
# Compare and assert that test images match reference.
# Note that there might be slight differences depending on whether the code
# is run on CPU or GPU, or between different GPUs, CUDA versions, TF versions,
# etc. We may need to change this assertion to allow some tolerance. Before
# doing that, please check the generated images to distinguish bugs from
# small variations.
    squared_diff = np.square(
        np.subtract(ref_image.astype(np.int32), image.astype(np.int32))
    )
assert np.sum(squared_diff) < 200
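

# Hedged illustration (not part of the original test-suite): the spatial transform matrices
# used throughout these tests follow a row-vector convention and, as noted in
# test_zoom_transform above, specify an inverse mapping from output pixel coordinates back
# to input pixel coordinates. The helper name below is illustrative only.
def _example_inverse_mapping_convention():
    # A diagonal scale of 0.5 means output pixel (x, y) samples the input at (0.5x, 0.5y),
    # which renders the image content at twice the size (a 2x zoom-in).
    stm = np.array([[0.5, 0.0, 0.0], [0.0, 0.5, 0.0], [0.0, 0.0, 1.0]], dtype=np.float32)
    output_xy1 = np.array([100.0, 60.0, 1.0], dtype=np.float32)
    input_xy1 = output_xy1 @ stm
    np.testing.assert_allclose(input_xy1, [50.0, 30.0, 1.0])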
_xy_offsets = [(-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (0, 1), (1, 1)]
@pytest.mark.parametrize("device", device_list)
@pytest.mark.parametrize("x, y", _xy_offsets)
def test_translate_out_of_canvas(device, x, y, batch_size=2):
"""Test translating the image outside of the canvas visible area."""
width, height = 256, 256
stms = np.repeat(
[[[1, 0, 0], [0, 1, 0], [x * width, y * height, 1]]], batch_size, axis=0
)
_empty_canvas_checker(device, stms)
@pytest.mark.parametrize("device", device_list)
@pytest.mark.parametrize("x, y", [(-1, -1), (1, -1), (-1, 1)])
def test_flip_out_of_canvas(device, x, y, batch_size=2):
"""Test flipping the image outside of the canvas visible area."""
stms = np.repeat([np.diag([x, y, 1])], batch_size, axis=0)
_empty_canvas_checker(device, stms)
@pytest.mark.parametrize("device", device_list)
@pytest.mark.parametrize(
"input_data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize(
"output_data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize("roundtrip", ["flip", "transpose"])
@pytest.mark.parametrize("method", ["nearest", "bilinear", "bicubic"])
def test_roundtrip(
device, roundtrip, method, input_data_format, output_data_format, batch_size=2
):
"""Test a that a xy-flip-move roundtrip results in the same image."""
input_np = _get_image_batch(batch_size, data_format=input_data_format)
if input_data_format == DataFormat.CHANNELS_LAST:
width, height = input_np.shape[2], input_np.shape[1]
else:
width, height = input_np.shape[3], input_np.shape[2]
if roundtrip == "flip":
stms = np.repeat(
[[[-1, 0, 0], [0, -1, 0], [width, height, 1]]], batch_size, axis=0
)
elif roundtrip == "transpose":
stms = np.repeat([[[0, 1, 0], [1, 0, 0], [0, 0, 1]]], batch_size, axis=0)
else:
raise ValueError("Unknown roundtrip %s" % roundtrip)
# First full flip and move
output_np = _apply_stm_on_images(
device=device,
images=input_np,
stms=stms,
method="bilinear",
input_data_format=input_data_format,
output_data_format=output_data_format,
)
# Check this one is significantly different
if input_data_format != output_data_format:
if input_data_format == DataFormat.CHANNELS_FIRST:
axes = [0, 2, 3, 1]
else:
axes = [0, 3, 1, 2]
check_input_np = np.transpose(input_np, axes=axes)
else:
check_input_np = input_np
mean_abs_diff = np.mean(np.abs(output_np - check_input_np))
    assert mean_abs_diff > 0.1, (
        "input array is not significantly different after %s." % roundtrip
    )
    # A second full flip and move should result in the same image as initially.
output_np = _apply_stm_on_images(
device=device,
images=output_np,
stms=stms,
method="bilinear",
input_data_format=output_data_format,
output_data_format=input_data_format,
)
if method in ("nearest", "bilinear"):
# Nearest and bilinear should result in exact values
np.testing.assert_array_equal(
input_np,
output_np,
err_msg="input array changed after " "%s roundtrip." % roundtrip,
)
elif method == "bicubic":
# bicubic does not result in exact values upon identity transform (expected)
mean_abs_diff = np.mean(np.abs(output_np - input_np))
assert mean_abs_diff < 0.02, (
"input array changed after %s roundtrip." % roundtrip
)
else:
raise ValueError("Unknown method %s" % method)
@pytest.mark.parametrize("device", device_list)
@pytest.mark.parametrize(
"data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
# Note: bicubic blurs the image so it's not tested here.
@pytest.mark.parametrize("method", ["nearest", "bilinear"])
@pytest.mark.parametrize("num_channels", [3, 1, 5])
def test_4x90_identity(device, data_format, method, num_channels, batch_size=2):
"""Test that 4 rotations of 90 degrees yields the same image."""
input_np = _get_image_batch(batch_size, data_format=data_format)
# Duplicate number of channels from three to six.
channels_dim = 1 if data_format == DataFormat.CHANNELS_FIRST else 3
input_np = np.concatenate([input_np, input_np * 0.5], axis=channels_dim)
# Slice to desired number of channels.
if data_format == DataFormat.CHANNELS_FIRST:
input_np = input_np[:, 0:num_channels, :, :]
input_height = input_np.shape[2]
else:
input_np = input_np[:, :, :, 0:num_channels]
input_height = input_np.shape[1]
theta = np.pi / 2
stms = np.repeat(
[
[
[np.cos(theta), np.sin(theta), 0],
[-np.sin(theta), np.cos(theta), 0],
[input_height, 0, 1],
]
],
batch_size,
axis=0,
)
output_np = input_np
for _ in range(4):
output_np = _apply_stm_on_images(
device=device,
images=output_np,
stms=stms,
method=method,
input_data_format=data_format,
output_data_format=data_format,
)
np.testing.assert_array_equal(
input_np,
output_np,
err_msg="input array changed after " "four 90 degree rotations.",
)
def test_bad_method(method="foo"):
"""Test whether unsupported interpolation methods will raise errors as expected."""
stms = np.repeat([np.eye(3)], 2, axis=0)
with pytest.raises(NotImplementedError):
_apply_stm_on_default_image("/cpu", stms, method)
shapes_test = [
((32, 32), (32, 32)), # same size
((32, 32), (64, 32)), # larger h
((32, 32), (32, 64)), # larger w
((32, 32), (64, 64)), # larger w, h
((32, 32), (16, 16)),
] # smaller
@pytest.mark.parametrize("device", device_list)
@pytest.mark.parametrize(
"data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize("shape, new_shape", shapes_test)
def test_different_shape(device, shape, new_shape, data_format):
"""Test that the canvas can be expanded through the provided shape argument."""
batch_size = 1
nchannels = 3
images = np.ones((batch_size,) + shape + (nchannels,), dtype=np.float32)
# Use NCHW images for channels_first tests.
if data_format == DataFormat.CHANNELS_FIRST:
images = np.transpose(images, (0, 3, 1, 2))
stms = np.repeat([np.eye(3, dtype=np.float32)], batch_size, axis=0)
out = _apply_stm_on_images(
device=device,
images=images,
stms=stms,
method="bilinear",
shape=new_shape,
input_data_format=data_format,
output_data_format=data_format,
)
# Expected shapes designed for NHWC, perform a transpose for channels_first cases.
if data_format == DataFormat.CHANNELS_FIRST:
out = np.transpose(out, (0, 2, 3, 1))
# Check the output shape is as intended
np.testing.assert_array_equal(new_shape, out.shape[1:3])
# Check that the input image sum is equal to its number of pixels * channels
assert images.sum() == (shape[0] * shape[1] * nchannels)
    # Check that the minimum shape dictates the number of active pixels * channels.
    # This works because the background is zeroed out, so it has no influence when the
    # canvas is larger.
min_shape = np.min([new_shape, shape], axis=0)
assert out.sum() == (min_shape[0] * min_shape[1] * nchannels)
@pytest.mark.parametrize("device", device_list)
@pytest.mark.parametrize(
"data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
def test_perspective_transform(device, data_format):
"""Test the perspective transform.
This is quite hard to test, so currently just testing the shapes are correct, and a
perspective 'roundtrip' can be performed.
"""
MAX_MABS_DIFF = 0.02
batch_size = 2
nchannels = 3
new_shape = (384, 384)
full_shape = (
(batch_size,) + new_shape + (nchannels,)
if data_format == DataFormat.CHANNELS_LAST
else (batch_size, nchannels) + new_shape
)
stm1 = np.array(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [128, 128, 1.0]], dtype=np.float32
)
stm2 = np.array(
[[1.0, 0.0, 0.001], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.float32
)
stm3 = np.array(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [-164, -164, 1.0]], dtype=np.float32
)
stm = np.matmul(stm2, stm1)
stm = np.matmul(stm3, stm)
stms = np.repeat([stm], batch_size, axis=0)
images = _get_image_batch(batch_size, data_format=data_format)
input_dtype = np.float32
with tf.device(device):
stm_op = SpatialTransform(
method="bilinear", background_value=0.0, data_format=data_format
)
# Note: placeholders are needed to make TensorFlow respect device placement.
placeholder_images = tf.compat.v1.placeholder(
shape=images.shape, dtype=input_dtype
)
placeholder_stms = tf.compat.v1.placeholder(dtype=tf.float32)
images_transformed = stm_op(
placeholder_images, placeholder_stms, shape=new_shape
)
# Test the new shape is correct.
np.testing.assert_array_equal(full_shape, images_transformed.shape)
# Perform the inverted stm, that should result in the original images.
    # Note that such a roundtrip is (obviously) lossy; it loses some fidelity.
inv_stm = np.linalg.inv(stms)
shape = (
images.shape[1:3]
if data_format == DataFormat.CHANNELS_LAST
else images.shape[2:]
)
placeholder_inv_stms = tf.compat.v1.placeholder(dtype=tf.float32)
fetches = stm_op(images_transformed, placeholder_inv_stms, shape=shape)
sess = tf.compat.v1.Session()
images_roundtrip = sess.run(
fetches,
feed_dict={
placeholder_images: images,
placeholder_stms: stms,
placeholder_inv_stms: inv_stm,
},
)
# Check that the roundtrip is the same as the input image.
mean_abs_diff = np.mean(np.abs(images_roundtrip - images))
assert mean_abs_diff < MAX_MABS_DIFF
def _make_dense_coordinates(batch_size, num_time_steps, num_shapes, num_vertices):
"""Construct a dense coordinate array."""
shape = [num_vertices, 2]
if num_shapes is not None:
shape = [num_shapes] + shape
if num_time_steps is not None:
shape = [num_time_steps] + shape
if batch_size is not None:
shape = [batch_size] + shape
return np.random.random_integers(low=0, high=9, size=shape).astype(np.float32)
def _make_sparse_coordinates(batch_size, num_time_steps, shape_dim):
"""Construct a sparse coordinate array."""
indices = []
# Construct 2D-5D indices depending on whether batch, time, and shape dimensions are
# present. Batch and time dimension sizes are fixed, but shape and vertices sizes
# vary. Some of the sizes are zero.
def make_vertices(prefix, num_vertices):
for v in range(num_vertices):
indices.append(prefix + [v, 0])
indices.append(prefix + [v, 1])
def make_shapes(prefix, num_shapes):
if shape_dim:
num_vertices_per_shape = np.random.permutation(num_shapes) * 2
for s, num_vertices in enumerate(num_vertices_per_shape):
make_vertices(prefix + [s], num_vertices)
else:
make_vertices(prefix, 4)
def make_time_steps(prefix, num_time_steps):
if num_time_steps is not None:
num_shapes_per_time_step = np.random.permutation(num_time_steps) * 2
for t, num_shapes in enumerate(num_shapes_per_time_step):
make_shapes(prefix + [t], num_shapes)
else:
make_shapes(prefix, 6)
if batch_size is not None:
for b in range(batch_size):
make_time_steps([b], num_time_steps)
else:
make_time_steps([], num_time_steps)
# Compute dense shape.
    dense_shape = np.amax(np.array(indices), axis=0)  # Max index along each axis.
    dense_shape += np.ones_like(dense_shape)  # Size along each axis = max index + 1.
# Values are random integers between 0 and 9.
values = np.random.random_integers(low=0, high=9, size=len(indices)).astype(
np.float32
)
return values, indices, dense_shape
@pytest.mark.parametrize("batch_size", [None, 5])
@pytest.mark.parametrize("num_time_steps", [None, 3])
@pytest.mark.parametrize("shape_dim", [False, True])
@pytest.mark.parametrize("sparse", [True, False])
def test_identity_coordinate_transform(batch_size, num_time_steps, shape_dim, sparse):
"""Identity coordinate transform test."""
np.random.seed(42)
# Dimensions = [Batch, Time, Shape, Vertex, Coordinate]
if sparse is False:
num_shapes = 6 if shape_dim else None
num_vertices = 4
coordinates = _make_dense_coordinates(
batch_size, num_time_steps, num_shapes, num_vertices
)
coordinates_tf = tf.constant(coordinates, dtype=tf.float32)
else:
values, indices, dense_shape = _make_sparse_coordinates(
batch_size, num_time_steps, shape_dim
)
coordinates_tf = tf.SparseTensor(
values=tf.constant(values),
indices=tf.constant(indices, dtype=tf.int64),
dense_shape=dense_shape,
)
# Construct processor.
op = PolygonTransform(invert_stm=True)
# Construct transformation matrix.
stm = np.eye(3, dtype=np.float32)
if batch_size is not None:
stm = np.tile(stm, [batch_size, 1, 1])
# Do the operation.
coordinates_transformed = op(polygons=coordinates_tf, stm=stm)
coordinates_transformed_np = tf.compat.v1.Session().run(coordinates_transformed)
if sparse:
np.testing.assert_array_equal(values, coordinates_transformed_np.values)
np.testing.assert_array_equal(indices, coordinates_transformed_np.indices)
np.testing.assert_array_equal(
dense_shape, coordinates_transformed_np.dense_shape
)
else:
np.testing.assert_array_equal(coordinates, coordinates_transformed_np)
@pytest.mark.parametrize("sparse", [False, True])
@pytest.mark.parametrize("perspective", [False, True])
def test_batch_transform(sparse, perspective):
"""Batch coordinate transform test."""
batch_size = 8
np.random.seed(42)
# Dimensions = [Batch, Vertex, Coordinate]
if sparse is False:
coordinates = _make_dense_coordinates(
batch_size, num_time_steps=None, num_shapes=None, num_vertices=4
)
coordinates_tf = tf.constant(coordinates, dtype=tf.float32)
else:
values, indices, dense_shape = _make_sparse_coordinates(
batch_size, num_time_steps=None, shape_dim=False
)
coordinates_tf = tf.SparseTensor(
values=tf.constant(values),
indices=tf.constant(indices, dtype=tf.int64),
dense_shape=dense_shape,
)
# Construct processor.
op = PolygonTransform(invert_stm=False)
# Construct transformation matrix.
stms = []
for b in range(batch_size):
stm = np.eye(3, dtype=np.float32)
stm[0][0] = float(b + 1) # Scale x coordinates by b+1.
if perspective:
stm[2][2] = 0.5 # Set z coordinate to 0.5.
stms.append(stm)
stms = tf.constant(np.stack(stms))
# Do the operation.
coordinates_transformed = op(polygons=coordinates_tf, stm=stms)
coordinates_transformed_np = tf.compat.v1.Session().run(coordinates_transformed)
if sparse:
np.testing.assert_array_equal(indices, coordinates_transformed_np.indices)
np.testing.assert_array_equal(
dense_shape, coordinates_transformed_np.dense_shape
)
assert values.shape == coordinates_transformed_np.values.shape
for i, index in enumerate(indices):
b = index[0]
is_x_coordinate = index[2] == 0
expected = values[i]
if is_x_coordinate:
expected *= float(b + 1)
if perspective:
expected *= 2.0 # Due to perspective divide by z coord (0.5).
assert coordinates_transformed_np.values[i] == expected
else:
assert coordinates.shape == coordinates_transformed_np.shape
for b in range(coordinates.shape[0]):
for v in range(coordinates.shape[1]):
expected_x = float(b + 1) * coordinates[b][v][0]
expected_y = coordinates[b][v][1]
if perspective:
expected_x *= 2.0
expected_y *= 2.0
assert coordinates_transformed_np[b][v][0] == expected_x
assert coordinates_transformed_np[b][v][1] == expected_y
@pytest.mark.parametrize("sparse", [True, False])
@pytest.mark.parametrize("batch_size", [None, 4])
@pytest.mark.parametrize("dense_shape", [(0, 0), (0, 2), (3, 0)])
def test_empty_coordinates(sparse, batch_size, dense_shape):
"""Test that empty coordinates tensor works."""
# Dimensions = [Batch, Vertices, Coordinates].
# SparseTensors can have pretty funky degeneracies, so test that any combination of
# zero length vertices and coords dimensions works.
if batch_size is not None:
dense_shape = (batch_size,) + dense_shape
if sparse:
# Indices has zero rows of data.
indices_shape = (0, len(dense_shape))
coordinates_tf = tf.SparseTensor(
values=tf.zeros(shape=[0], dtype=tf.float32),
indices=tf.zeros(shape=indices_shape, dtype=tf.int64),
dense_shape=dense_shape,
)
else:
coordinates_tf = tf.zeros(shape=dense_shape, dtype=tf.float32)
# Construct processor.
op = PolygonTransform(invert_stm=True)
# Construct transformation matrix.
stm = np.eye(3, dtype=np.float32)
if batch_size is not None:
stm = np.tile(stm, [batch_size, 1, 1])
# Do the operation.
coordinates_transformed = op(polygons=coordinates_tf, stm=stm)
coordinates_transformed_np = tf.compat.v1.Session().run(coordinates_transformed)
if sparse:
assert coordinates_transformed_np.values.shape == (0,)
assert coordinates_transformed_np.indices.shape == indices_shape
np.testing.assert_array_equal(
coordinates_transformed_np.dense_shape, dense_shape
)
else:
np.testing.assert_array_equal(coordinates_transformed_np.shape, dense_shape)
def test_error_checks():
"""Test error checks."""
# Both data_format and either input_data_format or output_data_format given.
with pytest.raises(ValueError):
SpatialTransform(
data_format=DataFormat.CHANNELS_FIRST,
input_data_format=DataFormat.CHANNELS_FIRST,
)
with pytest.raises(ValueError):
SpatialTransform(
data_format=DataFormat.CHANNELS_FIRST,
output_data_format=DataFormat.CHANNELS_FIRST,
)
# Input_data_format given, but output_data_format is missing.
with pytest.raises(ValueError):
SpatialTransform(input_data_format=DataFormat.CHANNELS_FIRST)
# Output_data_format given, but input_data_format is missing.
with pytest.raises(ValueError):
SpatialTransform(output_data_format=DataFormat.CHANNELS_FIRST)
# Invalid data format.
with pytest.raises(NotImplementedError):
SpatialTransform(data_format="weird_data_format")
# Unknown filtering method.
with pytest.raises(NotImplementedError):
SpatialTransform(method="unknown")
@pytest.mark.parametrize(
"input_np",
[
np.zeros([2, 1, 16, 12], np.int16), # Unsupported data type.
np.zeros([16, 12, 3], np.float32), # Too few dimensions.
np.zeros([2, 3, 16, 12, 3], np.float32), # Too many dimensions.
np.zeros(
[1, 3, 16, 12], np.float32
), # Number of images does not match number of stms.
],
)
def test_invalid_input_tensors(input_np):
"""Test invalid input tensors."""
with pytest.raises(Exception):
stm_op = SpatialTransform(data_format=DataFormat.CHANNELS_FIRST)
batch_size = 2
stms = np.repeat([np.eye(3)], batch_size, axis=0)
fetches = stm_op(tf.constant(input_np), tf.constant(stms, dtype=tf.float32))
sess = tf.compat.v1.Session()
sess.run(fetches)
@pytest.mark.parametrize(
"shape",
[
np.zeros([1], np.float32), # Too few entries.
np.zeros([3], np.float32), # Too many entries.
np.zeros([16, 12], np.float32), # Wrong rank.
],
)
def test_invalid_shape_tensors(shape):
"""Test invalid shape tensors."""
with pytest.raises(Exception):
stm_op = SpatialTransform(data_format=DataFormat.CHANNELS_FIRST)
batch_size = 2
stms = np.repeat([np.eye(3)], batch_size, axis=0)
fetches = stm_op(
tf.zeros([2, 3, 16, 12], dtype=tf.float32),
tf.constant(stms, dtype=tf.float32),
shape,
)
sess = tf.compat.v1.Session()
sess.run(fetches)
@pytest.mark.parametrize(
"stms",
[
np.zeros([2, 2, 3], np.float32), # Wrong number of rows.
np.zeros([2, 3, 4], np.float32), # Wrong number of columns.
np.zeros([2, 9], np.float32), # Wrong dimensionality.
np.zeros([1, 3, 3], np.float32), # Wrong batch size.
],
)
def test_invalid_stms(stms):
"""Test invalid stms."""
with pytest.raises(Exception):
input_np = np.zeros([2, 16, 12, 3], np.float32)
stm_op = SpatialTransform(data_format=DataFormat.CHANNELS_FIRST)
fetches = stm_op(tf.constant(input_np), tf.constant(stms, dtype=tf.float32))
sess = tf.compat.v1.Session()
sess.run(fetches)
@pytest.mark.parametrize(
"images, input_data_format, output_data_format, shape, expected_shape",
[
(
np.zeros([2, 3, 16, 12], np.float32),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_FIRST,
None,
[2, 3, 16, 12],
),
(
np.zeros([2, 4, 16, 12], np.float32),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_LAST,
None,
[2, 16, 12, 4],
),
(
np.zeros([2, 16, 12, 3], np.float32),
DataFormat.CHANNELS_LAST,
DataFormat.CHANNELS_LAST,
None,
[2, 16, 12, 3],
),
(
np.zeros([2, 16, 12, 1], np.float32),
DataFormat.CHANNELS_LAST,
DataFormat.CHANNELS_FIRST,
None,
[2, 1, 16, 12],
),
(
tf.zeros(shape=[2, 3, 16, 12], dtype=tf.uint8),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_LAST,
None,
[2, 16, 12, 3],
),
(
tf.compat.v1.placeholder(shape=[2, 3, 16, 12], dtype=tf.float16),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_FIRST,
None,
[2, 3, 16, 12],
),
# Cases where the shape is completely or partially unknown.
(
tf.compat.v1.placeholder(dtype=tf.float32),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_FIRST,
None,
[None, None, None, None],
),
(
tf.compat.v1.placeholder(shape=[2, 5, 16, None], dtype=tf.float16),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_FIRST,
None,
[2, 5, 16, None],
),
(
tf.compat.v1.placeholder(shape=[2, None, 16, 12], dtype=tf.uint8),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_LAST,
None,
[2, 16, 12, None],
),
(
tf.compat.v1.placeholder(shape=[None, 3, 16, 12], dtype=tf.uint8),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_LAST,
None,
[None, 16, 12, 3],
),
(
tf.compat.v1.placeholder(shape=[2, None, None, 3], dtype=tf.float32),
DataFormat.CHANNELS_LAST,
DataFormat.CHANNELS_FIRST,
None,
[2, 3, None, None],
),
(
tf.compat.v1.placeholder(shape=[None, None, None, None], dtype=tf.uint8),
DataFormat.CHANNELS_LAST,
DataFormat.CHANNELS_FIRST,
None,
[None, None, None, None],
),
# Cases where output shape is given.
(
np.zeros([3, 2, 16, 12], np.float32),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_FIRST,
[24, 18],
[3, 2, 24, 18],
),
(
tf.compat.v1.placeholder(shape=[2, 3, None, None], dtype=tf.float16),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_FIRST,
[24, 18],
[2, 3, 24, 18],
),
(
tf.compat.v1.placeholder(shape=[2, None, None, None], dtype=tf.float16),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_FIRST,
[24, 18],
[2, None, 24, 18],
),
(
tf.compat.v1.placeholder(shape=[None, None, None, None], dtype=tf.float16),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_FIRST,
[24, 18],
[None, None, 24, 18],
),
(
tf.compat.v1.placeholder(dtype=tf.float16),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_FIRST,
[24, 18],
[None, None, 24, 18],
),
(
tf.compat.v1.placeholder(dtype=tf.float16),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_FIRST,
[24, -1],
[None, None, 24, None],
),
(
np.zeros([2, 3, 16, 12], np.float32),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_FIRST,
tf.compat.v1.placeholder(shape=[1, 1], dtype=tf.int32),
[2, 3, None, None],
),
],
)
def test_spatial_transform_shape_inference(
images, input_data_format, output_data_format, shape, expected_shape
):
"""Test shape inference."""
stm_op = SpatialTransform(
input_data_format=input_data_format, output_data_format=output_data_format
)
output = stm_op(images, tf.constant(0.0, tf.float32), shape)
assert expected_shape == output.shape.as_list()
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
stm_op = SpatialTransform(
method="bilinear",
background_value=0.0,
input_data_format=DataFormat.CHANNELS_LAST,
output_data_format=DataFormat.CHANNELS_LAST,
)
stm_op_dict = stm_op.serialize()
deserialized_stm_op = deserialize_tao_object(stm_op_dict)
assert stm_op.method == deserialized_stm_op.method
assert stm_op.background_value == deserialized_stm_op.background_value
assert stm_op.output_data_format == deserialized_stm_op.output_data_format
assert stm_op.input_data_format == deserialized_stm_op.input_data_format
assert stm_op.output_dtype == deserialized_stm_op.output_dtype
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_spatial_transform.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RandomTranslation processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from six.moves import mock
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object
from nvidia_tao_tf1.core.processors import RandomTranslation
from nvidia_tao_tf1.core.processors.augment.spatial import translation_matrix
from nvidia_tao_tf1.core.types import Canvas2D, Transform
@pytest.mark.parametrize(
"probability, message",
[
(2, "RandomTranslation.probability (2) is not within the range [0.0, 1.0]."),
(-1, "RandomTranslation.probability (-1) is not within the range [0.0, 1.0]."),
],
)
def test_invalid_translation_probability(probability, message):
"""Test RandomTranslation processor constructor error handling on invalid probability."""
with pytest.raises(ValueError) as exc:
RandomTranslation(0, 0, probability=probability)
assert str(exc.value) == message
@mock.patch(
"nvidia_tao_tf1.core.processors.augment.random_translation.spatial.random_translation_matrix"
)
def test_random_translation_call(mocked_random_translation_matrix):
"""Test RandomTranslation processor call."""
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4),
spatial_transform_matrix=tf.eye(3),
)
mocked_random_translation_matrix.return_value = tf.eye(3)
processor = RandomTranslation(max_x=90, max_y=45, probability=1.0)
processor(transform)
mocked_random_translation_matrix.assert_called_with(
max_x=90, max_y=45, batch_size=None
)
def test_random_translation_call_with_invalid_input():
"""Test RandomTranslation processor call error handling on invalid input types."""
# Calling RandomTranslation with str should throw a TypeError.
with pytest.raises(TypeError):
RandomTranslation(0, 0, 0)("Transform")
@mock.patch("nvidia_tao_tf1.core.processors.augment.spatial.tf.random.uniform")
@pytest.mark.parametrize(
"batch_size", [None, 5, tf.compat.v1.placeholder(dtype=tf.int32)]
)
def test_random_translation(mocked_random_uniform, batch_size):
"""Test RandomTranslation processor."""
batch_shape = [] if batch_size is None else [batch_size]
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4, batch_shape=batch_shape, dtype=tf.float32),
spatial_transform_matrix=tf.eye(3, batch_shape=batch_shape, dtype=tf.float32),
)
feed_dict = {}
if type(batch_size) == tf.Tensor:
feed_dict = {batch_size: 7}
rnd_prob = 0.0
rnd_x = 15.0
rnd_y = 12.0
expected_x = rnd_x
expected_y = rnd_y
if batch_size is not None:
        # Generate a sequence of probabilities [0., 1., 0., 1., ...] so that every second
        # sample gets randomly transformed.
float_batch_size = tf.cast(batch_size, tf.float32)
rnd_prob = tf.math.floormod(
tf.linspace(0.0, float_batch_size - 1.0, batch_size), 2.0
)
# Generate a linearly interpolated sequences of x and y translation values.
rnd_x = tf.linspace(1.0, 15.0, batch_size)
rnd_y = tf.linspace(-15.0, 20.0, batch_size)
# Zero out the samples that don't get transformed.
mask = 1.0 - rnd_prob
expected_x = rnd_x * mask
expected_y = rnd_y * mask
# The first tf.random_uniform call is for deciding whether translation is applied,
# the second is for x translation, the third is for y translation.
mocked_random_uniform.side_effect = [rnd_prob, rnd_x, rnd_y]
processor = RandomTranslation(max_x=30, max_y=20, probability=0.5)
stm = processor(transform)
expected_stm = translation_matrix(x=expected_x, y=expected_y)
stm, expected_stm = tf.compat.v1.Session().run(
[stm.spatial_transform_matrix, expected_stm], feed_dict=feed_dict
)
np.testing.assert_equal(stm, expected_stm)
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
processor = RandomTranslation(max_x=30, max_y=20, probability=0.5)
processor_dict = processor.serialize()
deserialized_processor = deserialize_tao_object(processor_dict)
assert processor._max_x == deserialized_processor._max_x
assert processor._max_y == deserialized_processor._max_y
assert processor._probability == deserialized_processor._probability
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_random_translation.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RandomHueSaturation processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from six.moves import mock
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object
from nvidia_tao_tf1.core.processors import RandomHueSaturation
from nvidia_tao_tf1.core.processors.augment.color import hue_saturation_matrix
from nvidia_tao_tf1.core.types import Canvas2D, Transform
@pytest.mark.parametrize(
"hue, saturation, message",
[
(
-1,
0,
"RandomHueSaturation.hue_rotation_max (-1) "
"is not within the range [0.0, 360.0].",
),
(
361,
0,
"RandomHueSaturation.hue_rotation_max (361) "
"is not within the range [0.0, 360.0].",
),
(
0,
-1,
"RandomHueSaturation.saturation_shift_max (-1) "
"is not within the range [0.0, 1.0].",
),
(
0,
2,
"RandomHueSaturation.saturation_shift_max (2) "
"is not within the range [0.0, 1.0].",
),
],
)
def test_invalid_hue_saturation_values(hue, saturation, message):
"""Test RandomHueSaturation constructor error handling for invalid hue and saturation values."""
with pytest.raises(ValueError) as exc:
RandomHueSaturation(hue, saturation)
assert str(exc.value) == message
@pytest.mark.parametrize(
"batch_size", [None, 3, tf.compat.v1.placeholder(dtype=tf.int32)]
)
@pytest.mark.parametrize("hue", [0, 10, 20, 180, 360])
@pytest.mark.parametrize("saturation", [0.0, 0.2, 0.5, 1.0])
@mock.patch(
"nvidia_tao_tf1.core.processors.augment.random_contrast.color.tf.random.truncated_normal"
)
@mock.patch("nvidia_tao_tf1.core.processors.augment.random_contrast.color.tf.random.uniform")
def test_random_hue_saturation_call(
mocked_random_uniform, mocked_truncated_normal, batch_size, hue, saturation
):
"""Test RandomHueSaturation processor call."""
batch_shape = [] if batch_size is None else [batch_size]
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4, batch_shape=batch_shape, dtype=tf.float32),
spatial_transform_matrix=tf.eye(3, batch_shape=batch_shape, dtype=tf.float32),
)
feed_dict = {}
expected_batch_size = batch_size
if type(batch_size) == tf.Tensor:
expected_batch_size = 7
feed_dict = {batch_size: expected_batch_size}
# Fix all random function calls to return deterministic values for testing.
if batch_size is not None:
batched_hue = tf.linspace(float(hue), float(hue) + 45.0, batch_size)
batched_saturation = tf.linspace(saturation, saturation + 0.2, batch_size)
else:
batched_hue = tf.constant(hue, dtype=tf.float32)
batched_saturation = tf.constant(saturation, dtype=tf.float32)
mocked_truncated_normal.return_value = batched_hue
mocked_random_uniform.return_value = batched_saturation
processor = RandomHueSaturation(
hue_rotation_max=hue, saturation_shift_max=saturation
)
final_transform = processor(transform)
# Add mean saturation.
final_hue = batched_hue
final_saturation = 1.0 + batched_saturation
expected_ctm = hue_saturation_matrix(hue=final_hue, saturation=final_saturation)
if batch_size is None:
assert expected_ctm.shape.ndims == 2
else:
assert expected_ctm.shape.ndims == 3
ctm, expected_ctm = tf.compat.v1.Session().run(
[final_transform.color_transform_matrix, expected_ctm], feed_dict=feed_dict
)
np.testing.assert_equal(ctm, expected_ctm)
if batch_size is None:
mocked_truncated_normal.assert_called_with([], mean=0.0, stddev=hue / 2.0)
mocked_random_uniform.assert_called_with(
[], minval=-saturation, maxval=saturation
)
else:
mocked_truncated_normal.assert_called_once()
call_batch_shape = mocked_truncated_normal.call_args[0][0]
assert len(call_batch_shape) == 1
assert (
tf.compat.v1.Session().run(call_batch_shape[0], feed_dict=feed_dict)
== expected_batch_size
)
assert mocked_truncated_normal.call_args[1] == {
"mean": 0.0,
"stddev": hue / 2.0,
}
mocked_random_uniform.assert_called_once()
call_batch_shape = mocked_random_uniform.call_args[0][0]
assert len(call_batch_shape) == 1
assert (
tf.compat.v1.Session().run(call_batch_shape[0], feed_dict=feed_dict)
== expected_batch_size
)
assert mocked_random_uniform.call_args[1] == {
"minval": -saturation,
"maxval": saturation,
}
def test_random_hue_saturation_call_with_invalid_input():
"""Test RandomHueSaturation processor call error handling on invalid input types."""
# Calling RandomHueSaturation with str should throw a TypeError.
with pytest.raises(TypeError):
RandomHueSaturation(0, 0)("Transform")
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
processor = RandomHueSaturation(hue_rotation_max=10, saturation_shift_max=0.2)
processor_dict = processor.serialize()
deserialized_processor = deserialize_tao_object(processor_dict)
assert processor._hue_rotation_max == deserialized_processor._hue_rotation_max
assert (
processor._saturation_shift_max == deserialized_processor._saturation_shift_max
)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_random_hue_saturation.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus Color Processors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.processors import load_custom_tf_op
from nvidia_tao_tf1.core.processors.processors import Processor
from nvidia_tao_tf1.core.types import data_format as tao_data_format, DataFormat
class ColorTransform(Processor):
"""
ColorTransform class.
Args:
min_clip (float): Minimum color value after transformation.
max_clip (float): Maximum color value after transformation.
data_format (string): A string representing the dimension ordering of the input
images, must be one of 'channels_last' (NHWC) or 'channels_first' (NCHW). If
specified, input_data_format and output_data_format must be None.
input_data_format (string): Data format for input. If specified, data_format must be None,
and output_data_format must be given.
output_data_format (string): Data format for output. If specified, data_format must be
None, and input_data_format must be given.
output_dtype (dtype): Valid values are tf.uint8, tf.float16, tf.float32, None. If None,
image dtype is used. Note for uint8 output: Image data must be prescaled to [0,255]
range, and min_clip set to at least 0 and max_clip set to at most 255.
kwargs (dict): keyword arguments passed to parent class.
"""
@save_args
def __init__(
self,
min_clip=0.0,
max_clip=255.0,
data_format=None,
input_data_format=None,
output_data_format=None,
output_dtype=None,
**kwargs
):
"""__init__ method."""
if min_clip > max_clip:
raise ValueError(
"Min_clip={} is greater than max_clip={}.".format(min_clip, max_clip)
)
self.min_clip = min_clip
self.max_clip = max_clip
if data_format is not None and (
input_data_format is not None or output_data_format is not None
):
raise ValueError(
"When data_format is specified, input_data_format and "
"output_data_format must be None."
)
if input_data_format is not None and output_data_format is None:
raise ValueError(
"When input_data_format is specified, output_data_format "
"must be specified too."
)
if output_data_format is not None and input_data_format is None:
raise ValueError(
"When output_data_format is specified, input_data_format "
"must be specified too."
)
if (
data_format is None
and input_data_format is None
and output_data_format is None
):
data_format = tao_data_format()
if data_format is not None:
input_data_format = data_format
output_data_format = data_format
if input_data_format not in [
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_LAST,
]:
raise NotImplementedError(
"Data format not supported, must be 'channels_first' or "
"'channels_last', given {}.".format(input_data_format)
)
if output_data_format not in [
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_LAST,
]:
raise NotImplementedError(
"Data format not supported, must be 'channels_first' or "
"'channels_last', given {}.".format(output_data_format)
)
self.output_data_format = output_data_format
self.input_data_format = input_data_format
self.output_dtype = output_dtype
super(ColorTransform, self).__init__(**kwargs)
def call(self, images, ctms):
"""
Apply color transformation matrices on images.
For each pixel, computes (r,g,b,_) = (r,g,b,1) * color_matrix, where _ denotes that
the result is not used.
Args:
images: 4D tensor with shape `(batch_size, channels, height, width)`
if input_data_format='channels_first', or 4D tensor with shape
`(batch_size, height, width, channels)` if input_data_format='channels_last'.
Number of channels must be 3.
            ctms: 3D tensor with shape (batch_size, 4, 4), one color transformation
                matrix per image.
        Returns:
4D Tensor after ctm application, with shape `(batch_size, channels, height, width)`
if output_data_format='channels_first', or 4D tensor with shape
`(batch_size, height, width, channels)` if output_data_format='channels_last'.
"""
op = load_custom_tf_op("op_colortransform.so")
output_dtype = self.output_dtype
if output_dtype is None:
output_dtype = images.dtype
data_formats = {
DataFormat.CHANNELS_FIRST: "NCHW",
DataFormat.CHANNELS_LAST: "NHWC",
}
input_data_format = data_formats[self.input_data_format]
output_data_format = data_formats[self.output_data_format]
transformed_images = op.colortransform(
images,
ctms,
min_clip=self.min_clip,
max_clip=self.max_clip,
input_data_format=input_data_format,
output_data_format=output_data_format,
output_dtype=output_dtype,
)
return transformed_images
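

# Hedged sketch of the per-pixel math documented in ColorTransform.call above: each RGB pixel is
# extended with a constant 1 and multiplied (as a row vector) with the 4x4 ctm, and only the first
# three components of the result are kept. This mirrors the stated convention in plain NumPy; it
# is not the custom-op implementation itself, and the helper name is illustrative only.
def _example_ctm_pixel_math():
    ctm = np.eye(4, dtype=np.float32)
    ctm[3, :3] = [10.0, 20.0, 30.0]  # Per-channel brightness offsets live in the bottom row.
    pixel = np.array([100.0, 50.0, 25.0, 1.0], dtype=np.float32)
    r, g, b, _ = pixel @ ctm
    assert (r, g, b) == (110.0, 70.0, 55.0)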
def brightness_offset_matrix(offset):
"""
Form a per-channel brightness offset matrix for transforming RGB images.
Args:
offset: tensor(float32) offset per color channel (3,) or a batch of offsets (N, 3).
Returns:
fp32 tensor (4, 4), color transformation matrix if offset is not batched. If
offset is batched, (N, 4, 4).
"""
offset = tf.cast(tf.convert_to_tensor(value=offset), tf.float32)
if offset.shape.ndims == 2:
batch_shape = [tf.shape(input=offset)[0]]
one = tf.ones(shape=batch_shape + [1], dtype=tf.float32)
else:
batch_shape = None
one = tf.constant([1.0])
# Attach fourth column to offset: [N, 3] + [N, 1] = [N, 4]
offset = tf.concat([offset, one], axis=-1)
# [N, 4] -> [N, 1, 4]
offset = tf.expand_dims(offset, axis=-2)
# Construct a [N, 3, 4] identity matrix.
m = tf.eye(num_rows=3, num_columns=4, batch_shape=batch_shape)
# Attach offset row: [N, 3, 4] + [N, 1, 4] = [N, 4, 4]
return tf.concat([m, offset], axis=-2)
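

# Hedged usage sketch (helper name illustrative, not part of the original module): under the
# row-vector convention (r, g, b, 1) * M, an offset of (10, 0, -5) maps a pixel (r, g, b) to
# (r + 10, g, b - 5).
def _example_brightness_offset():
    m = brightness_offset_matrix([10.0, 0.0, -5.0])
    pixel = tf.constant([[100.0, 50.0, 25.0, 1.0]])
    shifted = tf.matmul(pixel, m)
    with tf.compat.v1.Session() as sess:
        np.testing.assert_allclose(sess.run(shifted), [[110.0, 50.0, 20.0, 1.0]])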
def contrast_matrix(contrast, center):
"""
Form a contrast transformation matrix for RGB images.
The contrast matrix introduces a scaling around a center point.
Args:
contrast: tensor(float32) contrast value (scalar or vector). A value of 0 will keep the
scaling untouched.
center: tensor(float32) center value. For 8-bit images this is commonly 127.5, and 0.5 for
images within the [0,1] range. Scalar or vector.
Returns:
fp32 tensor (4, 4), color transformation matrix if contrast and center are scalars. If
contrast and center are vectors, (len(contrast), 4, 4).
"""
contrast = tf.cast(tf.convert_to_tensor(value=contrast), tf.float32)
center = tf.cast(tf.convert_to_tensor(value=center), tf.float32)
zero = tf.zeros_like(contrast)
one = tf.ones_like(contrast)
scale = one + contrast
bias = -contrast * center
m = tf.stack(
[
scale,
zero,
zero,
zero,
zero,
scale,
zero,
zero,
zero,
zero,
scale,
zero,
bias,
bias,
bias,
one,
],
axis=-1,
)
shape = [-1, 4, 4] if contrast.shape.ndims == 1 else [4, 4]
return tf.reshape(m, shape)
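

# Hedged sketch (illustrative, not part of the original module): for scalar inputs the matrix
# above scales each channel by (1 + contrast) and adds a bias of -contrast * center, so a pixel
# value v maps to v + contrast * (v - center). With contrast=0 it reduces to the identity.
def _example_contrast_matrix_identity():
    m = contrast_matrix(contrast=0.0, center=0.5)
    with tf.compat.v1.Session() as sess:
        np.testing.assert_allclose(sess.run(m), np.eye(4, dtype=np.float32))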
def hue_saturation_matrix(hue, saturation):
"""
Form a color saturation and hue transformation matrix for RGB images.
Single matrix transform for both hue and saturation change. Matrix taken from [1].
Derived by transforming first to HSV, then do the modification, and transform back to RGB.
    Note that exact conversions between RGB and HSV are non-linear, but they can be approximated
    very well with these linear matrices. If one truly cared about color accuracy, one would need
    calibrated images with a known color profile, white point, etc.
Args:
hue: (float) hue rotation in degrees (scalar or vector). A value of 0.0 (modulo 360)
leaves the hue unchanged.
saturation: (float) saturation multiplier (scalar or vector). A value of 1.0 leaves the
saturation unchanged. A value of 0 removes all saturation from the image and makes
all channels equal in value.
Returns:
fp32 tensor (4, 4), color transformation matrix if hue and saturation are scalars. If
hue and saturation are vectors, (len(hue), 4, 4).
[1] See https://beesbuzz.biz/code/hsv_color_transforms.php, notice that our matrix convention
is transposed compared to this reference.
"""
hue = tf.cast(tf.convert_to_tensor(value=hue), tf.float32)
saturation = tf.cast(tf.convert_to_tensor(value=saturation), tf.float32)
const_mat = tf.constant(
[
[0.299, 0.299, 0.299, 0.0],
[0.587, 0.587, 0.587, 0.0],
[0.114, 0.114, 0.114, 0.0],
[0.0, 0.0, 0.0, 1.0],
],
dtype=tf.float32,
)
sch_mat = tf.constant(
[
[0.701, -0.299, -0.299, 0.0],
[-0.587, 0.413, -0.587, 0.0],
[-0.114, -0.114, 0.886, 0.0],
[0.0, 0.0, 0.0, 0.0],
],
dtype=tf.float32,
)
ssh_mat = tf.constant(
[
[0.168, -0.328, 1.25, 0.0],
[0.330, 0.035, -1.05, 0.0],
[-0.497, 0.292, -0.203, 0.0],
[0.0, 0.0, 0.0, 0.0],
],
dtype=tf.float32,
)
angle = hue * (np.pi / 180.0)
sch = saturation * tf.cos(angle)
ssh = saturation * tf.sin(angle)
if hue.shape.ndims == 1:
# Tile constant matrices to batch size.
batch_size = tf.shape(input=hue)[0]
const_mat = tf.tile(tf.expand_dims(const_mat, 0), [batch_size, 1, 1])
sch_mat = tf.tile(tf.expand_dims(sch_mat, 0), [batch_size, 1, 1])
ssh_mat = tf.tile(tf.expand_dims(ssh_mat, 0), [batch_size, 1, 1])
# Reshape to 3D for element-wise multiplication.
sch = tf.reshape(sch, [batch_size, 1, 1])
ssh = tf.reshape(ssh, [batch_size, 1, 1])
return const_mat + sch * sch_mat + ssh * ssh_mat
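

# Hedged sanity sketch (helper name illustrative, not part of the original module): a hue
# rotation of 0 degrees and a saturation multiplier of 1.0 should leave colors unchanged,
# i.e. the constant matrices above sum to the 4x4 identity.
def _example_hue_saturation_identity():
    m = hue_saturation_matrix(hue=0.0, saturation=1.0)
    with tf.compat.v1.Session() as sess:
        np.testing.assert_allclose(sess.run(m), np.eye(4, dtype=np.float32), atol=1e-6)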
def random_hue_saturation_matrix(
hue_rotation_max,
saturation_shift_max,
batch_size=None,
hue_center=0.0,
saturation_shift_min=None,
):
"""Get random hue-saturation transformation matrix.
Args:
        hue_rotation_max (float): The maximum rotation angle. This is used in a truncated
            normal distribution with a zero mean. The standard deviation of the distribution
            is half of this value, because samples further than twice the standard deviation
            from the mean are truncated. A value of 0 will not affect the matrix.
        saturation_shift_max (float): The random uniform shift that changes the
            saturation. This value gives the positive extent of the
            augmentation, where a value of 0 leaves the matrix unchanged.
            For example, a value of 1 can result in saturation values bounded
            between 0 (entirely desaturated) and 2 (twice the original saturation).
batch_size (int): If None, return a single matrix, else return a batch of matrices.
hue_center (float): The center of the distribution from which to select the hue.
saturation_shift_min (float): The minimum of the uniform distribution from which to
select the saturation shift. If unspecified, defaults to -saturation_shift_max.
Returns:
(tf.Tensor) If batch_size is None, a color transformation matrix of shape (4,4)
and type tf.float32. If batch_size is not None, a tensor of shape (batch_size,4,4).
"""
if saturation_shift_min is None:
saturation_shift_min = -saturation_shift_max
batch_shape = [] if batch_size is None else [batch_size]
hue = tf.random.truncated_normal(
batch_shape, mean=hue_center, stddev=hue_rotation_max / 2.0
)
    mean_saturation = 1  # A multiplier of 1 leaves saturation unchanged when saturation_shift_max=0.
saturation = mean_saturation + tf.random.uniform(
batch_shape, minval=saturation_shift_min, maxval=saturation_shift_max
)
return hue_saturation_matrix(hue, saturation)
def random_contrast_matrix(scale_max, center, batch_size=None, scale_center=0.0):
"""Create random contrast transformation matrix.
Args:
        scale_max (float): The scale (or slope) of the contrast, as rotated
            around the provided center point. The standard deviation of the underlying
            truncated normal distribution is half of this value; values further than
            twice the standard deviation from the mean are truncated.
            A value of 0 will not affect the matrix.
center (float): The center around which the contrast is 'tilted', this
is generally equal to the middle of the pixel value range. This value is
typically 0.5 with a maximum pixel value of 1, or 127.5 when the maximum
value is 255.
batch_size (int): If None, return a single matrix, else return a batch of matrices.
scale_center (float): The center of the normal distribution from which to choose the
contrast scale.
Returns:
(tf.Tensor) If batch_size is None, a color transformation matrix of shape (4,4)
and type tf.float32. If batch_size is not None, a tensor of shape (batch_size,4,4).
"""
batch_shape = [] if batch_size is None else [batch_size]
contrast = tf.random.truncated_normal(
batch_shape, mean=scale_center, stddev=scale_max / 2.0
)
return contrast_matrix(contrast, center)
def random_brightness_matrix(
brightness_scale_max,
brightness_uniform_across_channels=True,
batch_size=None,
brightness_center=0.0,
):
"""Create a random brightness transformation matrix.
Args:
        brightness_scale_max (float): The range of the brightness offsets. The standard
            deviation of the underlying truncated normal distribution is half of this value;
            values further than twice the standard deviation are truncated. A value of 0
            will not affect the matrix.
brightness_uniform_across_channels (bool): If true will apply the same brightness
shift to all channels. If false, will apply a different brightness shift to each
channel.
batch_size (int): If None, return a single matrix, else return a batch of matrices.
brightness_center (float): The center of the distribution of brightness offsets to
sample from.
Returns:
(tf.Tensor) If batch_size is None, a color transformation matrix of shape (4,4)
and type tf.float32. If batch_size is not None, a tensor of shape (batch_size,4,4).
"""
if not brightness_uniform_across_channels:
batch_shape = [3] if batch_size is None else [batch_size, 3]
brightness_offset = tf.random.truncated_normal(
batch_shape, mean=brightness_center, stddev=brightness_scale_max / 2.0
)
else:
batch_shape = [1] if batch_size is None else [batch_size, 1]
tile_shape = [3] if batch_size is None else [1, 3]
randoms = tf.random.truncated_normal(
batch_shape, mean=brightness_center, stddev=brightness_scale_max / 2.0
)
brightness_offset = tf.tile(randoms, tile_shape)
return brightness_offset_matrix(offset=brightness_offset)
def get_color_transformation_matrix(
ctm=None,
hue_rotation=0.0,
saturation_shift=0.0,
contrast_scale=0.0,
contrast_center=0.5,
brightness_scale=0,
batch_size=None,
):
"""
The color transformation matrix (ctm) generator used for a specific set of values.
This function creates a color transformation matrix (ctm) with the exact values given
to augment 3-channel color images.
Args:
ctm ((4,4) fp32 Tensor or None): A color transformation matrix.
If ``None`` (default), an identity matrix will be used.
hue_rotation (float): The rotation angle for the hue.
        saturation_shift (float): The amount to shift the saturation of the image.
contrast_scale (float): The scale (or slope of the contrast), as rotated
around the provided center point.
contrast_center (float): The center around which the contrast is 'tilted', this
is generally equal to the middle of the pixel value range. This value is
typically 0.5 with a maximum pixel value of 1, or 127.5 when the maximum
value is 255.
        brightness_scale (float): The brightness offset. A value of 0 (default)
            will not affect the matrix.
        batch_size (int): If None, return a single matrix, else return a batch of matrices.
    Returns:
        (tf.Tensor) If batch_size is None, a color transformation matrix of shape (4,4)
        and type tf.float32. If batch_size is not None, a tensor of shape (batch_size,4,4).
    """
return get_random_color_transformation_matrix(
ctm=ctm,
hue_center=hue_rotation,
saturation_shift_max=saturation_shift,
saturation_shift_min=saturation_shift,
contrast_scale_center=contrast_scale,
contrast_center=contrast_center,
        brightness_center=brightness_scale,
        batch_size=batch_size,
    )
def get_random_color_transformation_matrix(
ctm=None,
hue_rotation_max=0.0,
hue_center=0.0,
saturation_shift_max=0.0,
saturation_shift_min=None,
contrast_scale_max=0.0,
contrast_scale_center=0.0,
contrast_center=0.5,
brightness_scale_max=0,
brightness_center=0.0,
brightness_uniform_across_channels=True,
batch_size=None,
):
"""
The color transformation matrix (ctm) generator used for random augmentation.
This function creates a random color transformation matrix (ctm) to augment 3-channel
color images.
Args:
ctm ((4,4) fp32 Tensor or None): A random color transformation matrix.
If ``None`` (default), an identity matrix will be used.
        hue_rotation_max (float): The maximum rotation angle. This is used in a truncated
            normal distribution with a zero mean. The standard deviation of the distribution
            is half of this value, because samples further than twice the standard deviation
            from the mean are truncated. A value of 0 will not affect the matrix.
        saturation_shift_max (float): The random uniform shift that changes the
            saturation. This value gives the negative and positive extent of the
            augmentation, where a value of 0 leaves the matrix unchanged.
            For example, a value of 1 can result in saturation values bounded
            between 0 (entirely desaturated) and 2 (twice the original saturation).
        contrast_scale_max (float): The scale (or slope) of the contrast, as rotated
            around the provided center point. The standard deviation of the underlying
            truncated normal distribution is half of this value; values further than
            twice the standard deviation from the mean are truncated.
            A value of 0 (default) will not affect the matrix.
contrast_scale_center (float): The center of the distribution from which to choose
the contrast scale.
contrast_center (float): The center around which the contrast is 'tilted', this
is generally equal to the middle of the pixel value range. This value is
typically 0.5 with a maximum pixel value of 1, or 127.5 when the maximum
value is 255.
        brightness_scale_max (float): The range of the brightness offsets. The standard
            deviation of the underlying truncated normal distribution is half of this value;
            values further than twice the standard deviation are truncated.
            A value of 0 (default) will not affect the matrix.
brightness_uniform_across_channels (bool): If true will apply the same brightness
shift to all channels. If false, will apply a different brightness shift to each
channel.
batch_size (int): If None, return a single matrix, else return a batch of matrices.
Returns:
(tf.Tensor) If batch_size is None, a color transformation matrix of shape (4,4)
and type tf.float32. If batch_size is not None, a tensor of shape (batch_size,4,4).
"""
    # Initialize the color transformation matrix as a 4x4 identity matrix.
if ctm is None:
batch_shape = [] if batch_size is None else [batch_size]
ctm = tf.eye(4, batch_shape=batch_shape, dtype=tf.float32)
# Apply hue-saturation transformations.
hue_saturation = random_hue_saturation_matrix(
hue_rotation_max,
saturation_shift_max,
batch_size,
hue_center,
saturation_shift_min,
)
ctm = tf.matmul(ctm, hue_saturation)
# Apply contrast transformations.
contrast = random_contrast_matrix(
contrast_scale_max, contrast_center, batch_size, contrast_scale_center
)
ctm = tf.matmul(ctm, contrast)
# Apply brightness transformations.
brightness = random_brightness_matrix(
brightness_scale_max,
brightness_uniform_across_channels,
batch_size,
brightness_center,
)
ctm = tf.matmul(ctm, brightness)
return ctm
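

# Hedged usage sketch (illustrative, not part of the original module): the generators above can
# return either a single 4x4 matrix or one matrix per batch sample.
def _example_random_ctm_shapes():
    ctm_single = get_random_color_transformation_matrix(hue_rotation_max=10.0)
    ctm_batch = get_random_color_transformation_matrix(hue_rotation_max=10.0, batch_size=8)
    with tf.compat.v1.Session() as sess:
        assert sess.run(ctm_single).shape == (4, 4)
        assert sess.run(ctm_batch).shape == (8, 4, 4)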
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/color.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying random zoom transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.augment import spatial
from nvidia_tao_tf1.core.processors.processors import Processor
from nvidia_tao_tf1.core.types import Transform
class RandomZoom(Processor):
"""Random zoom transform."""
@save_args
def __init__(self, ratio_min=0.5, ratio_max=1.5, probability=0.5, **kwargs):
"""Construct a RandomZoom processor.
Args:
ratio_min (float): The lower bound of the zooming ratio's uniform distribution.
A zooming ratio of 1.0 will not affect the image, while values higher than 1 will
result in 'zooming out' (image gets rendered smaller than the canvas), and vice
versa for values below 1.0.
ratio_max (float): The upper bound of the zooming ratio's uniform distribution.
A zooming ratio of 1.0 will not affect the image, while values higher than 1 will
result in 'zooming out' (image gets rendered smaller than the canvas), and vice
versa for values below 1.0.
            probability (float): Probability at which the zoom transform is applied. Must be
                within the range [0.0, 1.0].
            kwargs (dict): keyword arguments passed to parent class.
"""
super(RandomZoom, self).__init__(**kwargs)
self._ratio_min = ratio_min
self._ratio_max = ratio_max
if probability < 0.0 or probability > 1.0:
raise ValueError(
"RandomZoom.probability ({}) is not within the range "
"[0.0, 1.0].".format(probability)
)
self._probability = probability
def __repr__(self):
"""Return a string representation of the processor."""
return "RandomZoom(ratio_min={}, ratio_max={}, probability={})".format(
self._ratio_min, self._ratio_max, self._probability
)
def call(self, transform):
"""Return a Transform whose spatial transformation matrix is perturbed at random.
Args:
transform (Transform): An input Transform instance to be processed.
Returns:
Transform: Final Transform instance with spatial transform matrix perturbed.
"""
if not isinstance(transform, Transform):
raise TypeError(
"Expecting an argument of type 'Transform', "
"given: {}.".format(type(transform).__name__)
)
batch_size = None
batch_shape = []
if transform.spatial_transform_matrix.shape.ndims == 3:
batch_size = tf.shape(input=transform.spatial_transform_matrix)[0]
batch_shape = [batch_size]
probability = tf.random.uniform(batch_shape, minval=0.0, maxval=1.0)
should_zoom = tf.less_equal(probability, self._probability)
stm_zoom = spatial.random_zoom_matrix(
ratio_min=self._ratio_min,
ratio_max=self._ratio_max,
width=transform.canvas_shape.width,
height=transform.canvas_shape.height,
batch_size=batch_size,
)
processed_stm = tf.compat.v1.where(
should_zoom,
tf.matmul(stm_zoom, transform.spatial_transform_matrix),
transform.spatial_transform_matrix,
)
return Transform(
canvas_shape=transform.canvas_shape,
color_transform_matrix=transform.color_transform_matrix,
spatial_transform_matrix=processed_stm,
)
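# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): chain a
# RandomZoom processor onto an identity Transform. The canvas size, zoom
# ratios and probability below are arbitrary assumptions for demonstration.
from nvidia_tao_tf1.core.types import Canvas2D  # Imported here only for the sketch.
example_transform = Transform(
canvas_shape=Canvas2D(height=240, width=320),
color_transform_matrix=tf.eye(4),
spatial_transform_matrix=tf.eye(3),
)
example_zoom = RandomZoom(ratio_min=0.8, ratio_max=1.2, probability=0.5)
example_output = example_zoom(example_transform)
# example_output.spatial_transform_matrix is either left unchanged or has a
# randomly sampled zoom matrix multiplied into it.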
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/random_zoom.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions used for tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange
import tensorflow as tf
def assert_truncated_normal_distribution(values, mean, stddev):
"""Check that ``values`` are from truncated normal distribution with ``mean`` and ``stddev``."""
# Check that bounds fit. Truncated normal distribution cuts off values further than
# two times stddev away from mean.
assert np.max(values) <= mean + stddev * 2.0
assert np.min(values) >= mean - stddev * 2.0
# Standard deviation of estimate of a mean is stddev/sqrt(num_samples), we're using three times
# the standard deviation as a tolerance threshold, just to be safe.
tolerance = stddev / np.sqrt(len(values)) * 3
# Check that the sample mean fits.
assert np.isclose(np.mean(values), mean, atol=tolerance)
def assert_uniform_distribution(values, min_bound, max_bound):
"""Check that ``values`` are from uniform distribution with ``max`` and ``min`` bounds."""
# Check that bounds fit.
assert np.max(values) <= max_bound
assert np.min(values) >= min_bound
# Calculate stddev of uniform distribution.
stddev = (max_bound - min_bound) / np.sqrt(12)
# Standard deviation of estimate of a mean is stddev/sqrt(num_samples), we're using four times
# the standard deviation as a tolerance threshold, just to be safe.
tolerance = stddev / np.sqrt(len(values)) * 4
# Check that sample mean fits.
assert np.isclose(np.mean(values), (max_bound + min_bound) / 2.0, atol=tolerance)
def assert_bernoulli_distribution(values, p):
"""Check that ``values`` are from bernoulli with ``p`` probability of event."""
# Calculate stddev of bernoulli distribution.
stddev = np.sqrt(p * (1 - p))
if isinstance(values[0], np.ndarray):
num_values = sum([len(v) for v in values])
else:
num_values = len(values)
# Standard deviation of estimate of a mean is stddev/sqrt(num_samples).
tolerance = stddev / np.sqrt(num_values) * 2.0
# Check that events are generated with correct probability.
event_count = np.array(values).sum()
event_probability = float(event_count) / float(num_values)
assert np.isclose(event_probability, p, atol=tolerance)
def sample_tensors(tensors, num_samples):
"""Sample ``num_samples`` values of list of tensors."""
samples = [list() for _ in xrange(len(tensors))]
with tf.compat.v1.Session() as sess:
for _ in xrange(num_samples):
new_samples = sess.run(tensors)
# This "zips" ``new_samples`` with ``samples``. ``new_samples`` is a list of values of
# length N. ``samples`` is a list of N lists. We're appending first item from
# ``new_samples`` to first list of ``samples``.
samples = [
old_samples + [new_sample]
for old_samples, new_sample in zip(samples, new_samples)
]
return samples
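# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): draw
# samples from two random tensors and verify their distributions with the
# helpers above. The sample count and distribution parameters are arbitrary
# assumptions for demonstration.
example_uniform = tf.random.uniform([], minval=0.0, maxval=1.0)
example_normal = tf.random.truncated_normal([], mean=0.0, stddev=0.5)
example_uniform_samples, example_normal_samples = sample_tensors(
[example_uniform, example_normal], 200
)
assert_uniform_distribution(example_uniform_samples, 0.0, 1.0)
assert_truncated_normal_distribution(example_normal_samples, mean=0.0, stddev=0.5)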
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/testing_utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Homogeneous 3D spatial transformation matrices."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def rotation_matrix_3D(x=0.0, y=0.0, z=0.0, order="ZYX"):
"""
3D rotation matrix for counter-clockwise rotation.
Rotations are performed in the order specified by the input argument `order`. E.g.,
order = 'ZYX' rotates first about the z-axis, then about the y-axis, and lastly about
the x-axis.
The output rotation matrix is defined such that it is to be used to post-multiply
row vectors (w*R_tot).
Args:
x: (0-D Tensor of type tf.float32) Rotation angle about x-axis in radians.
y: (0-D Tensor of type tf.float32) Rotation angle about y-axis in radians.
z: (0-D Tensor of type tf.float32) Rotation angle about z-axis in radians.
order: (str) Rotation order ['X', 'Y', 'Z' or their combination, e.g., 'ZYX'].
Returns:
R_tot: (4x4 Tensor) Rotation matrix in homogeneous coordinates.
"""
R = dict()
if "X" in order:
cos_x = tf.cos(x)
sin_x = tf.sin(x)
R["X"] = tf.stack(
[
1.0,
0.0,
0.0,
0.0,
0.0,
cos_x,
sin_x,
0.0,
0.0,
-sin_x,
cos_x,
0.0,
0.0,
0.0,
0.0,
1.0,
]
)
R["X"] = tf.reshape(R["X"], [4, 4])
if "Y" in order:
cos_y = tf.cos(y)
sin_y = tf.sin(y)
R["Y"] = tf.stack(
[
cos_y,
0.0,
-sin_y,
0.0,
0.0,
1.0,
0.0,
0.0,
sin_y,
0.0,
cos_y,
0.0,
0.0,
0.0,
0.0,
1.0,
]
)
R["Y"] = tf.reshape(R["Y"], [4, 4])
if "Z" in order:
cos_z = tf.cos(z)
sin_z = tf.sin(z)
R["Z"] = tf.stack(
[
cos_z,
sin_z,
0.0,
0.0,
-sin_z,
cos_z,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
1.0,
]
)
R["Z"] = tf.reshape(R["Z"], [4, 4])
# Overall rotation.
R_tot = tf.eye(4)
for ax in order:
if ax in R:
R_tot = tf.matmul(R_tot, R[ax])
else:
raise ValueError("Unsupported rotation order: %s" % order)
return R_tot
def translation_matrix_3D(x, y, z):
"""
Spatial transformation matrix for translation.
The output translation matrix is defined such that it is to be used to post-multiply
row vectors (w*T).
Args:
x: (0-D Tensor of type tf.float32) Translation in x-coordinate.
y: (0-D Tensor of type tf.float32) Translation in y-coordinate.
z: (0-D Tensor of type tf.float32) Translation in z-coordinate.
Returns:
T: (4x4 Tensor) Translation matrix in homogeneous coordinates.
"""
T = tf.stack(
[1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, x, y, z, 1.0]
)
T = tf.reshape(T, [4, 4])
return T
def scaling_matrix_3D(x, y, z):
"""
Spatial transformation matrix for scaling.
Args:
x: (0-D Tensor of type tf.float32) Scaling in x-coordinate.
y: (0-D Tensor of type tf.float32) Scaling in y-coordinate.
z: (0-D Tensor of type tf.float32) Scaling in z-coordinate.
Returns:
S: (4x4 Tensor) Scaling matrix in homogeneous coordinates.
"""
S = tf.stack(
[x, 0.0, 0.0, 0.0, 0.0, y, 0.0, 0.0, 0.0, 0.0, z, 0.0, 0.0, 0.0, 0.0, 1.0]
)
S = tf.reshape(S, [4, 4])
return S
def flip_matrix_3D(x, y, z):
"""
Spatial transformation matrix for flipping (=reflection) along the coordinate axes.
Args:
x: (0-D Tensor of type tf.bool) If x-coordinate should be flipped.
y: (0-D Tensor of type tf.bool) If y-coordinate should be flipped.
z: (0-D Tensor of type tf.bool) If z-coordinate should be flipped.
Returns:
F: (4x4 Tensor) Flipping matrix in homogeneous coordinates.
"""
x = tf.cast(x, tf.float32)
y = tf.cast(y, tf.float32)
z = tf.cast(z, tf.float32)
F = tf.stack(
[
1.0 - 2 * x,
0.0,
0.0,
0.0,
0.0,
1.0 - 2 * y,
0.0,
0.0,
0.0,
0.0,
1.0 - 2 * z,
0.0,
0.0,
0.0,
0.0,
1.0,
]
)
F = tf.reshape(F, [4, 4])
return F
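# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): compose
# the matrices above into a single homogeneous 3D transform and apply it to a
# point written as a homogeneous row vector, which these matrices are designed
# to post-multiply. All numeric values are arbitrary assumptions.
import numpy as np  # Used only by this sketch.
example_rotation = rotation_matrix_3D(z=np.pi / 2.0, order="Z")
example_scaling = scaling_matrix_3D(2.0, 2.0, 2.0)
example_translation = translation_matrix_3D(1.0, 0.0, 0.0)
# Rotation is applied first, then scaling, then translation.
example_stm = tf.matmul(tf.matmul(example_rotation, example_scaling), example_translation)
example_point = tf.constant([[1.0, 0.0, 0.0, 1.0]], dtype=tf.float32)
example_transformed_point = tf.matmul(example_point, example_stm)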
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/spatial_matrices_3D.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for PixelRemoval class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
from mock import MagicMock
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.core.processors.augment.pixel_removal import PixelRemoval
test_dir = "nvidia_tao_tf1/core/processors/augment/test_data/pixel_removal/"
test_inputs = [
(0.025, 5, "_0"),
# prob
(0.275, 5, "_1"),
(0.525, 5, "_2"),
# max_block
(0.025, 10, "_3"),
(0.025, 15, "_4"),
]
@pytest.mark.parametrize("width", [160])
@pytest.mark.parametrize("height", [240])
@pytest.mark.parametrize("prob,max_block,post", test_inputs)
def test_pixel_removal(width, height, prob, max_block, post, tmpdir):
"""Iterate through every augmentation and run it.
Load a correctly augmented image to compare against.
"""
transform = PixelRemoval(random=False)
mocked_noise = np.load(test_dir + "mocked_noise.npy")
transform._sample = MagicMock(return_value=mocked_noise)
sess = tf.compat.v1.Session()
test_img = cv2.imread(test_dir + "test_image.jpg")
test_img = cv2.resize(test_img, (width, height))
test_img = np.transpose(test_img, [2, 0, 1])
test_img = np.expand_dims(test_img, 0)
test_img = test_img.astype(float) / 255.0
aug_img = sess.run(transform(test_img, max_block=max_block, pct=prob))
filename = test_dir + "pixel_removal" + post + ".npy"
aug_img = np.squeeze(aug_img, 0)
aug_img = np.transpose(aug_img, [1, 2, 0])
aug_img = (aug_img * 255).astype(np.dtype("int8"))
target_img = np.load(filename)
np.testing.assert_allclose(aug_img, target_img, atol=1.0)
test_inputs_random = [
(0.00, 1, 0.0),
(0.00, 1, 1.0),
(0.275, 2, 0.5),
(0.525, 10, 0.7),
(1.00, 20, 1.0),
]
@pytest.mark.parametrize("width", [160])
@pytest.mark.parametrize("height", [240])
@pytest.mark.parametrize("pct,max_block, prob", test_inputs_random)
def test_random_pixel_removal(width, height, pct, max_block, prob, tmpdir):
"""Run random augmentations to make sure they work as expected."""
transform = PixelRemoval(random=True)
sess = tf.compat.v1.Session()
test_img = cv2.imread(test_dir + "test_image.jpg")
test_img = cv2.resize(test_img, (width, height))
test_img = np.transpose(test_img, [2, 0, 1])
test_img = np.expand_dims(test_img, 0)
test_img = test_img.astype(float) / 255.0
aug_img = sess.run(transform(test_img, max_block=max_block, pct=pct, prob=prob))
if prob == 0.0:
np.testing.assert_equal(aug_img, test_img)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_pixel_removal.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from six.moves import mock
import tensorflow as tf
from nvidia_tao_tf1.core.processors.augment.spatial import (
flip_matrix,
rotation_matrix,
shear_matrix,
)
from nvidia_tao_tf1.core.processors.augment.spatial import get_random_spatial_transformation_matrix
from nvidia_tao_tf1.core.processors.augment.spatial import (
random_flip_matrix,
random_rotation_matrix,
)
from nvidia_tao_tf1.core.processors.augment.spatial import (
random_shear_matrix,
random_translation_matrix,
)
from nvidia_tao_tf1.core.processors.augment.spatial import (
random_zoom_matrix,
translation_matrix,
zoom_matrix,
)
from nvidia_tao_tf1.core.processors.augment.testing_utils import (
assert_bernoulli_distribution,
assert_uniform_distribution,
sample_tensors,
)
from nvidia_tao_tf1.core.utils import set_random_seed
NUM_SAMPLES = 1000
_WIDTH = 255
_HEIGHT = 255
def tile_spatial_matrix(stm, batch_size):
"""Tile a spatial matrix batch_size number of times."""
if batch_size is None:
return stm
return np.tile(np.reshape(stm, [1, 3, 3]), [batch_size, 1, 1])
def identity_spatial_matrix(batch_size):
"""Return a batched identity matrix."""
stm = np.eye(3, dtype=np.float32)
return tile_spatial_matrix(stm, batch_size)
@pytest.mark.parametrize("batch_size", [None, 4])
@pytest.mark.parametrize(
"horizontal, vertical", [(True, True), (False, True), (True, False)]
)
@pytest.mark.parametrize("width, height", [(None, None), (_WIDTH, _HEIGHT)])
def test_flip_matrix(batch_size, horizontal, vertical, width, height):
"""Test a double flip with the same matrix, as it should return the identity matrix."""
h = -1.0 if horizontal else 1.0
v = -1.0 if vertical else 1.0
x_t = width if horizontal and width is not None else 0.0
y_t = height if vertical and height is not None else 0.0
expected_stm = np.array([[h, 0.0, 0.0], [0.0, v, 0.0], [x_t, y_t, 1.0]])
expected_stm = tile_spatial_matrix(expected_stm, batch_size)
if batch_size is not None:
horizontal = tf.constant(horizontal, shape=[batch_size], dtype=tf.bool)
vertical = tf.constant(vertical, shape=[batch_size], dtype=tf.bool)
m = flip_matrix(
horizontal=horizontal, vertical=vertical, width=width, height=height
)
out = tf.matmul(m, m)
out_np, m_np = tf.compat.v1.Session().run([out, m])
if batch_size is None:
assert m_np.shape == (3, 3)
else:
assert m_np.shape == (batch_size, 3, 3)
# Check that our single-flip matrix is different than the output
np.testing.assert_equal(np.any(np.not_equal(m_np, out_np)), True)
# Check that our roundtrip yields the identity matrix
expected = identity_spatial_matrix(batch_size)
np.testing.assert_array_equal(
expected,
out_np,
err_msg="Flip roundtrip did not result in the " "identity matrix.",
)
np.testing.assert_allclose(
expected_stm,
m_np,
atol=1e-4,
err_msg="Flip matrix does not match expected value.",
)
@pytest.mark.parametrize("batch_size", [None, 3])
@pytest.mark.parametrize("rotations", [1, 2, 4, 9])
@pytest.mark.parametrize("width, height", [(None, None), (_WIDTH, _HEIGHT)])
def test_rotation_matrix(batch_size, rotations, width, height):
"""Perform a full rotation (2*pi) in a few steps, and check it yields the identity matrix."""
theta = np.pi * 2 / rotations
# Compute expected rotation matrix.
cos_t = np.cos(theta)
sin_t = np.sin(theta)
if width is not None and height is not None:
x_t = height * sin_t / 2.0 - width * cos_t / 2.0 + width / 2.0
y_t = -1 * height * cos_t / 2.0 + height / 2.0 - width * sin_t / 2.0
else:
x_t = y_t = 0.0
expected_stm = np.array(
[[cos_t, sin_t, 0.0], [-sin_t, cos_t, 0.0], [x_t, y_t, 1.0]]
)
expected_stm = tile_spatial_matrix(expected_stm, batch_size)
if batch_size is not None:
theta = tf.constant(theta, shape=[batch_size], dtype=tf.float32)
m = rotation_matrix(theta, width=width, height=height)
batch_shape = [] if batch_size is None else [batch_size]
out = tf.eye(3, batch_shape=batch_shape, dtype=tf.float32)
for _ in range(rotations):
out = tf.matmul(out, m)
out_np, m_np = tf.compat.v1.Session().run([out, m])
if batch_size is None:
assert m_np.shape == (3, 3)
else:
assert m_np.shape == (batch_size, 3, 3)
np.testing.assert_allclose(
expected_stm,
m_np,
atol=1e-4,
err_msg="Rotation matrix does not match expected value.",
)
# Check that our single-rotation matrix is different than the output
if rotations > 1:
np.testing.assert_equal(np.any(np.not_equal(m_np, out_np)), True)
# Check that our full rotation yields the identity matrix
expected = identity_spatial_matrix(batch_size)
np.testing.assert_allclose(
expected,
out_np,
atol=1e-4,
err_msg="Full rotation through "
"multiple steps did not result in the identity matrix.",
)
@pytest.mark.parametrize("batch_size", [None, 3])
@pytest.mark.parametrize("x", [0.5, 0.0])
@pytest.mark.parametrize("y", [1.5, 1.0])
@pytest.mark.parametrize("width, height", [(None, None), (_WIDTH, _HEIGHT)])
def test_shear_matrix(batch_size, x, y, width, height):
"""Test shear transform by shearing and inversely shearing,
and check it yields the identity matrix."""
# Compute expected matrix.
if width and height:
x_t = width / 2.0 * y * x
y_t = height / 2.0 * x * y
else:
x_t, y_t = 0.0, 0.0
diag = 1.0 - x * y
expected_stm = np.array(
[[diag, 0.0, 0.0], [0.0, diag, 0.0], [x_t, y_t, 1.0]], dtype=np.float32
)
expected_stm = tile_spatial_matrix(expected_stm, batch_size)
if batch_size is not None:
x = tf.constant(x, shape=[batch_size], dtype=tf.float32)
y = tf.constant(y, shape=[batch_size], dtype=tf.float32)
m = shear_matrix(ratio_x=x, ratio_y=y, width=width, height=height)
m_inv = shear_matrix(ratio_x=-x, ratio_y=-y, width=width, height=height)
out = tf.matmul(m, m_inv)
out_np, m_np, m_inv_np = tf.compat.v1.Session().run([out, m, m_inv])
if batch_size is None:
assert m_np.shape == (3, 3)
else:
assert m_np.shape == (batch_size, 3, 3)
# Check that one single shear is different with the output.
np.testing.assert_equal(np.any(np.not_equal(m_np, m_inv_np)), True)
# Check that our shear transform generate expected matrix.
np.testing.assert_allclose(
expected_stm,
out_np,
atol=1e-4,
err_msg="Shear and unshear in "
"the same direction with same amount does not result in "
"an expected matrix.",
)
@pytest.mark.parametrize("batch_size", [None, 5])
@pytest.mark.parametrize("x", [-5, 3, -4])
@pytest.mark.parametrize("y", [-5, 3, -4])
def test_translation_matrix(batch_size, x, y):
"""Test translation by translating and inversely translating, to yield an identity matrix."""
expected_stm = np.array(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [x, y, 1.0]], dtype=np.float32
)
expected_stm = tile_spatial_matrix(expected_stm, batch_size)
if batch_size is not None:
x = tf.constant(x, shape=[batch_size], dtype=tf.float32)
y = tf.constant(y, shape=[batch_size], dtype=tf.float32)
m = translation_matrix(x=x, y=y)
m_inv = translation_matrix(x=-x, y=-y)
out = tf.matmul(m, m_inv)
out_np, m_np, m_inv_np = tf.compat.v1.Session().run([out, m, m_inv])
if batch_size is None:
assert m_np.shape == (3, 3)
else:
assert m_np.shape == (batch_size, 3, 3)
np.testing.assert_allclose(
expected_stm,
m_np,
atol=1e-4,
err_msg="Translation matrix does not match expected value.",
)
# Check that our translation and its inverse translation are different
np.testing.assert_equal(np.any(np.not_equal(m_np, m_inv_np)), True)
# Check that our roundtrip yields the identity matrix
expected = identity_spatial_matrix(batch_size)
np.testing.assert_array_equal(
expected,
out_np,
err_msg="Flip roundtrip did not result in the " "identity matrix.",
)
@pytest.mark.parametrize("batch_size", [None, 5])
@pytest.mark.parametrize("ratio", [0.666, 1.0, 1.337])
@pytest.mark.parametrize("width, height", [(None, None), (_WIDTH, _HEIGHT)])
def test_zoom_matrix(batch_size, ratio, width, height):
"""Test zooming in and applying the inverse zoom to yield the identity matrix."""
# Compute expected zoom matrix.
r_x = ratio
r_y = ratio
if width is not None and height is not None:
x_t = (width - width * r_x) * 0.5
y_t = (height - height * r_y) * 0.5
else:
x_t = y_t = 0.0
expected_stm = np.array(
[[r_x, 0.0, 0.0], [0.0, r_y, 0.0], [x_t, y_t, 1.0]], dtype=np.float32
)
expected_stm = tile_spatial_matrix(expected_stm, batch_size)
expect_difference = ratio != 1.0
if batch_size is not None:
ratio = tf.constant(ratio, shape=[batch_size], dtype=tf.float32)
m = zoom_matrix(ratio=ratio, width=width, height=height)
m_inv = zoom_matrix(ratio=1.0 / ratio, width=width, height=height)
out = tf.matmul(m, m_inv)
out_np, m_pos_np, m_neg_np = tf.compat.v1.Session().run([out, m, m_inv])
if batch_size is None:
assert m_pos_np.shape == (3, 3)
else:
assert m_pos_np.shape == (batch_size, 3, 3)
np.testing.assert_allclose(
expected_stm,
m_pos_np,
atol=1e-4,
err_msg="Zoom matrix does not match expected value.",
)
# Check that our translation and its inverse translation are different
if expect_difference:
np.testing.assert_equal(np.any(np.not_equal(m_pos_np, m_neg_np)), True)
# Check that our roundtrip yields the identity matrix
expected = identity_spatial_matrix(batch_size)
np.testing.assert_allclose(
expected,
out_np,
atol=1e-5,
err_msg="Flip roundtrip did not result " "in the identity matrix.",
)
@mock.patch(
"nvidia_tao_tf1.core.processors.augment.spatial.flip_matrix",
side_effect=lambda horizontal, vertical, width, height: horizontal,
)
@pytest.mark.parametrize("batch_size", [None, 3])
@pytest.mark.parametrize("flip_lr_prob", [0.0, 1.0, 0.5])
def test_random_flip_matrix_horizontal(patched, batch_size, flip_lr_prob):
"""Test that random_flip_matrix produces correct distributions."""
set_random_seed(42)
flip_tensor = random_flip_matrix(
flip_lr_prob, 0.0, _WIDTH, _HEIGHT, batch_size=batch_size
)
flips = sample_tensors([flip_tensor], NUM_SAMPLES)
assert_bernoulli_distribution(flips[0], p=flip_lr_prob)
@mock.patch(
"nvidia_tao_tf1.core.processors.augment.spatial.flip_matrix",
side_effect=lambda horizontal, vertical, width, height: vertical,
)
@pytest.mark.parametrize("batch_size", [None, 3])
@pytest.mark.parametrize("flip_tb_prob", [1.0, 0.5])
def test_random_flip_matrix_vertical(patched, batch_size, flip_tb_prob):
"""Test that random_flip_matrix produces correct distributions."""
# Using a different random seed because 42 generates numbers that fails this test.
set_random_seed(40)
flip_tensor = random_flip_matrix(
0.0, flip_tb_prob, _WIDTH, _HEIGHT, batch_size=batch_size
)
flips = sample_tensors([flip_tensor], NUM_SAMPLES)
assert_bernoulli_distribution(flips[0], p=flip_tb_prob)
@mock.patch(
"nvidia_tao_tf1.core.processors.augment.spatial.translation_matrix",
side_effect=lambda x, y: (x, y),
)
@pytest.mark.parametrize("batch_size", [None, 4])
@pytest.mark.parametrize(("translate_max_x", "translate_max_y"), [(0, 0), (16, 16)])
@pytest.mark.parametrize(
("translate_min_x", "translate_min_y"), [(-16, -16), (0, 0), (None, None)]
)
def test_random_translation_matrix(
patched,
batch_size,
translate_max_x,
translate_max_y,
translate_min_x,
translate_min_y,
):
"""Test that random_translation_matrix produces correct distributions."""
set_random_seed(42)
translation_tensors = random_translation_matrix(
translate_max_x, translate_max_y, batch_size, translate_min_x, translate_min_y
)
translate_xs, translate_ys = sample_tensors(translation_tensors, NUM_SAMPLES)
if translate_min_x is None:
translate_min_x = -translate_max_x
if translate_min_y is None:
translate_min_y = -translate_max_y
assert_uniform_distribution(translate_xs, translate_min_x, translate_max_x)
assert_uniform_distribution(translate_ys, translate_min_y, translate_max_y)
@mock.patch(
"nvidia_tao_tf1.core.processors.augment.spatial.shear_matrix",
side_effect=lambda x, y, w, h: (x, y),
)
@pytest.mark.parametrize("batch_size", [None, 4])
@pytest.mark.parametrize(("max_ratio_x", "max_ratio_y"), [(0, 0), (0.2, 0.5)])
@pytest.mark.parametrize(
("min_ratio_x", "min_ratio_y"), [(-0.5, -0.2), (0, 0), (None, None)]
)
def test_random_shear_matrix(
patched, batch_size, max_ratio_x, max_ratio_y, min_ratio_x, min_ratio_y
):
"""Test that random_shear_matrix produces correct distributions."""
set_random_seed(42)
shear_tensors = random_shear_matrix(
max_ratio_x,
max_ratio_y,
_WIDTH,
_HEIGHT,
batch_size=batch_size,
min_ratio_x=min_ratio_x,
min_ratio_y=min_ratio_y,
)
# Sample 2x the regular amount to make sure the test passes.
shear_xs, shear_ys = sample_tensors(shear_tensors, NUM_SAMPLES * 2)
if min_ratio_x is None:
min_ratio_x = -max_ratio_x
if min_ratio_y is None:
min_ratio_y = -max_ratio_y
assert_uniform_distribution(shear_xs, min_ratio_x, max_ratio_x)
assert_uniform_distribution(shear_ys, min_ratio_y, max_ratio_y)
@mock.patch(
"nvidia_tao_tf1.core.processors.augment.spatial.rotation_matrix", side_effect=lambda a, w, h: a
)
@pytest.mark.parametrize("batch_size", [None, 4])
@pytest.mark.parametrize("rotate_rad_max", [0.0, 1.56, 3.1415])
@pytest.mark.parametrize("rotate_rad_min", [0.0, -1.50, -3.0, None])
def test_random_rotation_matrix(patched, batch_size, rotate_rad_max, rotate_rad_min):
"""Test that random_rotation_matrix produces correct distributions."""
set_random_seed(42)
rotation_tensor = random_rotation_matrix(
rotate_rad_max,
_WIDTH,
_HEIGHT,
batch_size=batch_size,
rotate_rad_min=rotate_rad_min,
)
rotations = sample_tensors([rotation_tensor], NUM_SAMPLES)
if rotate_rad_min is None:
rotate_rad_min = -rotate_rad_max
assert_uniform_distribution(rotations, rotate_rad_min, rotate_rad_max)
@mock.patch(
"nvidia_tao_tf1.core.processors.augment.spatial.zoom_matrix", side_effect=lambda ratio: ratio
)
@mock.patch(
"nvidia_tao_tf1.core.processors.augment.spatial.translation_matrix",
side_effect=lambda x, y: (x, y),
)
@mock.patch("tensorflow.matmul", side_effect=lambda x, y: (x[0], x[1], y))
@pytest.mark.parametrize("batch_size", [None, 4])
@pytest.mark.parametrize(
"zoom_ratio_min, zoom_ratio_max", [(0.5, 0.8), (1.0, 1.0), (1.2, 1.5), (0.5, 1.5)]
)
def test_random_zoom_matrix(
patched_zoom,
patched_translation,
patched_Matmul,
batch_size,
zoom_ratio_min,
zoom_ratio_max,
):
"""Test that random_zoom_matrix produces correct distributions."""
set_random_seed(42)
tensors = random_zoom_matrix(
zoom_ratio_min, zoom_ratio_max, _WIDTH, _HEIGHT, batch_size=batch_size
)
translate_xs, translate_ys, scales = sample_tensors(tensors, NUM_SAMPLES)
assert_uniform_distribution(scales, zoom_ratio_min, zoom_ratio_max)
# Check that translation values are within boundaries. Note that translation isn't sampled from
# distribution with constant min/max parameters, but distribution with min/max bounds varying
# based on zoom ratio. This means that we need to find the maximum bound for every zoom ratio.
# Further complications arise from a fact that the max value of distribution can be negative
# when zoom_ratio < 1.0. To handle this, we're working with absolute value of both bounds and
# sampled translation values.
max_x_lower_bound = _WIDTH - (_WIDTH / zoom_ratio_min)
max_x_upper_bound = _WIDTH - (_WIDTH / zoom_ratio_max)
# This is maximum possible absolute value of translation.
max_x = np.maximum(np.abs(max_x_lower_bound), np.abs(max_x_upper_bound))
assert np.max(np.abs(translate_xs)) <= max_x
assert np.min(np.abs(translate_xs)) >= 0
max_y_lower_bound = _HEIGHT - (_HEIGHT / zoom_ratio_min)
max_y_upper_bound = _HEIGHT - (_HEIGHT / zoom_ratio_max)
max_y = np.maximum(np.abs(max_y_lower_bound), np.abs(max_y_upper_bound))
assert np.max(np.abs(translate_ys)) <= max_y
assert np.min(np.abs(translate_ys)) >= 0
@pytest.mark.parametrize("batch_size", [None, 4])
@pytest.mark.parametrize("width, height, flip_lr_prob", [(1, 1, 0), [1, 1, 1]])
def test_get_random_spatial_transformation_matrix(
batch_size, width, height, flip_lr_prob
):
"""
Test generate random spatial transform matrix.
"""
set_random_seed(42)
stm = get_random_spatial_transformation_matrix(
width=width,
height=height,
flip_lr_prob=flip_lr_prob,
translate_max_x=0,
translate_max_y=0,
zoom_ratio_min=1.0,
zoom_ratio_max=1.0,
rotate_rad_max=0.0,
shear_max_ratio_x=0.0,
shear_max_ratio_y=0.0,
batch_size=batch_size,
)
stm_np = tf.compat.v1.Session().run(stm)
if flip_lr_prob:
stm_ref = np.array([[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
stm_ref = tile_spatial_matrix(stm_ref, batch_size)
np.testing.assert_allclose(stm_ref, stm_np)
else:
stm_ref = identity_spatial_matrix(batch_size)
np.testing.assert_array_equal(stm_ref, stm_np)
def test_no_op_spatial_transform():
"""Tests that supplying no kwargs results in a no-op spatial transformation matrix."""
height, width = np.random.randint(1000, size=2)
stm = get_random_spatial_transformation_matrix(width, height)
stm_np = tf.compat.v1.Session().run(stm)
np.testing.assert_equal(
stm_np,
np.eye(3),
verbose=True,
err_msg="Default spatial transformation matrix is not the identity matrix.",
)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_spatial_matrices.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying random contrast transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.augment import color
from nvidia_tao_tf1.core.processors.processors import Processor
from nvidia_tao_tf1.core.types import Transform
class RandomContrast(Processor):
"""Random contrast transform."""
@save_args
def __init__(self, scale_max, center, **kwargs):
"""Construct a RandomContrast processor.
Args:
scale_max (float): The scale (or slope) of the contrast, as rotated
around the provided center point. This value is half of the standard
deviation, where values of twice the standard deviation are truncated.
A value of 0 will not affect the matrix.
center (float): The center around which the contrast is 'tilted'; this
is generally equal to the middle of the pixel value range. This value is
typically 0.5 with a maximum pixel value of 1, or 127.5 when the maximum
value is 255.
kwargs (dict): keyword arguments passed to parent class.
"""
super(RandomContrast, self).__init__(**kwargs)
self._scale_max = scale_max
self._center = center
def __repr__(self):
"""Return a string representation of the processor."""
return "RandomContrast(scale_max={}, center={})".format(
self._scale_max, self._center
)
def call(self, transform):
"""Return a Transform whose color transformation matrix is perturbed at random.
Args:
transform (Transform): An input Transform instance to be processed.
Returns:
Transform: Final Transform instance with color transform matrix perturbed.
"""
if not isinstance(transform, Transform):
raise TypeError(
"Expecting an argument of type 'Transform', "
"given: {}.".format(type(transform).__name__)
)
batch_size = None
if transform.color_transform_matrix.shape.ndims == 3:
batch_size = tf.shape(input=transform.color_transform_matrix)[0]
ctm_contrast = color.random_contrast_matrix(
scale_max=self._scale_max, center=self._center, batch_size=batch_size
)
processed_ctm = tf.matmul(ctm_contrast, transform.color_transform_matrix)
return Transform(
canvas_shape=transform.canvas_shape,
color_transform_matrix=processed_ctm,
spatial_transform_matrix=transform.spatial_transform_matrix,
)
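# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): a random
# contrast perturbation maps a pixel value roughly to
# pixel + contrast * (pixel - center), so values at ``center`` stay unchanged
# while other values are scaled away from (or toward) it depending on the sign
# of the sampled contrast. The canvas size and parameters below are arbitrary
# assumptions for demonstration.
from nvidia_tao_tf1.core.types import Canvas2D  # Imported here only for the sketch.
example_transform = Transform(
canvas_shape=Canvas2D(height=240, width=320),
color_transform_matrix=tf.eye(4),
spatial_transform_matrix=tf.eye(3),
)
example_contrast = RandomContrast(scale_max=0.2, center=0.5)
example_output = example_contrast(example_transform)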
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/random_contrast.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from six.moves import mock, xrange
import tensorflow as tf
from nvidia_tao_tf1.core.processors.augment import color
from nvidia_tao_tf1.core.processors.augment.testing_utils import (
assert_truncated_normal_distribution,
assert_uniform_distribution,
sample_tensors,
)
from nvidia_tao_tf1.core.utils import set_random_seed
NUM_SAMPLES = 1000
def tile_color_matrix(ctm, batch_size):
"""Tile a color matrix batch_size number of times."""
if batch_size is None:
return ctm
return np.tile(np.reshape(ctm, [1, 4, 4]), [batch_size, 1, 1])
def identity_color_matrix(batch_size):
"""Return a batched identity matrix."""
ctm = np.eye(4, dtype=np.float32)
return tile_color_matrix(ctm, batch_size)
@pytest.fixture(scope="module")
def get_random_image(batch_size, start=0.0, stop=1.0):
"""Create a batch of images, with values within a linspace, that are then randomly shuffled."""
shape = (batch_size, 16, 64, 3)
images = np.linspace(start, stop, batch_size * 3072, dtype=np.float32).reshape(
shape
)
return np.random.permutation(images)
offset_tests = [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (-1.0, -1.0, -1.0), (0.1, 0.2, 0.3)]
@pytest.mark.parametrize("batch_size", [None, 3])
@pytest.mark.parametrize("offset", offset_tests)
def test_brightness_offset_matrix(batch_size, offset):
"""Test the brightness offset matrix, by checking it's an identity matrix with offsets."""
if batch_size is not None:
offset = np.tile(offset, [batch_size, 1])
m = color.brightness_offset_matrix(offset)
m_np = tf.compat.v1.Session().run(m)
if batch_size is not None:
assert m_np.shape == (batch_size, 4, 4)
created_offsets = m_np[:, 3, 0:3]
else:
assert m_np.shape == (4, 4)
created_offsets = m_np[3, 0:3]
# Test the validity of the offsets
np.testing.assert_allclose(
offset,
created_offsets,
rtol=1e-6,
err_msg="Offset matrix contains different offset values than those "
"supplied.",
)
# Test the rest of the matrix is untouched (identity)
# Zero out the offsets, so we can test versus an identity matrix.
if batch_size is not None:
m_np[:, 3, 0:3] = 0.0
else:
m_np[3, 0:3] = 0.0
expected = identity_color_matrix(batch_size)
np.testing.assert_allclose(
expected,
m_np,
rtol=1e-6,
err_msg="Brightness offset matrix introduced non-identity values "
"in elements other than the expected offsets.",
)
@pytest.mark.parametrize("batch_size", [None, 10])
def test_brightness_offset_matrix2(batch_size):
"""Test that brightness offset matrix matches expected value."""
if batch_size is None:
r = 0.5
g = 1.0
b = 1.5
offset = [r, g, b]
expected_ctm = np.array(
[
[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[r, g, b, 1.0],
]
)
else:
r = np.linspace(-0.5, 0.5, batch_size)
g = np.linspace(-1.0, 1.0, batch_size)
b = np.linspace(-1.5, 1.5, batch_size)
offset = np.transpose(np.array([r, g, b]))
zero = np.zeros_like(r)
one = np.ones_like(zero)
expected_ctm = np.array(
[
[one, zero, zero, zero],
[zero, one, zero, zero],
[zero, zero, one, zero],
[r, g, b, one],
]
)
# Swap the batch dimension first.
expected_ctm = np.transpose(expected_ctm, [2, 0, 1])
m = color.brightness_offset_matrix(offset)
m_np = tf.compat.v1.Session().run(m)
np.testing.assert_allclose(
expected_ctm,
m_np,
atol=1e-2,
err_msg="Brightness offset matrix does not match expected value.",
)
@pytest.mark.parametrize("batch_size", [None, 3])
@pytest.mark.parametrize("contrast", [-0.5, 0.0, 0.5, 1.0])
@pytest.mark.parametrize("center", [1.0 / 2.0, 255.0 / 2.0])
def test_contrast_matrix(batch_size, contrast, center):
"""Test the contrast matrix."""
zero_contrast = contrast == 0.0
if batch_size is not None:
contrast = np.tile(contrast, [batch_size])
center = np.tile(center, [batch_size])
m = color.contrast_matrix(contrast=contrast, center=center)
m_np = tf.compat.v1.Session().run(m)
if zero_contrast:
np.testing.assert_allclose(
identity_color_matrix(batch_size),
m_np,
rtol=1e-6,
err_msg="Zero contrast did not result in the identity matrix.",
)
if batch_size is not None:
assert m_np.shape == (batch_size, 4, 4)
m = m_np[0]
else:
assert m_np.shape == (4, 4)
m = m_np
bias = np.unique(m[3, 0:3])
scale = np.unique([m[0, 0], m[1, 1], m[2, 2]])
assert len(scale) == 1, "Contrast scale is different across channels."
assert len(bias) == 1, "Contrast bias is different across channels."
@pytest.mark.parametrize("batch_size", [None, 10])
def test_contrast_matrix2(batch_size):
"""Test that contrast matrix matches expectation."""
if batch_size is None:
contrast = 1.5
center = 0.5
else:
contrast = np.linspace(0.0, 2.0, batch_size)
center = np.linspace(-1.0, 1.0, batch_size)
m = color.contrast_matrix(contrast=contrast, center=center)
m_np = tf.compat.v1.Session().run(m)
zero = np.zeros_like(contrast)
one = np.ones_like(contrast)
scale = one + contrast
bias = -contrast * center
expected_ctm = np.array(
[
[scale, zero, zero, zero],
[zero, scale, zero, zero],
[zero, zero, scale, zero],
[bias, bias, bias, one],
]
)
if batch_size is not None:
# Swap the batch dimension first.
expected_ctm = np.transpose(expected_ctm, [2, 0, 1])
np.testing.assert_allclose(
expected_ctm,
m_np,
atol=1e-2,
err_msg="Contrast matrix does not match expected value.",
)
@pytest.mark.parametrize("batch_size", [None, 3])
@pytest.mark.parametrize("hue", [0.0, 360.0])
@pytest.mark.parametrize("saturation", [0.0, 1.0])
def test_hue_saturation_matrix(batch_size, hue, saturation):
"""
Test the hue and saturation matrix.
The tests are quite tolerant because a perfect HSV conversion cannot be done with a linear
matrices. For more information, review the docs of the method.
"""
check_identity = hue in [0.0, 360.0] and saturation == 1.0
zero_saturation = saturation == 0.0
if batch_size is not None:
hue = np.tile(hue, [batch_size])
saturation = np.tile(saturation, [batch_size])
m = color.hue_saturation_matrix(hue=hue, saturation=saturation)
m_np = tf.compat.v1.Session().run(m)
if batch_size is None:
assert m_np.shape == (4, 4)
else:
assert m_np.shape == (batch_size, 4, 4)
if check_identity:
np.testing.assert_allclose(
identity_color_matrix(batch_size),
m_np,
atol=1e-2,
err_msg="No hue and saturation changed did not result in the "
"identity matrix.",
)
# Zero saturation should result in equal weighting of all channels
if zero_saturation:
for c in range(1, 3):
# Compare the 2nd and 3rd channel with the first.
if batch_size is not None:
m0 = m_np[:, 0:3, 0]
mc = m_np[:, 0:3, c]
else:
m0 = m_np[0:3, 0]
mc = m_np[0:3, c]
np.testing.assert_array_equal(
m0,
mc,
err_msg="Zero saturation resulted in differences across " "channels.",
)
@pytest.mark.parametrize("batch_size", [None, 10])
def test_hue_saturation_matrix2(batch_size):
"""Test that hue and saturation matrix matches expected value."""
if batch_size is None:
hue = 45.0
saturation = 1.0
else:
hue = np.linspace(0.0, 360.0, batch_size)
saturation = np.linspace(0.0, 2.0, batch_size)
m = color.hue_saturation_matrix(hue=hue, saturation=saturation)
m_np = tf.compat.v1.Session().run(m)
const_mat = np.array(
[
[0.299, 0.299, 0.299, 0.0],
[0.587, 0.587, 0.587, 0.0],
[0.114, 0.114, 0.114, 0.0],
[0.0, 0.0, 0.0, 1.0],
],
dtype=np.float32,
)
sch_mat = np.array(
[
[0.701, -0.299, -0.300, 0.0],
[-0.587, 0.413, -0.588, 0.0],
[-0.114, -0.114, 0.886, 0.0],
[0.0, 0.0, 0.0, 0.0],
],
dtype=np.float32,
)
ssh_mat = np.array(
[
[0.168, -0.328, 1.25, 0.0],
[0.330, 0.035, -1.05, 0.0],
[-0.497, 0.292, -0.203, 0.0],
[0.0, 0.0, 0.0, 0.0],
],
dtype=np.float32,
)
angle = hue * (np.pi / 180.0)
if batch_size is not None:
const_mat = np.tile(const_mat, [batch_size, 1, 1])
sch_mat = np.tile(sch_mat, [batch_size, 1, 1])
ssh_mat = np.tile(ssh_mat, [batch_size, 1, 1])
angle = np.reshape(angle, [batch_size, 1, 1])
saturation = np.reshape(saturation, [batch_size, 1, 1])
expected_ctm = const_mat + saturation * (
np.cos(angle) * sch_mat + np.sin(angle) * ssh_mat
)
np.testing.assert_allclose(
expected_ctm,
m_np,
atol=1e-2,
err_msg="Hue and saturation matrix does not match expected value.",
)
@mock.patch(
"nvidia_tao_tf1.core.processors.augment.color.hue_saturation_matrix",
side_effect=lambda h, s: (h, s),
)
@pytest.mark.parametrize("batch_size", [None, 4])
@pytest.mark.parametrize("hue_rotation_max", [0.0, 100.0])
@pytest.mark.parametrize("saturation_shift_max", [0.0, 0.5])
@pytest.mark.parametrize("hue_center", [0.0, 50.0])
@pytest.mark.parametrize("saturation_shift_min", [-100.0, 0.0, None])
def test_random_hue_saturation_matrix(
patched,
batch_size,
hue_rotation_max,
saturation_shift_max,
hue_center,
saturation_shift_min,
):
"""Test that random_hue_saturation_matrix produces correct distributions."""
set_random_seed(42)
tensors = color.random_hue_saturation_matrix(
hue_rotation_max,
saturation_shift_max,
batch_size=batch_size,
hue_center=hue_center,
saturation_shift_min=saturation_shift_min,
)
hue_rotations, saturation_shifts = sample_tensors(tensors, NUM_SAMPLES)
assert_truncated_normal_distribution(
hue_rotations, mean=hue_center, stddev=hue_rotation_max / 2.0
)
if saturation_shift_min is None:
saturation_shift_min = -saturation_shift_max
min_bound = 1.0 + saturation_shift_min
max_bound = 1.0 + saturation_shift_max
assert_uniform_distribution(saturation_shifts, min_bound, max_bound)
@mock.patch("nvidia_tao_tf1.core.processors.augment.color.tf.random.truncated_normal")
@mock.patch(
"nvidia_tao_tf1.core.processors.augment.color.hue_saturation_matrix",
side_effect=color.hue_saturation_matrix,
)
@pytest.mark.parametrize("batch_size", [None, 4])
def test_random_hue_saturation_matrix_samples_hue(
mocked_hue_saturation_matrix, mocked_truncated_normal, batch_size
):
hue = tf.constant(42, dtype=tf.float32)
mocked_truncated_normal.return_value = hue
color.random_hue_saturation_matrix(
hue_rotation_max=180.0, saturation_shift_max=0.0, batch_size=batch_size
)
expected_shape = [] if batch_size is None else [batch_size]
mocked_truncated_normal.assert_called_with(expected_shape, mean=0.0, stddev=90.0)
mocked_hue_saturation_matrix.assert_called_with(hue, mock.ANY)
@mock.patch("nvidia_tao_tf1.core.processors.augment.color.tf.random.uniform")
@mock.patch(
"nvidia_tao_tf1.core.processors.augment.color.hue_saturation_matrix",
side_effect=color.hue_saturation_matrix,
)
@pytest.mark.parametrize("batch_size", [None, 4])
def test_random_hue_saturation_matrix_samples_saturation(
mocked_hue_saturation_matrix, mocked_random_uniform, batch_size
):
saturation = 0.42
mocked_random_uniform.return_value = saturation
color.random_hue_saturation_matrix(
hue_rotation_max=0.0, saturation_shift_max=0.5, batch_size=batch_size
)
expected_shape = [] if batch_size is None else [batch_size]
mocked_random_uniform.assert_called_with(expected_shape, minval=-0.5, maxval=0.5)
mocked_hue_saturation_matrix.assert_called_with(mock.ANY, 1 + saturation)
@mock.patch(
"nvidia_tao_tf1.core.processors.augment.color.contrast_matrix",
side_effect=lambda c, cs: (c, cs),
)
@pytest.mark.parametrize("batch_size", [None, 4])
@pytest.mark.parametrize("contrast_scale_max", [0.0, 0.5, 1.0])
@pytest.mark.parametrize("contrast_center", [1.0 / 2.0, 255.0 / 2.0])
@pytest.mark.parametrize("contrast_scale_center", [0.0, 0.5, 1.0])
def test_random_contrast_matrix(
patched, batch_size, contrast_scale_max, contrast_center, contrast_scale_center
):
"""Test that random_contrast_matrix produces correct distributions."""
set_random_seed(42)
contrast_scale_tensor, contrast_center_value = color.random_contrast_matrix(
contrast_scale_max,
contrast_center,
batch_size=batch_size,
scale_center=contrast_scale_center,
)
contrast_scales = sample_tensors([contrast_scale_tensor], NUM_SAMPLES)
assert_truncated_normal_distribution(
contrast_scales, mean=contrast_scale_center, stddev=contrast_scale_max / 2.0
)
assert contrast_center == contrast_center_value
@mock.patch(
"nvidia_tao_tf1.core.processors.augment.color.brightness_offset_matrix",
side_effect=lambda offset: offset,
)
@pytest.mark.parametrize("batch_size", [None, 4])
@pytest.mark.parametrize("brightness_scale_max", [0.0, 0.5, 1.0])
@pytest.mark.parametrize("brightness_uniform_across_channels", [True, False])
@pytest.mark.parametrize("brightness_center", [-0.5, 0.0, 0.5])
def test_random_brightness_matrix(
patched,
batch_size,
brightness_scale_max,
brightness_uniform_across_channels,
brightness_center,
):
"""Test that random_brightness_matrix produces correct distributions."""
set_random_seed(42)
brightness_scale_tensor = color.random_brightness_matrix(
brightness_scale_max,
brightness_uniform_across_channels,
batch_size=batch_size,
brightness_center=brightness_center,
)
brightness_scales = sample_tensors([brightness_scale_tensor], NUM_SAMPLES)
brightness_scales = np.array(brightness_scales[0])
assert_truncated_normal_distribution(
brightness_scales, mean=brightness_center, stddev=brightness_scale_max / 2.0
)
if brightness_uniform_across_channels:
# If ``brightness_uniform_across_channels`` is True, check that values for each channel
# match. This is done by subtracting value of red channel from all channels and checking
# that result is zero.
if batch_size is None:
assert all(
[
np.allclose(brightness_scales[i, :] - brightness_scales[i, 0], 0.0)
for i in xrange(len(brightness_scales))
]
)
else:
for b in xrange(len(brightness_scales)):
assert all(
[
np.allclose(
brightness_scales[b, i, :] - brightness_scales[b, i, 0], 0.0
)
for i in xrange(len(brightness_scales[b]))
]
)
elif brightness_scale_max > 0.0:
# If ``brightness_uniform_across_channels`` is False, check that values for each channel
# differ. This is done by negating the test for ``brightness_uniform_across_channels`` True.
# Note that we're not checking value of red channel after subtracting value of red channel
# since that will be always zero. Similarly, values will be all zero and hence the same
# if ``brightness_scale_max`` == 0.0.
if batch_size is None:
assert all(
[
not np.allclose(
brightness_scales[i, 1:] - brightness_scales[i, 0], 0.0
)
for i in xrange(len(brightness_scales))
]
)
else:
for b in xrange(len(brightness_scales)):
assert all(
[
not np.allclose(
brightness_scales[b, i, 1:] - brightness_scales[b, i, 0],
0.0,
)
for i in xrange(len(brightness_scales[b]))
]
)
@pytest.mark.parametrize("batch_size", [None, 4])
@pytest.mark.parametrize(
"hue_rotation_max, saturation_shift_max, contrast_scale_max, "
"brightness_scale_max, brightness_uniform_across_channels",
[
(0, 0, 0, 0, True),
(0, 0, 0, 0, False),
(0, 0, 0, 0.5, True),
(0, 0, 0, 0.5, False),
],
)
def test_get_random_color_transformation_matrix(
batch_size,
hue_rotation_max,
saturation_shift_max,
contrast_scale_max,
brightness_scale_max,
brightness_uniform_across_channels,
):
"""
Test generate random color transform matrix.
"""
set_random_seed(42)
# No linter approved way to break up the brightness_uniform_across_channels=
# brightness_uniform_across_channels line and maintain indentation, so using
# a dummy variable.
uniform_bright = brightness_uniform_across_channels
ctm = color.get_random_color_transformation_matrix(
hue_rotation_max=hue_rotation_max,
saturation_shift_max=saturation_shift_max,
contrast_scale_max=contrast_scale_max,
contrast_center=0.5,
brightness_scale_max=brightness_scale_max,
brightness_uniform_across_channels=uniform_bright,
batch_size=batch_size,
)
ctm_np = tf.compat.v1.Session().run(ctm)
if brightness_scale_max > 0:
if batch_size is None:
ctm = ctm_np[3, 0:3]
else:
ctm = ctm_np[:, 3, 0:3]
if brightness_uniform_across_channels:
# Tests that the first three values in the last row of the transform matrix
# (the offset channels) have the same value.
np.testing.assert_allclose(
np.sum(np.diff(ctm)),
0,
atol=1e-2,
err_msg="color transform matrix is not correctly "
"generated when brightness is uniform.",
)
else:
# Tests that the first three values in the last row of the transform matrix
# (the offset channels) have different values.
np.testing.assert_equal(
np.not_equal(np.sum(np.diff(ctm)), 0),
True,
err_msg="color transform matrix is not correctly "
"generated when brightness is not uniform.",
)
else:
np.testing.assert_allclose(
identity_color_matrix(batch_size),
ctm_np,
atol=1e-2,
err_msg="color transform matrix is not correctly generated.",
)
def test_no_op_color_transform():
"""Tests that supplying no kwargs results in an almost-no-op color transformation matrix."""
ctm = color.get_random_color_transformation_matrix()
ctm_np = tf.compat.v1.Session().run(ctm)
# 'Almostness' comes from saturation matrix.
np.testing.assert_allclose(
ctm_np,
np.eye(4),
atol=2e-3,
verbose=True,
err_msg="Default color transformation matrix is too 'far' from the identity matrix.",
)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_color_matrices.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Scale processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from six.moves import mock
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object
from nvidia_tao_tf1.core.processors import Scale
from nvidia_tao_tf1.core.types import Canvas2D, Transform
@pytest.mark.parametrize(
"height, width, message",
[
(0, 1, "Scale.height (0) is not positive."),
(1, 0, "Scale.width (0) is not positive."),
],
)
def test_invalid_scale_parameters(height, width, message):
"""Test Scale processor constructor error handling on invalid height and width."""
with pytest.raises(ValueError) as exc:
Scale(height=height, width=width)
assert str(exc.value) == message
@mock.patch("nvidia_tao_tf1.core.processors.augment.scale.spatial.zoom_matrix")
def test_scale_call(mocked_zoom_matrix):
"""Test Scale processor call."""
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4),
spatial_transform_matrix=tf.eye(3),
)
mocked_zoom_matrix.return_value = tf.eye(3)
processor = Scale(height=6, width=5)
processor(transform)
mocked_zoom_matrix.assert_called_with(ratio=(2, 2))
@pytest.mark.parametrize(
"batch_size", [None, 3, tf.compat.v1.placeholder(dtype=tf.int32)]
)
def test_scale(batch_size):
"""Test Scale processor."""
batch_shape = [] if batch_size is None else [batch_size]
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4, batch_shape=batch_shape, dtype=tf.float32),
spatial_transform_matrix=tf.eye(3, batch_shape=batch_shape, dtype=tf.float32),
)
feed_dict = {}
expected_batch_size = batch_size
if type(batch_size) == tf.Tensor:
expected_batch_size = 7
feed_dict = {batch_size: expected_batch_size}
processor = Scale(height=6, width=5)
transformed = processor(transform)
res = tf.compat.v1.Session().run(
transformed.spatial_transform_matrix, feed_dict=feed_dict
)
expected = np.array([[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 1.0]])
if batch_size is not None:
expected = np.tile(expected, [expected_batch_size, 1, 1])
np.testing.assert_array_equal(res, expected)
def test_scale_call_with_invalid_input():
"""Test Scale processor call error handling on invalid input types."""
# Calling Scale with str should throw a TypeError.
with pytest.raises(TypeError):
Scale(1, 1)("Transform")
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
processor = Scale(height=6, width=5)
processor_dict = processor.serialize()
deserialized_processor = deserialize_tao_object(processor_dict)
assert processor._height == deserialized_processor._height
assert processor._width == deserialized_processor._width
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_scale.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus Pixel Removal Processors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.processors import Processor
class PixelRemoval(Processor):
"""Base class for blur transforms."""
@save_args
def __init__(self, random=True, **kwargs):
"""__init__ function for pixel removal transformation."""
super(PixelRemoval, self).__init__(**kwargs)
self.random = random
def _sample(self, dist, shape):
"""A wrapper to make it possible to Mock the sample function."""
return dist.sample((shape[0], 1, shape[2], shape[3]))
def make_selection_condition(self, pct, max_block, shape):
"""Make an image mask with uniformly distributed patches."""
# define a standard normal distribution
dist = tf.compat.v1.distributions.Normal(loc=0.0, scale=1.0)
# make a tensor of samples that is unique for each image
samples = self._sample(dist, (shape[0], 1, shape[2], shape[3]))
# pass a gaussian filter over the pixels to get patches
samples = self._uniform_blur(samples, size=max_block)
# get an array of probabilities
probs = 1 - dist.cdf(samples)
probs = tf.tile(probs, [1, 3, 1, 1])
if self.random:
random_pct = tf.greater(pct, 0)
pct = tf.cond(
pred=random_pct,
true_fn=lambda: tf.random.uniform(
shape=[], minval=0, maxval=pct, dtype=tf.float32
),
false_fn=lambda: pct,
)
comparison = tf.less(probs, pct)
return comparison
def _make_uniform_kernel(self, size):
"""Make a kernel of all the same number."""
# assume 3 channel image
size = tf.constant(size, dtype=tf.int32)
if self.random:
random_size = tf.greater(size, 1)
size = tf.cond(
pred=random_size,
true_fn=lambda: tf.random.uniform(
shape=[], minval=1, maxval=size, dtype=tf.int32
),
false_fn=lambda: size,
)
kernel = tf.ones((3, size, size), dtype=tf.float32)
kernel = kernel / tf.cast(size, dtype=tf.float32)
kernel = tf.expand_dims(tf.expand_dims(kernel, axis=-1), axis=-1)
return kernel
def _uniform_blur(self, images, **kwargs):
"""Uniformly blur the image."""
kernels = self._make_uniform_kernel(**kwargs)
return self._convolve_filter(images, kernels)
def _convolve_filter(self, images, kernels):
"""Convolve a filter channel-wise."""
image_blurs = []
for idx in range(images.shape[1]):
# isolate a channel to blur
image = tf.expand_dims(images[:, idx, ...], axis=1)
blurred_channel = tf.nn.conv2d(
input=image,
filters=kernels[idx],
strides=[1, 1, 1, 1],
data_format="NCHW",
padding="SAME",
name="gaussian_blur",
)
image_blurs.append(blurred_channel)
blurred = tf.concat(image_blurs, axis=1)
return blurred
def call(self, images, pct=0.2, max_block=1, prob=1.0):
"""Call function for BlurTransform.
Args:
Images (ndarray/tensor): An np array or tensor of images in the format (NCHW).
pct (float): The percentage of pixels to drop in the image.
max_block (int): The largest size of area to be taken out in one chunk.
prob (float): The probability of applying the augmentation. Only
applicable if random.
Outputs:
The image with chunks blacked out,
"""
condition = self.make_selection_condition(pct, max_block, images.shape)
masked = tf.compat.v1.where(condition, tf.zeros_like(images), images)
if self.random:
application_prob = tf.random.uniform(shape=[], maxval=1.0)
no_aug_cond = tf.greater(application_prob, prob)
return tf.cond(
pred=no_aug_cond, true_fn=lambda: images, false_fn=lambda: masked
)
return masked
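# Illustrative usage sketch (editorial addition, not part of the original module):
# builds a dummy NCHW float32 batch and drops random pixel patches from it.
# The tensor shape and the pct/max_block/prob values below are hypothetical.
if __name__ == "__main__":
    pixel_removal = PixelRemoval(random=True)
    dummy_images = tf.random.uniform(shape=(2, 3, 64, 64), dtype=tf.float32)
    masked_images = pixel_removal(dummy_images, pct=0.1, max_block=3, prob=0.5)
    # Graph construction only; run inside a tf.compat.v1.Session on a device
    # that supports NCHW convolutions to materialize the result.
    print(masked_images.shape)  # Expected static shape: (2, 3, 64, 64)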
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/pixel_removal.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying random brightness transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.augment import color
from nvidia_tao_tf1.core.processors.processors import Processor
from nvidia_tao_tf1.core.types import Transform
class RandomBrightness(Processor):
"""Random brightness transform."""
@save_args
def __init__(self, scale_max, uniform_across_channels, **kwargs):
"""Construct a RandomBrightness processor.
Args:
scale_max (float): The range of the brightness offsets. This value
is half of the standard deviation, where values of twice the standard
deviation are truncated. A value of 0 will not affect the matrix.
uniform_across_channels (bool): If True, the same brightness shift is
applied to all channels. If False, a different brightness shift is applied
to each channel.
kwargs (dict): keyword arguments passed to parent class.
"""
super(RandomBrightness, self).__init__(**kwargs)
self._scale_max = scale_max
self._uniform_across_channels = uniform_across_channels
def __repr__(self):
"""Return a string representation of the processor."""
return "RandomBrightness(scale_max={}, uniform_across_channels={})".format(
self._scale_max, self._uniform_across_channels
)
def call(self, transform):
"""Return a Transform whose color transformation matrix is perturbed at random.
Args:
transform (Transform): An input Transform instance to be processed.
Returns:
Transform: Final Transform instance with color transform matrix perturbed.
"""
if not isinstance(transform, Transform):
raise TypeError(
"Expecting an argument of type 'Transform', "
"given: {}.".format(type(transform).__name__)
)
batch_size = None
if transform.color_transform_matrix.shape.ndims == 3:
batch_size = tf.shape(input=transform.color_transform_matrix)[0]
ctm_brightness = color.random_brightness_matrix(
brightness_scale_max=self._scale_max,
brightness_uniform_across_channels=self._uniform_across_channels,
batch_size=batch_size,
)
processed_ctm = tf.matmul(ctm_brightness, transform.color_transform_matrix)
return Transform(
canvas_shape=transform.canvas_shape,
color_transform_matrix=processed_ctm,
spatial_transform_matrix=transform.spatial_transform_matrix,
)
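# Illustrative usage sketch (editorial addition, not part of the original module):
# perturbs the color transform matrix of an identity Transform. The Canvas2D
# import and the concrete canvas size / scale values below are assumptions.
if __name__ == "__main__":
    from nvidia_tao_tf1.core.types import Canvas2D

    processor = RandomBrightness(scale_max=0.2, uniform_across_channels=True)
    identity_transform = Transform(
        canvas_shape=Canvas2D(height=544, width=960),
        color_transform_matrix=tf.eye(4, dtype=tf.float32),
        spatial_transform_matrix=tf.eye(3, dtype=tf.float32),
    )
    perturbed = processor(identity_transform)
    # The spatial transform is untouched; only the 4x4 color matrix changes.
    print(perturbed.color_transform_matrix.shape)  # Expected: (4, 4)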
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/random_brightness.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus augment processors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import add_move, MovedModule
add_move(MovedModule('mock', 'mock', 'unittest.mock'))
from nvidia_tao_tf1.core.processors.augment import additive_noise
from nvidia_tao_tf1.core.processors.augment import blur
from nvidia_tao_tf1.core.processors.augment import color
from nvidia_tao_tf1.core.processors.augment import pixel_removal
from nvidia_tao_tf1.core.processors.augment import random_blur
from nvidia_tao_tf1.core.processors.augment import spatial
from nvidia_tao_tf1.core.processors.augment import spatial_matrices_3D
from nvidia_tao_tf1.core.processors.augment.additive_noise import AdditiveNoise
from nvidia_tao_tf1.core.processors.augment.blur import Blur
from nvidia_tao_tf1.core.processors.augment.pixel_removal import PixelRemoval
from nvidia_tao_tf1.core.processors.augment.random_blur import RandomBlur
__all__ = (
"color",
"spatial",
"spatial_matrices_3D",
"blur",
"additive_noise",
"pixel_removal",
"random_blur",
"AdditiveNoise",
"Blur",
"PixelRemoval",
"RandomBlur",
)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus Additive Noise Processors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.processors import Processor
class AdditiveNoise(Processor):
"""Additive noise transformation class."""
@save_args
def __init__(self, random=True, **kwargs):
"""__init__ method."""
super(AdditiveNoise, self).__init__(**kwargs)
self.random = random
def call(self, images, var, prob=1.0):
"""Add random gaussian noise to each channel of the image.
Args:
images (tensor): An array of images in NCHW format.
var (float): The scale (standard deviation) of the gaussian noise to be
added to the image. If random, the value is chosen uniformly from [0, var].
prob (float): The probability of applying the augmentation to
the image. Only applicable if random.
Outputs:
The image with noise added.
"""
loc = tf.constant(0.0, dtype=images.dtype)
var = tf.constant(var, dtype=images.dtype)
if self.random:
var = tf.random.uniform(shape=[], maxval=1.0, dtype=images.dtype) * var
dist = tf.compat.v1.distributions.Normal(loc=loc, scale=var)
samples = dist.sample(images.shape)
if self.random:
application_prob = tf.random.uniform(shape=[], maxval=1.0)
no_aug_cond = tf.greater(application_prob, prob)
return tf.cond(
pred=no_aug_cond,
true_fn=lambda: images,
false_fn=lambda: images + samples,
)
return images + samples
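# Illustrative usage sketch (editorial addition, not part of the original module):
# adds zero-mean gaussian noise to a dummy NCHW batch. The shapes and the
# var/prob values below are hypothetical.
if __name__ == "__main__":
    noise = AdditiveNoise(random=True)
    dummy_images = tf.random.uniform(shape=(2, 3, 32, 32), dtype=tf.float32)
    noisy_images = noise(dummy_images, var=0.05, prob=0.9)
    with tf.compat.v1.Session() as sess:
        print(sess.run(noisy_images).shape)  # Expected: (2, 3, 32, 32)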
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/additive_noise.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus Spatial Transformation Processors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework.sparse_tensor import is_sparse
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.processors import load_custom_tf_op
from nvidia_tao_tf1.core.processors.processors import Processor
from nvidia_tao_tf1.core.types import data_format as modulus_data_format, DataFormat
class PolygonTransform(Processor):
"""
Processor that transforms polygons using a given spatial transformation matrix.
Args:
invert_stm (bool): Whether or not to invert the spatial transformation matrix. This
is needed when the stm describes the change of the new canvas instead of the old
canvas (a matter of definition), or vice versa.
"""
@save_args
def __init__(self, invert_stm=True, **kwargs):
"""__init__ method."""
# TODO(xiangbok): Inversion should not be default.
self._invert_stm = invert_stm
super(PolygonTransform, self).__init__(**kwargs)
@staticmethod
def _dense_transform(vertices, stm):
"""Dense transform.
Args:
vertices (Tensor): Vertices can be of any rank as long as they can be reshaped
to [N, -1, 2] where N is the batch dimension.
stm (Tensor): Must be either 3x3 (non-batched) or Nx3x3 (batched).
Batched vs non-batched is inferred from the stm rank, which needs to be known
statically. The batch dimension needs to be known only at runtime.
Returns:
Tensor of transformed vertices.
"""
# Store the original vertices shape.
vertices_shape = tf.shape(input=vertices)
if stm.shape.ndims == 3:
# Batched case. Take batch_size from stm dim 0. Use tf.shape to allow dynamic batch
# size.
batch_size = tf.shape(input=stm)[0]
processing_shape = [batch_size, -1, 2]
else:
# Non-batched case. Convert both vertices and stm to batched so that we can handle both
# batched and non-batched with the same code below.
processing_shape = [1, -1, 2]
stm = tf.expand_dims(stm, 0)
# Reshape vertices to [N, n, 2] for processing.
vertices_2D = tf.reshape(vertices, processing_shape)
# Expand vertices into 3D: [x, y] -> [x, y, 1].
num_vertices = tf.shape(input=vertices_2D)[1]
one = tf.ones(
shape=[processing_shape[0], num_vertices, 1], dtype=vertices.dtype
)
vertices_3D = tf.concat([vertices_2D, one], axis=-1)
# Apply the transformation: (N, n, 3) x (N, 3, 3) = (N, n, 3).
vertices_transformed = tf.matmul(vertices_3D, stm)
# Normalize back from 3D homogeneous coordinates to regular 2D ones.
xy = vertices_transformed[:, :, 0:2]
z = vertices_transformed[:, :, 2:3]
vertices_transformed_2D = xy / z
# Restore the original vertices shape.
return tf.reshape(vertices_transformed_2D, vertices_shape)
@staticmethod
def _sparse_transform(vertices, stm):
"""Sparse transform.
Args:
vertices (SparseTensor): Vertices can be of any rank as long as they can be reshaped
to [N, -1, 2] where N is the batch dimension.
stm (Tensor): stm is assumed to be either 3x3 (non-batched) or Nx3x3 (batched).
Batched vs non-batched is inferred from the stm rank.
Returns:
SparseTensor of transformed vertices.
"""
# Convert to dense for matmul. This is simpler, and likely to be faster than doing
# a sparse matmul.
dense_vertices = tf.sparse.to_dense(vertices, validate_indices=False)
vertices_transformed = PolygonTransform._dense_transform(
vertices=dense_vertices, stm=stm
)
# Sparsify.
sparse_vertices = tf.gather_nd(
params=vertices_transformed, indices=vertices.indices
)
# Rebuild a sparse tensor.
return tf.SparseTensor(
indices=vertices.indices,
values=sparse_vertices,
dense_shape=vertices.dense_shape,
)
def call(self, polygons, stm):
"""call method.
Args:
polygons (sparse or dense tensor (float32) of shape (n, 2)): A tensor with ``n``
vertices with (x, y) coordinates. This tensor can contain multiple polygons
concatenated together, as all coordinates will be transformed with the same
transformation matrix.
stm (tensor (float32) of shape (3, 3)): The spatial transformation matrix.
"""
if self._invert_stm:
stm = tf.linalg.inv(stm)
if is_sparse(polygons):
return self._sparse_transform(vertices=polygons, stm=stm)
return self._dense_transform(vertices=polygons, stm=stm)
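# Worked example for PolygonTransform (illustrative, editorial addition):
#
#     polygons = tf.constant([[0., 0.], [4., 0.], [2., 3.]])   # (n, 2) vertices
#     stm = translation_matrix(x=10., y=5.)                    # defined later in this module
#     shifted = PolygonTransform(invert_stm=False)(polygons, stm)
#     # -> [[10., 5.], [14., 5.], [12., 8.]]
#
# With the default invert_stm=True, the inverse translation (-10, -5) would be
# applied instead.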
class SpatialTransform(Processor):
"""
Processor that transforms images using a given spatial transformation matrix.
Args:
method (string): Sampling method used. Can be 'nearest', 'bilinear', or 'bicubic'.
background_value (float): The value the background canvas should have.
verbose (bool): Toggle verbose output during processing.
data_format (string): A string representing the dimension ordering of the input
images, must be one of 'channels_last' (NHWC) or 'channels_first' (NCHW). If
specified, input_data_format and output_data_format must be None.
input_data_format (string): Data format for input. If specified, data_format must be None,
and output_data_format must be given.
output_data_format (string): Data format for output. If specified, data_format must be
None, and input_data_format must be given.
output_dtype (dtype): Valid values are tf.uint8, tf.float16, tf.float32, None. If None,
image dtype is used. Note for uint8 output: Image data must be prescaled to [0,255]
range, and min_clip set to at least 0 and max_clip set to at most 255.
"""
SUPPORTED_METHODS = ["nearest", "bilinear", "bicubic"]
@save_args
def __init__(
self,
method="bilinear",
background_value=0.0,
verbose=False,
data_format=None,
input_data_format=None,
output_data_format=None,
output_dtype=None,
**kwargs
):
"""__init__ method."""
self.method = method
self.background_value = background_value
self.verbose = verbose
if data_format is not None and (
input_data_format is not None or output_data_format is not None
):
raise ValueError(
"When data_format is specified, input_data_format and "
"output_data_format must be None."
)
if input_data_format is not None and output_data_format is None:
raise ValueError(
"When input_data_format is specified, output_data_format "
"must be specified too."
)
if output_data_format is not None and input_data_format is None:
raise ValueError(
"When output_data_format is specified, input_data_format "
"must be specified too."
)
if (
data_format is None
and input_data_format is None
and output_data_format is None
):
data_format = modulus_data_format()
if data_format is not None:
input_data_format = data_format
output_data_format = data_format
if input_data_format not in [
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_LAST,
]:
raise NotImplementedError(
"Data format not supported, must be 'channels_first' or "
"'channels_last', given {}.".format(input_data_format)
)
if output_data_format not in [
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_LAST,
]:
raise NotImplementedError(
"Data format not supported, must be 'channels_first' or "
"'channels_last', given {}.".format(output_data_format)
)
self.output_data_format = output_data_format
self.input_data_format = input_data_format
self.output_dtype = output_dtype
if method not in self.SUPPORTED_METHODS:
raise NotImplementedError(
"Sampling method not supported: '{}'".format(method)
)
super(SpatialTransform, self).__init__(**kwargs)
def call(self, images, stms, shape=None):
"""
Apply spatial transformation (aka. image warp) to images.
Args:
images (4D tensor float32): 4D tensor with shape `(batch_size, channels, height, width)`
if data_format='channels_first', or 4D tensor with shape
`(batch_size, height, width, channels)` if data_format='channels_last'.
Note that batch and channel dimensions must exist, even if their sizes are 1.
stms (3D tensor float32): Spatial transformation matrices of
shape (batch_size, 3, 3). Matrices are specified in row-major
format. A matrix M transforms a destination image pixel
coordinate vector P=[px, py, 1] into source image pixel Q:
Q = P M. If Q is outside the source image, the
sampled value is set to the background value, otherwise
source image is sampled at location Q with a bilinear or
bicubic filter kernel.
shape (tuple): A tuple of size 2 containing height and width of
the output images. If ``shape`` is ``None``, the canvas size
is unchanged.
Returns:
4D tensor float32: Transformed images of shape `(batch_size, channels, height, width)`
if data_format='channels_first', or 4D tensor with shape
`(batch_size, height, width, channels)` if data_format='channels_last'.
"""
images = tf.convert_to_tensor(value=images)
# Shape inference needs to know whether shape == None. Unfortunately it is not possible
# to just pass shape=None to the tensorflow side, so we need to create a boolean
# tensor.
use_input_image_shape = False
if shape is None:
use_input_image_shape = True
if self.input_data_format == DataFormat.CHANNELS_FIRST:
shape = tf.shape(input=images)[2:4] # (height, width).
else:
shape = tf.shape(input=images)[1:3] # (height, width).
op = load_custom_tf_op("op_spatialtransform.so")
output_dtype = self.output_dtype
if output_dtype is None:
output_dtype = images.dtype
data_formats = {
DataFormat.CHANNELS_FIRST: "NCHW",
DataFormat.CHANNELS_LAST: "NHWC",
}
input_data_format = data_formats[self.input_data_format]
output_data_format = data_formats[self.output_data_format]
transformed_images = op.spatial_transform(
images=images,
transformation_matrices=stms,
shape=shape,
use_input_image_shape=use_input_image_shape,
filter_mode=self.method,
background_value=self.background_value,
input_data_format=input_data_format,
output_data_format=output_data_format,
output_dtype=output_dtype,
verbose=self.verbose,
)
return transformed_images
def flip_matrix(horizontal, vertical, width=None, height=None):
"""Construct a spatial transformation matrix that flips.
Note that if width and height are supplied, it will move the object back into the canvas
together with the flip.
Args:
horizontal (bool): If the flipping should be horizontal. Scalar or vector.
vertical (bool): If the flipping should be vertical. Scalar or vector.
width (int): the width of the canvas. Used for translating the coordinates into the canvas.
Defaults to None (no added translation).
height (int): the height of the canvas. Used for translating the coordinates back into the
canvas. Defaults to None (no added translation).
Returns:
fp32 tensor (3, 3), spatial transformation matrix if horizontal and vertical are scalars.
If horizontal and vertical are vectors, (len(horizontal), 3, 3).
"""
# Casting bool to float converts False to 0.0 and True to 1.0.
h = tf.cast(tf.convert_to_tensor(value=horizontal), tf.float32)
v = tf.cast(tf.convert_to_tensor(value=vertical), tf.float32)
zero = tf.zeros_like(h)
one = tf.ones_like(h)
if (width is None) ^ (height is None):
raise ValueError(
"Variables `width` and `height` should both be defined, or both `None`."
)
elif width is not None and height is not None:
x_t = h * width
y_t = v * height
else:
x_t = zero
y_t = zero
m = tf.stack(
[one - 2.0 * h, zero, zero, zero, one - 2.0 * v, zero, x_t, y_t, one], axis=-1
)
shape = [-1, 3, 3] if h.shape.ndims == 1 else [3, 3]
return tf.reshape(m, shape)
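# Worked example for flip_matrix (illustrative, editorial addition):
# flip_matrix(horizontal=True, vertical=False, width=10, height=12) yields the
# row-major matrix
#     [[-1.,  0., 0.],
#      [ 0.,  1., 0.],
#      [10.,  0., 1.]]
# so a pixel coordinate row vector [x, y, 1] maps to [10 - x, y, 1], i.e. a
# horizontal flip that keeps the content inside a canvas of width 10.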
def rotation_matrix(theta, width=None, height=None):
"""Construct a rotation transformation matrix.
Note that if width and height are supplied, it will rotate the coordinates around the canvas
center-point, so there will be a translation added to the rotation matrix.
Args:
theta (float): the rotation radian. Scalar or vector.
width (int): the width of the canvas. Used for center rotation. Defaults to None
(no center rotation).
height (int): the height of the canvas. Used for center rotation. Defaults to None
(no center rotation).
Returns:
fp32 tensor (3, 3), spatial transformation matrix if theta is scalar. If theta is
a vector, (len(theta), 3, 3).
"""
theta = tf.cast(tf.convert_to_tensor(value=theta), tf.float32)
cos_t = tf.cos(theta)
sin_t = tf.sin(theta)
zero = tf.zeros_like(theta)
one = tf.ones_like(theta)
if (width is None) ^ (height is None):
raise ValueError(
"Variables `width` and `height` should both be defined, or both `None`."
)
elif width is not None and height is not None:
width = tf.cast(tf.convert_to_tensor(value=width), tf.float32)
height = tf.cast(tf.convert_to_tensor(value=height), tf.float32)
x_t = height * sin_t / 2.0 - width * cos_t / 2.0 + width / 2.0
y_t = -1 * height * cos_t / 2.0 + height / 2.0 - width * sin_t / 2.0
else:
x_t = zero
y_t = zero
m = tf.stack([cos_t, sin_t, zero, -sin_t, cos_t, zero, x_t, y_t, one], axis=-1)
shape = [-1, 3, 3] if theta.shape.ndims == 1 else [3, 3]
return tf.reshape(m, shape)
def shear_matrix(ratio_x, ratio_y, width=None, height=None):
"""Construct a shear transformation matrix.
Note that if width and height are supplied, it will shear the coordinates around
the canvas center-point, so there will be a translation added to the shear matrix.
It follows formula:
[x_new, y_new, 1] = [x, y, 1] * [[1., ratio_y, 0],
[ratio_x, 1., 0],
[-height*ratio_x/2., -width*ratio_y/2., 1]]
Args:
ratio_x (float): the amount of horizontal shift per y row. Scalar or vector.
ratio_y (float): the amount of vertical shift per x column. Scalar or vector.
width (int): the width of the canvas. Used for center shearing. Defaults to None
(no center shearing).
height (int): the height of the canvas. Used for center shearing. Defaults to None
(no center shearing).
Returns:
fp32 tensor (3, 3), spatial transformation matrix if ratio_{x,y} are scalars. If
ratio_{x,y} are vectors, (len(ratio_x), 3, 3).
"""
ratio_x = tf.cast(tf.convert_to_tensor(value=ratio_x), tf.float32)
ratio_y = tf.cast(tf.convert_to_tensor(value=ratio_y), tf.float32)
zero = tf.zeros_like(ratio_x)
one = tf.ones_like(ratio_x)
if (width is None) ^ (height is None):
raise ValueError(
"Variables `width` and `height` should both be defined, or both `None`."
)
elif width is not None and height is not None:
x_t = -1 * height / 2.0 * ratio_x
y_t = -1 * width / 2.0 * ratio_y
else:
x_t = zero
y_t = zero
m = tf.stack([one, ratio_y, zero, ratio_x, one, zero, x_t, y_t, one], axis=-1)
shape = [-1, 3, 3] if ratio_x.shape.ndims == 1 else [3, 3]
return tf.reshape(m, shape)
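# Worked example for shear_matrix (illustrative, editorial addition):
# shear_matrix(ratio_x=0.1, ratio_y=0.0, width=10, height=4) yields
#     [[ 1. , 0., 0.],
#      [ 0.1, 1., 0.],
#      [-0.2, 0., 1.]]
# so a pixel [x, y, 1] maps to [x + 0.1 * y - 0.2, y, 1]: rows are shifted
# horizontally in proportion to y, centered around the canvas mid-height.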
def translation_matrix(x, y):
"""Construct a spatial transformation matrix for translation.
Args:
x (float): the horizontal translation. Scalar or vector.
y (float): the vertical translation. Scalar or vector.
Returns:
fp32 tensor (3, 3), spatial transformation matrix if x and y are scalars. If
x and y are vectors, (len(x), 3, 3).
"""
x = tf.cast(tf.convert_to_tensor(value=x), tf.float32)
y = tf.cast(tf.convert_to_tensor(value=y), tf.float32)
zero = tf.zeros_like(x)
one = tf.ones_like(x)
m = tf.stack([one, zero, zero, zero, one, zero, x, y, one], axis=-1)
shape = [-1, 3, 3] if x.shape.ndims == 1 else [3, 3]
return tf.reshape(m, shape)
def zoom_matrix(ratio, width=None, height=None):
"""Construct a spatial transformation matrix for zooming.
Note that if width and height are supplied, it will perform a center-zoom by translation.
Args:
ratio (float or tuple(2) of float): the zoom ratio. If a tuple of length 2 is supplied,
they distinguish between the horizontal and vertical zooming. Scalar or vector, or
a tuple of scalars or vectors.
width (int): the width of the canvas. Used for center-zooming. Defaults to None (no added
translation).
height (int): the height of the canvas. Used for center-zooming. Defaults to None (no added
translation).
Returns:
fp32 tensor (3, 3), spatial transformation matrix if ratio is scalar. If
ratio is a vector, (len(ratio), 3, 3).
"""
if isinstance(ratio, tuple) and len(ratio) == 2:
r_x, r_y = ratio
else:
r_x, r_y = ratio, ratio
r_x = tf.cast(tf.convert_to_tensor(value=r_x), tf.float32)
r_y = tf.cast(tf.convert_to_tensor(value=r_y), tf.float32)
zero = tf.zeros_like(r_x)
one = tf.ones_like(r_x)
if (width is None) ^ (height is None):
raise ValueError(
"Variables `width` and `height` should both be defined, or both `None`."
)
elif width is not None and height is not None:
x_t = (width - width * r_x) * 0.5
y_t = (height - height * r_y) * 0.5
else:
x_t = zero
y_t = zero
m = tf.stack([r_x, zero, zero, zero, r_y, zero, x_t, y_t, one], axis=-1)
shape = [-1, 3, 3] if r_x.shape.ndims == 1 else [3, 3]
return tf.reshape(m, shape)
def random_flip_matrix(
horizontal_probability, vertical_probability, width, height, batch_size=None
):
"""Create random horizontal and vertical flip transformation matrix.
Args:
horizontal_probability (float): The probability that a left-right flip will occur.
vertical_probability (float): The probability that a top-bottom flip will occur.
width (int): the width of the image canvas.
height (int): the height of the image canvas.
batch_size (int): If None, return a single matrix, else return a batch of matrices.
Returns:
(tf.Tensor) If batch_size is None, a spatial transformation matrix of shape (3,3)
and type tf.float32. If batch_size is not None, a tensor of shape (batch_size,3,3).
"""
batch_shape = [] if batch_size is None else [batch_size]
flip_lr_flag = tf.less(
tf.random.uniform(batch_shape, 0.0, 1.0), horizontal_probability
)
flip_tb_flag = tf.less(
tf.random.uniform(batch_shape, 0.0, 1.0), vertical_probability
)
return flip_matrix(
horizontal=flip_lr_flag, vertical=flip_tb_flag, width=width, height=height
)
def random_shear_matrix(
max_ratio_x,
max_ratio_y,
width,
height,
batch_size=None,
min_ratio_x=None,
min_ratio_y=None,
):
"""Create random shear transformation matrix.
Args:
max_ratio_x (float): The higher bound for the uniform distribution from which a
float will be picked to shear horizontally.
max_ratio_y (float): The higher bound for the uniform distribution from which a
float will be picked to shear vertically.
width (int): The width of the image canvas.
height (int): The height of the image canvas.
batch_size (int): If None, return a single matrix, else return a batch of matrices.
min_ratio_x (float): The lower bound for the uniform distribution from which a
float will be picked to shear horizontally. If unspecified, defaults to
-max_ratio_x.
min_ratio_y (float): The lower bound for the uniform distribution from which a
float will be picked to shear vertically. If unspecified, defaults to
-max_ratio_y.
Returns:
(tf.Tensor) If batch_size is None, a spatial transformation matrix of shape (3,3)
and type tf.float32. If batch_size is not None, a tensor of shape (batch_size,3,3).
"""
if min_ratio_x is None:
min_ratio_x = -max_ratio_x
if min_ratio_y is None:
min_ratio_y = -max_ratio_y
batch_shape = [] if batch_size is None else [batch_size]
s_x = tf.random.uniform(
batch_shape, minval=min_ratio_x, maxval=max_ratio_x, dtype=tf.float32
)
s_y = tf.random.uniform(
batch_shape, minval=min_ratio_y, maxval=max_ratio_y, dtype=tf.float32
)
return shear_matrix(s_x, s_y, width, height)
def random_translation_matrix(max_x, max_y, batch_size=None, min_x=None, min_y=None):
"""Create random translation transformation matrix.
Args:
max_x (int): The higher bound for the uniform distribution from which an integer will
be picked to translate horizontally.
max_y (int): The higher bound for the uniform distribution from which an integer will
be picked to translate vertically.
batch_size (int): If None, return a single matrix, else return a batch of matrices.
min_x (int): The lower bound for the uniform distribution from which an integer will be
picked to translate horizontally. If unspecified, defaults to -max_x.
min_y (int): The lower bound for the uniform distribution from which an integer will be
picked to translate vertically. If unspecified, defaults to -max_y.
Returns:
(tf.Tensor) If batch_size is None, a spatial transformation matrix of shape (3,3)
and type tf.float32. If batch_size is not None, a tensor of shape (batch_size,3,3).
"""
if min_x is None:
min_x = -max_x
if min_y is None:
min_y = -max_y
batch_shape = [] if batch_size is None else [batch_size]
t_x = tf.random.uniform(batch_shape, minval=min_x, maxval=max_x + 1, dtype=tf.int32)
t_y = tf.random.uniform(batch_shape, minval=min_y, maxval=max_y + 1, dtype=tf.int32)
return translation_matrix(x=t_x, y=t_y)
def random_zoom_matrix(ratio_min, ratio_max, width, height, batch_size=None):
"""Create random zoom transformation matrix.
Args:
ratio_min (float): The lower bound of the zooming ratio's uniform distribution.
A zooming ratio of 1.0 will not affect the image, while values higher than 1 will
result in 'zooming out' (image gets rendered smaller than the canvas), and vice versa
for values below 1.0.
ratio_max (float): The upper bound of the zooming ratio's uniform distribution.
A zooming ratio of 1.0 will not affect the image, while values higher than 1 will
result in 'zooming out' (image gets rendered smaller than the canvas), and vice versa
for values below 1.0.
width (int): The width of the image canvas.
height (int): The height of the image canvas.
batch_size (int): If None, return a single matrix, else return a batch of matrices.
Returns:
(tf.Tensor) If batch_size is None, a spatial transformation matrix of shape (3,3)
and type tf.float32. If batch_size is not None, a tensor of shape (batch_size,3,3).
"""
batch_shape = [] if batch_size is None else [batch_size]
ratio = tf.random.uniform(
batch_shape, minval=ratio_min, maxval=ratio_max, dtype=tf.float32
)
t_x = tf.random.uniform(
batch_shape, minval=0, maxval=(width - (width / ratio)), dtype=tf.float32
)
t_y = tf.random.uniform(
batch_shape, minval=0, maxval=(height - (height / ratio)), dtype=tf.float32
)
scale_stm = zoom_matrix(ratio=ratio)
translate_stm = translation_matrix(x=-t_x, y=-t_y)
return tf.matmul(translate_stm, scale_stm)
def random_rotation_matrix(
rotate_rad_max, width, height, batch_size=None, rotate_rad_min=None
):
"""Create random rotation transformation matrix.
Args:
rotate_rad_max (float): Maximum rotation angle. The final rotation angle will be bounded
by [rotate_rad_min, rotate_rad_max], following a uniform distribution.
width (int): The width of the image canvas.
height (int): The height of the image canvas.
batch_size (int): If None, return a single matrix, else return a batch of matrices.
rotate_rad_min (float): Minimum rotation angle. If unspecified, defaults to -rotate_rad_max.
Returns:
(tf.Tensor) If batch_size is None, a spatial transformation matrix of shape (3,3)
and type tf.float32. If batch_size is not None, a tensor of shape (batch_size,3,3).
"""
if rotate_rad_min is None:
rotate_rad_min = -rotate_rad_max
batch_shape = [] if batch_size is None else [batch_size]
angle = tf.random.uniform(batch_shape, minval=rotate_rad_min, maxval=rotate_rad_max)
return rotation_matrix(angle, width, height)
def get_spatial_transformation_matrix(
width,
height,
stm=None,
flip_lr=False,
translate_x=0,
translate_y=0,
zoom_ratio=1.0,
rotate_rad=0.0,
shear_ratio_x=0.0,
shear_ratio_y=0.0,
batch_size=None,
):
"""
The spatial transformation matrix (stm) generator used for augmentation.
This function creates a spatial transformation matrix (stm) that can be used for
generic data augmentation, usually images or coordinates.
The order of spatial transforms: flip, rotation, zoom, translation and shear.
Args:
width (int): the width of the image canvas.
height (int): the height of the image canvas.
stm ((3,3) fp32 Tensor or None): A base spatial transformation matrix that the
transformation generated by this function is composed with; the result is used to
transform images and coordinates spatially. If ``None`` (default), an identity
matrix is used as the starting point.
flip_lr (bool): Flag to indicate whether to flip the image or not.
translate_x (int): The amount by which to translate the image horizontally.
translate_y (int): The amount by which to translate the image vertically.
zoom_ratio (float): The ratio by which to zoom into the image. A zooming ratio of 1.0
will not affect the image, while values higher than 1 will result in 'zooming out'
(image gets rendered smaller than the canvas), and vice versa for values below 1.0.
rotate_rad (float): The rotation in radians.
shear_ratio_x (float): The amount to shear the horizontal direction per y row.
shear_ratio_y (float): The amount to shear the vertical direction per x column.
batch_size (int): If None, return a single matrix, else return a batch of matrices.
Returns:
(tf.Tensor) If batch_size is None, a spatial transformation matrix of shape (3,3)
and type tf.float32. If batch_size is not None, a tensor of shape (batch_size,3,3).
"""
return get_random_spatial_transformation_matrix(
width=width,
height=height,
stm=stm,
flip_lr_prob=1.0 if flip_lr else 0.0,
translate_max_x=translate_x,
translate_min_x=translate_x,
translate_max_y=translate_y,
translate_min_y=translate_y,
zoom_ratio_min=zoom_ratio,
zoom_ratio_max=zoom_ratio,
rotate_rad_max=rotate_rad,
rotate_rad_min=rotate_rad,
shear_max_ratio_x=shear_ratio_x,
shear_min_ratio_x=shear_ratio_x,
shear_max_ratio_y=shear_ratio_y,
shear_min_ratio_y=shear_ratio_y,
batch_size=batch_size,
)
def get_random_spatial_transformation_matrix(
width,
height,
stm=None,
flip_lr_prob=0.0,
flip_tb_prob=0.0,
translate_max_x=0,
translate_min_x=None,
translate_max_y=0,
translate_min_y=None,
zoom_ratio_min=1.0,
zoom_ratio_max=1.0,
rotate_rad_max=0.0,
rotate_rad_min=None,
shear_max_ratio_x=0.0,
shear_min_ratio_x=None,
shear_max_ratio_y=0.0,
shear_min_ratio_y=None,
batch_size=None,
):
"""
The spatial transformation matrix (stm) generator used for random augmentation.
This function creates a random spatial transformation matrix (stm) that can be used for
generic data augmentation, usually images or coordinates. The flipping, rotation, translation
and zooming all have independent probabilities. The RNG always draws from a uniform distribution.
Translation is lossless, as it picks discrete integers.
The order of spatial transforms: flip, rotation, zoom, translation and shear.
Args:
width (int): the width of the image canvas.
height (int): the height of the image canvas.
stm ((3,3) fp32 Tensor or None): A base spatial transformation matrix that the
random transformation generated by this function is composed with; the result is
used to transform images and coordinates spatially. If ``None`` (default), an
identity matrix is used as the starting point.
flip_lr_prob (float): The probability that a left-right (horizontal) flip will occur.
flip_tb_prob (float): The probability that a top-bottom (vertical) flip will occur.
translate_max_x (int): If translation occurs, this is the higher bound of the
uniform distribution from which an integer will be picked to translate horizontally.
translate_min_x (int): If translation occurs, this is the lower bound of the uniform
distribution from which an integer will be picked to translate horizontally. If
unspecified, it defaults to -translate_max_x.
translate_max_y (int): If translation occurs, this is the higher bound of the
uniform distribution from which an integer will be picked to translate vertically.
translate_min_y (int): If translation occurs, this is the lower bound of the
uniform distribution from which an integer will be picked to translate vertically.
If unspecified, it defaults to -translate_max_y.
zoom_ratio_min (float): The lower bound of the zooming ratio's uniform distribution.
A zooming ratio of 1.0 will not affect the image, while values higher than 1 will
result in 'zooming out' (image gets rendered smaller than the canvas), and vice versa
for values below 1.0.
zoom_ratio_max (float): The upper bound of the zooming ratio's uniform distribution.
A zooming ratio of 1.0 will not affect the image, while values higher than 1 will
result in 'zooming out' (image gets rendered smaller than the canvas), and vice versa
for values below 1.0.
rotate_rad_max (float): The maximal allowed rotation in radians.
rotate_rad_min (float): The minimum allowed rotation in radians. If unspecified,
defaults to -rotate_rad_max.
shear_max_ratio_x (float): The maximal allowed shearing of horizontal directions
per y row.
shear_min_ratio_x (float): The minimal allowed shearing of horizontal directions
per y row. If unspecified, defaults to -shear_max_ratio_x.
shear_max_ratio_y (float): The maximal allowed shearing of vertical directions
per x column.
shear_min_ratio_y (float): The minimal allowed shearing of vertical directions
per x column. If unspecified, defaults to -shear_max_ratio_y.
batch_size (int): If None, return a single matrix, else return a batch of matrices.
Returns:
(tf.Tensor) If batch_size is None, a spatial transformation matrix of shape (3,3)
and type tf.float32. If batch_size is not None, a tensor of shape (batch_size,3,3).
"""
# Initialize the spatial transform matrix as a 3x3 identity matrix
if stm is None:
batch_shape = [] if batch_size is None else [batch_size]
stm = tf.eye(3, batch_shape=batch_shape, dtype=tf.float32)
# Apply horizontal flipping.
flip = random_flip_matrix(flip_lr_prob, flip_tb_prob, width, height, batch_size)
stm = tf.matmul(stm, flip)
# Apply rotation transform.
rotate_transformation = random_rotation_matrix(
rotate_rad_max, width, height, batch_size, rotate_rad_min
)
stm = tf.matmul(stm, rotate_transformation)
# Apply zoom transform.
zoom_transformation = random_zoom_matrix(
zoom_ratio_min, zoom_ratio_max, width, height, batch_size
)
stm = tf.matmul(stm, zoom_transformation)
# Apply translation.
translate_transformation = random_translation_matrix(
translate_max_x, translate_max_y, batch_size, translate_min_x, translate_min_y
)
stm = tf.matmul(stm, translate_transformation)
# Apply shear transform.
shear_transformation = random_shear_matrix(
shear_max_ratio_x,
shear_max_ratio_y,
width,
height,
batch_size,
shear_min_ratio_x,
shear_min_ratio_y,
)
stm = tf.matmul(stm, shear_transformation)
return stm
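# Illustrative usage sketch (editorial addition, not part of the original module):
# builds a batch of random spatial transformation matrices. The canvas size,
# probabilities and ranges below are hypothetical.
if __name__ == "__main__":
    stms = get_random_spatial_transformation_matrix(
        width=960,
        height=544,
        flip_lr_prob=0.5,
        translate_max_x=16,
        translate_max_y=16,
        zoom_ratio_min=1.0,
        zoom_ratio_max=1.2,
        rotate_rad_max=0.1,
        batch_size=4,
    )
    with tf.compat.v1.Session() as sess:
        print(sess.run(stms).shape)  # Expected: (4, 3, 3)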
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/spatial.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform random rotations on examples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.augment import spatial
from nvidia_tao_tf1.core.processors.processors import Processor
from nvidia_tao_tf1.core.types import Transform
class RandomRotation(Processor):
"""Random rotate transform."""
@save_args
def __init__(self, min_angle, max_angle, probability, **kwargs):
"""Construct a RandomRotation processor.
Args:
min_angle (float): Minimum angle in degrees.
max_angle (float): Maximum angle in degrees.
probability (float): Probability at which rotation is performed.
kwargs (dict): keyword arguments passed to parent class.
"""
super(RandomRotation, self).__init__(**kwargs)
if probability < 0.0 or probability > 1.0:
raise ValueError(
"RandomRotation.probability ({}) is not within the range [0.0, 1.0].".format(
probability
)
)
if min_angle < -360.0:
raise ValueError(
"RandomRotation.min_angle ({}) is smaller than -360.0 degrees.".format(
min_angle
)
)
if max_angle > 360.0:
raise ValueError(
"RandomRotation.max_angle ({}) is greater than 360.0 degrees.".format(
max_angle
)
)
if min_angle > max_angle:
raise ValueError(
"RandomRotation.min_angle ({})"
" is greater than RandomRotation.max_angle ({}).".format(
min_angle, max_angle
)
)
self._probability = probability
self._min_angle = min_angle
self._max_angle = max_angle
def __repr__(self):
"""Return a string representation of the processor."""
return "RandomRotation(min_angle={}, max_angle={}, probability={})".format(
self._min_angle, self._max_angle, self._probability
)
def call(self, transform):
"""Return a Transform whose spatial transformation matrix is perturbed at random.
Args:
transform (Transform): An input Transform instance to be processed.
Returns:
Transform: Final Transform instance with spatial transform matrix perturbed.
"""
if not isinstance(transform, Transform):
raise TypeError(
"Expecting an argument of type 'Transform', "
"given: {}.".format(type(transform).__name__)
)
batch_shape = []
if transform.spatial_transform_matrix.shape.ndims == 3:
batch_size = tf.shape(input=transform.spatial_transform_matrix)[0]
batch_shape = [batch_size]
angle = tf.random.uniform(
batch_shape,
minval=math.radians(self._min_angle),
maxval=math.radians(self._max_angle),
)
rotate_stm = spatial.rotation_matrix(
angle,
width=transform.canvas_shape.width,
height=transform.canvas_shape.height,
)
should_rotate = tf.less_equal(
tf.random.uniform(batch_shape, 0.0, 1.0), self._probability
)
next_stm = tf.compat.v1.where(
should_rotate,
tf.matmul(rotate_stm, transform.spatial_transform_matrix),
transform.spatial_transform_matrix,
)
return Transform(
spatial_transform_matrix=next_stm,
color_transform_matrix=transform.color_transform_matrix,
canvas_shape=transform.canvas_shape,
)
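# Illustrative usage sketch (editorial addition, not part of the original module):
# rotates a batch of identity spatial transforms by a random angle in
# [-10, 10] degrees with probability 0.5. The canvas size and batch size are
# hypothetical; Canvas2D is imported here for the demo only.
if __name__ == "__main__":
    from nvidia_tao_tf1.core.types import Canvas2D

    processor = RandomRotation(min_angle=-10.0, max_angle=10.0, probability=0.5)
    batched_transform = Transform(
        canvas_shape=Canvas2D(height=544, width=960),
        color_transform_matrix=tf.eye(4, batch_shape=[4], dtype=tf.float32),
        spatial_transform_matrix=tf.eye(3, batch_shape=[4], dtype=tf.float32),
    )
    rotated = processor(batched_transform)
    with tf.compat.v1.Session() as sess:
        print(sess.run(rotated.spatial_transform_matrix).shape)  # Expected: (4, 3, 3)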
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/random_rotation.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying crop transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.augment import spatial
from nvidia_tao_tf1.core.processors.processors import Processor
from nvidia_tao_tf1.core.types import Canvas2D, Transform
class Crop(Processor):
"""Crop transform processor."""
@save_args
def __init__(self, left, top, right, bottom, **kwargs):
"""Construct a crop processor.
The origin of the coordinate system is at the top-left corner. Coordinates keep increasing
from left to right and from top to bottom.
            top
         ----------
   left |          |
        |          | right
         ----------
           bottom
Args:
left (int): Left edge before which contents will be discarded.
top (int): Top edge above which contents will be discarded.
right (int): Right edge after which contents will be discarded.
bottom (int): Bottom edge after which contents will be discarded.
"""
super(Crop, self).__init__(**kwargs)
if left < 0:
raise ValueError("Crop.left ({}) is not positive.".format(left))
if top < 0:
raise ValueError("Crop.top ({}) is not positive.".format(top))
if right < 0:
raise ValueError("Crop.right ({}) is not positive.".format(right))
if bottom < 0:
raise ValueError("Crop.bottom ({}) is not positive.".format(bottom))
if right <= left:
raise ValueError(
"Crop.right ({}) should be greater than Crop.left ({}).".format(
right, left
)
)
if bottom <= top:
raise ValueError(
"Crop.bottom ({}) should be greater than Crop.top ({}).".format(
bottom, top
)
)
self._left = left
self._top = top
self._right = right
self._bottom = bottom
def __repr__(self):
"""Return a string representation of the processor."""
return "Crop(left={}, top={}, right={}, bottom={})".format(
self._left, self._top, self._right, self._bottom
)
def call(self, transform):
"""Return a Transform that defines the Crop transformation.
Args:
transform (Transform): An input Transform instance to be processed.
Returns:
Transform: Final Transform instance representing a crop transform.
"""
if not isinstance(transform, Transform):
raise TypeError(
"Expecting an argument of type 'Transform', "
"given: {}.".format(type(transform).__name__)
)
# Translate top left corner up and towards the left to move it outside of the canvas.
# Translation is expressed in sizes relative to the original canvas.
translate_stm = spatial.translation_matrix(x=self._left, y=self._top)
translate_stm = tf.broadcast_to(
translate_stm, tf.shape(input=transform.spatial_transform_matrix)
)
# Reduce canvas size at bottom and right edges to move them outside of the canvas.
final_shape = Canvas2D(
width=self._right - self._left, height=self._bottom - self._top
)
processed_stm = tf.matmul(translate_stm, transform.spatial_transform_matrix)
return Transform(
canvas_shape=final_shape,
color_transform_matrix=transform.color_transform_matrix,
spatial_transform_matrix=processed_stm,
)
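# Illustrative usage sketch (editorial addition, not part of the original module):
# crops a 960x544 canvas down to the 100x100 region starting at (left=10, top=20).
# The concrete coordinates are hypothetical.
if __name__ == "__main__":
    crop = Crop(left=10, top=20, right=110, bottom=120)
    identity_transform = Transform(
        canvas_shape=Canvas2D(height=544, width=960),
        color_transform_matrix=tf.eye(4, dtype=tf.float32),
        spatial_transform_matrix=tf.eye(3, dtype=tf.float32),
    )
    cropped = crop(identity_transform)
    print(cropped.canvas_shape)  # Canvas of width 100 and height 100.
    with tf.compat.v1.Session() as sess:
        # Row-major translation by (left, top) appears in the last row of the 3x3 stm.
        print(sess.run(cropped.spatial_transform_matrix))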
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/crop.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RandomFlip processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from six.moves import mock
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object
from nvidia_tao_tf1.core.processors import RandomFlip
from nvidia_tao_tf1.core.processors.augment.spatial import flip_matrix
from nvidia_tao_tf1.core.types import Canvas2D, Transform
@pytest.mark.parametrize(
"horizontal_probability, vertical_probability, message",
[
(
2,
0,
"RandomFlip.horizontal_probability (2)"
" is not within the range [0.0, 1.0].",
),
(
-1,
0,
"RandomFlip.horizontal_probability (-1)"
" is not within the range [0.0, 1.0].",
),
(
0,
2,
"RandomFlip.vertical_probability (2)"
" is not within the range [0.0, 1.0].",
),
(
0,
-1,
"RandomFlip.vertical_probability (-1)"
" is not within the range [0.0, 1.0].",
),
],
)
def test_invalid_flip_probability(
horizontal_probability, vertical_probability, message
):
"""Test RandomFlip processor constructor error handling on invalid flip probability."""
with pytest.raises(ValueError) as exc:
RandomFlip(
horizontal_probability=horizontal_probability,
vertical_probability=vertical_probability,
)
assert str(exc.value) == message
@mock.patch("nvidia_tao_tf1.core.processors.augment.random_flip.spatial.random_flip_matrix")
@pytest.mark.parametrize(
"horizontal_probability, vertical_probability", [(0.0, 0.0), (0.5, 0.5), (1.0, 1.0)]
)
def test_random_flip_call(
mocked_random_flip_matrix, horizontal_probability, vertical_probability
):
"""Test RandomFlip processor call."""
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4),
spatial_transform_matrix=tf.eye(3),
)
mocked_random_flip_matrix.return_value = tf.eye(3)
processor = RandomFlip(
horizontal_probability=horizontal_probability,
vertical_probability=vertical_probability,
)
processor(transform)
mocked_random_flip_matrix.assert_called_with(
horizontal_probability=horizontal_probability,
vertical_probability=vertical_probability,
height=12,
width=10,
batch_size=None,
)
def test_random_flip_call_with_invalid_input():
"""Test RandomTranslation processor call error handling on invalid input types."""
# Calling RandomTranslation with str should throw a TypeError.
with pytest.raises(TypeError):
RandomFlip(0)("Transform")
@mock.patch("nvidia_tao_tf1.core.processors.augment.spatial.tf.random.uniform")
@pytest.mark.parametrize(
"batch_size", [None, 5, tf.compat.v1.placeholder(dtype=tf.int32)]
)
def test_random_flip(mocked_random_uniform, batch_size):
"""Test RandomFlip processor."""
batch_shape = [] if batch_size is None else [batch_size]
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4, batch_shape=batch_shape, dtype=tf.float32),
spatial_transform_matrix=tf.eye(3, batch_shape=batch_shape, dtype=tf.float32),
)
feed_dict = {}
if type(batch_size) == tf.Tensor:
feed_dict = {batch_size: 7}
rnd_prob = 0.0
expected_x = True
expected_y = True
if batch_size is not None:
# Generate a sequence of probabilities [0., 1., 0., 1., ...] so that every second
# sample gets randomly transformed.
float_batch_size = tf.cast(batch_size, tf.float32)
rnd_prob = tf.math.floormod(
tf.linspace(0.0, float_batch_size - 1.0, batch_size), 2.0
)
expected_x = tf.cast(1.0 - rnd_prob, tf.bool)
expected_y = tf.cast(1.0 - rnd_prob, tf.bool)
mocked_random_uniform.return_value = rnd_prob
processor = RandomFlip(horizontal_probability=0.5, vertical_probability=0.5)
stm = processor(transform)
expected_stm = flip_matrix(
horizontal=expected_x, vertical=expected_y, width=10, height=12
)
if batch_size is None:
assert expected_stm.shape.ndims == 2
else:
assert expected_stm.shape.ndims == 3
stm, expected_stm = tf.compat.v1.Session().run(
[stm.spatial_transform_matrix, expected_stm], feed_dict=feed_dict
)
np.testing.assert_equal(stm, expected_stm)
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
processor = RandomFlip(horizontal_probability=0.5)
processor_dict = processor.serialize()
deserialized_processor = deserialize_tao_object(processor_dict)
assert (
processor._horizontal_probability
== deserialized_processor._horizontal_probability
)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_random_flip.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus Blur Processors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.processors import Processor
class Blur(Processor):
"""Base class for blur transforms."""
@save_args
def __init__(self, random=True, **kwargs):
"""__init__ function for blur class."""
super(Blur, self).__init__(**kwargs)
self.random = random
def _convolve_filter(self, images, kernels):
"""Convolve a filter channel-wise."""
image_blurs = []
for idx in range(images.shape[1]):
# isolate a channel to blur
image = tf.cast(tf.expand_dims(images[:, idx, ...], axis=1), tf.float32)
blurred_channel = tf.nn.conv2d(
input=image,
filters=kernels,
strides=[1, 1, 1, 1],
data_format="NCHW",
padding="SAME",
name="gaussian_blur",
)
image_blurs.append(blurred_channel)
blurred = tf.concat(image_blurs, axis=1)
return tf.cast(blurred, images.dtype)
def _make_gaussian_kernel(self, size, std):
"""Make 2D gaussian Kernel for convolution.
see:
https://stackoverflow.com/questions/52012657/how-to-make-a-2d-gaussian-filter-in-tensorflow
"""
if self.random and size > 1:
size = tf.random.uniform(minval=1, maxval=size, dtype=tf.int32, shape=[])
size = tf.cast(size, dtype=tf.float32)
std = tf.random.uniform(minval=0, maxval=std, dtype=tf.float32, shape=[])
if std is None or std == 0:
# Set std if not specified.
std = (
tf.multiply(tf.multiply(tf.cast(size, tf.float32) - 1, 0.5) - 1, 0.3)
+ 0.8
)
d = tf.compat.v1.distributions.Normal(
tf.cast(0.0, tf.float32), tf.cast(std, tf.float32)
)
vals = d.prob(tf.range(start=-size, limit=size + 1, dtype=tf.float32))
gauss_kernel = tf.einsum("i,j->ij", vals, vals)
gauss_kernel /= tf.reduce_sum(input_tensor=gauss_kernel)
gauss_kernel = tf.cast(gauss_kernel, tf.float32)
gauss_kernel = gauss_kernel[:, :, tf.newaxis, tf.newaxis]
kernels = gauss_kernel
return kernels
def _gaussian_blur(self, images, size, std):
"""Make a gaussian blur of the image."""
kernels = self._make_gaussian_kernel(size, std)
return self._convolve_filter(images, kernels)
def call(self, images, size=1, std=0, prob=0.5):
"""Blur the image.
Args:
images (tensor): A tensor of images in NCHW format.
size (int): The size of the gaussian filter for blurring
If random, then a filter size will be picked uniformly
from the range [1, size].
std (float): The standard deviation of the gaussian filter
for blurring. If random then the standard deviation will
be picked uniformly from the range [0, std].
prob (float): The probability of applying the blur to the image.
Only applicable if random; otherwise the blur is always applied.
Outputs:
The blurred image.
"""
assert size >= 1, "Gaussian kernel size must be a positive integer."
if self.random:
application_prob = tf.random.uniform(shape=[], maxval=1.0)
no_aug_cond = tf.greater(application_prob, prob)
return tf.cond(
pred=no_aug_cond,
true_fn=lambda: images,
false_fn=lambda: self._gaussian_blur(images, size=size, std=std),
)
return self._gaussian_blur(images, size=size, std=std)
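# Illustrative usage sketch (editorial addition, not part of the original module):
# applies a fixed gaussian blur (size=3 spans offsets -3..3, i.e. a 7x7 kernel)
# to a dummy NCHW batch. Shapes and parameter values are hypothetical.
if __name__ == "__main__":
    blur = Blur(random=False)
    dummy_images = tf.random.uniform(shape=(1, 3, 64, 64), dtype=tf.float32)
    blurred_images = blur(dummy_images, size=3, std=1.0)
    # Graph construction only; run inside a tf.compat.v1.Session on a device
    # that supports NCHW convolutions to materialize the result.
    print(blurred_images.shape)  # Expected static shape: (1, 3, 64, 64)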
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/blur.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Crop processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object
from nvidia_tao_tf1.core.processors import Crop
from nvidia_tao_tf1.core.types import Canvas2D, Transform
@pytest.mark.parametrize(
"left, top, right, bottom, message",
[
(-1, 0, 1, 1, "Crop.left (-1) is not positive."),
(0, -1, 1, 1, "Crop.top (-1) is not positive."),
(0, 0, -1, 1, "Crop.right (-1) is not positive."),
(0, 0, 1, -1, "Crop.bottom (-1) is not positive."),
(2, 0, 1, 1, "Crop.right (1) should be greater than Crop.left (2)."),
(0, 2, 1, 1, "Crop.bottom (1) should be greater than Crop.top (2)."),
],
)
def test_invalid_crop_parameters(left, top, right, bottom, message):
"""Test Scale processor constructor error handling on invalid arguments."""
with pytest.raises(ValueError) as exc:
Crop(left=left, top=top, right=right, bottom=bottom)
assert str(exc.value) == message
@pytest.mark.parametrize(
"batch_size", [None, 4, tf.compat.v1.placeholder(dtype=tf.int32)]
)
@pytest.mark.parametrize(
"left, top, right, bottom", [(0, 0, 1, 1), (2, 2, 5, 5), (3, 3, 10, 10)]
)
def test_crop_call(left, top, right, bottom, batch_size):
"""Test Crop processor call"""
batch_shape = [] if batch_size is None else [batch_size]
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4, batch_shape=batch_shape, dtype=tf.float32),
spatial_transform_matrix=tf.eye(3, batch_shape=batch_shape, dtype=tf.float32),
)
feed_dict = {}
expected_batch_size = batch_size
if type(batch_size) == tf.Tensor:
expected_batch_size = 7
feed_dict = {batch_size: expected_batch_size}
processor = Crop(left=left, top=top, right=right, bottom=bottom)
expected_stm = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [left, top, 1.0]])
expected_ctm = np.eye(4)
if batch_size is not None:
expected_stm = np.tile(expected_stm, [expected_batch_size, 1, 1])
expected_ctm = np.tile(expected_ctm, [expected_batch_size, 1, 1])
expected_shape = Canvas2D(width=right - left, height=bottom - top)
final_transform = processor(transform)
ctm, stm = tf.compat.v1.Session().run(
[
final_transform.color_transform_matrix,
final_transform.spatial_transform_matrix,
],
feed_dict=feed_dict,
)
np.testing.assert_equal(ctm, expected_ctm)
np.testing.assert_equal(stm, expected_stm)
assert final_transform.canvas_shape == expected_shape
def test_crop_call_with_invalid_input():
"""Test Crop processor call error handling on invalid input types."""
# Calling Crop with str should throw a TypeError.
with pytest.raises(TypeError):
Crop(0, 0, 1, 1)("Transform")
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
processor = Crop(left=2, top=2, right=5, bottom=5)
processor_dict = processor.serialize()
deserialized_processor = deserialize_tao_object(processor_dict)
assert processor._left == deserialized_processor._left
assert processor._top == deserialized_processor._top
assert processor._right == deserialized_processor._right
assert processor._bottom == deserialized_processor._bottom
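# Illustrative sketch: the expected_stm used above encodes a pure translation in
# row-vector convention, so a homogeneous output coordinate [x, y, 1] of the crop
# maps to [x + left, y + top, 1] in the source image. A tiny numpy check, under
# that assumed convention:
if __name__ == "__main__":
    left, top = 2, 2
    stm = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [left, top, 1.0]])
    out_coord = np.array([0.0, 0.0, 1.0])  # top-left pixel of the cropped output
    print(out_coord @ stm)  # -> [2. 2. 1.], i.e. (left, top) in the source image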
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_crop.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Blur class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
import numpy as np
import pytest
import tensorflow as tf
import nvidia_tao_tf1.core
from nvidia_tao_tf1.core.processors.augment.blur import Blur
test_dir = "nvidia_tao_tf1/core/processors/augment/test_data/blur/"
test_inputs = [
(4, 7, "_max_size_0"),
(7, 7, "_max_size_1"),
(10, 7, "_max_size_2"),
(7, 5, "_max_std_0"),
(7, 10, "_max_std_1"),
(7, 15, "_max_std_2"),
]
@pytest.mark.parametrize("width", [160])
@pytest.mark.parametrize("height", [240])
@pytest.mark.parametrize("size,std,post", test_inputs)
@pytest.mark.parametrize("random", [True, False])
def test_blur(width, height, size, std, post, random, tmpdir):
"""Iterate through every augmentation and run it.
Load a correctly augmented image to compare against.
"""
transform = Blur(random=random)
np.random.seed(17)
nvidia_tao_tf1.core.utils.set_random_seed(17)
tf.compat.v1.set_random_seed(17)
filegroup = "uniform_gaussian_blur"
if random:
filegroup = "random_uniform_gaussian_blur"
sess = tf.compat.v1.Session()
test_img = cv2.imread(test_dir + "test_image.jpg")
test_img = cv2.resize(test_img, (height, width))
test_img = np.transpose(test_img, [2, 0, 1])
test_img = np.expand_dims(test_img, 0)
test_img = test_img.astype(float) / 255.0
aug_img = sess.run(transform(test_img, size=size, std=std))
filename = os.path.join(test_dir, filegroup + post + ".npy")
aug_img = np.squeeze(aug_img, 0)
aug_img = np.transpose(aug_img, [1, 2, 0])
aug_img = (aug_img * 255).astype(np.dtype("int8"))
target_img = np.load(filename)
np.testing.assert_allclose(aug_img, target_img, atol=1.0)
test_inputs_random = [(1, 7, 0.0), (1, 0, 1.0), (2, 1, 0.5), (10, 0.2, 0.7)]
@pytest.mark.parametrize("width", [160])
@pytest.mark.parametrize("height", [240])
@pytest.mark.parametrize("size,std,prob", test_inputs_random)
def test_random_pixel_removal(width, height, size, std, prob, tmpdir):
"""Run random augmentations to make sure they work as expected."""
transform = Blur(random=True)
sess = tf.compat.v1.Session()
test_img = cv2.imread(test_dir + "test_image.jpg")
test_img = cv2.resize(test_img, (width, height))
test_img = np.transpose(test_img, [2, 0, 1])
test_img = np.expand_dims(test_img, 0)
test_img = test_img.astype(np.float32) / 255.0
aug_img = sess.run(transform(test_img, size=size, std=std, prob=prob))
if prob == 0.0:
np.testing.assert_equal(aug_img, test_img)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_blur.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Additive Noise class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.core.processors.augment.additive_noise import AdditiveNoise
from nvidia_tao_tf1.core.utils import set_random_seed
@pytest.mark.parametrize("var", [0.05, 0.3, 0.55])
@pytest.mark.parametrize("width", [450, 960])
@pytest.mark.parametrize("height", [960, 450])
def test_additive_gaussian_noise(var, width, height, tmpdir):
"""Test the additive gaussian noise class."""
# Use fixed seed to remove test flakiness.
set_random_seed(42)
transform = AdditiveNoise(random=False)
test_image = np.random.random((1, 3, width, height))
aug_image = transform(test_image, var)
sess = tf.compat.v1.Session()
aug_image = sess.run(aug_image)
diff = test_image - aug_image
diff = diff.flatten()
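# Allow roughly three times var / sqrt(N) so the zero-mean check on the added
# noise is robust to sampling variation.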
tolerance = var / np.sqrt(len(diff)) * 3
assert np.isclose(np.mean(diff), 0, atol=tolerance)
@pytest.mark.parametrize("width", [160])
@pytest.mark.parametrize("height", [240])
@pytest.mark.parametrize("var, prob", [(0, 1.0), (0.5, 0.0), (0.2, 1.0), (0.5, 1)])
def test_random_pixel_removal(width, height, var, prob, tmpdir):
"""Run random augmentations to make sure they work as expected."""
transform = AdditiveNoise(random=True)
sess = tf.compat.v1.Session()
test_img = np.random.random((1, 3, width, height))
aug_img = sess.run(transform(test_img, var=var, prob=prob))
if prob == 0.0 or var == 0:
np.testing.assert_equal(aug_img, test_img)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_additive_noise.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RandomBlur class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
from mock import MagicMock
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.core.processors.augment.random_blur import RandomBlur
test_dir = "nvidia_tao_tf1/core/processors/augment/test_data/random_blur/"
test_inputs = [
(5, 5, 0.2, 5, "_0"),
# std
(5, 7.5, 0.2, 5, "_1"),
(5, 10, 0.2, 5, "_2"),
# size
(15, 5, 0.2, 5, "_3"),
(25, 5, 0.2, 5, "_4"),
# blur_max_block
(5, 5, 0.2, 35, "_5"),
(5, 5, 0.2, 65, "_6"),
# blur_prob
(5, 5, 0.6, 5, "_7"),
(5, 5, 1.0, 5, "_8"),
]
@pytest.mark.parametrize("width", [160])
@pytest.mark.parametrize("height", [240])
@pytest.mark.parametrize("size,std,blur_prob,blur_max_block,post", test_inputs)
def test_random_blur(width, height, size, std, blur_prob, blur_max_block, post, tmpdir):
"""Iterate through every augmentation and run it.
Load a correctly augmented image to compare against.
"""
transform = RandomBlur(random=False)
mocked_noise = np.load(test_dir + "mocked_noise.npy")
transform.pixel_remover._sample = MagicMock(return_value=mocked_noise)
sess = tf.compat.v1.Session()
test_img = cv2.imread(test_dir + "test_image.jpg")
test_img = cv2.resize(test_img, (width, height))
test_img = np.transpose(test_img, [2, 0, 1])
test_img = np.expand_dims(test_img, 0)
test_img = test_img.astype(float) / 255.0
aug_img = sess.run(
transform(
test_img,
size=size,
std=std,
prob=0.5,
blur_max_block=blur_max_block,
blur_pct=blur_prob,
)
)
filename = test_dir + "random_gaussian_blur" + post + ".npy"
aug_img = np.squeeze(aug_img, 0)
aug_img = np.transpose(aug_img, [1, 2, 0])
aug_img = (aug_img * 255).astype(np.dtype("int8"))
target_img = np.load(filename)
np.testing.assert_allclose(aug_img, target_img, atol=1.0)
test_inputs_random = [
(5, 0, 0.2, 5, 0.0),
# std
(5, 0.1, 0.2, 5, 1.0),
(5, 10.0, 0.2, 5, 0.5),
# size
(1, 500, 0.2, 5, 0.8),
(100, 5, 0.2, 5, 1.0),
# blur_max_block
(5, 5, 0.2, 1, 1.0),
(5, 5, 0.2, 100, 1.0),
# blur_pct
(5, 5, 0.0, 5, 0.5),
(5, 5, 1.0, 5, 0.8),
]
@pytest.mark.parametrize("width", [160])
@pytest.mark.parametrize("height", [240])
@pytest.mark.parametrize("size,std,blur_pct,blur_max_block,prob", test_inputs_random)
def test_random_pixel_removal(
width, height, size, std, blur_max_block, blur_pct, prob, tmpdir
):
"""Run random augmentations to make sure they work as expected."""
transform = RandomBlur(random=True)
sess = tf.compat.v1.Session()
test_img = cv2.imread(test_dir + "test_image.jpg")
test_img = cv2.resize(test_img, (width, height))
test_img = np.transpose(test_img, [2, 0, 1])
test_img = np.expand_dims(test_img, 0)
test_img = test_img.astype(np.float32) / 255.0
aug_img = sess.run(
transform(
test_img,
size=size,
std=std,
blur_max_block=blur_max_block,
blur_pct=blur_pct,
prob=prob,
)
)
if prob == 0.0:
np.testing.assert_equal(aug_img, test_img)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_random_blur.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object
from nvidia_tao_tf1.core.processors import ColorTransform
from nvidia_tao_tf1.core.types import DataFormat
device_list = ["/cpu", "/gpu"]
def get_random_image(
batch_size,
start=0.0,
stop=1.0,
data_format=DataFormat.CHANNELS_LAST,
dtype=np.float32,
num_channels=3,
):
"""Create a batch of images, with values within a linespace, that are then randomly shuffled."""
shape = (batch_size, 16, 64, num_channels)
if data_format == DataFormat.CHANNELS_FIRST:
shape = (batch_size, num_channels, 16, 64)
images = np.linspace(
start, stop, batch_size * 16 * 64 * num_channels, dtype=dtype
).reshape(shape)
return np.random.permutation(images)
def _apply_ctm_on_images(
device,
images,
ctms,
min_clip=0.0,
max_clip=1.0,
data_format=DataFormat.CHANNELS_LAST,
input_dtype=np.float32,
output_dtype=None,
):
"""Run ColorTransform op."""
with tf.device(device):
ctm_op = ColorTransform(
min_clip=min_clip,
max_clip=max_clip,
data_format=data_format,
output_dtype=output_dtype,
)
# Note: placeholders are needed to make TensorFlow respect device placement.
placeholder_images = tf.compat.v1.placeholder(dtype=input_dtype)
placeholder_ctms = tf.compat.v1.placeholder(dtype=tf.float32)
fetches = ctm_op(placeholder_images, placeholder_ctms)
sess = tf.compat.v1.Session()
return sess.run(
fetches, feed_dict={placeholder_images: images, placeholder_ctms: ctms}
)
@pytest.mark.parametrize("device", device_list)
@pytest.mark.parametrize("min_clip", [0.0, 1.0])
@pytest.mark.parametrize("max_clip", [1.0, 8.0, 255.0])
@pytest.mark.parametrize("input_dtype", [np.uint8, np.float16, np.float32])
@pytest.mark.parametrize("output_dtype", [np.uint8, np.float16, np.float32, None])
def test_clipping_and_type_cast(
device, min_clip, max_clip, input_dtype, output_dtype, batch_size=2
):
"""Test color clipping and type casting."""
ctms = np.repeat([np.eye(4)], batch_size, axis=0)
input_np = get_random_image(batch_size, start=-512, stop=512, dtype=input_dtype)
output_np = _apply_ctm_on_images(
device,
input_np,
ctms,
min_clip,
max_clip,
input_dtype=input_dtype,
output_dtype=output_dtype,
)
assert (
output_np.min() >= min_clip
), "Minimal value lower than specified 'min_clip' value."
assert (
output_np.max() <= max_clip
), "Maximum value lower than specified 'max_clip' value."
if output_dtype is None:
assert output_np.dtype == input_dtype
else:
assert output_np.dtype == output_dtype
@pytest.mark.parametrize("device", device_list)
@pytest.mark.parametrize("max_value", [1.0, 255.0])
@pytest.mark.parametrize("identity_scale", [1.0, 255.0])
def test_identity_and_scale(device, max_value, identity_scale, batch_size=2):
"""Test elements over the diagonal leaving the image unchanged or only linearly scale them."""
ctms = np.repeat([np.eye(4)], batch_size, axis=0) * identity_scale
input_np = get_random_image(batch_size, start=0.0, stop=max_value)
output_np = _apply_ctm_on_images(
device, input_np, ctms, min_clip=0.0, max_clip=max_value * identity_scale
)
np.testing.assert_allclose(
input_np * identity_scale,
output_np,
rtol=1e-6,
err_msg="input array changed after application of an identity color "
" transformation matrix.",
)
@pytest.mark.parametrize("device", device_list)
@pytest.mark.parametrize("c1", [0, 0.1])
@pytest.mark.parametrize("c2", [0, 0.2])
@pytest.mark.parametrize("c3", [0, 0.3])
@pytest.mark.parametrize(
"data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
def test_value_offset(device, c1, c2, c3, data_format, batch_size=2):
"""Test adding offsets to either of the channels, and assert that they shift those channels."""
ctm = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [c1, c2, c3, 1]], dtype=np.float32
)
ctms = np.repeat([ctm], batch_size, axis=0)
input_np = get_random_image(
batch_size, start=0.0, stop=1.0, data_format=data_format
)
output_np = _apply_ctm_on_images(
device, input_np, ctms, min_clip=0.0, max_clip=2.0, data_format=data_format
)
axis = (0, 1, 2) if data_format == DataFormat.CHANNELS_LAST else (0, 2, 3)
mean_diff = np.mean(output_np - input_np, axis=axis)
expected_mean_diff = (c1, c2, c3)
np.testing.assert_allclose(mean_diff, expected_mean_diff, rtol=1e-4)
@pytest.mark.parametrize("device", device_list)
@pytest.mark.parametrize(
"data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
@pytest.mark.parametrize("dtype", [np.uint8, np.float16, np.float32])
def test_value_roundtrip(device, data_format, dtype, batch_size=2):
"""Shift channels by 1 channel, 3 times; then assert that it results in the same tensor."""
ctm = np.array(
[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1]], dtype=np.float32
)
ctms = np.repeat([ctm], batch_size, axis=0)
input_np = get_random_image(batch_size, data_format=data_format, dtype=dtype)
# Test that the first 2 channel shifts (out of 3) result in a different tensor
output_np = input_np
for _ in range(2):
output_np = _apply_ctm_on_images(
device, output_np, ctms, data_format=data_format, input_dtype=dtype
)
np.testing.assert_equal(np.any(np.not_equal(output_np, input_np)), True)
# The 3rd out of 3 channel shifts should result in the same image as the input image
output_np = _apply_ctm_on_images(
device, output_np, ctms, data_format=data_format, input_dtype=dtype
)
assert output_np.dtype == input_np.dtype
np.testing.assert_array_equal(
input_np,
output_np,
err_msg="input array changed after application of shifting 3 "
"color channels 3 times.",
)
@pytest.mark.parametrize("device", device_list)
@pytest.mark.parametrize(
"input_data_format", [DataFormat.CHANNELS_FIRST, DataFormat.CHANNELS_LAST]
)
def test_transpose(device, input_data_format, batch_size=2):
"""Test transposing between channels first and channels last."""
ctms = np.repeat([np.eye(4)], batch_size, axis=0)
input_np = get_random_image(
batch_size, start=0.0, stop=255.0, data_format=input_data_format
)
if input_data_format == DataFormat.CHANNELS_FIRST:
output_data_format = DataFormat.CHANNELS_LAST
axes = [0, 2, 3, 1]
else:
output_data_format = DataFormat.CHANNELS_FIRST
axes = [0, 3, 1, 2]
input_shape_transposed = []
for i in range(4):
input_shape_transposed.append(input_np.shape[axes[i]])
with tf.device(device):
ctm_op = ColorTransform(
min_clip=0.0,
max_clip=255.0,
input_data_format=input_data_format,
output_data_format=output_data_format,
)
images = tf.constant(input_np, dtype=tf.float32)
fetches = ctm_op(images, tf.constant(ctms, dtype=tf.float32))
# Check shape inference.
np.testing.assert_array_equal(fetches.shape, input_shape_transposed)
sess = tf.compat.v1.Session()
output_np = sess.run(fetches)
# Check output shape.
np.testing.assert_array_equal(output_np.shape, input_shape_transposed)
transposed_input_np = np.transpose(input_np, axes=axes)
np.testing.assert_array_equal(
transposed_input_np,
output_np,
err_msg="input array changed after application of conversion from CHANNELS_LAST to "
"CHANNELS_FIRST.",
)
def test_error_checks():
"""Test error checks."""
# min_clip > max_clip.
with pytest.raises(ValueError):
ColorTransform(min_clip=1.0, max_clip=0.0)
# Both data_format and either input_data_format or output_data_format given.
with pytest.raises(ValueError):
ColorTransform(
data_format=DataFormat.CHANNELS_FIRST,
input_data_format=DataFormat.CHANNELS_FIRST,
)
with pytest.raises(ValueError):
ColorTransform(
data_format=DataFormat.CHANNELS_FIRST,
output_data_format=DataFormat.CHANNELS_FIRST,
)
# Input_data_format given, but output_data_format is missing.
with pytest.raises(ValueError):
ColorTransform(input_data_format=DataFormat.CHANNELS_FIRST)
# Output_data_format given, but input_data_format is missing.
with pytest.raises(ValueError):
ColorTransform(output_data_format=DataFormat.CHANNELS_FIRST)
# Invalid data format.
with pytest.raises(NotImplementedError):
ColorTransform(data_format="weird_data_format")
@pytest.mark.parametrize(
"input_np",
[
np.zeros([2, 16, 12, 3], np.float32), # Wrong channel order.
np.zeros([2, 4, 16, 12], np.float32), # Too many color channels.
np.zeros([2, 1, 16, 12], np.int16), # Unsupported data type.
np.zeros([16, 12, 3], np.float32), # Too few dimensions.
np.zeros([2, 3, 16, 12, 3], np.float32), # Too many dimensions.
np.zeros(
[1, 3, 16, 12], np.float32
), # Number of images does not match number of ctms.
],
)
def test_invalid_input_tensors(input_np):
"""Test invalid input tensors."""
with pytest.raises(Exception):
ctm_op = ColorTransform(data_format=DataFormat.CHANNELS_FIRST)
batch_size = 2
ctms = np.repeat([np.eye(4)], batch_size, axis=0)
fetches = ctm_op(tf.constant(input_np), tf.constant(ctms, dtype=tf.float32))
sess = tf.compat.v1.Session()
sess.run(fetches)
@pytest.mark.parametrize(
"ctms",
[
np.zeros([2, 3, 4], np.float32), # Wrong number of rows.
np.zeros([2, 5, 4], np.float32), # Wrong number of columns.
np.zeros([2, 16], np.float32), # Wrong dimensionality.
np.zeros([1, 4, 4], np.float32), # Wrong batch size.
],
)
def test_invalid_ctms(ctms):
"""Test invalid ctms."""
with pytest.raises(Exception):
input_np = np.zeros([2, 16, 12, 3], np.float32)
ctm_op = ColorTransform(data_format=DataFormat.CHANNELS_FIRST)
fetches = ctm_op(tf.constant(input_np), tf.constant(ctms, dtype=tf.float32))
sess = tf.compat.v1.Session()
sess.run(fetches)
@pytest.mark.parametrize(
"images, input_data_format, output_data_format, expected_shape",
[
(
np.zeros([2, 3, 16, 12], np.float32),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_FIRST,
[2, 3, 16, 12],
),
(
np.zeros([2, 3, 16, 12], np.float32),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_LAST,
[2, 16, 12, 3],
),
(
np.zeros([2, 16, 12, 3], np.float32),
DataFormat.CHANNELS_LAST,
DataFormat.CHANNELS_LAST,
[2, 16, 12, 3],
),
(
np.zeros([2, 16, 12, 3], np.float32),
DataFormat.CHANNELS_LAST,
DataFormat.CHANNELS_FIRST,
[2, 3, 16, 12],
),
(
tf.zeros(shape=[2, 3, 16, 12], dtype=tf.uint8),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_LAST,
[2, 16, 12, 3],
),
(
tf.compat.v1.placeholder(shape=[2, 3, 16, 12], dtype=tf.float16),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_FIRST,
[2, 3, 16, 12],
),
# Cases where the shape is completely or partially unknown.
(
tf.compat.v1.placeholder(dtype=tf.float32),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_FIRST,
[None, None, None, None],
),
(
tf.compat.v1.placeholder(shape=[2, 3, 16, None], dtype=tf.float16),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_FIRST,
[2, 3, 16, None],
),
(
tf.compat.v1.placeholder(shape=[2, None, 16, 12], dtype=tf.uint8),
DataFormat.CHANNELS_FIRST,
DataFormat.CHANNELS_LAST,
[2, 16, 12, None],
),
(
tf.compat.v1.placeholder(shape=[2, None, None, 3], dtype=tf.float32),
DataFormat.CHANNELS_LAST,
DataFormat.CHANNELS_FIRST,
[2, 3, None, None],
),
(
tf.compat.v1.placeholder(shape=[None, None, None, None], dtype=tf.uint8),
DataFormat.CHANNELS_LAST,
DataFormat.CHANNELS_FIRST,
[None, None, None, None],
),
],
)
def test_color_transform_shape_inference(
images, input_data_format, output_data_format, expected_shape
):
"""Test shape inference."""
ctm_op = ColorTransform(
input_data_format=input_data_format, output_data_format=output_data_format
)
output = ctm_op(images, tf.constant(0.0, tf.float32))
assert expected_shape == output.shape.as_list()
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
ctm_op = ColorTransform(
input_data_format=DataFormat.CHANNELS_FIRST,
output_data_format=DataFormat.CHANNELS_FIRST,
)
ctm_op_dict = ctm_op.serialize()
deserialized_ctm_op = deserialize_tao_object(ctm_op_dict)
assert ctm_op.min_clip == deserialized_ctm_op.min_clip
assert ctm_op.max_clip == deserialized_ctm_op.max_clip
assert ctm_op.input_data_format == deserialized_ctm_op.input_data_format
assert ctm_op.output_data_format == deserialized_ctm_op.output_data_format
assert ctm_op.output_dtype == deserialized_ctm_op.output_dtype
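# Illustrative sketch: the 4x4 color transform matrices above act on homogeneous
# color vectors in row-vector convention, i.e. [r, g, b, 1] @ ctm. For the offset
# matrix in test_value_offset this simply adds (c1, c2, c3) to the channels.
# A small numpy check under that assumed convention:
if __name__ == "__main__":
    c1, c2, c3 = 0.1, 0.2, 0.3
    offset_ctm = np.array(
        [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [c1, c2, c3, 1]], dtype=np.float32
    )
    pixel = np.array([0.5, 0.5, 0.5, 1.0], dtype=np.float32)
    print(pixel @ offset_ctm)  # -> [0.6 0.7 0.8 1. ]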
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_color_transform.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying random translation transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.augment import spatial
from nvidia_tao_tf1.core.processors.processors import Processor
from nvidia_tao_tf1.core.types import Transform
class RandomTranslation(Processor):
"""Random translation transform."""
@save_args
def __init__(self, max_x, max_y, probability=0.5, **kwargs):
"""Construct a RandomTranslation processor.
Args:
max_x (int): If translation occurs, this is the lower and upper bound of the
uniform distribution from which an integer will be picked to translate horizontally.
max_y (int): If translation occurs, this is the lower and upper bound of the
uniform distribution from which an integer will be picked to translate vertically.
probability (float): Probability at which translation occurs.
kwargs (dict): keyword arguments passed to parent class.
"""
super(RandomTranslation, self).__init__(**kwargs)
self._max_x = max_x
self._max_y = max_y
if probability < 0.0 or probability > 1.0:
raise ValueError(
"RandomTranslation.probability ({}) is not within the range "
"[0.0, 1.0].".format(probability)
)
self._probability = probability
def __repr__(self):
"""Return a string representation of the processor."""
return "RandomTranslation(max_x={}, max_y={}, probability={})".format(
self._max_x, self._max_y, self._probability
)
def call(self, transform):
"""Return a Transform whose spatial transformation matrix is perturbed at random.
Args:
transform (Transform): An input Transform instance to be processed.
Returns:
Transform: Final Transform instance with spatial transform matrix perturbed.
"""
if not isinstance(transform, Transform):
raise TypeError(
"Expecting an argument of type 'Transform', "
"given: {}.".format(type(transform).__name__)
)
batch_size = None
batch_shape = []
if transform.spatial_transform_matrix.shape.ndims == 3:
batch_size = tf.shape(input=transform.spatial_transform_matrix)[0]
batch_shape = [batch_size]
probability = tf.random.uniform(
batch_shape, minval=0.0, maxval=1.0, dtype=tf.float32
)
should_translate = tf.less_equal(probability, self._probability)
stm_translation = spatial.random_translation_matrix(
max_x=self._max_x, max_y=self._max_y, batch_size=batch_size
)
processed_stm = tf.compat.v1.where(
should_translate,
tf.matmul(stm_translation, transform.spatial_transform_matrix),
transform.spatial_transform_matrix,
)
return Transform(
canvas_shape=transform.canvas_shape,
color_transform_matrix=transform.color_transform_matrix,
spatial_transform_matrix=processed_stm,
)
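# Illustrative usage sketch: applying this processor to an identity Transform,
# assuming Canvas2D/Transform are built as in the augmentation tests. The sizes
# and bounds below are illustrative only.
if __name__ == "__main__":
    from nvidia_tao_tf1.core.types import Canvas2D

    identity = Transform(
        canvas_shape=Canvas2D(height=12, width=10),
        color_transform_matrix=tf.eye(4, dtype=tf.float32),
        spatial_transform_matrix=tf.eye(3, dtype=tf.float32),
    )
    perturbed = RandomTranslation(max_x=4, max_y=2, probability=0.5)(identity)
    with tf.compat.v1.Session() as sess:
        print(sess.run(perturbed.spatial_transform_matrix))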
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/random_translation.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying random hue and saturation transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.augment import color
from nvidia_tao_tf1.core.processors.processors import Processor
from nvidia_tao_tf1.core.types import Transform
class RandomHueSaturation(Processor):
"""Random brightness transform."""
@save_args
def __init__(self, hue_rotation_max, saturation_shift_max, **kwargs):
"""Construct a RandomHueSaturation processor.
Args:
hue_rotation_max (float): The maximum rotation angle (0-360). This is used in a
truncated normal distribution with a zero mean. This rotation angle corresponds to
twice the standard deviation, because values beyond twice the standard
deviation are truncated. A value of 0 will not affect the matrix.
saturation_shift_max (float): A random uniform shift in the range [0, 1] that changes
the saturation. This value gives the negative and positive extent of the
augmentation, where a value of 0 leaves the matrix unchanged.
For example, a value of 1 can result in saturation values between
0 (entirely desaturated) and 2 (twice the saturation).
kwargs (dict): keyword arguments passed to parent class.
"""
super(RandomHueSaturation, self).__init__(**kwargs)
if hue_rotation_max < 0.0 or hue_rotation_max > 360.0:
raise ValueError(
"RandomHueSaturation.hue_rotation_max ({})"
" is not within the range [0.0, 360.0].".format(hue_rotation_max)
)
if saturation_shift_max < 0.0 or saturation_shift_max > 1.0:
raise ValueError(
"RandomHueSaturation.saturation_shift_max ({})"
" is not within the range [0.0, 1.0].".format(saturation_shift_max)
)
self._hue_rotation_max = hue_rotation_max
self._saturation_shift_max = saturation_shift_max
def __repr__(self):
"""Return a string representation of the processor."""
return "RandomHueSaturation(hue_rotation_max={}, saturation_shift_max={})".format(
self._hue_rotation_max, self._saturation_shift_max
)
def call(self, transform):
"""Return a Transform whose color transformation matrix is perturbed at random.
Args:
transform (Transform): An input Transform instance to be processed.
Returns:
Transform: Final Transform instance with color transform matrix perturbed.
"""
if not isinstance(transform, Transform):
raise TypeError(
"Expecting an argument of type 'Transform', "
"given: {}.".format(type(transform).__name__)
)
batch_size = None
if transform.color_transform_matrix.shape.ndims == 3:
batch_size = tf.shape(input=transform.color_transform_matrix)[0]
ctm_brightness = color.random_hue_saturation_matrix(
hue_rotation_max=self._hue_rotation_max,
saturation_shift_max=self._saturation_shift_max,
batch_size=batch_size,
)
processed_ctm = tf.matmul(ctm_brightness, transform.color_transform_matrix)
return Transform(
canvas_shape=transform.canvas_shape,
color_transform_matrix=processed_ctm,
spatial_transform_matrix=transform.spatial_transform_matrix,
)
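# Illustrative usage sketch: this processor perturbs only the color transform
# matrix of a Transform; the spatial matrix passes through unchanged. Parameter
# values below are illustrative only.
if __name__ == "__main__":
    from nvidia_tao_tf1.core.types import Canvas2D

    identity = Transform(
        canvas_shape=Canvas2D(height=12, width=10),
        color_transform_matrix=tf.eye(4, dtype=tf.float32),
        spatial_transform_matrix=tf.eye(3, dtype=tf.float32),
    )
    example = RandomHueSaturation(hue_rotation_max=30.0, saturation_shift_max=0.2)
    with tf.compat.v1.Session() as sess:
        print(sess.run(example(identity).color_transform_matrix))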
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/random_hue_saturation.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying random translation transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.augment import spatial
from nvidia_tao_tf1.core.processors.processors import Processor
from nvidia_tao_tf1.core.types import Transform
class RandomShear(Processor):
"""Random shear transform."""
@save_args
def __init__(self, max_ratio_x, max_ratio_y, probability=0.5, **kwargs):
"""Construct a RandomShear processor.
Args:
max_ratio_x (float): If a shear transform occurs, this is the lower and upper bound
of the uniform distribution from which a ratio will be picked to shear horizontally.
max_ratio_y (float): If a shear transform occurs, this is the lower and upper bound
of the uniform distribution from which a ratio will be picked to shear vertically.
probability (float): Probability at which the shear occurs.
kwargs (dict): keyword arguments passed to parent class.
"""
super(RandomShear, self).__init__(**kwargs)
if max_ratio_x < 0.0:
raise ValueError(
"RandomShear.max_ratio_x ({}) is less than 0.".format(max_ratio_x)
)
self._max_ratio_x = max_ratio_x
if max_ratio_y < 0.0:
raise ValueError(
"RandomShear.max_ratio_y ({}) is less than 0.".format(max_ratio_y)
)
self._max_ratio_y = max_ratio_y
if probability < 0.0 or probability > 1.0:
raise ValueError(
"RandomShear.probability ({}) is not within the range "
"[0.0, 1.0].".format(probability)
)
self._probability = probability
def __repr__(self):
"""Return a string representation of the processor."""
return "RandomShear(max_ratio_x={}, max_ratio_y={}, probability={})".format(
self._max_ratio_x, self._max_ratio_y, self._probability
)
def call(self, transform):
"""Return a Transform whose spatial transformation matrix is perturbed at random.
Args:
transform (Transform): An input Transform instance to be processed.
Returns:
Transform: Final Transform instance with spatial transform matrix perturbed.
"""
if not isinstance(transform, Transform):
raise TypeError(
"Expecting an argument of type 'Transform', "
"given: {}.".format(type(transform).__name__)
)
batch_size = None
batch_shape = []
if transform.spatial_transform_matrix.shape.ndims == 3:
batch_size = tf.shape(input=transform.spatial_transform_matrix)[0]
batch_shape = [batch_size]
probability = tf.random.uniform(batch_shape, minval=0.0, maxval=1.0)
should_shear = tf.less_equal(probability, self._probability)
stm_shear = spatial.random_shear_matrix(
max_ratio_x=self._max_ratio_x,
max_ratio_y=self._max_ratio_y,
width=transform.canvas_shape.width,
height=transform.canvas_shape.height,
batch_size=batch_size,
)
processed_stm = tf.compat.v1.where(
should_shear,
tf.matmul(stm_shear, transform.spatial_transform_matrix),
transform.spatial_transform_matrix,
)
return Transform(
canvas_shape=transform.canvas_shape,
color_transform_matrix=transform.color_transform_matrix,
spatial_transform_matrix=processed_stm,
)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/random_shear.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying random flip transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.augment import spatial
from nvidia_tao_tf1.core.processors.processors import Processor
from nvidia_tao_tf1.core.types import Transform
class RandomFlip(Processor):
"""Random flip transform."""
@save_args
def __init__(self, horizontal_probability=0.5, vertical_probability=0.0, **kwargs):
"""Construct a RandomFlip processor.
Note that the default value of horizontal_probability is different from
vertical_probability due to compatibility issues for networks that
currently use this processor but assume vertical_probability is 0.
Args:
horizontal_probability (float): Probability between 0 and 1
at which a left-right flip occurs. Defaults to 0.5.
vertical_probability (float): Probability between 0 and 1
at which a top-down flip occurs. Defaults to 0.0.
kwargs (dict): keyword arguments passed to parent class.
"""
super(RandomFlip, self).__init__(**kwargs)
if horizontal_probability < 0.0 or 1.0 < horizontal_probability:
raise ValueError(
"RandomFlip.horizontal_probability ({}) is not within the range "
"[0.0, 1.0].".format(horizontal_probability)
)
if vertical_probability < 0.0 or 1.0 < vertical_probability:
raise ValueError(
"RandomFlip.vertical_probability ({}) is not within the range "
"[0.0, 1.0].".format(vertical_probability)
)
self._horizontal_probability = horizontal_probability
self._vertical_probability = vertical_probability
def __repr__(self):
"""Return a string representation of the processor."""
return "RandomFlip(horizontal_probability={}, vertical_probability={})".format(
self._horizontal_probability, self._vertical_probability
)
def call(self, transform):
"""Return a Transform whose spatial transformation matrix is perturbed at random.
Args:
transform (Transform): An input Transform instance to be processed.
Returns:
Transform: Final Transform instance with spatial transform matrix perturbed.
"""
if not isinstance(transform, Transform):
raise TypeError(
"Expecting an argument of type 'Transform', "
"given: {}.".format(type(transform).__name__)
)
batch_size = None
if transform.spatial_transform_matrix.shape.ndims == 3:
batch_size = tf.shape(input=transform.spatial_transform_matrix)[0]
stm_flip = spatial.random_flip_matrix(
horizontal_probability=self._horizontal_probability,
vertical_probability=self._vertical_probability,
height=transform.canvas_shape.height,
width=transform.canvas_shape.width,
batch_size=batch_size,
)
processed_stm = tf.matmul(stm_flip, transform.spatial_transform_matrix)
return Transform(
canvas_shape=transform.canvas_shape,
color_transform_matrix=transform.color_transform_matrix,
spatial_transform_matrix=processed_stm,
)
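# Illustrative sketch: a horizontal flip about an image of width w can be written
# (in the row-vector convention used by the augmentation tests) as the matrix
# below, and applying it twice yields the identity. This is a standalone numpy
# illustration of the geometry, not the output of spatial.random_flip_matrix.
if __name__ == "__main__":
    import numpy as np

    w = 10.0
    flip_h = np.array([[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [w, 0.0, 1.0]])
    np.testing.assert_allclose(flip_h @ flip_h, np.eye(3))
    print(np.array([0.0, 5.0, 1.0]) @ flip_h)  # -> [10.  5.  1.]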
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/random_flip.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RandomGlimpse processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from six.moves import mock
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object
from nvidia_tao_tf1.core.processors import RandomGlimpse
from nvidia_tao_tf1.core.types import Canvas2D, Transform
@pytest.mark.parametrize(
"height, width, crop_location, crop_probability, message",
[
(-1, 1, "random", 1, "RandomGlimpse.height (-1) is not positive."),
(1, -1, "random", 1, "RandomGlimpse.width (-1) is not positive."),
(
1,
-1,
"random",
2,
"RandomGlimpse.crop_probability (2) is not within the range [0, 1].",
),
(
1,
-1,
"none",
2,
"RandomGlimpse.crop_location 'none' is not "
"supported. Valid options: center, random.",
),
],
)
def test_invalid_random_glimpse_parameters(
height, width, crop_location, crop_probability, message
):
"""Test RandomGlimpse processor constructor error handling on invalid arguments."""
with pytest.raises(ValueError) as exc:
RandomGlimpse(
height=height,
width=width,
crop_location=crop_location,
crop_probability=crop_probability,
)
assert str(exc.value) == message
@pytest.mark.parametrize("batch_size", [None, 5])
def test_random_glimpse_call_with_center_crop(batch_size):
"""Test RandomGlimpse processor call center crop mode."""
batch_shape = [] if batch_size is None else [batch_size]
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4, batch_shape=batch_shape, dtype=tf.float32),
spatial_transform_matrix=tf.eye(3, batch_shape=batch_shape, dtype=tf.float32),
)
random_glimpse = RandomGlimpse(
crop_location=RandomGlimpse.CENTER, crop_probability=1.0, height=6, width=5
)
expected_stm = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [2.0, 3.0, 1.0]])
if batch_size is not None:
expected_stm = np.tile(expected_stm, [batch_size, 1, 1])
final_transform = random_glimpse(transform)
stm = tf.compat.v1.Session().run(final_transform.spatial_transform_matrix)
np.testing.assert_equal(stm, expected_stm)
assert final_transform.canvas_shape == Canvas2D(6, 5)
@mock.patch("nvidia_tao_tf1.core.processors.augment.random_glimpse.tf.random.uniform")
@pytest.mark.parametrize("batch_size", [None, 5])
def test_random_glimpse_call_with_random_crop(mocked_random_uniform, batch_size):
"""Test RandomGlimpse processor call random crop mode."""
batch_shape = [] if batch_size is None else [batch_size]
# Fix random uniform to return 0.5, which will be the value for x and y of a translation
# matrix.
mocked_random_uniform.return_value = tf.constant(
0.5, shape=batch_shape, dtype=tf.float32
)
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4, batch_shape=batch_shape, dtype=tf.float32),
spatial_transform_matrix=tf.eye(3, batch_shape=batch_shape, dtype=tf.float32),
)
random_glimpse = RandomGlimpse(
crop_location=RandomGlimpse.RANDOM, crop_probability=1.0, height=6, width=5
)
expected_stm = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.5, 1.0]])
if batch_size is not None:
expected_stm = np.tile(expected_stm, [batch_size, 1, 1])
final_transform = random_glimpse(transform)
stm = tf.compat.v1.Session().run(final_transform.spatial_transform_matrix)
np.testing.assert_equal(stm, expected_stm)
assert final_transform.canvas_shape == Canvas2D(6, 5)
@pytest.mark.parametrize(
"batch_size", [None, 5, tf.compat.v1.placeholder(dtype=tf.int32)]
)
def test_random_glimpse_call_with_scale(batch_size):
"""Test RandomGlimpse processor call scale mode."""
batch_shape = [] if batch_size is None else [batch_size]
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4, batch_shape=batch_shape, dtype=tf.float32),
spatial_transform_matrix=tf.eye(3, batch_shape=batch_shape, dtype=tf.float32),
)
feed_dict = {}
expected_batch_size = batch_size
if type(batch_size) == tf.Tensor:
expected_batch_size = 7
feed_dict = {batch_size: expected_batch_size}
random_glimpse = RandomGlimpse(
crop_location=RandomGlimpse.CENTER, crop_probability=0.0, height=6, width=5
)
expected_stm = np.array([[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 1.0]])
if batch_size is not None:
expected_stm = np.tile(expected_stm, [expected_batch_size, 1, 1])
final_transform = random_glimpse(transform)
stm = tf.compat.v1.Session().run(
final_transform.spatial_transform_matrix, feed_dict=feed_dict
)
np.testing.assert_equal(stm, expected_stm)
assert final_transform.canvas_shape == Canvas2D(6, 5)
def test_random_glimpse_call_with_invalid_input():
"""Test RandomGlimpse processor call error handling on invalid input types."""
# Calling RandomGlimpse with str should throw a TypeError.
with pytest.raises(TypeError):
RandomGlimpse(1, 1, RandomGlimpse.CENTER, 1)("Transform")
@pytest.mark.parametrize(
"crop_location, height, width, message",
[
(
RandomGlimpse.RANDOM,
5,
6,
"Attempted to extract random crop (6) wider than input width (5).",
),
(
RandomGlimpse.RANDOM,
6,
5,
"Attempted to extract random crop (6) taller than input height (5).",
),
(
RandomGlimpse.CENTER,
5,
6,
"Attempted to extract center crop (6) wider than input width (5).",
),
(
RandomGlimpse.CENTER,
6,
5,
"Attempted to extract center crop (6) taller than input height (5).",
),
("unknown", 5, 5, "Unhandled crop location: 'unknown'."),
],
)
def test_random_glimpse_invalid_crop_configurations(
crop_location, height, width, message
):
"""Test RandomGlimpse processor call error raising for invalid crop configurations."""
transform = Transform(
canvas_shape=Canvas2D(5, 5),
color_transform_matrix=tf.eye(4),
spatial_transform_matrix=tf.eye(3),
)
# Bypass constructor validation.
if crop_location == "unknown":
RandomGlimpse.CROP_LOCATIONS.append("unknown")
random_glimpse = RandomGlimpse(
crop_location=crop_location, crop_probability=1.0, height=height, width=width
)
with pytest.raises(ValueError) as exc:
random_glimpse(transform)
assert str(exc.value) == message
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
random_glimpse = RandomGlimpse(
height=12, width=10, crop_location=RandomGlimpse.CENTER, crop_probability=1.0
)
random_glimpse_dict = random_glimpse.serialize()
deserialized_random_glimpse = deserialize_tao_object(random_glimpse_dict)
assert random_glimpse._height == deserialized_random_glimpse._height
assert random_glimpse._width == deserialized_random_glimpse._width
assert random_glimpse._crop_location == deserialized_random_glimpse._crop_location
assert (
random_glimpse._crop_probability
== deserialized_random_glimpse._crop_probability
)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_random_glimpse.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RandomRotation processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from six.moves import mock
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object
from nvidia_tao_tf1.core.processors import RandomRotation
from nvidia_tao_tf1.core.processors.augment.spatial import rotation_matrix
from nvidia_tao_tf1.core.types import Canvas2D, Transform
@pytest.mark.parametrize(
"probability, message",
[
(-0.1, "RandomRotation.probability (-0.1) is not within the range [0.0, 1.0]."),
(1.1, "RandomRotation.probability (1.1) is not within the range [0.0, 1.0]."),
],
)
def test_raises_on_invalid_probability(probability, message):
with pytest.raises(ValueError) as exc:
RandomRotation(min_angle=7, max_angle=7, probability=probability)
assert str(exc.value) == message
@pytest.mark.parametrize(
"min_angle, message",
[
(-600, "RandomRotation.min_angle (-600) is smaller than -360.0 degrees."),
(
8,
"RandomRotation.min_angle (8) is greater than RandomRotation.max_angle (7).",
),
],
)
def test_raises_on_invalid_min_angle(min_angle, message):
with pytest.raises(ValueError) as exc:
RandomRotation(min_angle=min_angle, max_angle=7, probability=0.5)
assert str(exc.value) == message
@pytest.mark.parametrize(
"max_angle, message",
[(361, "RandomRotation.max_angle (361) is greater than 360.0 degrees.")],
)
def test_raises_on_invalid_max_angle(max_angle, message):
with pytest.raises(ValueError) as exc:
RandomRotation(min_angle=7, max_angle=max_angle, probability=0.5)
assert str(exc.value) == message
@mock.patch("nvidia_tao_tf1.core.processors.augment.random_rotation.tf.random.uniform")
@mock.patch("nvidia_tao_tf1.core.processors.augment.random_rotation.spatial.rotation_matrix")
def test_delegates_random_angle_to_rotation_matrix(
mocked_rotation_matrix, mocked_random_uniform
):
"""Test RandomRotation processor call."""
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4),
spatial_transform_matrix=tf.eye(3),
)
mocked_rotation_matrix.return_value = tf.eye(3)
seven = tf.constant(7.0, dtype=tf.float32)
mocked_random_uniform.return_value = seven
processor = RandomRotation(min_angle=40, max_angle=90, probability=1.0)
processor(transform)
mocked_rotation_matrix.assert_called_with(seven, height=12, width=10)
@mock.patch("nvidia_tao_tf1.core.processors.augment.random_rotation.tf.random.uniform")
@pytest.mark.parametrize(
"batch_size", [None, 3, tf.compat.v1.placeholder(dtype=tf.int32)]
)
def test_random_rotation(mocked_random_uniform, batch_size):
"""Test RandomRotation processor."""
batch_shape = [] if batch_size is None else [batch_size]
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4, batch_shape=batch_shape, dtype=tf.float32),
spatial_transform_matrix=tf.eye(3, batch_shape=batch_shape, dtype=tf.float32),
)
feed_dict = {}
if type(batch_size) == tf.Tensor:
feed_dict = {batch_size: 7}
rnd = tf.fill(dims=batch_shape, value=0.5)
mocked_random_uniform.return_value = rnd
processor = RandomRotation(min_angle=40, max_angle=90, probability=1.0)
stm = processor(transform)
expected_stm = rotation_matrix(rnd, 10, 12)
if batch_size is None:
assert expected_stm.shape.ndims == 2
else:
assert expected_stm.shape.ndims == 3
stm, expected_stm = tf.compat.v1.Session().run(
[stm.spatial_transform_matrix, expected_stm], feed_dict=feed_dict
)
np.testing.assert_equal(stm, expected_stm)
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
processor = RandomRotation(min_angle=40, max_angle=90, probability=1.0)
processor_dict = processor.serialize()
deserialized_processor = deserialize_tao_object(processor_dict)
assert processor._min_angle == deserialized_processor._min_angle
assert processor._max_angle == deserialized_processor._max_angle
assert processor._probability == deserialized_processor._probability
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_random_rotation.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RandomBrightness processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from six.moves import mock
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object
from nvidia_tao_tf1.core.processors import RandomBrightness
from nvidia_tao_tf1.core.processors.augment.color import brightness_offset_matrix
from nvidia_tao_tf1.core.types import Canvas2D, Transform
@pytest.mark.parametrize(
"batch_size", [None, 5, tf.compat.v1.placeholder(dtype=tf.int32)]
)
@pytest.mark.parametrize("scale_max", [90, 180])
@pytest.mark.parametrize(
"brightness_offset", [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (-1.0, 2.0, -1.0)]
)
@mock.patch(
"nvidia_tao_tf1.core.processors.augment.random_contrast.color.tf.random.truncated_normal"
)
def test_random_brightness_call(
mocked_truncated_normal, batch_size, brightness_offset, scale_max
):
"""Test RandomBrightness processor call"""
batch_shape = [] if batch_size is None else [batch_size]
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4, batch_shape=batch_shape, dtype=tf.float32),
spatial_transform_matrix=tf.eye(3, batch_shape=batch_shape, dtype=tf.float32),
)
feed_dict = {}
expected_batch_size = batch_size
if type(batch_size) == tf.Tensor:
expected_batch_size = 7
feed_dict = {batch_size: expected_batch_size}
# Fix brightness offset for testing.
if batch_size is not None:
brightness_offset = tf.tile(
tf.constant(brightness_offset, shape=[1, 3]), [batch_size, 1]
)
mocked_truncated_normal.return_value = brightness_offset
else:
mocked_truncated_normal.return_value = tf.constant(
brightness_offset, dtype=tf.float32
)
processor = RandomBrightness(scale_max=scale_max, uniform_across_channels=False)
final_transform = processor(transform)
expected_ctm = brightness_offset_matrix(offset=brightness_offset)
if batch_size is None:
assert expected_ctm.shape.ndims == 2
else:
assert expected_ctm.shape.ndims == 3
ctm, expected_ctm = tf.compat.v1.Session().run(
[final_transform.color_transform_matrix, expected_ctm], feed_dict=feed_dict
)
np.testing.assert_equal(ctm, expected_ctm)
if batch_size is None:
mocked_truncated_normal.assert_called_with([3], mean=0.0, stddev=scale_max / 2)
else:
mocked_truncated_normal.assert_called_once()
call_batch_shape = mocked_truncated_normal.call_args[0][0]
assert len(call_batch_shape) == 2
assert (
tf.compat.v1.Session().run(call_batch_shape[0], feed_dict=feed_dict)
== expected_batch_size
)
assert call_batch_shape[1] == 3
assert mocked_truncated_normal.call_args[1] == {
"mean": 0.0,
"stddev": scale_max / 2,
}
def test_random_brightness_call_with_invalid_input():
"""Test RandomBrightness processor call error handling on invalid input types."""
# Calling RandomBrightness with str should throw a TypeError.
with pytest.raises(TypeError):
RandomBrightness(0, True)("Transform")
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
processor = RandomBrightness(scale_max=90, uniform_across_channels=False)
processor_dict = processor.serialize()
deserialized_processor = deserialize_tao_object(processor_dict)
assert processor._scale_max == deserialized_processor._scale_max
assert (
processor._uniform_across_channels
== deserialized_processor._uniform_across_channels
)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_random_brightness.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying scale transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.augment import spatial
from nvidia_tao_tf1.core.processors.processors import Processor
from nvidia_tao_tf1.core.types import Canvas2D, Transform
class Scale(Processor):
"""Processor for fixed scaling transform."""
@save_args
def __init__(self, height, width, **kwargs):
"""Construct a Scale processor.
Args:
height (float): New height to which contents will be scaled up or down.
width (float): New width to which contents will be scaled up or down.
kwargs (dict): keyword arguments passed to parent class.
"""
super(Scale, self).__init__(**kwargs)
if height <= 0:
raise ValueError("Scale.height ({}) is not positive.".format(height))
if width <= 0:
raise ValueError("Scale.width ({}) is not positive.".format(width))
self._height = height
self._width = width
def __repr__(self):
"""Return a string representation of the processor."""
return "Scale(height={}, width={})".format(self._height, self._width)
def call(self, transform):
"""Return a Transform whose spatial transformation matrix is perturbed at random.
Args:
transform (Transform): An input Transform instance to be processed.
Returns:
Transform: Final Transform instance with spatial transform matrix perturbed.
"""
if not isinstance(transform, Transform):
raise TypeError(
"Expecting an argument of type 'Transform', "
"given: {}.".format(type(transform).__name__)
)
horizontal_ratio = transform.canvas_shape.width / self._width
vertical_ratio = transform.canvas_shape.height / self._height
stm_zoom = spatial.zoom_matrix(ratio=(horizontal_ratio, vertical_ratio))
stm_zoom = tf.broadcast_to(
stm_zoom, tf.shape(input=transform.spatial_transform_matrix)
)
processed_stm = tf.matmul(stm_zoom, transform.spatial_transform_matrix)
return Transform(
canvas_shape=Canvas2D(height=self._height, width=self._width),
color_transform_matrix=transform.color_transform_matrix,
spatial_transform_matrix=processed_stm,
)
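# Illustrative sketch: the ratios above map the output canvas back onto the input
# canvas, e.g. scaling a 20x40 (HxW) canvas to height=10, width=20 gives ratios of
# 2.0, so each output pixel samples the input at twice its coordinates. A minimal
# numpy illustration of such a zoom matrix (row-vector convention, assumed here):
if __name__ == "__main__":
    import numpy as np

    horizontal_ratio, vertical_ratio = 40.0 / 20.0, 20.0 / 10.0
    stm_zoom = np.array(
        [[horizontal_ratio, 0.0, 0.0], [0.0, vertical_ratio, 0.0], [0.0, 0.0, 1.0]]
    )
    print(np.array([5.0, 5.0, 1.0]) @ stm_zoom)  # -> [10. 10.  1.]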
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/scale.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RandomContrast processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from six.moves import mock
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object
from nvidia_tao_tf1.core.processors import RandomContrast
from nvidia_tao_tf1.core.processors.augment.color import contrast_matrix
from nvidia_tao_tf1.core.types import Canvas2D, Transform
@pytest.mark.parametrize(
"batch_size", [None, 3, tf.compat.v1.placeholder(dtype=tf.int32)]
)
@pytest.mark.parametrize("scale_max", [90, 180])
@pytest.mark.parametrize("center", [1.0 / 2.0, 255.0 / 2.0])
@pytest.mark.parametrize("contrast", [-0.5, 0.0, 0.5, 1.0])
@mock.patch(
"nvidia_tao_tf1.core.processors.augment.random_contrast.color.tf.random.truncated_normal"
)
def test_random_contrast_call(
mocked_truncated_normal, batch_size, contrast, center, scale_max
):
"""Test RandomContrast processor call"""
batch_shape = [] if batch_size is None else [batch_size]
transform = Transform(
canvas_shape=Canvas2D(height=12, width=10),
color_transform_matrix=tf.eye(4, batch_shape=batch_shape, dtype=tf.float32),
spatial_transform_matrix=tf.eye(3, batch_shape=batch_shape, dtype=tf.float32),
)
feed_dict = {}
expected_batch_size = batch_size
if type(batch_size) == tf.Tensor:
expected_batch_size = 7
feed_dict = {batch_size: expected_batch_size}
contrast = tf.fill(dims=batch_shape, value=contrast)
center = tf.fill(dims=batch_shape, value=center)
mocked_truncated_normal.return_value = contrast
processor = RandomContrast(scale_max=scale_max, center=center)
final_transform = processor(transform)
expected_ctm = contrast_matrix(contrast=contrast, center=center)
if batch_size is None:
assert expected_ctm.shape.ndims == 2
else:
assert expected_ctm.shape.ndims == 3
ctm, expected_ctm = tf.compat.v1.Session().run(
[final_transform.color_transform_matrix, expected_ctm], feed_dict=feed_dict
)
np.testing.assert_equal(ctm, expected_ctm)
if batch_size is None:
mocked_truncated_normal.assert_called_with([], mean=0.0, stddev=scale_max / 2.0)
else:
mocked_truncated_normal.assert_called_once()
call_batch_shape = mocked_truncated_normal.call_args[0][0]
assert len(call_batch_shape) == 1
assert (
tf.compat.v1.Session().run(call_batch_shape[0], feed_dict=feed_dict)
== expected_batch_size
)
assert mocked_truncated_normal.call_args[1] == {
"mean": 0.0,
"stddev": scale_max / 2.0,
}
def test_random_contrast_call_with_invalid_input():
"""Test RandomContrast processor call error handling on invalid input types."""
# Calling RandomContrast with str should throw a TypeError.
with pytest.raises(TypeError):
RandomContrast(0, 0)("Transform")
def test_serialization_and_deserialization():
"""Test that it is a MaglevObject that can be serialized and deserialized."""
processor = RandomContrast(90, 0.5)
processor_dict = processor.serialize()
deserialized_processor = deserialize_tao_object(processor_dict)
assert processor._scale_max == deserialized_processor._scale_max
assert processor._center == deserialized_processor._center
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/test_random_contrast.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus Random Blur Processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors.augment.blur import Blur
from nvidia_tao_tf1.core.processors.augment.pixel_removal import PixelRemoval
class RandomBlur(Blur):
"""Random Blur Transformation class."""
@save_args
def __init__(self, random=True, **kwargs):
"""__init__ method."""
super(RandomBlur, self).__init__(random=random, **kwargs)
self.pixel_remover = PixelRemoval(random)
def call(self, images, size, std, blur_pct, blur_max_block, prob=1.0):
"""
Randomly blur patches of the image.
Args:
images (tensor): The images to augment in NCHW format.
size (int): The largest size for the blur filter if random.
If not random, then this is the size of the filter to be used to
blur the image.
std (float): The maximum standard deviation of the gaussian kernel.
If not random, then this is the standard deviation to be used to
blur the image.
blur_pct (float): The percentage of pixels to blur.
blur_max_block (float): The maximum block size with which to group blurred pixels.
prob (float): The probability of applying the augmentation. Only used if
random is set.
        Returns:
            The randomly blurred images.
"""
fully_blurred = self._gaussian_blur(images, size=size, std=std)
blur_condition = self.pixel_remover.make_selection_condition(
pct=blur_pct, max_block=blur_max_block, shape=images.shape
)
blurred = tf.compat.v1.where(blur_condition, fully_blurred, images)
if self.random:
application_prob = tf.random.uniform(shape=[], maxval=1.0)
no_aug_cond = tf.greater(application_prob, prob)
return tf.cond(
pred=no_aug_cond, true_fn=lambda: images, false_fn=lambda: blurred
)
return blurred
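# A minimal usage sketch (not part of the original module), assuming the Processor base
# class routes a direct call through to call(); the argument values and the
# _example_random_blur_usage name are illustrative. It blurs roughly 30% of the pixels of
# an NCHW batch with a gaussian kernel of size up to 7, applying the augmentation with
# probability 0.5.
def _example_random_blur_usage():
    images = tf.random.uniform(shape=[2, 3, 64, 64])  # Batch of two 3-channel images.
    blur = RandomBlur(random=True)
    return blur(images, size=7, std=2.0, blur_pct=0.3, blur_max_block=8, prob=0.5)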
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/processors/augment/random_blur.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import numpy as np
from nvidia_tao_tf1.core.models.import_keras import keras as keras_fn
from nvidia_tao_tf1.core.models.templates.conv_gru_2d import ConvGRU2D
from nvidia_tao_tf1.core.models.templates.qdq_layer import QDQ
from nvidia_tao_tf1.core.models.templates.quantized_conv2d import QuantizedConv2D
from nvidia_tao_tf1.core.models.templates.utils import add_deconv_head
import nvidia_tao_tf1.core.pruning
from nvidia_tao_tf1.core.pruning.utils import find_prunable_parent
from nvidia_tao_tf1.core.templates.alexnet import AlexNet
from nvidia_tao_tf1.core.templates.helnet import HelNet
from nvidia_tao_tf1.core.templates.resnet import ResNet
import nvidia_tao_tf1.core.templates.utils
import nvidia_tao_tf1.core.utils
import pytest
keras = keras_fn()
class TestPruning(object):
"""Main class for pruning tests."""
@staticmethod
def create_alexnet(input_shape, data_format):
return AlexNet(
input_shape, add_head=True, data_format=data_format, nclasses=10, hidden_fc_neurons=256)
def check_weights(self, pruned_model, granularity, min_num_filters, filter_counts):
for layer in pruned_model.layers:
weights = layer.get_weights()
if type(layer) in [
keras.layers.Conv2D,
keras.layers.DepthwiseConv2D,
keras.layers.Conv2DTranspose,
keras.layers.Dense,
QuantizedConv2D
]:
                # When unpacking weights, the last element is always a scale factor, while the
                # remaining blobs behave like those of a regular conv layer.
if type(layer) == QuantizedConv2D:
                    # When the QuantizedConv2D node is generated using
                    # modulus.models.quantized_keras_model.create_quantized_keras_model,
                    # the scale factor is not initialized and weights[-1] is not a float.
weights = weights[:-1]
if len(weights) == 1:
kernels = weights[0]
biases = None
elif len(weights) == 2:
kernels, biases = weights
else:
raise ValueError("Unhandled number of weights: %d" % len(weights))
if type(layer) == keras.models.Model:
self.check_weights(layer,
granularity,
min_num_filters,
filter_counts.pop(layer.name)
)
elif type(layer) == keras.layers.Conv2DTranspose:
# we're not pruning these layers
filter_count = filter_counts[layer.name]
n_kept = kernels.shape[-2]
assert n_kept == filter_count['total']
if biases is not None:
assert n_kept == biases.shape[-1]
elif type(layer) in [
keras.layers.BatchNormalization,
QDQ
]:
# this should just propagate previous pruning
filter_count = filter_counts[layer.name]
to_prune = filter_count['to_prune']
# apply granularity and min
to_prune = min(to_prune - to_prune % granularity,
filter_count['total'] - min_num_filters)
to_keep = filter_count['total'] - to_prune
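                # Worked example (illustrative numbers): with to_prune=5, granularity=2,
                # total=16 and min_num_filters=8, to_prune becomes
                # min(5 - 5 % 2, 16 - 8) = min(4, 8) = 4, so to_keep = 12.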
if type(layer) == keras.layers.BatchNormalization:
assert all([len(w) == to_keep for w in weights])
else:
assert all([type(w) == np.float32 for w in weights])
elif type(layer) == keras.layers.DepthwiseConv2D:
# handle depthwiseconv2d specially.
n_kept = kernels.shape[-2]
if biases is not None:
assert n_kept == biases.shape[-1]
filter_count = filter_counts[layer.name]
if filter_count['total'] > min_num_filters:
assert n_kept >= min_num_filters
n_pruned = filter_count['total'] - n_kept
to_prune = filter_count['to_prune']
assert n_pruned == min(to_prune - to_prune % granularity,
filter_count['total'] - min_num_filters)
elif type(layer) == ConvGRU2D:
# Check all weights (kernels and biases) share the same output dimensionality.
n_kept = [w.shape[-1] for w in weights]
assert len(set(n_kept)) == 1
n_kept = n_kept[0]
# Check that weights operating on the state variable have correct input depth.
# U_r, U_z, U_h.
for state_weight in weights[3:6]:
assert state_weight.shape[-2] == n_kept
# Make sure we kept the min amount of filters.
filter_count = filter_counts[layer.name]
if filter_count['total'] > min_num_filters:
assert n_kept >= min_num_filters
n_pruned = filter_count['total'] - n_kept
to_prune = filter_count['to_prune']
# Make sure the number of pruned filters matches
# the expected granularity.
assert n_pruned == min(to_prune - to_prune % granularity,
filter_count['total'] - min_num_filters)
elif weights:
# Checking weights for a conv2d layer.
n_kept = kernels.shape[-1]
if biases is not None:
# Make sure we pruned kernels and biases identically.
assert n_kept == biases.shape[-1]
filter_count = filter_counts[layer.name]
# Make sure we kept the min amount of filters.
if filter_count['total'] > min_num_filters:
assert n_kept >= min_num_filters
n_pruned = filter_count['total'] - n_kept
to_prune = filter_count['to_prune']
# Make sure the number of pruned filters matches
# the expected granularity.
assert n_pruned <= min(to_prune - to_prune % granularity,
filter_count['total'] - min_num_filters)
def common(self,
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=None,
check_output_on_input_shape=None,
layer_config_overrides=None,
equalization_criterion='union'):
"""Common denominator for most pruning tests.
This method sets weights such that half of the neurons should be pruned
considering the specified threshold but ignoring granularity, the min number
of filters to retain and excluded layers.
This method then proceeds to pruning the model and checks whether the expected
number of neurons has been pruned.
Args:
model: the model to prune.
method (str): pruning method.
normalizer (str): type of normalizer to use when pruning.
criterion (str): type of criterion to use when pruning.
granularity (int): granularity by which to prune filters.
min_num_filters (int): min number of filters to retain when pruning.
threshold (float): pruning threshold.
excluded_layers (list): list of layers to exclude when pruning.
check_output_on_input_shape (tuple): shape to use to verify inference (output shape
and activations), or ``None`` to skip inference
checks. For multiple inputs, this can also be
passed as a list of tuples.
layer_config_overrides (dict): A dictionary of key-value pairs used for overriding
layer configuration. Use cases include changing regularizers after pruning.
equalization_criterion (str): Criteria to equalize the stats of inputs to an element
wise op layer. Options are [arithmetic_mean, geometric_mean, union, intersection].
"""
if excluded_layers is None:
excluded_layers = []
assert criterion == 'L2'
# Targeted average norm of the filters to keep (actual weights will be
# randomly picked from a narrow uniform distribution).
keep_norm = threshold * 4.
if check_output_on_input_shape is not None:
# This test only works on activations for which f(0)=0, for example:
# "tanh", "relu", "linear".
for layer in model.layers:
if (layer.name not in excluded_layers and hasattr(layer, 'activation') and
layer.activation.__name__ not in ['linear', 'relu', 'tanh']):
raise ValueError("Found unsupported activation in layer "
"named %s with type %s and activation type %s" %
(layer.name, type(layer), layer.activation.__name__))
if equalization_criterion in ['intersection', 'geometric_mean']:
raise ValueError("Unsupported merge layer equalization criterion for"
"pruning output check: %s." % equalization_criterion)
# Set the norm of neurons to prune to zero so we can match the unpruned
# model output with the pruned model output.
prune_norm = 0.
else:
# Just make neurons small enough to be pruned.
prune_norm = threshold / 4.
filter_counts = {}
filter_counts = self.set_weights(model, method, normalizer, criterion, granularity,
min_num_filters, keep_norm, prune_norm, excluded_layers,
threshold, equalization_criterion, filter_counts)
if check_output_on_input_shape is not None:
batch_size = 2
if isinstance(check_output_on_input_shape, list):
# Multiple-input case.
batch_data = []
for shape in check_output_on_input_shape:
batch_shape = (batch_size,) + shape
batch_data.append(np.random.random_sample(batch_shape))
else:
# single-input case.
batch_shape = (batch_size,) + check_output_on_input_shape
batch_data = np.random.random_sample(batch_shape)
output_before = model.predict(batch_data)
shape_before = output_before.shape
pruned_model = nvidia_tao_tf1.core.pruning.prune(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers,
equalization_criterion=equalization_criterion,
layer_config_overrides=layer_config_overrides
)
self.check_weights(pruned_model, granularity, min_num_filters, filter_counts)
if check_output_on_input_shape is not None:
output_after = pruned_model.predict(batch_data)
shape_after = output_after.shape
assert shape_before == shape_after
assert np.allclose(output_before, output_after, rtol=1e-03, atol=1e-05)
return pruned_model
@staticmethod
def get_uniform(shape, mean, boundary=0.1):
"""Return a uniform distributed sample with a randomized sign.
Returns U(mean*(1-boundary), mean*(1+boundary)) with a random sign.
Args:
shape (list): shape of distribution to return.
mean (float): float of distribution to return.
boundary (float): relative multiplier to set range boundaries.
"""
x = np.random.uniform(low=mean * (1 - boundary), high=mean * (1 + boundary), size=shape)
x *= np.sign(np.random.normal(size=shape))
return x
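    # Illustrative note (not part of the original test): get_uniform((4,), mean=2.0)
    # returns four values with magnitudes in [1.8, 2.2] and random signs, so kept and
    # pruned filters end up with clearly separated average norms.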
def set_weights(self, model, method, normalizer, criterion, granularity, min_num_filters,
keep_norm, prune_norm, excluded_layers, threshold, equalization_criterion,
filter_counts):
# Pass 1 : Visit only prunable layers
for layer in model.layers:
weights = layer.get_weights()
norms = []
prune_indices = []
keep_indices = []
if type(layer) in [
keras.layers.Conv2D,
keras.layers.DepthwiseConv2D,
keras.layers.Conv2DTranspose,
keras.layers.Dense,
QuantizedConv2D,
]:
scale_factor = None
if type(layer) == QuantizedConv2D:
scale_factor = weights[-1]
weights = weights[:-1]
if len(weights) == 1:
kernels = weights[0]
biases = None
elif len(weights) == 2:
kernels, biases = weights
else:
raise ValueError("Unhandled number of weights: %d" % len(weights))
if type(layer) == keras.models.Model:
filter_counts = self.set_weights(
layer, method, normalizer, criterion, granularity, min_num_filters, keep_norm,
prune_norm, excluded_layers, threshold, equalization_criterion, filter_counts)
elif type(layer) == keras.layers.Conv2DTranspose:
# expected kernel shape is (kernel_width, kernel_height, output_fmaps, input_fmaps)
n_filters = kernels.shape[-2]
# we are not pruning these layers
filter_counts[layer.name] = {
'to_keep': n_filters,
'to_prune': 0,
'total': n_filters,
'keep_indices': range(n_filters),
'prune_indices': prune_indices,
'norms': np.asarray(norms)
}
elif type(layer) in [
keras.layers.BatchNormalization,
QDQ
]:
                # Account for weights in the layer, but pass through during the first pass,
                # waiting for all prunable and element-wise layers to be explored.
pass
elif (
type(layer) == keras.layers.Conv2D or
type(layer) == QuantizedConv2D
):
n_prune = 0
n_keep = 0
n_params_per_kernel = kernels[:, :, :, 0].size
keep_norm_ = math.sqrt(keep_norm**2 / n_params_per_kernel)
prune_norm_ = math.sqrt(prune_norm**2 / n_params_per_kernel)
for i in range(kernels.shape[-1]):
# Randomly keep or remove filters.
if np.random.uniform() > 0.5 or layer.name in excluded_layers:
# Keep that one.
kernels[:, :, :, i] = self.get_uniform(kernels.shape[:3], keep_norm_)
if biases is not None:
biases[i] = keep_norm_
n_keep += 1
keep_indices.append(i)
norms.append(keep_norm)
else:
# Prune that one.
kernels[:, :, :, i] = self.get_uniform(kernels.shape[:3], prune_norm_)
if biases is not None:
biases[i] = prune_norm_
norms.append(prune_norm)
n_prune += 1
prune_indices.append(i)
if biases is not None:
apply_weights = (kernels, biases)
else:
apply_weights = (kernels,)
# Packing weights for the QuantizedConv2D layer.
if scale_factor:
apply_weights += (scale_factor,)
layer.set_weights(apply_weights)
filter_counts[layer.name] = {
'layer_name': layer.name,
'to_keep': n_keep,
'to_prune': n_prune,
'total': n_keep + n_prune,
'keep_indices': keep_indices,
'prune_indices': prune_indices,
'norms': np.asarray(norms)
}
elif type(layer) == keras.layers.DepthwiseConv2D:
n_prune = 0
n_keep = 0
n_params_per_kernel = kernels[:, :, 0, 0].size
keep_norm_ = math.sqrt(keep_norm ** 2 / n_params_per_kernel)
prune_norm_ = math.sqrt(prune_norm ** 2 / n_params_per_kernel)
for i in range(kernels.shape[-2]):
# Randomly keep or remove filters.
if np.random.uniform() > 0.5 or layer.name in excluded_layers:
# Keep that one.
kernels[:, :, i, 0] = self.get_uniform(kernels.shape[:2], keep_norm_)
if biases is not None:
biases[i] = keep_norm_
n_keep += 1
keep_indices.append(i)
norms.append(keep_norm)
else:
# Prune that one.
kernels[:, :, i, 0] = self.get_uniform(kernels.shape[:2], prune_norm_)
if biases is not None:
biases[i] = prune_norm_
norms.append(prune_norm)
n_prune += 1
prune_indices.append(i)
if biases is not None:
layer.set_weights((kernels, biases))
else:
layer.set_weights((kernels,))
filter_counts[layer.name] = {'layer_name': layer.name,
'to_keep': n_keep,
'to_prune': n_prune,
'total': n_keep + n_prune,
'keep_indices': keep_indices,
'prune_indices': prune_indices,
'norms': np.asarray(norms)}
elif type(layer) == keras.layers.Dense:
n_prune = 0
n_keep = 0
n_params_per_kernel = kernels.shape[0]
keep_norm_ = math.sqrt(keep_norm**2 / n_params_per_kernel)
prune_norm_ = math.sqrt(prune_norm**2 / n_params_per_kernel)
for i in range(kernels.shape[1]):
# Randomly keep or remove filters.
if np.random.uniform() > 0.5 or layer.name in excluded_layers:
# Keep that one.
kernels[:, i] = self.get_uniform(kernels.shape[:1], keep_norm_)
n_keep += 1
if biases is not None:
biases[i] = keep_norm_
keep_indices.append(i)
norms.append(keep_norm_)
else:
# Prune that one.
kernels[:, i] = self.get_uniform(kernels.shape[:1], prune_norm_)
if biases is not None:
biases[i] = prune_norm_
n_prune += 1
prune_indices.append(i)
norms.append(prune_norm_)
if biases is not None:
layer.set_weights((kernels, biases))
else:
layer.set_weights((kernels,))
filter_counts[layer.name] = {
'to_keep': n_keep,
'to_prune': n_prune,
'total': n_keep + n_prune,
'keep_indices': keep_indices,
'prune_indices': prune_indices,
'norms': np.asarray(norms)
}
elif type(layer) == ConvGRU2D:
n_prune = 0
n_keep = 0
# Weights are W_z, W_r, W_h, U_z, U_r, U_h, b_z, b_r, b_h.
n_params_per_kernel = weights[0][:, :, :, 0].size
keep_norm_ = math.sqrt(keep_norm**2 / n_params_per_kernel)
prune_norm_ = math.sqrt(prune_norm**2 / n_params_per_kernel)
for i in range(weights[0].shape[-1]):
# Randomly keep or remove filters.
if np.random.uniform() > 0.5 or layer.name in excluded_layers:
# Keep that one. Kernels W_z, W_r, W_h, U_z, U_r, U_h.
for kernels in weights[:6]:
kernels[:, :, :, i] = self.get_uniform(kernels.shape[:3], keep_norm_)
# Biases b_z, b_r, b_h.
for biases in weights[6:]:
biases[i] = keep_norm_
n_keep += 1
keep_indices.append(i)
norms.append(keep_norm)
else:
# Prune that one. Kernels W_z, W_r, W_h, U_z, U_r, U_h.
for kernels in weights[:6]:
kernels[:, :, :, i] = self.get_uniform(kernels.shape[:3], prune_norm_)
# Biases b_z, b_r, b_h.
for biases in weights[6:]:
biases[i] = prune_norm_
n_prune += 1
prune_indices.append(i)
norms.append(prune_norm)
layer.set_weights(weights)
filter_counts[layer.name] = {
'layer_name': layer.name,
'to_keep': n_keep,
'to_prune': n_prune,
'total': n_keep + n_prune,
'keep_indices': keep_indices,
'prune_indices': prune_indices,
'norms': np.asarray(norms)
}
elif weights:
raise RuntimeError("Unknown layer type=%s has weights" % type(layer))
# Equalizing inputs for layers with element wise operations.
filter_counts = self._equalize_inputs(model, filter_counts, granularity, min_num_filters,
threshold, equalization_criterion, excluded_layers)
# Pass two: This time visit batchnorm layers.
for layer in model.layers:
if type(layer) in [
keras.layers.BatchNormalization,
QDQ
]:
# We are just propagating the previous layer.
previous_layer = []
inbound_nodes = layer._inbound_nodes
# For some reason, tf.keras does not always put things in a list.
if not isinstance(inbound_nodes, list):
inbound_nodes = [inbound_nodes]
for n in inbound_nodes:
_inbound_layers = n.inbound_layers
# For some reason, tf.keras does not always put things in a list.
if not isinstance(_inbound_layers, list):
_inbound_layers = [_inbound_layers]
for l in _inbound_layers:
previous_layer.append(l.name)
filter_counts[layer.name] = filter_counts[previous_layer[0]]
if type(layer) == keras.layers.DepthwiseConv2D:
dw_parents = []
dw_parents = find_prunable_parent(dw_parents, layer, True)
filter_counts = self._match_dw_indices(dw_parents[0], layer, filter_counts,
min_num_filters, granularity, threshold,
equalization_criterion, excluded_layers)
return filter_counts
def _equalize_inputs(self,
model,
filter_counts,
granularity,
min_num_filters,
threshold,
equalization_criterion,
excluded_layers=None):
layer_types = {type(l) for l in model.layers}
if keras.models.Model in layer_types:
if layer_types != set([keras.layers.InputLayer, keras.models.Model]):
raise NotImplementedError("Model encapsulation is only supported if outer model"
"only consists of input layers.")
model_layer = [l for l in model.layers if (type(l) == keras.models.Model)]
if len(model_layer) > 1:
raise NotImplementedError("Model encapsulation is only supported if outer model"
"only includes one inner model")
            return self._equalize_inputs(model_layer[0], filter_counts, granularity,
                                         min_num_filters, threshold, equalization_criterion,
                                         excluded_layers)
# Iterating though model layers.
for layer in model.layers:
if type(layer) in [
keras.layers.Add, keras.layers.Subtract, keras.layers.Multiply,
keras.layers.Average, keras.layers.Maximum
]:
eltwise_prunable_inputs = []
eltwise_prunable_inputs = find_prunable_parent(eltwise_prunable_inputs, layer)
# Remove broadcast operation layers from mapping
for l in eltwise_prunable_inputs:
if l.filters == 1:
eltwise_prunable_inputs.pop(eltwise_prunable_inputs.index(l))
                # Do not update/match filter indices for eltwise layer inputs if they are
                # included in the excluded layers.
# if not any(i.name in excluded_layers for i in eltwise_prunable_inputs):
if len(eltwise_prunable_inputs) > 1:
filter_counts = self._match_indices(
eltwise_prunable_inputs, filter_counts, min_num_filters, granularity, layer,
threshold, equalization_criterion, excluded_layers)
return filter_counts
def _match_indices(self, eltwise_prunable_inputs, filter_counts, min_num_filters, granularity,
layer, threshold, equalization_criterion, excluded_layers):
# Compute retainable filters.
output_depth = eltwise_prunable_inputs[0].filters
if any(l.name in excluded_layers for l in eltwise_prunable_inputs):
matched_retained_idx = range(output_depth)
else:
cumulative_stat = np.array([])
for idx, l in enumerate(eltwise_prunable_inputs, 1):
layerwise_stat = filter_counts[l.name]['norms']
if not np.asarray(cumulative_stat).size:
cumulative_stat = layerwise_stat
elif equalization_criterion == 'union':
cumulative_stat = np.maximum(layerwise_stat, cumulative_stat)
elif equalization_criterion == 'intersection':
cumulative_stat = np.minimum(layerwise_stat, cumulative_stat)
elif equalization_criterion == "arithmetic_mean":
cumulative_stat = (cumulative_stat * (idx - 1) + layerwise_stat) / float(idx)
elif equalization_criterion == "geometric_mean":
cumulative_stat = np.power(
np.multiply(np.power(cumulative_stat, idx - 1), layerwise_stat),
float(1 / idx))
else:
raise NotImplementedError("Unknown equalization criterion: {}"
.format(equalization_criterion))
output_idx = np.where(cumulative_stat > threshold)[0]
num_retained = len(output_idx)
min_num_filters = min(min_num_filters, output_depth)
num_retained = max(min_num_filters, num_retained)
if num_retained % granularity > 0:
num_retained += granularity - (num_retained % granularity)
num_retained = min(num_retained, output_depth)
sorted_idx = np.argsort(-cumulative_stat)
matched_retained_idx = np.sort(sorted_idx[:num_retained])
# Set filter counts for updated layers
for l in eltwise_prunable_inputs:
filter_counts[l.name]['keep_indices'] = matched_retained_idx
            filter_counts[l.name]['prune_indices'] = np.setdiff1d(range(output_depth),
                                                                   matched_retained_idx)
filter_counts[l.name]['to_keep'] = len(matched_retained_idx)
filter_counts[l.name]['to_prune'] = output_depth - len(matched_retained_idx)
filter_counts[l.name]['total'] = output_depth
return filter_counts
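    # Worked example (illustrative numbers) for _match_indices with the 'union' criterion:
    # two Add inputs with norms [0.9, 0.1, 0.6, 0.2] and [0.1, 0.8, 0.7, 0.1] combine into
    # the element-wise maximum [0.9, 0.8, 0.7, 0.2]. With threshold=0.5 three filters pass;
    # granularity=2 rounds the retained count from 3 up to 4, so all four filters are kept
    # for every input of the element-wise layer.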
def _match_dw_indices(self, parent_layer, layer, filter_counts,
min_num_filters, granularity, threshold,
equalization_criterion, excluded_layers):
# Compute retainable filters for DepthwiseConv2D layer.
dw_layers = [parent_layer, layer]
output_depth = parent_layer.filters
if any(l.name in excluded_layers for l in dw_layers):
matched_retained_idx = range(output_depth)
else:
cumulative_stat = np.array([])
for idx, l in enumerate(dw_layers, 1):
layerwise_stat = filter_counts[l.name]['norms']
if not np.asarray(cumulative_stat).size:
cumulative_stat = layerwise_stat
elif equalization_criterion == 'union':
cumulative_stat = np.maximum(layerwise_stat, cumulative_stat)
elif equalization_criterion == 'intersection':
cumulative_stat = np.minimum(layerwise_stat, cumulative_stat)
elif equalization_criterion == "arithmetic_mean":
cumulative_stat = (cumulative_stat * (idx - 1) + layerwise_stat) / float(idx)
elif equalization_criterion == "geometric_mean":
cumulative_stat = np.power(np.multiply(np.power(cumulative_stat, idx - 1),
layerwise_stat), float(1 / idx))
else:
raise NotImplementedError("Unknown equalization criterion: {}"
.format(equalization_criterion))
output_idx = np.where(cumulative_stat > threshold)[0]
num_retained = len(output_idx)
min_num_filters = min(min_num_filters, output_depth)
num_retained = max(min_num_filters, num_retained)
if num_retained % granularity > 0:
num_retained += granularity - (num_retained % granularity)
num_retained = min(num_retained, output_depth)
sorted_idx = np.argsort(-cumulative_stat)
matched_retained_idx = np.sort(sorted_idx[:num_retained])
# Set filter counts for updated layers
for l in dw_layers:
filter_counts[l.name]['keep_indices'] = matched_retained_idx
            filter_counts[l.name]['prune_indices'] = np.setdiff1d(range(output_depth),
                                                                   matched_retained_idx)
filter_counts[l.name]['to_keep'] = len(matched_retained_idx)
filter_counts[l.name]['to_prune'] = output_depth - len(matched_retained_idx)
filter_counts[l.name]['total'] = output_depth
return filter_counts
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape,"
"equalization_criterion, elmtwise_op, method, normalizer, criterion,"
"granularity, min_num_filters, threshold, dont_prune_elmtwise_input", [
(HelNet, 6, 'channels_first', False, (3, 128, 64), "union",
keras.layers.Add, 'min_weight', 'off', 'L2', 2, 8, 0.5, False),
(ResNet, 10, 'channels_first', True, (3, 128, 64), "union",
keras.layers.Add, 'min_weight', 'off', 'L2', 2, 8, 0.5, True),
(HelNet, 6, 'channels_first', False, (3, 128, 64), "geometric_mean",
keras.layers.Multiply, 'min_weight', 'off', 'L2', 2, 8, 0.5, True),
(ResNet, 10, 'channels_first', False, (3, 128, 64), "union",
keras.layers.Subtract, 'min_weight', 'off', 'L2', 2, 8, 0.5, False),
])
def test_broadcast_ops(self, model, nlayers, data_format, use_batch_norm, input_shape,
equalization_criterion, elmtwise_op, normalizer, method, criterion,
granularity, min_num_filters, threshold, dont_prune_elmtwise_input):
"""Test broadcast element-wise operations."""
inputs = keras.layers.Input(shape=input_shape)
if model == ResNet: # pylint: disable=W0143
model = model(
nlayers,
inputs,
use_batch_norm=use_batch_norm,
data_format=data_format,
all_projections=True)
else:
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
# Add conv layer.
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='elmtwise_input_1')(x)
if elmtwise_op != keras.layers.Subtract:
x2 = keras.layers.Conv2D(
filters=1,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name="broadcast_input")(x)
# Add branch.
x1 = keras.layers.Conv2D(
filters=24,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='conv2d_x1')(x)
x1 = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='elmtwise_input_2')(x1)
# Add skip connection. Broadcast operations are not supported for subtract layers.
if elmtwise_op != keras.layers.Subtract:
x = elmtwise_op()([x1, x, x2])
else:
x = elmtwise_op()([x1, x])
# Add extra layer on top.
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='sigmoid',
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
excluded_layers = ['conv2d_output']
if dont_prune_elmtwise_input:
excluded_layers.extend(['elmtwise_input_1'])
if equalization_criterion in ['intersection', 'geometric_mean']:
# Disable the output tests, as these criteria are not even supposed to work.
# Technically, geometric_mean might work when the merge is a multiplication,
            # but since the setting is global, it is better not to support it.
check_output_on_input_shape = None
else:
check_output_on_input_shape = input_shape
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {
'excluded_layers': excluded_layers,
'check_output_on_input_shape': check_output_on_input_shape,
'equalization_criterion': equalization_criterion
}
# Pruning and check for pruned weights.
self.common(*args, **kwargs)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape, method,"
"normalizer, criterion, granularity, min_num_filters, threshold,"
"excluded_layers", [
(HelNet, 6, 'channels_first', False,
(3, 256, 256), 'min_weight', 'off', 'L2', 4, 8, 0.5, ['block_4a_conv_1']),
('alexnet', None, 'channels_last', True,
(256, 256, 3), 'min_weight', 'off', 'L2', 4, 16, 0.25, ['head_fc8']),
])
def test_dont_prune_layer(self, model, nlayers, data_format, use_batch_norm, input_shape,
normalizer, method, criterion, granularity, min_num_filters,
threshold, excluded_layers):
"""Test that we don't prune layers that are explicitly excluded."""
inputs = keras.layers.Input(shape=input_shape)
if model == HelNet: # pylint: disable=W0143
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
elif model == 'alexnet':
model = self.create_alexnet(input_shape, data_format)
self.common(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=excluded_layers,
check_output_on_input_shape=input_shape)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape,"
"equalization_criterion, elmtwise_op, method, normalizer, criterion,"
"granularity, min_num_filters, threshold, dont_prune_elmtwise_input,"
"enable_qat", [
(HelNet, 6, 'channels_first', False, (3, 128, 64), "union",
keras.layers.Add, 'min_weight', 'off', 'L2', 2, 8, 0.5, False, False),
(HelNet, 6, 'channels_first', True, (3, 128, 64), "intersection",
keras.layers.Add, 'min_weight', 'off', 'L2', 2, 8, 0.5, True, True),
(ResNet, 10, 'channels_last', False, (128, 64, 3), "arithmetic_mean",
keras.layers.Average, 'min_weight', 'off', 'L2', 2, 8, 0.5, False, False),
(HelNet, 6, 'channels_first', False, (3, 128, 64), "geometric_mean",
keras.layers.Multiply, 'min_weight', 'off', 'L2', 2, 8, 0.5, True, True),
(ResNet, 18, 'channels_last', True, (128, 64, 3), "union",
keras.layers.Subtract, 'min_weight', 'off', 'L2', 2, 8, 0.5, False, False),
])
def test_elmtwise_ops(self, model, nlayers, data_format, use_batch_norm, input_shape,
equalization_criterion, elmtwise_op, normalizer, method, criterion,
granularity, min_num_filters, threshold, dont_prune_elmtwise_input,
enable_qat):
"""Test element-wise operations."""
if os.getenv("TF_KERAS") is not None and enable_qat:
pytest.skip("Quantized Conv2D not supported by pruning app for tf.keras yet.")
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
# Add conv layer.
if enable_qat:
x = QuantizedConv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='elmtwise_input_1')(x)
# adding QDQ node to compute moving average
# scale factors.
x = QDQ(name="elmtwise_input_1_qdq")(x)
else:
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='elmtwise_input_1')(x)
# Add branch.
if enable_qat:
x1 = QuantizedConv2D(
filters=24,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='conv2d_x1')(x)
# adding QDQ node to compute moving average
# scale factors.
x1 = QDQ(name="conv2d_x1_qdq")(x1)
else:
x1 = keras.layers.Conv2D(
filters=24,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='conv2d_x1')(x)
if enable_qat:
x1 = QuantizedConv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='elmtwise_input_2')(x1)
# adding QDQ node to compute moving average
# scale factors.
x1 = QDQ(name="elmtwise_input_2_qdq")(x1)
else:
x1 = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='elmtwise_input_2')(x1)
# Add skip connection.
x = elmtwise_op()([x1, x])
# Add extra layer on top.
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='sigmoid',
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
excluded_layers = ['conv2d_output']
if dont_prune_elmtwise_input:
excluded_layers.extend(['elmtwise_input_1', 'elmtwise_input_2'])
if equalization_criterion in ['intersection', 'geometric_mean']:
# Disable the output tests, as these criteria are not even supposed to work.
check_output_on_input_shape = None
else:
check_output_on_input_shape = input_shape
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {
'equalization_criterion': equalization_criterion,
'excluded_layers': excluded_layers,
'check_output_on_input_shape': check_output_on_input_shape
}
self.common(*args, **kwargs)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape,"
"equalization_criterion, method, normalizer, criterion,"
"granularity, min_num_filters, threshold", [
(HelNet, 6, 'channels_first', False,
(3, 128, 64), "union", 'min_weight', 'off', 'L2', 1, 8, 0.5),
])
def test_conv_gru_2d(self, model, nlayers, data_format, use_batch_norm, input_shape,
equalization_criterion, normalizer, method, criterion,
granularity, min_num_filters, threshold):
"""Test convolutional GRU layer."""
if os.getenv("TF_KERAS") is not None:
pytest.skip("ConvGRU2D not supported by pruning app for tf.keras yet.")
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
# Add conv layer.
x = ConvGRU2D(model_sequence_length_in_frames=2,
input_sequence_length_in_frames=2,
state_scaling=1.0,
input_shape=[None, None, 8, 4],
initial_state_shape=[None, 64, 8, 4],
spatial_kernel_height=3,
spatial_kernel_width=3)(x)
# Add extra layer on top.
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
excluded_layers = ['conv2d_output']
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {
'equalization_criterion': equalization_criterion,
'excluded_layers': excluded_layers,
'check_output_on_input_shape': input_shape
}
self.common(*args, **kwargs)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape, method,"
"normalizer, criterion, granularity, min_num_filters, threshold", [
(HelNet, 6, 'channels_first', False,
(3, 128, 256), 'min_weight', 'max', 'L2', 8, 16, 1.),
(HelNet, 12, 'channels_last', True,
(256, 256, 3), 'min_weight', 'off', 'L2', 8, 16, 1e3),
])
def test_min_weight(self, model, nlayers, data_format, use_batch_norm, input_shape, normalizer,
method, criterion, granularity, min_num_filters, threshold):
"""Test that we retain min_num_filters.
This also tests the lower bound on thresholds.
"""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
pruned_model = nvidia_tao_tf1.core.pruning.prune(
model, method, normalizer, criterion, granularity,
min_num_filters, threshold
)
weights = pruned_model.get_weights()
assert all([w.shape[-1] == min_num_filters for w in weights])
@pytest.mark.parametrize("data_format, input_shape, method,"
"normalizer, criterion, granularity, min_num_filters, threshold", [
('channels_first',
(3, 64, 96), 'min_weight', 'off', 'L2', 4, 8, 0.5),
(None, (3, 256, 256), 'min_weight', 'off', 'L2', 4, 8, 0.5),
])
def test_flatten(self, data_format, input_shape, normalizer, method, criterion, granularity,
min_num_filters, threshold):
"""Test that we can prune 'flatten' layer."""
inputs = keras.layers.Input(shape=input_shape)
if data_format is not None:
x = keras.layers.Conv2D(
filters=32,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format)(inputs)
else:
# Test pruning of flatten layer with unknown format (the API will
# verify that the previous layer was unpruned).
x = inputs
x = keras.layers.Activation('relu')(x)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(64, activation='relu')(x)
x = keras.layers.Dense(10, activation='linear', name='dense_output')(x)
model = keras.models.Model(inputs=inputs, outputs=x)
self.common(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=['dense_output'],
check_output_on_input_shape=input_shape)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape, method,"
"normalizer, criterion, granularity, min_num_filters, threshold", [
(HelNet, 6, 'channels_first', False,
(3, 256, 256), 'min_weight', 'off', 'L2', 4, 8, 0.5),
(HelNet, 12, 'channels_last', True,
(256, 256, 3), 'min_weight', 'max', 'L2', 8, 16, 0.5),
('alexnet', None, 'channels_first', True,
(3, 223, 223), 'min_weight', 'max', 'L2', 8, 16, 0.5),
('alexnet', None, 'channels_last', True,
(256, 256, 3), 'min_weight', 'off', 'L2', 4, 16, 0.25),
])
def test_granularity(self, model, nlayers, data_format, use_batch_norm, input_shape, normalizer,
method, criterion, granularity, min_num_filters, threshold):
"""Test that we prune n*granularity filters."""
inputs = keras.layers.Input(shape=input_shape)
if model == HelNet: # pylint: disable=W0143
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
elif model == 'alexnet':
model = self.create_alexnet(input_shape, data_format)
batch_shape = (1,) + input_shape
pruned_model = self.common(model, method, normalizer, criterion, granularity,
min_num_filters, threshold)
model = keras.models.Model(inputs=inputs, outputs=pruned_model(inputs), name=model.name)
model.predict(np.zeros(batch_shape))
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape_1,"
"input_shape_2, method, normalizer, criterion, granularity,"
"min_num_filters, threshold", [
(HelNet, 6, 'channels_first', False, (3, 128, 64),
(3, 64, 32), 'min_weight', 'off', 'L2', 2, 4, 0.5),
])
def test_mimo(self, model, nlayers, data_format, use_batch_norm, input_shape_1, input_shape_2,
normalizer, method, criterion, granularity, min_num_filters, threshold):
"""Test the pruning of models with multiple inputs and multiple outputs."""
input_1 = keras.layers.Input(shape=input_shape_1)
model = model(nlayers, input_1, use_batch_norm=use_batch_norm, data_format=data_format)
x_1 = model.outputs[0]
input_2 = keras.layers.Input(shape=input_shape_2)
x_2 = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(8, 8), padding='same',
data_format=data_format)(input_2)
inputs = [input_1, input_2]
# Merge.
x = keras.layers.Concatenate(axis=1, name='output')([x_1, x_2])
# Add two branches on top.
out_1 = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='sigmoid',
name='output_1')(x)
out_2 = keras.layers.Conv2D(
filters=24,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='sigmoid',
name='output_2')(x)
# Provide outputs in reverse creation order to verify fix in output ordering.
outputs = [out_2, out_1]
# Create model.
model = keras.models.Model(inputs=inputs, outputs=outputs)
batch_shape_1 = (8,) + input_shape_1
batch_shape_2 = (8,) + input_shape_2
batch = [np.zeros(batch_shape_1), np.zeros(batch_shape_2)]
shapes_before = [out.shape for out in model.predict(batch)]
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {'excluded_layers': ['output_1', 'output_2']}
pruned_model = self.common(*args, **kwargs)
pruned_model = keras.models.Model(
inputs=inputs, outputs=pruned_model(inputs), name=model.name)
shapes_after = [out.shape for out in pruned_model.predict(batch)]
assert shapes_before == shapes_after
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape_1,"
"input_shape_2, method, normalizer, criterion, granularity,"
"min_num_filters, threshold", [
(HelNet, 6, 'channels_first', False, (3, 128, 64),
(3, 64, 32), 'min_weight', 'off', 'L2', 2, 4, 0.5),
])
def test_multiple_inputs(self, model, nlayers, data_format, use_batch_norm, input_shape_1,
input_shape_2, normalizer, method, criterion, granularity,
min_num_filters, threshold):
"""Test the pruning of models with multiple inputs."""
input_1 = keras.layers.Input(shape=input_shape_1)
model = model(nlayers, input_1, use_batch_norm=use_batch_norm, data_format=data_format)
out_1 = model.outputs[0]
input_2 = keras.layers.Input(shape=input_shape_2)
out_2 = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(8, 8), padding='same',
data_format=data_format)(input_2)
# Feed inputs in reverse creation order to verify fix in input order.
inputs = [input_2, input_1]
input_shapes = [input_shape_2, input_shape_1]
# Merge.
x = keras.layers.Concatenate(axis=1, name='output')([out_1, out_2])
# Add extra layer on top.
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='tanh',
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {'excluded_layers': ['conv2d_output'], 'check_output_on_input_shape': input_shapes}
self.common(*args, **kwargs)
@pytest.mark.parametrize(
"data_format, input_shape, method,"
"normalizer, criterion, granularity, min_num_filters, threshold", [
('channels_first', (3, 128, 256), 'min_weight', 'max', 'L2', 8, 16, 0.5),
('channels_last', (256, 256, 3), 'min_weight', 'off', 'L2', 8, 16, 0.5),
])
def test_no_bias_in_conv_layer(self, data_format, input_shape, normalizer, method, criterion,
granularity, min_num_filters, threshold):
"""Test that we can prune conv layers with no bias terms."""
inputs = keras.layers.Input(shape=input_shape)
x = keras.layers.Conv2D(
filters=64,
kernel_size=[3, 3],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='relu',
use_bias=False,
kernel_initializer='glorot_uniform',
name='conv2d_1')(inputs)
x = keras.layers.Conv2D(
filters=64,
kernel_size=[3, 3],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
use_bias=True,
kernel_initializer='glorot_uniform',
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
self.common(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=['conv2d_output'],
check_output_on_input_shape=input_shape)
@pytest.mark.parametrize("data_format, input_shape, method,"
"normalizer, criterion, granularity, min_num_filters, threshold", [
('channels_first',
(3, 128, 256), 'min_weight', 'max', 'L2', 8, 16, 0.5),
])
def test_no_bias_in_conv_transpose_layer(self, data_format, input_shape, normalizer, method,
criterion, granularity, min_num_filters, threshold):
"""Test that we can prune conv layers with no bias terms."""
inputs = keras.layers.Input(shape=input_shape)
x = keras.layers.Conv2D(
filters=64,
kernel_size=[3, 3],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
name='conv2d')(inputs)
x = keras.layers.Conv2DTranspose(
filters=8,
kernel_size=(2, 2),
strides=(2, 2),
padding='same',
data_format=data_format,
use_bias=False,
name='conv2d_transpose')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
self.common(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
check_output_on_input_shape=input_shape)
@pytest.mark.parametrize(
"input_shape, data_format, method,"
"normalizer, criterion, granularity, min_num_filters, threshold", [
((3, 16, 16), 'channels_first', 'min_weight', 'max', 'L2', 2, 2, 0.5),
])
def test_no_bias_in_dense_layer(self, input_shape, data_format, normalizer, method, criterion,
granularity, min_num_filters, threshold):
"""Test that we can prune dense layers with no bias terms."""
inputs = keras.layers.Input(shape=input_shape)
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(2, 2),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
use_bias=True,
kernel_initializer='glorot_uniform',
name='conv2d_1')(inputs)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(32, activation='relu', use_bias=False)(x)
x = keras.layers.Dense(16, activation='tanh', use_bias=True)(x)
x = keras.layers.Dense(10, activation='linear', name='dense_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
self.common(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=['dense_output'],
check_output_on_input_shape=input_shape)
# @pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape, method,"
# "normalizer, criterion, granularity, min_num_filters, threshold", [
# (HelNet, 6, 'channels_first', False,
# (3, 256, 256), 'min_weight', 'off', 'L2', 4, 8, 0.5),
# (HelNet, 6, 'channels_last', True,
# (256, 256, 3), 'min_weight', 'max', 'L2', 8, 16, 0.5),
# ])
# def test_recurse(self, model, nlayers, data_format, use_batch_norm, input_shape, normalizer,
# method, criterion, granularity, min_num_filters, threshold):
# """Test that we recurse through 'Model' layers."""
# inner_inputs = keras.layers.Input(shape=input_shape)
# outer_inputs = keras.layers.Input(shape=input_shape)
# inner_model = model(
# nlayers, inner_inputs, use_batch_norm=use_batch_norm, data_format=data_format)
# outer_model = keras.models.Model(inputs=outer_inputs, outputs=inner_model(outer_inputs))
# self.common(outer_model, method, normalizer, criterion, granularity, min_num_filters,
# threshold)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape,"
"method, normalizer, criterion, granularity, min_num_filters,"
"threshold, equalization_criterion, all_projections",
[(ResNet, 18, 'channels_first', False,
(3, 960, 544), 'min_weight', 'off', 'L2', 2, 8, 0.5, "union", False),
(ResNet, 18, 'channels_first', False,
(3, 960, 544), 'min_weight', 'off', 'L2', 2, 8, 0.5, "union", True),
(ResNet, 10, 'channels_first', True,
(3, 960, 544), 'min_weight', 'off', 'L2', 2, 8, 0.5, "arithmetic_mean", True),
(ResNet, 10, 'channels_first', True,
(3, 960, 544), 'min_weight', 'off', 'L2', 2, 8, 0.5, "union", False)])
def test_resnets(self, model, nlayers, data_format, use_batch_norm, input_shape, normalizer,
method, criterion, granularity, min_num_filters, threshold,
equalization_criterion, all_projections):
"""Test partial pruning for MSRA resnet templates."""
# Set up Resnet model.
inputs = keras.layers.Input(shape=input_shape)
model = model(
nlayers,
inputs,
use_batch_norm=use_batch_norm,
data_format=data_format,
all_projections=all_projections)
x = model.outputs[0]
# Hooking up to fully connected layer for 10 classes.
x = keras.layers.Flatten(name='flatten')(x)
x = keras.layers.Dense(256, name='inner_fc', activation='relu')(x)
x = keras.layers.Dense(10, name='output_fc', activation='relu')(x)
# Setting up a model.
model = keras.models.Model(inputs=inputs, outputs=x)
# Define elementwise input layers alone as exclude layers.
excluded_layers = ['output_fc']
# Prune model and check weights.
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {
'equalization_criterion': equalization_criterion,
'excluded_layers': excluded_layers,
'check_output_on_input_shape': input_shape
}
self.common(*args, **kwargs)
@pytest.mark.parametrize("data_format, input_shape,"
"method, normalizer, criterion, granularity,"
"min_num_filters, threshold", [
('channels_first',
(3, 128, 64), 'min_weight', 'off', 'L2', 2, 4, 0.5),
])
def test_shared_layer(self, data_format, input_shape, normalizer, method, criterion,
granularity, min_num_filters, threshold):
"""Test the pruning of models with shared layers."""
input_1 = keras.layers.Input(shape=input_shape)
input_2 = keras.layers.Input(shape=input_shape)
input_3 = keras.layers.Input(shape=input_shape)
inputs = [input_1, input_2, input_3]
# This layer will be applied to three different inputs.
conv_layer = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(2, 2), padding='same', data_format=data_format)
conv_layer_output_1 = conv_layer(input_1)
conv_layer_output_2 = conv_layer(input_2)
conv_layer_output_3 = conv_layer(input_3)
# Merge.
x = keras.layers.Concatenate(
axis=1, name='concat')([conv_layer_output_1, conv_layer_output_2, conv_layer_output_3])
x = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(8, 8), padding='same',
data_format=data_format)(x)
# Add named output layer.
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
input_shapes = [input_shape, input_shape, input_shape]
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {'excluded_layers': ['conv2d_output'], 'check_output_on_input_shape': input_shapes}
self.common(*args, **kwargs)
@pytest.mark.parametrize("data_format, input_shape,"
"method, normalizer, criterion, granularity,"
"min_num_filters, threshold", [
('channels_first',
(3, 128, 64), 'min_weight', 'off', 'L2', 2, 4, 0.5),
])
def test_shared_layer2(self, data_format, input_shape, normalizer, method, criterion,
granularity, min_num_filters, threshold):
"""Test the pruning of models with shared layers."""
input_1 = keras.layers.Input(shape=input_shape)
input_2 = keras.layers.Input(shape=input_shape)
input_3 = keras.layers.Input(shape=input_shape)
inputs = [input_1, input_2, input_3]
# This layer will be applied to three different inputs.
c1 = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(1, 1), padding='same', data_format=data_format)
# This layer will be applied to three different inputs.
c2 = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(1, 1), padding='same', data_format=data_format)
# This layer will be applied to three different inputs.
c3 = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(1, 1), padding='same', data_format=data_format)
# This layer will be applied to three different inputs.
conv_layer = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(2, 2), padding='same', data_format=data_format)
conv_layer_output_1 = conv_layer(c1(input_1))
conv_layer_output_2 = conv_layer(c2(input_2))
conv_layer_output_3 = conv_layer(c3(input_3))
# Merge.
x = keras.layers.Concatenate(
axis=1, name='concat')([conv_layer_output_1, conv_layer_output_2, conv_layer_output_3])
x = keras.layers.Conv2D(
filters=32, kernel_size=[1, 1], strides=(8, 8), padding='same',
data_format=data_format)(x)
# Add named output layer.
x = keras.layers.Conv2D(
filters=16,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {'excluded_layers': ['conv2d_output']}
self.common(*args, **kwargs)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape, method,"
"normalizer, criterion, granularity, min_num_filters, threshold", [
(HelNet, 6, 'channels_first', False,
(3, 256, 256), 'toto', 'off', 'L2', 4, 8, 0.5),
(HelNet, 6, 'channels_first', True,
(3, 256, 256), 'min_weight', 'toto', 'L2', 4, 8, 0.5),
(HelNet, 6, 'channels_first', False,
(3, 256, 256), 'min_weight', 'max', 'toto', 4, 8, 0.5),
])
def test_unknown_params(self, model, nlayers, data_format, use_batch_norm, input_shape,
normalizer, method, criterion, granularity, min_num_filters, threshold):
"""Test that we prune n*granularity filters."""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
with pytest.raises(NotImplementedError):
nvidia_tao_tf1.core.pruning.prune(
model, method, normalizer, criterion, granularity,
min_num_filters, threshold
)
def test_unsupported_layer(self):
"""Test that we drop an error on an unsupported layer."""
inputs = keras.layers.Input(shape=(3, 8, 4, 2))
# 3D layers are not currently supported.
x = keras.layers.ZeroPadding3D(padding=(1, 1, 1), data_format="channels_first")(
inputs
)
model = keras.models.Model(inputs, x)
with pytest.raises(NotImplementedError):
nvidia_tao_tf1.core.pruning.prune(
model, "min_weight",
"off", "L2", 8, 1,
0.01
)
# def test_unsupported_recurse(self):
# """Test that the API fails 'gracefully' on unsupported types of recursion."""
# # Create the inner model.
# inputs = keras.layers.Input((1,))
# x = keras.layers.core.Dense(1, activation='linear')(inputs)
# outputs = keras.layers.core.Dense(2, activation='softmax')(x)
# inner_model = keras.models.Model(inputs, outputs)
# # Create an outer model with a layer before the inner model.
# inputs = keras.layers.Input((1,))
# x = keras.layers.core.Dense(1, activation='linear')(inputs)
# outputs = inner_model(x)
# outer_model_layer_before = keras.models.Model(inputs, outputs)
# with pytest.raises(NotImplementedError):
# nvidia_tao_tf1.core.pruning.prune(
# outer_model_layer_before,
# 'min_weight',
# 'off', 'L2', 8,
# 1, 0.01)
# # Create an outer model with a layer on top of the inner model.
# inputs = keras.layers.Input((1,))
# x = inner_model(inputs)
# outputs = keras.layers.core.Dense(2, activation='softmax')(x)
# outer_model_layer_after = keras.models.Model(inputs, outputs)
# with pytest.raises(NotImplementedError):
# nvidia_tao_tf1.core.pruning.prune(
# outer_model_layer_after,
# 'min_weight', 'off',
# 'L2', 8, 1, 0.01)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape,"
"num_classes, method, normalizer, criterion, granularity,"
"min_num_filters, threshold",
[(HelNet, 6, 'channels_first', False,
(3, 128, 64), 4, 'min_weight', 'off', 'L2', 2, 8, 0.5),
(HelNet, 6, 'channels_first', False,
(3, 128, 64), 4, 'min_weight', 'off', 'L2', 4, 8, 0.5)])
def test_with_averagepooling2d(self, model, nlayers, data_format, use_batch_norm, input_shape,
num_classes, method, normalizer, criterion, granularity,
min_num_filters, threshold):
"""Test with AveragePooling2D."""
inputs = keras.layers.Input(shape=input_shape)
        # Build the model specified by the test case.
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
# Adding AveragePooling2D node.
x = keras.layers.AveragePooling2D(
pool_size=(2, 2), data_format=data_format, padding='same')(x)
# Adding a dense head of num classes.
x = keras.layers.Flatten(name='flatten')(x)
x = keras.layers.Dense(num_classes, name='output_fc', activation='relu')(x)
model = keras.models.Model(inputs=inputs, outputs=x)
# Exclude final fc layer from pruning.
excluded_layers = ['output_fc']
# Prune model and check weights.
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {'excluded_layers': excluded_layers, 'check_output_on_input_shape': input_shape}
self.common(*args, **kwargs)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape,"
"num_classes, method, normalizer, criterion, granularity,"
"min_num_filters, threshold, exclude_permute_inputs",
[(HelNet, 6, 'channels_first', False,
(3, 128, 64), 4, 'min_weight', 'off', 'L2', 2, 8, 0.5, False),
(HelNet, 6, 'channels_first', False,
(3, 128, 64), 4, 'min_weight', 'off', 'L2', 4, 8, 0.5, True)])
def test_with_permute_layer(self, model, nlayers, data_format, use_batch_norm, input_shape,
num_classes, method, normalizer, criterion, granularity,
min_num_filters, threshold, exclude_permute_inputs):
"""Test with Permute layer."""
inputs = keras.layers.Input(shape=input_shape)
        # Build the model specified by the test case.
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
# Adding Permute Node.
x = keras.layers.Permute((1, 3, 2))(x)
# Adding a dense head of num classes.
x = keras.layers.Flatten(name='flatten')(x)
x = keras.layers.Dense(num_classes, name='output_fc', activation='relu')(x)
model = keras.models.Model(inputs=inputs, outputs=x)
model.summary()
# Exclude final fc layer from pruning.
excluded_layers = ['output_fc']
if exclude_permute_inputs:
excluded_layers.append("block_4a_conv_1")
# Prune model and check weights.
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {'excluded_layers': excluded_layers, 'check_output_on_input_shape': input_shape}
# Catch error if permute inputs are not excluded.
if not exclude_permute_inputs:
with pytest.raises(NotImplementedError):
self.common(*args, **kwargs)
else:
self.common(*args, **kwargs)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape,"
"num_classes, method, normalizer, criterion, granularity,"
"min_num_filters, threshold, zeropadding_dims",
[(HelNet, 6, 'channels_first', False,
(3, 128, 64), 4, 'min_weight', 'off', 'L2', 2, 8, 0.5, 3),
(HelNet, 6, 'channels_first', False, (3, 128, 64), 4, 'min_weight', 'off', 'L2', 4, 8, 0.5,
(3, 2)), (HelNet, 6, 'channels_last', False,
(128, 64, 3), 4, 'min_weight', 'off', 'L2', 4, 8, 0.5, ((3, 2), (3, 2)))])
def test_with_zeropadding2D_layer(self, model, nlayers, data_format, use_batch_norm,
input_shape, num_classes, method, normalizer, criterion,
granularity, min_num_filters, threshold, zeropadding_dims):
"""Test with ZeroPadding2D."""
inputs = keras.layers.Input(shape=input_shape)
        # Build the model specified by the test case.
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
# Adding ZeroPadding2D Node.
x = keras.layers.ZeroPadding2D(
padding=zeropadding_dims, data_format=data_format)(x)
# Adding a dense head of num classes.
x = keras.layers.Flatten(name='flatten')(x)
x = keras.layers.Dense(num_classes, name='output_fc', activation='relu')(x)
model = keras.models.Model(inputs=inputs, outputs=x)
# Exclude final fc layer from pruning.
excluded_layers = ['output_fc']
# Prune model and check weights.
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {'excluded_layers': excluded_layers, 'check_output_on_input_shape': input_shape}
self.common(*args, **kwargs)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape, method,"
"normalizer, criterion, granularity, min_num_filters, threshold", [
(HelNet, 6, 'channels_first', False,
(3, 256, 256), 'min_weight', 'off', 'L2', 4, 8, 0.5),
(HelNet, 6, 'channels_last', True,
(256, 256, 3), 'min_weight', 'max', 'L2', 8, 16, 0.5),
])
def test_with_conv_transpose_head(self, model, nlayers, data_format, use_batch_norm,
input_shape, normalizer, method, criterion, granularity,
min_num_filters, threshold):
"""Test that we prune n*granularity filters."""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
model = add_deconv_head(
model=model,
inputs=inputs,
nmaps=1,
upsampling=2,
data_format=data_format,
activation_type='relu')
self.common(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
check_output_on_input_shape=input_shape)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape, method,"
"normalizer, criterion, granularity, min_num_filters, threshold,"
"excluded_layers",
[(HelNet, 6, 'channels_first', False,
(3, 256, 256), 'min_weight', 'off', 'L2', 4, 8, 0.5, ['block_4a_conv_1'])])
def test_reprune(self, model, nlayers, data_format, use_batch_norm, input_shape, normalizer,
method, criterion, granularity, min_num_filters, threshold, excluded_layers):
"""Test that we can reprune a model.
Args:
model: the model template to use.
nlayers (int): number of layers to build template of.
data_format (str): one of 'channels_first' or 'channels_last'.
use_batch_norm (bool): whether to use batchnorm.
input_shape (tuple of ints): input shape.
method (str): pruning method.
normalizer (str): type of normalizer to use when pruning.
criterion (str): type of criterion to use when pruning.
granularity (int): granularity by which to prune filters.
min_num_filters (int): min number of filters to retain when pruning.
threshold (float): pruning threshold.
excluded_layers (list): list of layers to exclude when pruning."""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
pruned_model = self.common(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=excluded_layers,
check_output_on_input_shape=input_shape)
# Apply pruned model to our inputs. When cloning a model, Keras internally
# recreates all layers - this is different from applying the model to
# another input, which creates another model but does not create new layers
# (thus the layers are shared between models, which means layers have multiple
# outbound nodes, making forward parsing ill-defined).
# Below we are cloning the model and instructing Keras to use placeholders
# for the new inputs (if we provide the same input layer as in the original
# model, Keras will - wrongly? - re-create a new layer with the same name and
# complain that two layers of the model have the same name!).
pruned_model_applied = keras.models.clone_model(pruned_model)
pruned_model_applied.set_weights(pruned_model.get_weights())
# Note: at this stage a typical workflow would fine-tune the pruned model.
# Now prune the model again and verify the output shape.
self.common(
pruned_model_applied,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=excluded_layers,
check_output_on_input_shape=input_shape)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape,"
"num_classes, method, normalizer, criterion, granularity,"
"min_num_filters, threshold, concat_axis",
[(HelNet, 6, 'channels_first', False,
(3, 128, 64), 4, 'min_weight', 'off', 'L2', 2, 8, 0.5, 1),
(HelNet, 6, 'channels_first', False,
(3, 128, 64), 4, 'min_weight', 'off', 'L2', 4, 8, 0.5, 2)])
def test_with_branches(self, model, nlayers, data_format, use_batch_norm, input_shape,
num_classes, normalizer, method, criterion, granularity, min_num_filters,
threshold, concat_axis):
"""Test concatenation head."""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
# Add 1st branch.
cov_channels = 1
x1 = keras.layers.Conv2D(
filters=num_classes * cov_channels,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='conv2d_x1')(x)
# Add 2nd branch.
bbox_channels = 4 if concat_axis == 1 else 1
x2 = keras.layers.Conv2D(
filters=num_classes * bbox_channels,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='relu',
name='conv2d_x2')(x)
# Merge.
x = keras.layers.Concatenate(axis=concat_axis, name='output')([x1, x2])
# Add extra layer on top.
x = keras.layers.Conv2D(
filters=num_classes * (bbox_channels + cov_channels),
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='sigmoid',
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {
'excluded_layers': ['conv2d_x1', 'conv2d_x2', 'conv2d_output'],
'check_output_on_input_shape': input_shape
}
if concat_axis == 1: # Only channels_first is supported by this test.
self.common(*args, **kwargs)
else:
with pytest.raises(ValueError):
self.common(*args, **kwargs)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape,"
"num_classes, method, normalizer, criterion, granularity,"
"min_num_filters, threshold", [
(HelNet, 6, 'channels_first', False,
(3, 128, 64), 4, 'min_weight', 'off', 'L2', 2, 8, 0.5),
])
def test_with_concat_before_reshape(self, model, nlayers, data_format, use_batch_norm,
input_shape, num_classes, normalizer, method, criterion,
granularity, min_num_filters, threshold):
"""Test pruning in presence of concat layer following a reshape."""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
# Add 1st branch.
cov_channels = 1
x1 = keras.layers.Conv2D(
filters=num_classes * cov_channels,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='conv2d_x1')(x)
x1 = keras.layers.Reshape((num_classes, cov_channels, int(x.shape[-2]),
int(x.shape[-1])))(x1)
# Add 2nd branch.
bbox_channels = 4
x2 = keras.layers.Conv2D(
filters=num_classes * bbox_channels,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='relu',
name='conv2d_x2')(x)
x2 = keras.layers.Reshape((num_classes, bbox_channels, int(x.shape[-2]),
int(x.shape[-1])))(x2)
# Merge.
x = keras.layers.Concatenate(axis=2, name='output')([x1, x2])
x = keras.layers.Reshape((num_classes * (bbox_channels + cov_channels), int(x.shape[-2]),
int(x.shape[-1])))(x)
x = keras.layers.Conv2D(
filters=8,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='relu',
name='conv2d_output')(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
kwargs = {
'excluded_layers': ['conv2d_x1', 'conv2d_x2', 'conv2d_output'],
'check_output_on_input_shape': input_shape
}
self.common(*args, **kwargs)
@pytest.mark.parametrize("model, nlayers, data_format, use_batch_norm, input_shape,"
"num_classes, method, normalizer, criterion, granularity,"
"min_num_filters, threshold, prune_before_reshape", [
(HelNet, 6, 'channels_first', False,
(3, 128, 64), 8, 'min_weight', 'off', 'L2', 2, 2, 0.5, True),
(HelNet, 6, 'channels_first', False,
(3, 128, 64), 8, 'min_weight', 'off', 'L2', 2, 2, 0.5, False),
])
def test_with_reshape(self, model, nlayers, data_format, use_batch_norm, input_shape,
num_classes, normalizer, method, criterion, granularity, min_num_filters,
threshold, prune_before_reshape):
"""Test pruning of reshape layer."""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
x = model.outputs[0]
# Add conv layer
cov_channels = 2
x = keras.layers.Conv2D(
filters=num_classes * cov_channels,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='linear',
name='conv2d_x')(x)
# Add reshape.
x = keras.layers.Reshape((num_classes, cov_channels, int(x.shape[-2]), int(x.shape[-1])))(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
model.summary()
args = [model, method, normalizer, criterion, granularity, min_num_filters, threshold]
if not prune_before_reshape:
kwargs = {'excluded_layers': ['conv2d_x'], 'check_output_on_input_shape': input_shape}
self.common(*args, **kwargs)
else:
with pytest.raises(NotImplementedError):
self.common(*args)
@pytest.mark.parametrize(
"model, nlayers, data_format, input_shape,"
"method, normalizer, criterion, granularity,"
"min_num_filters, threshold ", [
(HelNet, 6, 'channels_first', (3, 128, 64), 'min_weight', 'off', 'L2', 2, 2, 0.5),
(HelNet, 6, 'channels_last', (64, 64, 3), 'min_weight', 'off', 'L2', 2, 2, 0.5),
])
def test_with_softmax(self, model, nlayers, data_format, input_shape, normalizer, method,
criterion, granularity, min_num_filters, threshold):
"""Test pruning in presence of softmax layer."""
inputs = keras.layers.Input(shape=input_shape)
model = model(nlayers, inputs, data_format=data_format)
x = model.outputs[0]
x = keras.layers.Conv2D(
filters=8,
kernel_size=[1, 1],
strides=(1, 1),
padding='same',
data_format=data_format,
dilation_rate=(1, 1),
activation='sigmoid',
name='conv2d_output')(x)
# Add softmax layer
if data_format == 'channels_first':
softmax_axis = 1
elif data_format == 'channels_last':
softmax_axis = -1
else:
raise ValueError("Unknown data format: %s" % data_format)
x = keras.layers.Softmax(axis=softmax_axis)(x)
# Create model.
model = keras.models.Model(inputs=inputs, outputs=x)
# Prune and check activations.
self.common(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers='conv2d_output',
check_output_on_input_shape=input_shape)
@pytest.mark.parametrize("data_format, input_shape,"
" method, normalizer, criterion, granularity,"
"min_num_filters, threshold ", [
('channels_first', (3, 128, 64),
'min_weight', 'off', 'L2', 2, 2, 0.5),
('channels_last', (64, 64, 3),
'min_weight', 'off', 'L2', 2, 2, 0.5),
])
def test_with_depthwise_conv_layer(self, data_format, input_shape, normalizer, method,
criterion, granularity, min_num_filters, threshold):
"""Test pruning in presence of DepthwiseConv2D layer."""
inputs = keras.layers.Input(shape=input_shape)
x = keras.layers.Conv2D(32,
kernel_size=3,
strides=(1, 1),
padding='valid',
name='conv1',
data_format=data_format)(inputs)
x = keras.layers.DepthwiseConv2D((3, 3),
padding='valid',
strides=1,
depth_multiplier=1,
name='conv_dw_1',
data_format=data_format)(x)
model = keras.models.Model(inputs=inputs, outputs=x)
self.common(model, method, normalizer, criterion,
granularity, min_num_filters, threshold)
@pytest.mark.parametrize("data_format, input_shape,"
" method, normalizer, criterion, granularity,"
"min_num_filters, threshold, depth_multiplier ", [
('channels_first', (3, 128, 64),
'min_weight', 'off', 'L2', 2, 2, 0.5, 2),
('channels_last', (64, 64, 3),
'min_weight', 'off', 'L2', 2, 2, 0.5, 3),
])
def test_depth_multiplier_not_one(self, data_format, input_shape, normalizer, method,
criterion, granularity, min_num_filters, threshold,
depth_multiplier):
"""Test pruning in presence of DepthwiseConv2D layer."""
inputs = keras.layers.Input(shape=input_shape)
x = keras.layers.Conv2D(32,
kernel_size=3,
strides=(1, 1),
padding='valid',
name='conv1',
data_format=data_format)(inputs)
x = keras.layers.DepthwiseConv2D((3, 3),
padding='valid',
strides=1,
depth_multiplier=depth_multiplier,
name='conv_dw_1',
data_format=data_format)(x)
model = keras.models.Model(inputs=inputs, outputs=x)
# Will raise ValueError during explore stage when depth_multiplier is not 1.
with pytest.raises(ValueError):
self.common(model, method, normalizer, criterion,
granularity, min_num_filters, threshold)
def test_overrides(self):
"""Test that layer config overrides work."""
input_shape = (10,)
inputs = keras.layers.Input(shape=input_shape)
x = keras.layers.Dense(3, activation='linear', name='dense_output')(inputs)
model = keras.models.Model(inputs=inputs, outputs=x)
layer_config_overrides = {
'bias_regularizer': keras.regularizers.l1(0.01),
'kernel_regularizer': keras.regularizers.l1(0.01),
'trainable': False
}
pruned_model = self.common(
model,
'min_weight',
'off',
'L2',
4,
8,
0.5,
excluded_layers=['dense_output'],
check_output_on_input_shape=input_shape,
layer_config_overrides=layer_config_overrides)
# Verify that the overrides got applied.
for layer in pruned_model.layers:
# Overrides don't apply to input layers.
if isinstance(layer, keras.layers.InputLayer):
continue
for key in layer_config_overrides:
assert getattr(layer, key) == layer_config_overrides[key]
# @pytest.mark.parametrize(
# "output_fn, model, nlayers, data_format, use_batch_norm, input_shape,"
# "method, normalizer, criterion, granularity, min_num_filters, threshold", [
# ('default', HelNet, 6, 'channels_first', False,
# (3, 256, 256), 'min_weight', 'off', 'L2', 4, 8, 0.5),
# ('toto.pruned', HelNet, 6, 'channels_last', True,
# (256, 256, 3), 'min_weight', 'max', 'L2', 8, 16, 0.5),
# ])
# @pytest.mark.script_launch_mode('subprocess')
# def test_prune_app(self, script_runner, tmpdir, output_fn, model, nlayers, data_format,
# use_batch_norm, input_shape, normalizer, method, criterion, granularity,
# min_num_filters, threshold):
# """Test the prune application.
# Just make sure a pruned model file is generated.
# """
# model_filename = os.path.join(str(tmpdir), 'model.h5')
# if output_fn == 'default':
# extra_args = []
# suffix = '.pruned'
# pruned_model_filename = os.path.join(str(tmpdir), 'model.h5' + suffix)
# else:
# pruned_model_filename = os.path.join(str(tmpdir), output_fn)
# extra_args = ['--output', pruned_model_filename]
# inputs = keras.layers.Input(shape=input_shape)
# model = model(nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format)
# model.save(model_filename)
# env = os.environ.copy()
# env['CUDA_VISIBLE_DEVICES'] = ''
# script = 'app.py'
# # Path adjustment for bazel tests
# root_dir = os.path.dirname(os.path.abspath(__file__))
# if os.path.exists(os.path.join(root_dir, script)):
# script = os.path.join(root_dir, script)
# ret = script_runner.run(script, model_filename, env=env, *extra_args)
# assert ret.success, "Process returned error: %s error trace: %s" % (ret.success, ret.stderr)
# assert os.path.isfile(pruned_model_filename)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/pruning/test_pruning.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus pruning.
This module includes APIs to prune a Keras model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
from nvidia_tao_tf1.core.decorators import override, subclass
from nvidia_tao_tf1.core.models.import_keras import keras as keras_fn
from nvidia_tao_tf1.core.models.templates.conv_gru_2d import ConvGRU2D
from nvidia_tao_tf1.core.models.templates.qdq_layer import QDQ
from nvidia_tao_tf1.core.models.templates.quantized_conv2d import QuantizedConv2D
from nvidia_tao_tf1.core.models.templates.quantized_conv2dtranspose import QuantizedConv2DTranspose
from nvidia_tao_tf1.core.models.templates.quantized_dense import QuantizedDense
from nvidia_tao_tf1.core.models.templates.quantized_depthwiseconv2d import QuantizedDepthwiseConv2D
from nvidia_tao_tf1.core.pruning import utils
from nvidia_tao_tf1.core.templates.utils import mish
from nvidia_tao_tf1.core.templates.utils_tf import swish
from nvidia_tao_tf1.cv.efficientdet.layers.image_resize_layer import ImageResizeLayer
from nvidia_tao_tf1.cv.efficientdet.layers.weighted_fusion_layer import WeightedFusion
from nvidia_tao_tf1.cv.efficientdet.utils.utils import PatchedBatchNormalization
from nvidia_tao_tf1.cv.faster_rcnn.layers.custom_layers import (
CropAndResize,
Proposal,
ProposalTarget
)
from nvidia_tao_tf1.cv.mask_rcnn.layers.anchor_layer import AnchorLayer
from nvidia_tao_tf1.cv.mask_rcnn.layers.box_input_layer import BoxInput
from nvidia_tao_tf1.cv.mask_rcnn.layers.box_target_encoder import BoxTargetEncoder
from nvidia_tao_tf1.cv.mask_rcnn.layers.class_input_layer import ClassInput
from nvidia_tao_tf1.cv.mask_rcnn.layers.foreground_selector_for_mask import (
ForegroundSelectorForMask
)
from nvidia_tao_tf1.cv.mask_rcnn.layers.gpu_detection_layer import GPUDetections
from nvidia_tao_tf1.cv.mask_rcnn.layers.image_input_layer import ImageInput
from nvidia_tao_tf1.cv.mask_rcnn.layers.info_input_layer import InfoInput
from nvidia_tao_tf1.cv.mask_rcnn.layers.mask_input_layer import MaskInput
from nvidia_tao_tf1.cv.mask_rcnn.layers.mask_postprocess_layer import MaskPostprocess
from nvidia_tao_tf1.cv.mask_rcnn.layers.mask_targets_layer import MaskTargetsLayer
from nvidia_tao_tf1.cv.mask_rcnn.layers.multilevel_crop_resize_layer import MultilevelCropResize
from nvidia_tao_tf1.cv.mask_rcnn.layers.multilevel_proposal_layer import MultilevelProposal
from nvidia_tao_tf1.cv.mask_rcnn.layers.proposal_assignment_layer import ProposalAssignment
from nvidia_tao_tf1.cv.mask_rcnn.layers.reshape_layer import ReshapeLayer
from nvidia_tao_tf1.cv.retinanet.initializers.prior_prob import PriorProbability
from nvidia_tao_tf1.cv.retinanet.layers.anchor_box_layer import RetinaAnchorBoxes
from nvidia_tao_tf1.cv.ssd.layers.anchor_box_layer import AnchorBoxes
from nvidia_tao_tf1.cv.yolo_v3.layers.yolo_anchor_box_layer import YOLOAnchorBox
from nvidia_tao_tf1.cv.yolo_v4.layers.bbox_postprocessing_layer import BBoxPostProcessingLayer
from nvidia_tao_tf1.cv.yolo_v4.layers.split import Split
keras = keras_fn()
"""Logger for pruning APIs."""
logger = logging.getLogger(__name__)
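# A minimal usage sketch, mirroring how test_pruning.py above invokes the module-level
# `prune` API; the positional argument order (model, method, normalizer, criterion,
# granularity, min_num_filters, threshold) is taken from those tests, and the layer
# names and shapes below are purely illustrative:
#
#     import keras
#     import nvidia_tao_tf1.core.pruning
#
#     inputs = keras.layers.Input(shape=(3, 128, 64))
#     x = keras.layers.Conv2D(32, kernel_size=[3, 3], padding='same',
#                             data_format='channels_first', name='conv1')(inputs)
#     model = keras.models.Model(inputs=inputs, outputs=x)
#     pruned_model = nvidia_tao_tf1.core.pruning.prune(
#         model, 'min_weight', 'off', 'L2', 8, 16, 0.5)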
class Prune(object):
"""A class interface for the pruning operator."""
def _get_filter_stats(self, kernels, layer):
"""Return norms of all kernel filters.
Args:
kernels (Array): array of kernels to get retained indices of, where the last
dimension indexes individual kernels.
layer(keras Layer): the layer whose filters we are going to make statistics.
"""
raise NotImplementedError
def _get_retained_idx(self, explored_stat):
"""Return indices of filters to retain.
Args:
explored_stat (1-d array): array of kernel filter norms.
"""
raise NotImplementedError
@override
def prune(self, model):
"""Prune a model.
Args:
model (Model): the Keras model to prune.
Returns:
model (Model): the pruned model.
"""
raise NotImplementedError()
class PrunedLayer(object):
"""Class definition to store information about pruned layers.
Args:
retained_idx (list): list of retained indices.
output_tensor: Output tensor.
"""
def __init__(
self,
retained_idx,
explored_stat=None,
visited=False,
is_pruned=None,
keras_layer=None,
):
"""Initialization routine.
Args:
retained_idx (list): All filter indices that are above the pruning threshold and may be
retained.
explored_stat (list): Norms of the filter in the given layer to keep track for full
pruning of element-wise operations.
is_pruned (bool): Flag to mark layer as pruned if num of retained filters is
less than the number of incoming filters.
visited (bool): Flag to mark layer as visited during phase 2 of the pruning.
keras_layer (keras layer): Output of the current layer.
Returns:
Pruned_layer data structure.
"""
self.retained_idx = retained_idx
self.keras_layer = keras_layer
self.is_pruned = is_pruned
self.visited = visited
self.explored_stat = explored_stat
@subclass
class PruneMinWeight(Prune):
"""
A class that implements pruning according to the "min weight method".
    This class implements the 'min_weight' filtering method described in:
[Molchanov et al.] Pruning Convolutional Neural Networks for Resource Efficient Inference,
arXiv:1611.06440.
For convolutional layers, only the norm of the kernels is considered (the norm of biases
is ignored).
Args:
normalizer (str): 'max' to normalize by dividing each norm by the maximum norm within
a layer; 'L2' to normalize by dividing by the L2 norm of the vector comprising all
kernel norms.
criterion (str): only 'L2' is supported.
granularity (int): granularity of the number of filters to remove at a time.
min_num_filters (int): minimum number of filters to retain in each layer.
threshold (float): threshold to compare normalized norm against.
equalization_criterion (str): criteria to equalize element wise operation layers.
Supported criteria are 'arithmetic_mean', 'geometric_mean', 'union', 'intersection'.
excluded_layers (list): list of names of layers that should not be pruned. Typical usage
is for output layers of conv nets where the number of output channels must match
a number of classes.
"""
def __init__(
self,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=None,
equalization_criterion="union"
):
"""Initialization routine."""
self._normalizer = normalizer
self._criterion = criterion
self._granularity = granularity
self._min_num_filters = min_num_filters
self._equalization_criterion = equalization_criterion
self._equalization_groups = []
self._threshold = threshold
if excluded_layers is None:
excluded_layers = []
self._excluded_layers = excluded_layers
self._explored_layers = {}
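    # Construction sketch (arguments as documented in the class docstring above; the
    # excluded layer name is hypothetical, and the pruned model itself is produced by
    # the `prune` method defined further below):
    #
    #     pruner = PruneMinWeight('max', 'L2', granularity=8, min_num_filters=16,
    #                             threshold=0.5, excluded_layers=['conv2d_output'])
    #     pruned_model = pruner.prune(model)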
@staticmethod
def _get_channel_index(data_format):
"""Return the channel index for the specified data format.
Args:
data_format (str): 'channels_first' or 'channels_last'.
"""
if data_format == "channels_first":
return -3
if data_format == "channels_last":
return -1
raise ValueError("Unknown data format: %s" % str(data_format))
def _get_data_format(self, layer):
# Return a layer's data format. Recurse through previous layers
# if necessary. If the data format cannot be determined, this
# function returns ``None``.
if hasattr(layer, "data_format"):
return layer.data_format
if type(layer) == keras.layers.TimeDistributed and hasattr(layer.layer, 'data_format'):
return layer.layer.data_format
if type(layer) in [keras.layers.Reshape,
keras.layers.Permute,
AnchorBoxes,
RetinaAnchorBoxes,
YOLOAnchorBox,
BBoxPostProcessingLayer,
AnchorLayer,
ReshapeLayer,
BoxTargetEncoder,
ForegroundSelectorForMask,
GPUDetections,
MaskPostprocess,
MaskTargetsLayer,
MultilevelCropResize,
MultilevelProposal,
ProposalAssignment] or \
(type(layer) == keras.layers.TimeDistributed and
type(layer.layer) in [keras.layers.Reshape,
keras.layers.Permute,
AnchorBoxes,
RetinaAnchorBoxes,
YOLOAnchorBox,
BBoxPostProcessingLayer,
AnchorLayer,
ReshapeLayer,
BoxTargetEncoder,
ForegroundSelectorForMask,
GPUDetections,
MaskPostprocess,
MaskTargetsLayer,
MultilevelCropResize,
MultilevelProposal,
ProposalAssignment]):
# Reshape and Permute layers make it impossible to retrieve the data format.
return None
if type(layer) == keras.layers.Flatten or \
(type(layer) == keras.layers.TimeDistributed and
type(layer.layer) == keras.layers.Flatten):
# Flatten layers yield (N, K) tensors and can be considered
# either "channels_first" or "channels_last" indifferently.
# Let's pick "channels_first" (N, C, H, W) arbitrarily.
return "channels_first"
inbound_layers = _get_inbound_layers(layer)
if not inbound_layers:
# We have not found the data format.
return None
# Recurse through inbound layers.
data_formats = [self._get_data_format(l) for l in inbound_layers]
if type(layer) in [CropAndResize, Proposal, ProposalTarget]:
data_formats = data_formats[:1]
if len(set(data_formats)) > 1:
raise ValueError(
"Found more than 1 data format in "
"inbound layers: %s" % repr(data_formats)
)
return data_formats[0]
def _get_previous_retained_idx(self, layer):
inbound_layers = _get_inbound_layers(layer)
if inbound_layers:
return self._explored_layers[inbound_layers[0].name].retained_idx
return None
@override
def _get_filter_stats(self, kernels, layer):
"""
Return norms of all kernel filters.
        This function implements the 'min_weight' criterion and returns the norms of the filters
in the layer.
Args:
kernels (Array): array of kernels to get retained indices of, where the last
dimension indexes individual kernels.
layer(keras Layer): the layer whose filters we are going to make statistics.
Returns:
explored_stat (1-d array): array of pruning stats for individual kernels
"""
if self._criterion == "L2":
pruning_stat = utils.get_L2_norm(kernels, layer)
else:
raise NotImplementedError("%s pruning" % self._criterion)
# Layer-wise normalization.
pruning_stat = utils.normalize_stat(pruning_stat, self._normalizer)
return pruning_stat
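    # Normalization example (hypothetical kernel norms [3.0, 4.0]): per the class
    # docstring, normalizer='max' divides by the largest norm, giving [0.75, 1.0];
    # normalizer='L2' divides by sqrt(3.0**2 + 4.0**2) = 5.0, giving [0.6, 0.8]. The
    # tests above also pass 'off', which presumably leaves the norms unchanged.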
def _merge_layerwise_stats(self, layerwise_stats):
"""
Merge the layerwise pruning stats according to equalization_criterion.
Args:
layerwise_stats (2-d array): Array of pruning stats for individual kernels
in multiple weights.
Returns:
merged_stats (1-d array): Merged pruning stats.
"""
if type(layerwise_stats) == list:
layerwise_stats = np.stack(layerwise_stats)
assert (
layerwise_stats.ndim == 2
), "Layerwise stats has to be two dimensional array."
if self._equalization_criterion == "union":
# Equalizing input retained indices by computing union of the feature maps.
merged_stat = np.amax(layerwise_stats, axis=0)
elif self._equalization_criterion == "intersection":
# Equalizing input retained indices by computing intersection of the
# feature maps.
merged_stat = np.amin(layerwise_stats, axis=0)
elif self._equalization_criterion == "arithmetic_mean":
# Equalizing input retained indices by making sure the mean of the filter norms
# cross the threshold.
merged_stat = np.mean(layerwise_stats, axis=0)
elif self._equalization_criterion == "geometric_mean":
# Equalizing input retained indices by making sure the geometric mean of the
# filter norms cross the threshold.
# Numpy handles np.exp(np.log(0.0)) == 0.0 OK, although throws a warning.
log_stats = np.log(layerwise_stats)
merged_stat = np.exp(np.mean(log_stats, axis=0))
else:
raise NotImplementedError(
"Unknown equalization criterion for element-wise"
"operations: {}".format(self._equalization_criterion)
)
return merged_stat
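    # Merging sketch (hypothetical stats for two layers feeding an element-wise op):
    # for layerwise_stats = [[0.9, 0.1], [0.2, 0.8]], 'union' (element-wise max)
    # gives [0.9, 0.8], 'intersection' (element-wise min) gives [0.2, 0.1], and
    # 'arithmetic_mean' gives [0.55, 0.45].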
@override
def _get_retained_idx(self, explored_stat):
"""
Return indices of filters to retain from explored stats (filter norms).
This function computes the filter indices to be retained at the end of pruning.
        The number of filters is clamped to a multiple of the granularity.
Args:
explored_stat (1-d array): array of pruning stats for individual kernels
Returns:
retained_idx (1-d array): indices of filters to retain.
"""
retained_idx = np.where(explored_stat > self._threshold)[0]
# Compute depth of the layer before pruning.
orig_output_depth = len(explored_stat)
# Check granularity and minimum number of filters.
num_retained = len(retained_idx)
# Minimum number of filters - this shouldn't be more than the
# original number of filters.
min_num_filters = min(self._min_num_filters, orig_output_depth)
        # Maintain at least min_num_filters filters.
num_retained = max(min_num_filters, num_retained)
# Clamping to the nearest multiple of granularity.
if num_retained % self._granularity > 0:
num_retained += self._granularity - (num_retained % self._granularity)
# Sanity check.
num_retained = min(num_retained, orig_output_depth)
        # Sort filter ids by their pruning stat, in descending order.
sorted_idx = np.argsort(-explored_stat)
retained_idx = np.sort(sorted_idx[:num_retained])
return retained_idx
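    # Worked example (hypothetical numbers): if the threshold keeps 13 of 64 filters
    # with granularity=4 and min_num_filters=8, then num_retained = max(8, 13) = 13,
    # which is rounded up to the next multiple of the granularity (16) and capped at
    # the original 64; the 16 filters with the largest stats are retained, returned
    # in ascending index order.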
def _is_layer_pruned(self, layer):
# Recurse through previous layers until we find one that is explicitly
# pruned or not pruned.
if layer.name in self._explored_layers:
is_pruned = self._explored_layers[layer.name].is_pruned
if is_pruned is not None:
return is_pruned
inbound_layers = _get_inbound_layers(layer)
if inbound_layers:
return any([self._is_layer_pruned(l) for l in inbound_layers])
return False
def _equalize_retained_indices(self, eltwise_prunable_inputs):
"""Equalize retained indices of all inputs to the element wise layer."""
if type(eltwise_prunable_inputs[0]) != keras.layers.TimeDistributed:
output_depth = eltwise_prunable_inputs[0].filters
else:
output_depth = eltwise_prunable_inputs[0].layer.filters
if any(l.name in self._excluded_layers for l in eltwise_prunable_inputs):
logger.debug(
"Skipping equalization of eltwise inputs: "
"{}".format(eltwise_prunable_inputs)
)
output_idx = range(output_depth)
else:
layerwise_stats = []
for prunable_input in eltwise_prunable_inputs:
layerwise_stats.append(
self._explored_layers[prunable_input.name].explored_stat
)
merged_stats = self._merge_layerwise_stats(layerwise_stats)
output_idx = self._get_retained_idx(merged_stats)
return output_idx
def _equalize_dw_retained_indices(self,
previous_stat,
this_stat,
previous_layer,
this_layer,
criterion):
"""Equalize the depth-wise conv. and its previous layer's retained indexes."""
dw_layers = [previous_layer, this_layer]
if type(dw_layers[0]) == keras.layers.TimeDistributed:
output_depth = dw_layers[0].layer.filters
else:
output_depth = dw_layers[0].filters
if any(l.name in self._excluded_layers for l in dw_layers):
logger.debug("Skipping equalization of depth-wise conv layers: "
"{}".format(dw_layers))
output_idx = range(output_depth)
else:
cumulative_stat = previous_stat
if criterion == "union":
# Equalizing input retained indices by computing union of the feature maps.
cumulative_stat = np.maximum(cumulative_stat, this_stat)
elif criterion == "intersection":
# Equalizing input retained indices by computing intersection of the
# feature maps.
cumulative_stat = np.minimum(cumulative_stat, this_stat)
elif criterion == "arithmetic_mean":
# Equalizing input retained indices by making sure the mean of the filter norms
# cross the threshold.
cumulative_stat = (cumulative_stat + this_stat) / 2.0
elif criterion == "geometric_mean":
# Equalizing input retained indices by making sure the geometric mean of the
# filter norms cross the threshold.
cumulative_stat = np.power(np.multiply(cumulative_stat,
this_stat), float(1 / 2.0))
else:
raise NotImplementedError("Unknown equalization criterion for depth-wise conv. "
"operations: {}".format(criterion))
# Clamp outputs to a multiple of the granularity
output_idx = self._get_retained_idx(cumulative_stat)
return output_idx
def _explore_conv_transpose_layer(self, layer):
        # No transformation here, just propagate the number of output feature maps.
kernels, _, _ = self._unravel_weights(layer)
retained_idx = range(kernels.shape[2])
return retained_idx
def _explore_conv_or_fc_layer(self, layer):
# Retrieve weights.
kernels, _, _ = self._unravel_weights(layer)
# Identify filters to prune.
if layer.name in self._excluded_layers:
explored_stat = None
retained_idx = range(kernels.shape[-1])
else:
explored_stat = self._get_filter_stats(kernels, layer)
retained_idx = self._get_retained_idx(explored_stat)
initial_neuron_count = kernels.shape[-1]
retained_neuron_count = len(retained_idx)
is_pruned = retained_neuron_count < initial_neuron_count
return retained_idx, explored_stat, is_pruned
def _explore_conv_dw_layer(self, layer):
# Retrieve weights.
kernels, _, _ = self._unravel_weights(layer)
# Raise error when it's a DepthwiseConv2D layer but depth_multiplier != 1
if kernels.shape[-1] != 1:
raise ValueError('DepthwiseConv2D for pruning can only have depth_multiplier == 1.')
# Identify filters to prune.
if layer.name in self._excluded_layers:
explored_stat = None
retained_idx = range(kernels.shape[2])
else:
explored_stat = self._get_filter_stats(kernels, layer)
retained_idx = self._get_retained_idx(explored_stat)
initial_neuron_count = kernels.shape[2]
retained_neuron_count = len(retained_idx)
# apply equalization for depth-wise conv.
dw_layers = []
dw_layers = utils.find_prunable_parent(dw_layers,
layer,
True)
self._update_equalization_groups(dw_layers + [layer])
previous_layer = dw_layers[0]
previous_stat = self._explored_layers[previous_layer.name].explored_stat
retained_idx = self._equalize_dw_retained_indices(previous_stat,
explored_stat,
previous_layer,
layer,
self._equalization_criterion)
retained_neuron_count = len(retained_idx)
self._explored_layers[previous_layer.name].retained_idx = retained_idx
is_pruned = retained_neuron_count < initial_neuron_count
self._explored_layers[previous_layer.name].is_pruned = is_pruned
return retained_idx, explored_stat, is_pruned
def _explore_conv_2d_gru_layer(self, layer):
# Retrieve weights: W_z, W_r, W_h, U_z, U_r, U_h, b_z, b_r, b_h.
weights = layer.get_weights()
# Identify filters to prune.
if layer.name in self._excluded_layers or not layer.trainable:
if layer.name not in self._excluded_layers:
logger.info("Skipping nontrainable layer: {}".format(layer.name))
# Infer output channels from first kernel.
explored_stat = None
retained_idx = range(weights[0].shape[-1])
else:
layerwise_stats = []
# Do not take bias into account in the pruning decision,
# use only W_z, W_r, W_h, U_z, U_r, U_h.
for kernels in weights[:6]:
layerwise_stats.append(self._get_filter_stats(kernels, layer))
# Merge stats according to equalization criterion for determining joint pruned indices.
# This handles the elementwise ops in the layer.
explored_stat = self._merge_layerwise_stats(layerwise_stats)
retained_idx = self._get_retained_idx(explored_stat)
initial_neuron_count = weights[0].shape[-1]
retained_neuron_count = len(retained_idx)
is_pruned = retained_neuron_count < initial_neuron_count
return retained_idx, explored_stat, is_pruned
def _update_equalization_groups(self, eltwise_prunable_inputs):
eq_groups = []
for g in self._equalization_groups:
for epi in eltwise_prunable_inputs:
if epi in g:
eq_groups.append(g)
break
merged_group = []
for g in eq_groups:
merged_group.extend(g)
for epi in eltwise_prunable_inputs:
if epi not in merged_group:
merged_group.append(epi)
for g in eq_groups:
idx = self._equalization_groups.index(g)
del self._equalization_groups[idx]
self._equalization_groups.append(merged_group)
return merged_group
def _explore_elmtwise_layer(self, layer):
eltwise_prunable_inputs = []
eltwise_prunable_inputs = utils.find_prunable_parent(
eltwise_prunable_inputs, layer
)
logger.debug(
"At explore_elmtwise_layer: Prunable parents at layer {}".format(layer.name)
)
eltwise_prunable_inputs = list(set(eltwise_prunable_inputs))
for l in eltwise_prunable_inputs:
logger.debug("Prunable_parents {}".format(l.name))
# If any of the parents are broadcast layers, pop them out of prunable input list.
if type(l) != keras.layers.TimeDistributed and l.filters == 1:
                # Retain the single filter of this broadcast layer.
self._explored_layers[l.name].retained_idx = range(l.filters)
self._explored_layers[l.name].is_pruned = False
eltwise_prunable_inputs.pop(eltwise_prunable_inputs.index(l))
elif type(l) == keras.layers.TimeDistributed and l.layer.filters == 1:
                # Retain the single filter of this broadcast layer.
self._explored_layers[l.name].retained_idx = range(l.layer.filters)
self._explored_layers[l.name].is_pruned = False
eltwise_prunable_inputs.pop(eltwise_prunable_inputs.index(l))
# If newly updated eltwise true inputs have more than one branch, then
# equalize the retained indices.
if len(eltwise_prunable_inputs) > 1:
eltwise_prunable_inputs = self._update_equalization_groups(
eltwise_prunable_inputs
)
fixed_retained_idx = self._equalize_retained_indices(
eltwise_prunable_inputs
)
# Otherwise just prune the one conv layer as it was before.
elif len(eltwise_prunable_inputs) == 1:
layer_name = eltwise_prunable_inputs[-1].name
fixed_retained_idx = self._explored_layers[layer_name].retained_idx
else:
# Retrieve weights.
kernels, _, _ = self._unravel_weights(layer)
return range(kernels.shape[-1])
retained_idx = fixed_retained_idx
# Set the newly calculated retained indices for all eltwise prunable layers while,
# also checking if they were pruned during the equalization.
for i in eltwise_prunable_inputs:
self._explored_layers[i.name].retained_idx = retained_idx
initial_neuron_count = i.get_weights()[0].shape[-1]
pruned_state = len(retained_idx) < initial_neuron_count
self._explored_layers[i.name].is_pruned = pruned_state
        # If the layer itself is a (shared) Conv2D, re-explore it as a regular conv
        # layer and use its own retained indices.
if type(layer) == keras.layers.Conv2D:
logger.debug("Conv2D layer '{}' is shared.".format(layer.name))
retained_idx, _, _ = self._explore_conv_or_fc_layer(layer)
return retained_idx
def _explore_td_layer(self, layer):
retained_idx = None
explored_stat = None
is_pruned = None
if type(layer.layer) in [
keras.layers.Conv2D,
keras.layers.Dense,
QuantizedConv2D,
QuantizedDense,
]:
retained_idx, explored_stat, is_pruned = self._explore_conv_or_fc_layer(layer)
elif type(layer.layer) in [keras.layers.DepthwiseConv2D, QuantizedDepthwiseConv2D]:
retained_idx, explored_stat, is_pruned = self._explore_conv_dw_layer(layer)
return retained_idx, explored_stat, is_pruned
def _prune_explored_concat_layer(self, layer):
data_format = self._get_data_format(layer)
if data_format is not None:
channel_index = self._get_channel_index(data_format)
n_dims = len(layer.output_shape)
allowed_axes = [channel_index % n_dims, channel_index % -n_dims]
if layer.axis not in allowed_axes:
raise ValueError(
"Concatenation layer only supported on channel axis: "
"data_format=%s axis=%d" % (data_format, layer.axis)
)
else:
# The data format is unknown so we must make sure the previous layer was not pruned.
if self._is_layer_pruned(layer):
raise ValueError(
"Cannot process a concatenation layer if the data format "
"is unknown and one of the previous layers was pruned"
)
channel_index = layer.axis
previous_layers = [l for n in layer._inbound_nodes for l in n.inbound_layers]
retained_indices = []
offset = 0
for l in previous_layers:
if self._is_layer_pruned(l):
# Retain unpruned channels.
retained_idx = self._explored_layers[l.name].retained_idx
else:
# Retain all channels.
retained_idx = range(l.output_shape[channel_index])
shifted_indices = [idx + offset for idx in retained_idx]
retained_indices.extend(shifted_indices)
offset += l.output_shape[channel_index]
return retained_indices
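    # Index-shifting sketch (hypothetical): concatenating a pruned branch that kept
    # channels [0, 2] of its original 4 with an unpruned 3-channel branch yields
    # retained indices [0, 2] for the first branch plus [4, 5, 6] (offset by 4) for
    # the second, i.e. [0, 2, 4, 5, 6] in the concatenated tensor.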
def _prune_explored_split_layer(self, layer):
data_format = self._get_data_format(layer)
if data_format is not None:
channel_index = self._get_channel_index(data_format)
else:
channel_index = 1
previous_layers = _get_inbound_layers(layer)
pl = previous_layers[0]
assert not self._is_layer_pruned(pl), (
"Split layer's previous layer cannot be pruned. Try to add it "
"to excluded layer list."
)
total_channels = pl.output_shape[channel_index]
assert total_channels % layer.groups == 0, (
"The number of channels of previous layer should be a multiple "
"of the Split layer's group attribute."
)
n_channels = total_channels // layer.groups
return range(n_channels)
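    # Example: if the (unpruned) previous layer outputs 64 channels and the Split
    # layer has groups=2, each split branch keeps range(32), i.e. all 32 channel
    # indices of its slice.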
def _prune_explored_flatten_layer(self, layer):
# We need to take the activations of previous layer into account.
try:
previous_layer = layer._inbound_nodes[-1].inbound_layers[0]
except TypeError:
# tf.keras could, for some weird reason, not make inbound nodes / layers
# a list if there's only a single item for them.
previous_layer = layer._inbound_nodes[-1].inbound_layers
        # If the previous layer is a QDQ layer, trace back one more step.
if type(previous_layer) == QDQ:
previous_layer = previous_layer._inbound_nodes[-1].inbound_layers[0]
try:
previous_layer = layer._inbound_nodes[-1].inbound_layers[0]
except TypeError:
# tf.keras could, for some weird reason, not make inbound nodes / layers
# a list if there's only a single item for them.
previous_layer = layer._inbound_nodes[-1].inbound_layers
previous_layer_shape = previous_layer.output.get_shape()
if type(previous_layer) == CropAndResize:
previous_layer_shape = previous_layer.output_shape
previous_data_format = self._get_data_format(previous_layer)
previous_retained_idx = self._get_previous_retained_idx(layer)
if previous_data_format in ['channels_first', 'channels_last']:
            assert previous_retained_idx is not None, (
                'Previous retained index of Flatten layer cannot be None if the data'
                ' format is known.'
            )
if len(previous_layer_shape) != 4 and type(layer) != keras.layers.TimeDistributed:
raise ValueError("Expecting 4-dimensional activations got shape=%s" %
(repr(previous_layer_shape)))
# Take the spatial size into account and create a mask of activations to
# retain from previous layer.
if previous_data_format == "channels_first":
# NCHW case.
inp_spatial_size = int(np.prod(previous_layer_shape[-2:]))
inp_num_fmaps = int(previous_layer_shape[-3])
retained_filter_mask = np.asarray([False] * inp_num_fmaps)
retained_filter_mask[previous_retained_idx] = True
retained_activation_mask = np.repeat(retained_filter_mask, inp_spatial_size)
elif previous_data_format == "channels_last":
# NHWC case.
inp_spatial_size = int(np.prod(previous_layer_shape[-3:-1]))
inp_num_fmaps = int(previous_layer_shape[-1])
retained_filter_mask = np.asarray([False] * inp_num_fmaps)
retained_filter_mask[previous_retained_idx] = True
retained_activation_mask = np.tile(retained_filter_mask, inp_spatial_size)
elif previous_data_format is None:
# The data format is unknown, make sure the previous layer was not pruned.
if self._is_layer_pruned(layer):
raise ValueError(
"Cannot process a pruned flatten layer if the "
"data format is unknown."
)
else:
raise ValueError("Unknown data format: %s" % previous_data_format)
if previous_data_format is not None:
retained_idx = np.where(retained_activation_mask)[0]
else:
retained_idx = None
return retained_idx
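    # Mask sketch (hypothetical channels_first case): with 4 input feature maps of
    # spatial size 2x3 and previous_retained_idx = [1, 3], the per-filter mask
    # [F, T, F, T] is repeated 6 times per filter, so the flattened activations at
    # indices 6..11 and 18..23 are the ones retained.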
def _prune_explored_batch_norm_layer(self, layer):
# Propagate from previous layer.
retained_idx = self._get_previous_retained_idx(layer)
self._explored_layers[layer.name].retained_idx = retained_idx
weights = tuple([w[retained_idx] for w in layer.get_weights()])
return weights
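    # Note: each of the BatchNormalization weight vectors returned by get_weights()
    # (typically gamma, beta, moving_mean, moving_variance) is sliced to the channels
    # retained by the previous layer.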
def _prune_explored_conv_or_fc_layer(self, layer):
# Retrieve weights.
kernels, biases, scale_factor = self._unravel_weights(layer)
previous_retained_idx = self._get_previous_retained_idx(layer)
# Remove incoming connections that have been pruned in previous layer.
is_conv2d = False
if type(layer) in [keras.layers.Conv2D,
QuantizedConv2D]:
is_conv2d = True
elif (type(layer) == keras.layers.TimeDistributed and
type(layer.layer) in [keras.layers.Conv2D,
QuantizedConv2D]):
is_conv2d = True
if previous_retained_idx is not None:
if is_conv2d:
kernels = kernels[:, :, previous_retained_idx, :]
else:
kernels = kernels[previous_retained_idx, :]
# Check if the current layer has been explored
if layer.name not in self._explored_layers:
raise ValueError("{} not explored".format(layer.name))
# Import retained idx from the explored stage.
retained_idx = self._explored_layers[layer.name].retained_idx
initial_neuron_count = kernels.shape[-1]
retained_neuron_count = len(retained_idx)
# Prune neurons from kernels and update layer spec.
if is_conv2d:
kernels = kernels[:, :, :, retained_idx]
if type(layer) in [keras.layers.Conv2D,
QuantizedConv2D]:
layer.filters = retained_neuron_count
else:
layer.layer.filters = retained_neuron_count
else:
kernels = kernels[:, retained_idx]
if type(layer) in [keras.layers.Dense, QuantizedDense]:
layer.units = retained_neuron_count
else:
layer.layer.units = retained_neuron_count
# Prune neurons from biases.
if biases is not None:
biases = biases[retained_idx]
output_weights = (kernels, biases)
else:
output_weights = (kernels,)
# Set scale factor for QuantizedConv2D layer.
if scale_factor is not None:
output_weights += (scale_factor,)
msg = "layer %s: %d -> %d - actions: %s " % (
layer.name,
initial_neuron_count,
retained_neuron_count,
"[name: %s]" % layer.name,
)
logger.debug(msg)
return output_weights
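    # Shape sketch (assuming Keras' HWIO kernel layout for Conv2D): a kernel of shape
    # (kh, kw, c_in, c_out) is first sliced along c_in to the channels retained by the
    # previous layer, then along c_out to this layer's own retained_idx; Dense kernels
    # of shape (c_in, c_out) are sliced analogously along both axes.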
def _prune_explored_conv_dw_layer(self, layer):
# Retrieve weights.
kernels, biases, scale_factor = self._unravel_weights(layer)
initial_neuron_count = kernels.shape[2]
# Check if the current layer has been explored
if layer.name not in self._explored_layers:
raise ValueError("{} not explored".format(layer.name))
# Import retained idx from the explored stage.
retained_idx = self._explored_layers[layer.name].retained_idx
kernels = kernels[:, :, retained_idx, :]
retained_neuron_count = len(retained_idx)
# Prune neurons from biases.
if biases is not None:
biases = biases[retained_idx]
output_weights = (kernels, biases)
else:
output_weights = (kernels,)
# Set scale factor for QuantizedDWConv layer.
if scale_factor is not None:
output_weights += (scale_factor,)
msg = "layer %s: %d -> %d - actions: %s " % (
layer.name,
initial_neuron_count,
retained_neuron_count,
"[name: %s]" % layer.name,
)
logger.debug(msg)
return output_weights
def _prune_explored_conv_transpose_layer(self, layer):
previous_retained_idx = self._get_previous_retained_idx(layer)
kernels, biases, scale_factor = self._unravel_weights(layer)
kernels = kernels[:, :, :, previous_retained_idx]
weights = (kernels, biases) if biases is not None else (kernels,)
# Set scale factor for QuantizedConvTranspose layer.
if scale_factor is not None:
weights += (scale_factor,)
return weights
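    # Layout note (assuming Keras' (kh, kw, c_out, c_in) kernel layout for
    # Conv2DTranspose): only the input-channel axis (last) is sliced here to match the
    # previous layer's retained filters; the transpose layer's own output channels are
    # left untouched, consistent with _explore_conv_transpose_layer above.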
def _prune_explored_conv_2d_gru_layer(self, layer):
# Retrieve weights: W_z, W_r, W_h, U_z, U_r, U_h, b_z, b_r, b_h
weights = layer.get_weights()
previous_retained_idx = self._get_previous_retained_idx(layer)
# Remove incoming connections that have been pruned in previous layer.
if previous_retained_idx is not None:
            # First three convolution weights (W_z, W_r, W_h) operate on the input tensor.
for idx, kernels in enumerate(weights[:3]):
weights[idx] = kernels[:, :, previous_retained_idx, :]
# Check if the current layer has been explored
if layer.name not in self._explored_layers:
raise ValueError("{} not explored".format(layer.name))
# Import retained idx from the explored stage.
retained_idx = self._explored_layers[layer.name].retained_idx
initial_neuron_count = weights[0].shape[-1]
retained_neuron_count = len(retained_idx)
# Remove incoming connections in the kernels that operate on the state (U_z, U_r, U_h).
for idx, kernels in enumerate(weights[3:6]):
weights[idx + 3] = kernels[:, :, retained_idx, :]
# Prune output channels from all kernels (W_z, W_r, W_h, U_z, U_r, U_h).
for idx, kernels in enumerate(weights[:6]):
weights[idx] = kernels[:, :, :, retained_idx]
# Prune output channels from biases (b_z, b_r, b_h).
for idx, biases in enumerate(weights[6:]):
weights[idx + 6] = biases[retained_idx]
# Update layer config.
layer.state_depth = retained_neuron_count
layer.initial_state_shape = list(layer.initial_state_shape)
layer.initial_state_shape[1] = retained_neuron_count
msg = "layer %s: %d -> %d - actions: %s " % (
layer.name,
initial_neuron_count,
retained_neuron_count,
"[name: %s]" % layer.name,
)
logger.debug(msg)
return weights
def _prune_explored_td_layer(self, layer):
weights = None
retained_idx = None
if type(layer.layer) in [
keras.layers.Conv2D,
keras.layers.Dense,
QuantizedConv2D,
QuantizedDense,
]:
weights = self._prune_explored_conv_or_fc_layer(layer)
elif type(layer.layer) in [keras.layers.DepthwiseConv2D, QuantizedDepthwiseConv2D]:
weights = self._prune_explored_conv_dw_layer(layer)
elif type(layer.layer) in [keras.layers.BatchNormalization, PatchedBatchNormalization]:
weights = self._prune_explored_batch_norm_layer(layer)
elif type(layer.layer) == keras.layers.Flatten:
retained_idx = self._prune_explored_flatten_layer(layer)
elif type(layer.layer) == keras.layers.Concatenate:
retained_idx = self._prune_explored_concat_layer(layer)
else:
retained_idx = self._get_previous_retained_idx(layer)
return weights, retained_idx
def _unravel_weights(self, layer):
configs = layer.get_config()
is_quantized = ('quantize' in configs) and configs['quantize']
weights = layer.get_weights()
if is_quantized:
scaling_factor = weights[-1]
weights = weights[:-1]
else:
scaling_factor = None
if len(weights) == 1:
kernels = weights[0]
biases = None
elif len(weights) == 2:
kernels, biases = weights
else:
raise ValueError("Unhandled number of weights: %d" % len(weights))
return kernels, biases, scaling_factor
def _convert_to_list(self, obj):
if not isinstance(obj, list):
return [obj]
return obj
def _explore(self, model):
"""Explore a model for pruning and decide the feature-maps for all layers.
The model to prune must be a string of convolutional or fully-connected nodes. For example,
the nv-Helnet family of models, the VGG-xx family of models, the ResNet-xx family of
models, AlexNet or LeNet can be pruned using this API.
        This function implements the 'min_weight' filtering method described in:
[Molchanov et al.] Pruning Convolutional Neural Networks for Resource Efficient Inference,
arXiv:1611.06440.
For convolutional layers, only the norm of the kernels is considered (the norm of biases
is ignored).
Args:
model (Model): the Keras model to prune.
Returns:
model (Model): the explored model.
"""
# Explore the model using Breadth First Traversal, starting from the input layers.
logger.info("Exploring graph for retainable indices")
layers_to_explore = model._input_layers
model_inputs = []
model_outputs = []
# Loop until we reach the last layer.
while layers_to_explore:
layer = layers_to_explore.pop(0)
logger.debug("Exploring layer : {}".format(layer.name))
go_to_another_layer = any([l.name not in self._explored_layers
for n in self._convert_to_list(layer._inbound_nodes)
for l in self._convert_to_list(n.inbound_layers)])
if go_to_another_layer:
# Some of the inbound layers have not been explored yet.
# Skip this layer for now, it will come back to the list
# of layers to explore as the outbound layer of one of the
# yet unexplored layers.
continue
retained_idx = None
explored_stat = None
outputs = None
is_pruned = None
# Layer-specific handling.
if type(layer) in [
keras.layers.DepthwiseConv2D,
QuantizedDepthwiseConv2D
]:
retained_idx, explored_stat, is_pruned = self._explore_conv_dw_layer(layer)
elif type(layer) in [
keras.layers.Conv2D,
keras.layers.Dense,
QuantizedConv2D,
QuantizedDense,
]:
elmtwise_inputs = _get_inbound_layers(layer)
# Handle pruning for keras element wise merge layers.
if len(layer._inbound_nodes) > 1 and len(set(elmtwise_inputs)) > 1:
# For eltwise layers check if all the inbound layers have been explored first.
if all(l.name in self._explored_layers for l in elmtwise_inputs):
retained_idx = self._explore_elmtwise_layer(layer)
else:
continue
else:
# Explore current conv or fc layer for retainable feature maps.
retained_idx, explored_stat, is_pruned = self._explore_conv_or_fc_layer(
layer
)
elif type(layer) in [
keras.layers.Conv2DTranspose,
QuantizedConv2DTranspose
]:
                # Explore conv2d transpose layer for retainable indices.
retained_idx = self._explore_conv_transpose_layer(layer)
elif type(layer) in [
keras.layers.Activation,
keras.layers.BatchNormalization,
PatchedBatchNormalization,
keras.layers.Dropout,
keras.layers.MaxPooling2D,
keras.layers.AveragePooling2D,
keras.layers.GlobalAveragePooling2D,
keras.layers.Softmax,
keras.layers.ReLU,
keras.layers.ELU,
keras.layers.LeakyReLU,
keras.layers.InputLayer,
keras.layers.ZeroPadding2D,
keras.layers.Flatten,
keras.layers.Concatenate,
keras.layers.UpSampling2D,
keras.layers.Cropping2D,
AnchorBoxes,
RetinaAnchorBoxes,
YOLOAnchorBox,
BBoxPostProcessingLayer,
CropAndResize,
Proposal,
ProposalTarget,
keras.models.Model,
QDQ,
AnchorLayer,
# ReshapeLayer,
BoxTargetEncoder,
ForegroundSelectorForMask,
GPUDetections,
MaskPostprocess,
MaskTargetsLayer,
MultilevelCropResize,
MultilevelProposal,
ProposalAssignment,
BoxInput, ClassInput,
MaskInput, ImageInput,
InfoInput, Split,
ImageResizeLayer,
keras.layers.SeparableConv2D
]:
# These layers are just pass-throughs.
pass
elif type(layer) in [keras.layers.Reshape, keras.layers.Permute]:
# Make sure that the previous layer was unpruned.
if self._is_layer_pruned(layer):
if (
type(layer) == keras.layers.Reshape and
-1 in layer.target_shape
):
retained_idx = None
is_pruned = None
else:
raise NotImplementedError(
"Reshape/Permute is not supported after a pruned layer."
)
else:
retained_idx = None
is_pruned = False
elif type(layer) == ReshapeLayer:
# Make sure that the previous layer was unpruned.
retained_idx = None
is_pruned = False
# Handle pruning for keras element wise merge layers.
elif type(layer) in [
keras.layers.Add,
keras.layers.Subtract,
keras.layers.Multiply,
keras.layers.Average,
keras.layers.Maximum,
WeightedFusion,
]:
# For eltwise layers check if all the inbound layers have been explored first.
elmtwise_inputs = [
l for n in layer._inbound_nodes for l in n.inbound_layers
]
if all(l.name in self._explored_layers for l in elmtwise_inputs):
retained_idx = self._explore_elmtwise_layer(layer)
else:
continue
elif type(layer) == ConvGRU2D:
# Explore conv2d GRU layer for retainable indices.
retained_idx, explored_stat, is_pruned = self._explore_conv_2d_gru_layer(
layer
)
elif type(layer) == keras.layers.TimeDistributed:
retained_idx, explored_stat, is_pruned = self._explore_td_layer(layer)
elif type(layer) == keras.layers.Lambda:
if layer.name not in self._excluded_layers:
raise ValueError(
"Lambda layers must be explicitly excluded from pruning. "
"Met lambda layer with name {} that is not explicitly "
"excluded".format(layer.name)
)
# Once we have verified that the lambda layer is excluded from pruning, it
# can safely be assumed to be a pass-through.
pass
else:
# Raise not implemented error for layers that aren't supported.
raise NotImplementedError("Unknown layer type: %s" % type(layer))
# Explore the input layer.
if type(layer) in [keras.layers.InputLayer,
BoxInput, ClassInput,
MaskInput, ImageInput,
InfoInput]:
# Re-use the existing InputLayer.
outputs = layer.output
model_inputs.append(outputs)
retained_idx = None
else:
# Make sure there are no duplicates in the retained indices.
if retained_idx is not None:
assert len(retained_idx) == len(set(retained_idx)), (
"Duplicates found in "
"list of retained "
"indices: %s." % repr(retained_idx)
)
outbound_nodes = layer._outbound_nodes
if not outbound_nodes:
model_outputs.append(outputs)
for node in outbound_nodes:
if node.outbound_layer not in layers_to_explore:
layers_to_explore.append(node.outbound_layer)
names_to_explore = [l.name for l in layers_to_explore]
logger.debug(
"Updating layers to explore at {} to: {}".format(
layer.name, names_to_explore
)
)
# Save info about the layer we just explored.
self._explored_layers[layer.name] = PrunedLayer(
retained_idx, explored_stat=explored_stat, is_pruned=is_pruned
)
return model
@override
def prune(self, model, layer_config_overrides=None, output_layers_with_outbound_nodes=None):
"""Prune an alreayd explored model, contains the retained-indices for all layers.
The model to prune must be a string of convolutional or fully-connected nodes. For example,
the nv-Helnet family of models, the VGG-xx family of models, the Resnet-xx family of
models, AlexNet or LeNet can be pruned using this API.
This function implements the 'min_weight' filtering method described on:
[Molchanov et al.] Pruning Convolutional Neural Networks for Resource Efficient Inference,
arXiv:1611.06440.
For convolutional layers, only the norm of the kernels is considered (the norm of biases
is ignored).
Args:
model (Model): the Keras model to prune.
layer_config_overrides (dict): A dictionary of key-value pairs used for overriding
layer configuration. Use cases include changing regularizers after pruning.
output_layers_with_outbound_nodes (list): Option to specify intermediate output layers
that have `outbound_nodes`.
Returns:
model (Model): the pruned model.
"""
# get `training` config for BN reconstruction
config_map = {l['name'] : l['inbound_nodes'] for l in model.get_config()['layers']}
# Phase 1: Explore the model.
if not output_layers_with_outbound_nodes:
output_layers_with_outbound_nodes = []
model = self._explore(model)
# Phase 2: Prune the graph in Breadth First Search fashion, starting from the
# input layer.
logger.debug("Explored layers: {}".format(self._explored_layers.keys()))
logger.debug("Model layers: {}".format([l.name for l in model.layers]))
input_layer = [l for l in model.layers if (
type(l) in [keras.layers.InputLayer,
BoxInput, ClassInput,
MaskInput, ImageInput,
InfoInput])]
layers_to_explore = input_layer
model_outputs = {}
logger.info("Pruning model and appending pruned nodes to new graph")
# Loop until we reach the last layer.
while layers_to_explore:
layer = layers_to_explore.pop(0)
# Skip layers that may be revisited in the graph to prevent duplicates.
if not self._explored_layers[layer.name].visited:
# Check if all inbound layers explored for given layer.
inbound_nodes = layer._inbound_nodes
# For some reason, tf.keras does not always put things in a list.
if not isinstance(inbound_nodes, list):
inbound_nodes = [inbound_nodes]
go_to_another_layer = False
for n in inbound_nodes:
inbound_layers = n.inbound_layers
if not isinstance(inbound_layers, list):
inbound_layers = [inbound_layers]
for l in inbound_layers:
# if isinstance(l, dict):
# break
if not self._explored_layers[l.name].visited:
go_to_another_layer = True
break
if go_to_another_layer:
break
if go_to_another_layer:
# if not (len(prune_visited) == len(inbound_layers)):
# Some of the inbound layers have not gone through pruning phase yet.
# Skip this layer for now, it will come back to the list
# of layers to explore as the outbound layer of one of the
# yet unvisited layers.
continue
logger.debug("Pruning layer: {}".format(layer.name))
weights = None
outputs = None
# Layer-specific handling. Carve weights out based on results from the
# explore phase.
if type(layer) in [
keras.layers.DepthwiseConv2D,
QuantizedDepthwiseConv2D
]:
weights = self._prune_explored_conv_dw_layer(layer)
elif type(layer) in [
keras.layers.Conv2D,
keras.layers.Dense,
QuantizedConv2D,
QuantizedDense,
]:
weights = self._prune_explored_conv_or_fc_layer(layer)
elif type(layer) in [keras.layers.BatchNormalization, PatchedBatchNormalization]:
weights = self._prune_explored_batch_norm_layer(layer)
elif type(layer) in [
keras.layers.Conv2DTranspose,
QuantizedConv2DTranspose
]:
weights = self._prune_explored_conv_transpose_layer(layer)
elif type(layer) == keras.models.Model:
sub_model_layer = self.prune(layer)
weights = sub_model_layer.get_weights()
elif (type(layer) == keras.layers.Concatenate):
retained_idx = self._prune_explored_concat_layer(layer)
self._explored_layers[layer.name].retained_idx = retained_idx
elif type(layer) == Split:
retained_idx = self._prune_explored_split_layer(layer)
self._explored_layers[layer.name].retained_idx = retained_idx
elif type(layer) == keras.layers.Flatten:
retained_idx = self._prune_explored_flatten_layer(layer)
self._explored_layers[layer.name].retained_idx = retained_idx
elif type(layer) == ReshapeLayer:
pass
elif type(layer) in [keras.layers.Reshape, keras.layers.Permute]:
# Make sure that the previous layer was unpruned.
if self._is_layer_pruned(layer):
if not (
type(layer) == keras.layers.Reshape and
-1 in layer.target_shape
):
raise NotImplementedError(
"Reshape is not supported after a pruned layer."
)
if (
type(layer) == keras.layers.Reshape and
-1 in layer.target_shape
):
retained_idx = self._get_previous_retained_idx(layer)
self._explored_layers[layer.name].retained_idx = retained_idx
elif type(layer) in [
keras.layers.Activation,
keras.layers.Dropout,
keras.layers.MaxPooling2D,
keras.layers.AveragePooling2D,
keras.layers.GlobalAveragePooling2D,
keras.layers.Softmax,
keras.layers.ZeroPadding2D,
keras.layers.ReLU,
keras.layers.ELU,
keras.layers.LeakyReLU,
keras.layers.InputLayer,
keras.layers.Add,
keras.layers.Subtract,
keras.layers.Multiply,
keras.layers.Average,
keras.layers.Maximum,
keras.layers.UpSampling2D,
keras.layers.Cropping2D,
AnchorBoxes,
RetinaAnchorBoxes,
YOLOAnchorBox,
BBoxPostProcessingLayer,
CropAndResize,
Proposal,
ProposalTarget,
QDQ,
AnchorLayer,
# ReshapeLayer,
BoxTargetEncoder,
ForegroundSelectorForMask,
GPUDetections,
MaskPostprocess,
MaskTargetsLayer,
MultilevelCropResize,
MultilevelProposal,
ProposalAssignment,
BoxInput, ClassInput,
MaskInput, ImageInput,
InfoInput,
ImageResizeLayer,
WeightedFusion,
keras.layers.SeparableConv2D,
]:
# These layers have no weights associated with them. Hence no transformation,
# but propagate retained indices from the previous layer.
retained_idx = self._get_previous_retained_idx(layer)
if type(layer) == Proposal:
proposal_retained_idx = retained_idx
elif type(layer) == ProposalTarget:
retained_idx = proposal_retained_idx
self._explored_layers[layer.name].retained_idx = retained_idx
elif type(layer) in [keras.layers.InputLayer,
BoxInput, ClassInput,
MaskInput, ImageInput,
InfoInput]:
pass
elif type(layer) == ConvGRU2D:
weights = self._prune_explored_conv_2d_gru_layer(layer)
elif type(layer) == keras.layers.TimeDistributed:
weights, retained_idx = self._prune_explored_td_layer(layer)
if retained_idx is not None:
self._explored_layers[layer.name].retained_idx = retained_idx
else:
# Other layers are not going through any transformation here.
raise NotImplementedError(
"Unsupported layer type for layer names"
"{} of type {}".format(layer.name, type(layer))
)
# Visit input layer.
if type(layer) in [keras.layers.InputLayer,
BoxInput, ClassInput,
MaskInput, ImageInput,
InfoInput]:
# Re-use the existing InputLayer.
outputs = layer.output
new_layer = layer
else:
# Create new layer.
layer_config = layer.get_config()
# Apply layer config overrides.
if layer_config_overrides is not None:
for key in layer_config:
if key in layer_config_overrides:
layer_config[key] = layer_config_overrides[key]
with keras.utils.CustomObjectScope(
{'PriorProbability': PriorProbability,
'mish': mish,
'swish': swish}):
new_layer = type(layer).from_config(layer_config)
# Add to model.
outputs = []
for node in layer._inbound_nodes:
prev_outputs = []
inbound_layers = node.inbound_layers
# For some reason, tf.keras does not always put things in a list.
if not isinstance(inbound_layers, list):
inbound_layers = [inbound_layers]
anchor_layer_lvl = 2
fg_select_list = [1, 0, 2, 3]
fg_select_idx = 0
box_target_list = [2, 0, 1]
box_target_idx = 0
mask_target_list = [0, 2, 3, 1]
mask_target_idx = 1
# for idx, l in enumerate(node.inbound_layers):
for idx, l in enumerate(inbound_layers):
keras_layer = self._explored_layers[l.name].keras_layer
node_indices = node.node_indices
# For some reason, tf.keras does not always put things in a list.
if not isinstance(node_indices, list):
node_indices = [node_indices]
kl_output = keras_layer.get_output_at(node_indices[idx])
if type(keras_layer) == ProposalTarget:
kl_output = kl_output[0]
if type(keras_layer) == AnchorLayer:
kl_output = kl_output[anchor_layer_lvl]
anchor_layer_lvl += 1
if type(keras_layer) == MultilevelProposal:
kl_output = kl_output[1]
if type(keras_layer) == ProposalAssignment and \
type(layer) == ForegroundSelectorForMask:
kl_output = kl_output[fg_select_list[fg_select_idx]]
fg_select_idx += 1
if type(keras_layer) == ProposalAssignment and \
type(layer) == MultilevelCropResize:
kl_output = kl_output[2]
if type(keras_layer) == ProposalAssignment and \
type(layer) == BoxTargetEncoder:
kl_output = kl_output[box_target_list[box_target_idx]]
box_target_idx += 1
if type(keras_layer) == ForegroundSelectorForMask and \
type(layer) == MultilevelCropResize:
kl_output = kl_output[2]
if type(keras_layer) == ForegroundSelectorForMask and \
type(layer) == MaskTargetsLayer:
kl_output = kl_output[mask_target_list[mask_target_idx]]
mask_target_idx += 1
if type(keras_layer) == ForegroundSelectorForMask and \
type(layer) == MaskPostprocess:
kl_output = kl_output[0]
if type(keras_layer) == GPUDetections and \
type(layer) == MultilevelCropResize:
kl_output = kl_output[1]
if type(keras_layer) == GPUDetections and \
type(layer) == MaskPostprocess:
kl_output = kl_output[2]
prev_outputs.append(kl_output)
assert prev_outputs, "Expected non-input layer to have inputs."
if len(prev_outputs) == 1:
prev_outputs = prev_outputs[0]
if type(new_layer) in [keras.layers.BatchNormalization,
PatchedBatchNormalization]:
if 'training' in config_map[layer.name][0][0][-1]:
outputs.append(
new_layer(
prev_outputs,
training=config_map[layer.name][0][0][-1]['training'])
)
else:
outputs.append(new_layer(prev_outputs))
else:
outputs.append(new_layer(prev_outputs))
if len(outputs) == 1:
outputs = outputs[0]
if weights is not None:
new_layer.set_weights(weights)
outbound_nodes = layer._outbound_nodes
if not outbound_nodes:
model_outputs[layer.output.name] = outputs
# Patch for Faster-RCNN RPN outputs.
# It's an output layer, but still has outbound_nodes
if 'rpn_out' in layer.name:
model_outputs[layer.output.name] = outputs
# Option to specify intermediate output layers that
# have `outbound_nodes`.
if layer.name in output_layers_with_outbound_nodes:
model_outputs[layer.output.name] = outputs
layer_keys = [
'permute', 'post_hoc', 'p6', 'proposal_assignment',
'foreground_selector_for_mask', 'gpu_detections']
if any(lk in layer.name for lk in layer_keys):
if isinstance(layer.output, (tuple, list)):
for i, out_i in enumerate(layer.output):
model_outputs[out_i.name] = outputs[i]
else:
model_outputs[layer.output.name] = outputs
layers_to_explore.extend([node.outbound_layer for node in outbound_nodes])
# Mark current layer as visited and assign output nodes to the layer.
self._explored_layers[layer.name].visited = True
self._explored_layers[layer.name].keras_layer = new_layer
else:
continue
# Create new keras model object from pruned specifications.
model_outputs = [model_outputs[l.name] for l in model.outputs if l.name in model_outputs]
new_model = keras.models.Model(
inputs=model.inputs, outputs=model_outputs, name=model.name
)
return new_model
def prune(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=None,
layer_config_overrides=None,
equalization_criterion="union",
output_layers_with_outbound_nodes=None,
):
"""Prune a model.
The model to prune must be a Keras model, consisting of the following layer types:
- 2D convolutions, transpose convolutions,
- fully-connected,
- batch normalization.
The following non-parametric layer types are also supported:
- non-linear activations (sigmoid, ReLU, TanH, ...),
- flatten,
- concatenation,
- dropout,
- element-wise operations (add, subtract, ...),
- and more.
For example, the nv-Helnet family of models, the VGG-xx family of models, the ResNet-xx family
of models, AlexNet, LeNet or GoogleNet can be pruned using this API.
The inbound layers to element-wise operations should not be pruned.
This function implements the 'min_weight' filtering method described in:
[Molchanov et al.] Pruning Convolutional Neural Networks for Resource Efficient Inference,
arXiv:1611.06440.
For convolutional layers, only the norm of the kernels is considered (the norm of biases
is ignored).
Args:
model (Model): the Keras model to prune.
method (str): only 'min_weight' is supported.
normalizer (str): 'max' to normalize by dividing each norm by the maximum norm within
a layer; 'L2' to normalize by dividing by the L2 norm of the vector comprising all
kernel norms.
criterion (str): only 'L2' is supported.
granularity (int): granularity of the number of filters to remove at a time.
min_num_filters (int): minimum number of filters to retain in each layer.
threshold (float): threshold to compare normalized norm against.
excluded_layers (list): list of names of layers that should not be pruned. Typical usage
is for output layers of conv nets where the number of output channels must match
a number of classes.
layer_config_overrides (dict): A dictionary of key-value pairs used for overriding layer
configuration. Use cases include changing regularizers after pruning.
equalization_criterion (str): Criteria to equalize the stats of inputs to an element
wise op layer. Options are [arithmetic_mean, geometric_mean, union, intersection].
output_layers_with_outbound_nodes (list): Option to specify intermediate output layers
that have `outbound_nodes`.
Returns:
model (Model): the pruned model.
"""
if excluded_layers is None:
excluded_layers = []
if method != "min_weight":
# We don't know how to support other pruning methods.
raise NotImplementedError("Unsupported pruning method: %s" % method)
pruner = PruneMinWeight(
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
equalization_criterion=equalization_criterion,
excluded_layers=excluded_layers
)
return pruner.prune(model, layer_config_overrides, output_layers_with_outbound_nodes)
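# Editor's note: the block below is an illustrative usage sketch for the `prune` API above,
# not part of the original module. The threshold, granularity and excluded layer names are
# assumptions chosen purely for demonstration.
def _prune_usage_sketch(model):  # pragma: no cover
    """Hedged example: prune `model` with the min_weight method and save the result."""
    pruned_model = prune(
        model,
        method="min_weight",
        normalizer="max",
        criterion="L2",
        granularity=8,
        min_num_filters=16,
        threshold=0.1,
        excluded_layers=["output_cov", "output_bbox"],  # hypothetical output-layer names
    )
    pruned_model.save("model_pruned.h5")
    return pruned_model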
def _get_inbound_layers(layer):
"""Helper function to get the inbound layers of a given layer.
Needed because tf.keras treats the `inbound_layers` / `inbound_nodes` attributes as single
objects when there is only one of them, whereas keras treats them as lists regardless of how
many elements they hold.
Args:
layer (keras.layers.Layer | tf.keras.layers.Layer): Layer for which to get inbound layers.
Returns:
inbound_layers (list): List of inbound layers.
"""
inbound_layers = []
inbound_nodes = layer._inbound_nodes
if not isinstance(inbound_nodes, list):
inbound_nodes = [inbound_nodes]
for n in inbound_nodes:
_inbound_layers = n.inbound_layers
if not isinstance(_inbound_layers, list):
_inbound_layers = [_inbound_layers]
inbound_layers.extend(_inbound_layers)
return inbound_layers
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/pruning/pruning.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus pruning APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.core.pruning import pruning
from nvidia_tao_tf1.core.pruning.pruning import prune
__all__ = ("prune", "pruning")
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/pruning/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus pruning utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nvidia_tao_tf1.core.models.import_keras import keras as keras_fn
from nvidia_tao_tf1.core.models.templates.qdq_layer import QDQ
from nvidia_tao_tf1.core.models.templates.quantized_conv2d import QuantizedConv2D
from nvidia_tao_tf1.core.models.templates.quantized_dense import QuantizedDense
from nvidia_tao_tf1.core.models.templates.quantized_depthwiseconv2d import QuantizedDepthwiseConv2D
from nvidia_tao_tf1.cv.efficientdet.layers.image_resize_layer import ImageResizeLayer
from nvidia_tao_tf1.cv.efficientdet.layers.weighted_fusion_layer import WeightedFusion
from nvidia_tao_tf1.cv.efficientdet.utils.utils import PatchedBatchNormalization
from nvidia_tao_tf1.cv.faster_rcnn.layers.custom_layers import CropAndResize, ProposalTarget
from nvidia_tao_tf1.cv.mask_rcnn.layers.box_input_layer import BoxInput
from nvidia_tao_tf1.cv.mask_rcnn.layers.class_input_layer import ClassInput
from nvidia_tao_tf1.cv.mask_rcnn.layers.image_input_layer import ImageInput
from nvidia_tao_tf1.cv.mask_rcnn.layers.info_input_layer import InfoInput
from nvidia_tao_tf1.cv.mask_rcnn.layers.mask_input_layer import MaskInput
keras = keras_fn()
TRAVERSABLE_LAYERS = [
keras.layers.BatchNormalization,
keras.layers.Activation,
keras.layers.Dropout,
keras.layers.Softmax,
keras.layers.MaxPooling2D,
keras.layers.AveragePooling2D,
keras.layers.Add,
keras.layers.Subtract,
keras.layers.Multiply,
keras.layers.Average,
keras.layers.Maximum,
keras.layers.DepthwiseConv2D,
QuantizedDepthwiseConv2D,
keras.layers.ZeroPadding2D,
keras.layers.ReLU, CropAndResize,
keras.layers.TimeDistributed,
keras.layers.LeakyReLU,
keras.layers.UpSampling2D,
keras.layers.Conv2D,
QDQ, ImageResizeLayer, WeightedFusion,
keras.layers.SeparableConv2D,
PatchedBatchNormalization
]
def normalize_stat(stat, normalizer):
"""Normalize pruning statistics.
Args:
stat (Array): array of statistics to normalize
normalizer (str): either 'L2' (normalize by dividing by L2 norm) or
'max' (normalize by dividing by max)
Returns:
The normalized array.
"""
if normalizer == "L2":
stat = stat / np.sqrt(np.sum(stat ** 2)) * len(stat)
elif normalizer == "max":
stat = stat / np.max(stat)
elif normalizer != "off":
raise NotImplementedError("Invalid pruning normalizer: %s" % normalizer)
return stat
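# Editor's note: a small illustrative self-check for `normalize_stat`, added as a sketch and
# not part of the original module. The statistics values are made up.
def _normalize_stat_example():  # pragma: no cover
    stat = np.array([3.0, 4.0])
    assert np.allclose(normalize_stat(stat, "max"), [0.75, 1.0])  # divide by max (4.0)
    assert np.allclose(normalize_stat(stat, "L2"), [1.2, 1.6])    # stat / ||stat||_2 * len(stat)
    assert np.allclose(normalize_stat(stat, "off"), stat)         # returned unchanged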
def get_L2_norm(kernels, layer):
"""Get the L2 norms of the filters for pruning.
Args:
kernels (Array): array of kernels to compute norms of, where the last
dimension indexes individual kernels.
layer (keras Layer): the layer whose filters we are computing statistics for.
DepthwiseConv2D layers receive special treatment.
Returns:
A vector of L2 norms, one for each kernel.
"""
if type(layer) in [keras.layers.DepthwiseConv2D, QuantizedDepthwiseConv2D] or \
(type(layer) == keras.layers.TimeDistributed and
type(layer.layer) in [keras.layers.DepthwiseConv2D, QuantizedDepthwiseConv2D]):
# For DepthwiseConv2D, currently we only support depth_multiplier = 1.
# I.e., axis 3 is always of size 1, kernel shape = (K_h, K_w, C_in, 1).
norm = np.sqrt(np.sum(kernels**2, axis=(0, 1, 3)))
else:
norm = np.sqrt(np.sum(kernels**2, axis=tuple(range(kernels.ndim-1))))
return norm
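# Editor's note: an illustrative sketch of `get_L2_norm` on a plain Conv2D kernel tensor,
# not part of the original module. The kernel shape and the `conv_layer` argument (assumed to
# be any non-depthwise keras Conv2D) are demonstration assumptions.
def _get_l2_norm_example(conv_layer):  # pragma: no cover
    """For a (K_h, K_w, C_in, C_out) kernel of ones, every per-filter norm is sqrt(3*3*16) = 12."""
    kernels = np.ones((3, 3, 16, 32))
    norms = get_L2_norm(kernels, conv_layer)
    assert norms.shape == (32,) and np.allclose(norms, 12.0)
    return norms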
def find_prunable_parent(prunable_parents,
layer,
skip_root=False,
visited=None):
"""Recursive function to find the first prunable parent in the current branch.
Args:
prunable_parents (list): A list of prunable parents accumulated till before the current
layer was explored.
layer (keras layer object): Current layer being explored.
skip_root (bool): Whether or not to skip the root layer (the root of the recursion tree).
This is useful for the depthwise conv case, because the current layer is prunable,
but we want to find its prunable parent rather than returning the layer itself.
visited (dict): Optional cache mapping already-visited layers to their prunable parents.
Returns:
A list of keras layers which are prunable inputs to the given layer.
"""
visited = visited or {}
# exit if you have encountered a prunable parent.
if (type(layer) in [keras.layers.Conv2D,
keras.layers.Dense,
keras.layers.DepthwiseConv2D,
QuantizedConv2D,
QuantizedDepthwiseConv2D,
QuantizedDense]
and len(layer._inbound_nodes) == 1) or \
(type(layer) == keras.layers.TimeDistributed and
type(layer.layer) in [keras.layers.Conv2D,
keras.layers.Dense,
keras.layers.DepthwiseConv2D,
QuantizedConv2D,
QuantizedDepthwiseConv2D,
QuantizedDense]):
if not skip_root:
prunable_parents.extend([layer])
return list(set(prunable_parents))
# If we hit a shape-manipulation layer, raise an exception.
if type(layer) not in TRAVERSABLE_LAYERS:
raise NotImplementedError(
"Pruning is not possible with {} layer " "in the way".format(layer.name)
)
# Recurse across all branches to return prunable parents.
previous_layers = []
inbound_nodes = layer._inbound_nodes
# For some reason, tf.keras does not always put things in a list.
if not isinstance(inbound_nodes, list):
inbound_nodes = [inbound_nodes]
for n in inbound_nodes:
inbound_layers = n.inbound_layers
if not isinstance(inbound_layers, list):
inbound_layers = [inbound_layers]
for l in inbound_layers:
previous_layers.append(l)
for l in previous_layers:
if visited and l in visited:
prunable_parents.extend(visited[l])
else:
# Skip the Input layers if there are multiple parents.
if type(layer) == CropAndResize:
if type(l) != ProposalTarget:
visited[l] = find_prunable_parent(prunable_parents,
l,
False,
visited)
break
else:
continue
elif type(l) not in [keras.layers.InputLayer,
BoxInput, ClassInput,
MaskInput, ImageInput,
InfoInput]:
visited[l] = find_prunable_parent(prunable_parents,
l,
False,
visited)
return list(set(prunable_parents))
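# Editor's note: illustrative sketch of how `find_prunable_parent` is typically used, not part
# of the original module; `relu_layer` is assumed to sit in a Conv2D -> BatchNormalization ->
# ReLU chain, so the traversal walks back through the traversable BN/ReLU layers.
def _find_prunable_parent_example(relu_layer):  # pragma: no cover
    parents = find_prunable_parent([], relu_layer)
    return parents  # expected: a single-element list containing the upstream Conv2D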
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/pruning/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus pruning.
This module includes APIs to prune Keras models.
Pruning is currently supported only for sequential models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import logging.config
import os
import sys
import time
from nvidia_tao_tf1.core.models.import_keras import keras as keras_fn
from nvidia_tao_tf1.core.pruning.pruning import prune
from nvidia_tao_tf1.core.utils.path_utils import expand_path
keras = keras_fn()
"""Root logger for pruning app."""
logger = logging.getLogger(__name__)
def prune_app(
input_filename,
output_filename,
verbose,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers,
equalization_criterion,
output_layers_with_outbound_nodes
):
"""Wrapper around :any:`modulus.pruning.pruning.prune`.
Args:
input_filename (str): path to snapshot of model to prune
output_filename (str): output filename (defaults to $(input).pruned)
verbose (boolean): whether to print debug messages
See :any:`modulus.pruning.pruning.prune` for more information on the other arguments.
"""
start_time = time.time()
# Set up logging.
verbosity = "DEBUG" if verbose else "INFO"
logging.basicConfig(
format="%(asctime)s [%(levelname)s] %(name)s: %(message)s", level=verbosity
)
logger.info("Loading model from %s" % (input_filename))
# Load model from disk.
model = keras.models.load_model(input_filename, compile=False)
logger.info("Original model - param count: %d" % model.count_params())
# Create list of excluded layers from command-line, if provided.
if excluded_layers is not None:
excluded_layers = excluded_layers.split(",")
# Create list of output layers with outbound nodes from command-line, if provided.
if output_layers_with_outbound_nodes is not None:
output_layers_with_outbound_nodes = output_layers_with_outbound_nodes.split(",")
# Prune model given specified parameters.
new_model = prune(
model,
method,
normalizer,
criterion,
granularity,
min_num_filters,
threshold,
excluded_layers=excluded_layers,
equalization_criterion=equalization_criterion,
output_layers_with_outbound_nodes=output_layers_with_outbound_nodes,
)
logger.info("New model - param count: %d" % new_model.count_params())
if output_filename is None:
output_filename = input_filename + ".pruned"
logger.info("Saving pruned model into %s" % (output_filename))
# Save pruned model to disk.
# Create the output directory if needed (skip when saving to the current directory).
dirname = os.path.dirname(output_filename)
if dirname and not os.path.exists(expand_path(dirname)):
os.makedirs(expand_path(dirname))
new_model.save(output_filename)
logger.debug("Done after %s seconds" % (time.time() - start_time,))
def main(args=None):
"""Pruning application.
If MagLev was installed through ``pip`` then this application can be
run from a shell. For example::
$ maglev-prune model.h5 --threshold 0.1
See command-line help for more information.
Args:
args (list): Arguments to parse.
"""
# Reduce TensorFlow verbosity
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
parser = argparse.ArgumentParser(description="Prune a string of conv/fc nodes")
# Positional arguments.
parser.add_argument("input_filename", help="Input file (.h5 Keras snapshot)")
# Optional arguments.
parser.add_argument(
"-o",
"--output",
type=str,
default=None,
help="Output file (defaults to $(input_filename).pruned)",
)
parser.add_argument(
"--method",
type=str,
default="min_weight",
help="Pruning method (currently only 'min_weight' is supported)",
)
parser.add_argument(
"-n",
"--normalizer",
type=str,
default="max",
help="Normalizer type (off, L2, max)",
)
parser.add_argument(
"-c", "--criterion", type=str, default="L2", help="Criterion (L2, activations)"
)
parser.add_argument(
"-e",
"--excluded_layers",
type=str,
default=None,
help="Comma separated list of layers to be excluded from pruning.",
)
parser.add_argument(
"--output_layers_with_outbound_nodes",
type=str,
default=None,
help="Comma separated list of output layers that have outbound nodes.",
)
parser.add_argument(
"--equalization_criterion",
type=str,
help="Equalization criterion to be used for inputs to an element-wise op.",
choices=["union", "intersection", "arithmetic_mean", "geometric_mean"],
)
parser.add_argument("-g", "--granularity", type=int, default=8, help="Granularity")
parser.add_argument(
"-m", "--min_num_filters", type=int, default=16, help="Min number of filters"
)
parser.add_argument(
"-t", "--threshold", type=float, default=0.01, help="Pruning threshold"
)
parser.add_argument("-v", "--verbose", action="store_true", help="Verbose messages")
if not args:
args = sys.argv[1:]
args = vars(parser.parse_args(args))
prune_app(
args["input_filename"],
args["output"],
args["verbose"],
args["method"],
args["normalizer"],
args["criterion"],
args["granularity"],
args["min_num_filters"],
args["threshold"],
args["excluded_layers"],
args["equalization_criterion"],
args["output_layers_with_outbound_nodes"],
)
if __name__ == "__main__":
main()
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/pruning/app.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Core hooks.
Hooks, like callbacks, are called at specific intervals. They typically inherit from a base object
and should override its methods.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
import glob
import itertools
import logging
import os
import re
import signal
import keras
import tensorflow as tf
from tensorflow.python.keras import backend as backend_python
logger = logging.getLogger(__name__)
class StartAndStepOpHook(tf.estimator.SessionRunHook):
"""Hook to extend calls to ``MonitoredSession.run()``.
Given a list of ops, this hook will execute this op once when the training session is created,
and after that execute it with every following training step.
Using this hook in combination with a buffer, you can preallocate one sample at the start,
and asynchronously add one sample to the buffer with each step thereafter.
"""
def __init__(self, ops):
"""Initialize the hook.
Args:
ops: A list of tensors, to be executed once when the session is created, and
every time a run is performed after that.
"""
self._ops = ops
def before_run(self, run_context):
"""Called before each call to run().
Run the ops each run.
Args:
run_context: A `SessionRunContext` object.
Returns:
A `SessionRunArgs` object.
"""
return tf.estimator.SessionRunArgs(self._ops)
def after_create_session(self, session, coord):
"""Called when new TensorFlow session is created.
Runs the ops once.
Args:
session: A TensorFlow Session that has been created.
coord: A Coordinator object which keeps track of all threads.
"""
session.run(self._ops)
class TFCheckpointListener(tf.estimator.CheckpointSaverListener):
"""Listener hook to call upon any methods when a tensorflow file gets saved.
This hook is basically a shell to incorporate a callback.
For callbacks related to Keras HDF5 files, use KerasCheckpointListener.
"""
def __init__(self, checkpoint_dir, after_save_callable, cleanup=None):
"""Initializes a `TFCheckpointListener`.
Args:
checkpoint_dir (str): The checkpoint directory where the checkpoint files will be in.
after_save_callable ((ckpt_base (str), ckpt_files (list(str)), step (int)) -> None):
The callable to take in all the checkpoint files. `ckpt_base` is the file with base
`.ckpt` extension while `ckpt_files` are all the files generated by tensorflow.
cleanup ((step (int)) -> None): Optional. Method that is called at the end of the
Tensorflow session. Default None.
"""
self._checkpoint_dir = checkpoint_dir
self._after_save_callable = after_save_callable
self._cleanup = cleanup
def after_save(self, session, global_step_value):
"""Called right after saving the tensorflow checkpoint. This will call the callable.
Args:
session: the current instance of type `tf.Session`.
global_step_value (int): the current global step.
"""
all_files = os.listdir(self._checkpoint_dir)
extension = ".ckpt-{}".format(global_step_value)
ckpt_files = list(filter(lambda f: extension in f, all_files))
ckpt_files_with_path = list(
map(lambda s: os.path.join(self._checkpoint_dir, s), ckpt_files)
)
ckpt_base = os.path.splitext(ckpt_files_with_path[0])[0]
self._after_save_callable(ckpt_base, ckpt_files_with_path, global_step_value)
def end(self, session, global_step_value):
"""Run at the end of the session."""
if self._cleanup:
self._cleanup(global_step_value)
class KerasCheckpointListener(tf.estimator.CheckpointSaverListener):
"""Listener hook to save keras model snapshots.
Usage of this checkpoint listener toggles Keras variable initialization off during the
lifetime of the session.
Note that this object should be used in conjunction with the `KerasModelHook`.
"""
def __init__(
self,
model,
checkpoint_dir,
after_save_callable=None,
max_to_keep=None,
prefix="model",
):
"""Initialize a `KerasCheckpointListener`.
Args:
model (keras.models.Model): an instance of a ``keras.models.Model`` object.
checkpoint_dir (str): base directory for the checkpoint files.
after_save_callable ((filename (str), step (int)) -> None): Optional. A function to
call after the files are saved for any necessary post processing with the files.
Default None.
max_to_keep (int): Optional. Maximum number of Keras checkpoints to keep. Keeps the
latest `max_to_keep` checkpoints. Default None.
prefix (str): Prefix to add to the checkpoints' filenames. Default "model".
"""
if os.environ.get("TF_KERAS"):
self.K = tf.keras
self._previous_MANUAL_VAR_INIT = backend_python._MANUAL_VAR_INIT
else:
self.K = keras
self._previous_MANUAL_VAR_INIT = (
keras.backend.tensorflow_backend._MANUAL_VAR_INIT
)
self._model = model
self._checkpoint_dir = checkpoint_dir
self._after_save_callable = after_save_callable
self._max_to_keep = max_to_keep
self._latest_checkpoints = deque(maxlen=self._max_to_keep)
self._prefix = prefix
def begin(self):
"""Called after starting the session."""
pattern = r"^%s.keras-(\d+)\.hdf5$" % re.escape(
os.path.join(self._checkpoint_dir, self._prefix)
)
compiled = re.compile(pattern)
def extract_model_number(filename):
s = compiled.findall(filename)
return int(s[0]) if s else -1, filename
filenames = glob.glob(os.path.join(self._checkpoint_dir, "*.hdf5"))
# Weed out filenames that do not match the pattern.
filenames = [
filename for filename in filenames if compiled.match(filename) is not None
]
sorted_filenames = sorted(filenames, key=extract_model_number)
self._latest_checkpoints.extend(sorted_filenames)
def after_save(self, session, global_step_value):
"""Called right after saving the tensorflow checkpoint.
Args:
session: the current instance of type `tf.Session`.
global_step_value (int): the current global step.
"""
self.K.backend.set_session(session)
self.K.backend.manual_variable_initialization(True)
filename = os.path.join(
self._checkpoint_dir, "%s.keras-%s.hdf5" % (self._prefix, global_step_value)
)
self._model.save(filename, overwrite=True)
if self._after_save_callable:
self._after_save_callable(filename, global_step_value)
self._cleanup(filename)
def _cleanup(self, filename):
# Clean up old checkpoints if need be.
if (
self._max_to_keep is not None
and len(self._latest_checkpoints) >= self._max_to_keep
):
# First, get the name of the latest checkpoint to remove.
old_checkpoint = self._latest_checkpoints.popleft()
if os.path.exists(old_checkpoint):
os.remove(old_checkpoint)
# Now, add the new checkpoint.
if filename not in self._latest_checkpoints:
self._latest_checkpoints.append(filename)
else:
logger.info("Overwritten Keras model: {}.".format(filename))
def end(self, session, global_step_value):
"""Run at the end of the session, reset the old variale initialization setting."""
self.K.backend.manual_variable_initialization(self._previous_MANUAL_VAR_INIT)
class KerasModelHook(tf.estimator.SessionRunHook):
"""Hook to extend calls to ``MonitoredSession.run()``.
Extracts relevant information and ops from a Keras model. In particular, the update ops
(set, for example, when using batch norm) are extracted and run with every session step.
Note that there are two major intended use cases for this hook:
* During training, layers that have non-trainable, but updatable weights (e.g. batch norm)
need this hook to make sure these updates are run.
* When loading a keras hdf5 from disk (e.g. as pretrained weights for finetuning, or
simply for inference), the weights' values in the hdf5 need to be copied over into
the TensorFlow graph, otherwise they would be reinitialized from scratch by running
each tf.Variable's `initializer`, which is typically random or null, and produce
garbage results.
"""
def __init__(self, models, ignore_initialized_values=False):
"""Initialize the hook.
Args:
models (list): A list of `keras.Model` used to extract relevant information from.
A single `keras.Model` is also supported for backward compatibility.
Update ops are run for all of the `keras.Model` with each session step.
ignore_initialized_values (bool): If True, Keras variable values set in Keras
background session are not copied to Tensorflow session at session creation. Set
this True when restoring session from a Tensorflow checkpoint.
"""
self.models = models if isinstance(models, list) else [models]
self._updates = [update for model in self.models for update in model.updates]
if os.environ.get("TF_KERAS"):
self.K = tf.keras
self._previous_MANUAL_VAR_INIT = backend_python._MANUAL_VAR_INIT
else:
self.K = keras
self._previous_MANUAL_VAR_INIT = (
keras.backend.tensorflow_backend._MANUAL_VAR_INIT
)
self._ignore_initialized_values = ignore_initialized_values
self._variables_initialized = []
def begin(self):
"""Called once before using the session.
Assignment operations are added to each tensorflow weight variable. This needs to be done
before the graph is finalized. The actual running of assignment operations is done later
in `after_create_session()`.
"""
self.K.backend.manual_variable_initialization(True)
for model in self.models:
for x in model.weights:
tf_dtype = tf.as_dtype(x.dtype.name.split("_")[0])
assign_placeholder = tf.compat.v1.placeholder(tf_dtype)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
self._variables_initialized.append(
tf.compat.v1.is_variable_initialized(x)
)
def end(self, session):
"""Run at the end of the session, clears the closed keras session so it can be reused."""
self.K.backend.clear_session()
# Reset the old variable initialization setting
self.K.backend.manual_variable_initialization(self._previous_MANUAL_VAR_INIT)
def after_create_session(self, session, coord):
"""Called when new TensorFlow session is created.
Use the assignment operations created in `begin()` and load in all initialized weights from
the Keras session over to our Tensorflow session.
Args:
session: A TensorFlow Session that has been created.
coord: A Coordinator object which keeps track of all threads.
"""
if not self._ignore_initialized_values:
vars_all = list(itertools.chain(*[m.weights for m in self.models]))
vars_is_initialized = self.K.backend.get_session().run(
self._variables_initialized
)
vars_to_initialize = list(
itertools.compress(data=vars_all, selectors=vars_is_initialized)
)
assign_weights = self.K.backend.get_session().run(vars_to_initialize)
assign_ops = [v._assign_op for v in vars_to_initialize]
assign_phs = [v._assign_placeholder for v in vars_to_initialize]
session.run(assign_ops, feed_dict=dict(zip(assign_phs, assign_weights)))
else:
# Otherwise TF spits out a bunch of crap.
# Note that this looks like a potential can of worms. No mention of this
# `mark_used` method can be found in TensorFlow's documentation.
try:
for init_flag in self._variables_initialized:
init_flag.mark_used()
except Exception:
pass
self.K.backend.set_session(session)
def before_run(self, run_context):
"""Called before each call to run().
Run the ops each run.
Args:
run_context: A `SessionRunContext` object.
Returns:
A `SessionRunArgs` object.
"""
return tf.estimator.SessionRunArgs(self._updates)
class SignalHandlerHook(tf.estimator.SessionRunHook):
"""Hook to allow clean shutdown (incl. checkpointing) when receiving SIGUSR1.
Catch SIGUSR1 during training. When the step is finished, request session to stop, which causes
the session to call end() in all hooks. This will trigger checkpointing, if a hook is
configured for creating checkpoints.
"""
def __init__(self):
"""Initialize the hook."""
self._signal_caught = False
self._previous_usr1_handler = None
self._main_process_pid = -1
def begin(self):
"""Register the signal handler."""
# Remember the previously configured handler.
self._previous_usr1_handler = signal.getsignal(signal.SIGUSR1)
# begin() is always called by the main process, get its PID.
self._main_process_pid = os.getpid()
# Assign a new handler.
signal.signal(signal.SIGUSR1, self._handle_signal)
def after_run(self, run_context, run_values):
"""If self._signal_caught is set, end the session."""
if self._signal_caught:
logger.info("SignalHandlerHook requests session to stop.")
run_context.request_stop()
def end(self, session):
"""Assign the previously configured signal handler after session ends."""
signal.signal(signal.SIGUSR1, self._previous_usr1_handler)
def _handle_signal(self, signum, frame):
"""Flip self._signal_caught flag, when a signal is received."""
this_process_pid = os.getpid()
# Flip self._signal_caught flag only in the main process.
# In any other process, ignore SIGUSR1.
if this_process_pid == self._main_process_pid:
logger.info("SignalHandlerHook caught signal {}.".format(signum))
self._signal_caught = True
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/hooks/hooks.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.core.hooks import utils
from nvidia_tao_tf1.core.hooks.hooks import (
KerasCheckpointListener,
KerasModelHook,
SignalHandlerHook,
StartAndStepOpHook,
TFCheckpointListener,
)
from nvidia_tao_tf1.core.hooks.sample_counter_hook import SampleCounterHook
from nvidia_tao_tf1.core.hooks.task_progress_monitor_hook import TaskProgressMonitorHook
__all__ = (
"KerasCheckpointListener",
"KerasModelHook",
"SampleCounterHook",
"SignalHandlerHook",
"StartAndStepOpHook",
"TaskProgressMonitorHook",
"TFCheckpointListener",
"utils",
)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/hooks/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus hook utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import nvidia_tao_tf1.core.distribution
from nvidia_tao_tf1.core.hooks.validation_hook import ValidationHook
import nvidia_tao_tf1.core.hooks
INFREQUENT_SUMMARY_KEY = "infrequent_summary"
def get_softstart_annealing_learning_rate(
progress, soft_start, annealing, base_lr, min_lr
):
"""Return learning rate at current epoch progress.
Args:
progress (float): The ratio current iteration/total number of iterations; a number between
0 and 1.
soft_start (float): The progress at which learning rate achieves base_lr when starting from
min_lr.
annealing (float): The progress at which learning rate starts to drop from base_lr to
min_lr.
base_lr (float): Maximum learning rate.
min_lr (float): Minimum learning rate.
Returns:
lr: A tensor (scalar float) indicating the learning rate.
"""
valid_progress = tf.logical_and(
tf.greater_equal(progress, 0.0), tf.less_equal(progress, 1.0)
)
assert_op = tf.Assert(valid_progress, [progress])
with tf.control_dependencies([assert_op]):
if soft_start > 0.0:
t_softstart = progress / soft_start
else: # learning rate starts from base_lr.
t_softstart = tf.constant(1.0, dtype=tf.float32)
if annealing < 1.0:
t_annealing = (1.0 - progress) / (1.0 - annealing)
else: # learning rate is never annealed.
t_annealing = tf.constant(1.0, dtype=tf.float32)
# Select appropriate schedule.
t = tf.compat.v1.where(
progress < soft_start, t_softstart, tf.constant(1.0, dtype=tf.float32)
)
t = tf.compat.v1.where(progress > annealing, t_annealing, t)
# Adapt learning rate linearly on log scale between min_lr and base_lr.
lr = tf.exp(
tf.math.log(min_lr) + t * (tf.math.log(base_lr) - tf.math.log(min_lr))
)
return lr
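# Editor's note: a hedged, plain-Python illustration of the schedule above, not part of the
# original module. It mirrors the TF logic in closed form: lr = min_lr * (base_lr/min_lr)**t,
# where t ramps from 0 to 1 during soft start and back down to 0 during annealing. All
# parameter values below are illustrative assumptions.
def _softstart_annealing_example(progress, soft_start=0.1, annealing=0.7,
                                 base_lr=1e-2, min_lr=1e-4):  # pragma: no cover
    if progress < soft_start:
        t = progress / soft_start if soft_start > 0.0 else 1.0
    elif progress > annealing:
        t = (1.0 - progress) / (1.0 - annealing) if annealing < 1.0 else 1.0
    else:
        t = 1.0
    # e.g. progress=0.05 -> t=0.5 -> lr=1e-3; progress=0.4 -> lr=1e-2; progress=0.85 -> lr=1e-3
    return min_lr * (base_lr / min_lr) ** t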
def get_common_training_hooks(
log_tensors,
log_every_n_secs,
checkpoint_n_steps,
model,
last_step,
checkpoint_dir,
scaffold,
summary_every_n_steps,
infrequent_summary_every_n_steps,
validation_every_n_steps=None,
evaluator=None,
after_save_callable=None,
listeners=None,
max_ckpt_to_keep=5,
):
"""Set up commonly used hooks for tensorflow training sessions.
Args:
log_tensors (dict): A dictionary of tensors to print to stdout. The keys of the dict should
be strings, and the values should be tensors.
log_every_n_secs (int): Log the ``log_tensors`` argument every ``n`` seconds.
checkpoint_n_steps (int, list): Perform a tensorflow and Keras checkpoint every ``n`` steps.
model: An instance of ``keras.models.Model`` to be saved with each snapshot.
last_step (int): The step after which the associated session's `should_stop` method should
evaluate to ``True``.
checkpoint_dir: The directory used for saving the graph, summaries and checkpoints. In case
it's ``None``, no checkpoints and model files will be saved and no tensorboard summaries
will be produced.
scaffold: An instance of the same ``tf.train.Scaffold`` that will be passed to the
training session.
summary_every_n_steps: Save summaries every ``n`` steps. The steps per second will also
be printed to console.
infrequent_summary_every_n_steps: Save infrequent summaries every ``n`` steps. This is for
summaries that should be rarely evaluated, like images or histograms. This relates
to summaries marked with the ``INFREQUENT_SUMMARY_KEY`` key.
validation_every_n_steps (int): Validate every ``n`` steps. Should be specified if evaluator
object is not None.
evaluator: An instance of Evaluator class that performs evaluation (default=None).
after_save_callable ((filename (str), step (int)) -> None): Optional. A function to
call after the files are saved for any necessary post processing with the files.
Default None.
listeners: A list of CheckpointSaverListener objects (or child classes). Can be None.
If provided, will leave out the default listeners provided otherwise.
max_ckpt_to_keep: Maximum number of model checkpoints to keep.
Returns:
A list of hooks, all inheriting from ``tf.SessionRunHook``.
"""
hooks = [
tf.estimator.LoggingTensorHook(
tensors=log_tensors, every_n_secs=log_every_n_secs
),
tf.estimator.StopAtStepHook(last_step=last_step),
# Setup hook that cleanly stops the session if SIGUSR1 is received.
nvidia_tao_tf1.core.hooks.SignalHandlerHook(),
]
if model is not None:
hooks.append(nvidia_tao_tf1.core.hooks.KerasModelHook(model))
# If we are running in a distributed setting, we need to broadcast the initial variables.
if nvidia_tao_tf1.core.distribution.get_distributor().is_distributed():
hooks.append(
nvidia_tao_tf1.core.distribution.get_distributor().broadcast_global_variables_hook()
)
# Save checkpoints only on master to prevent other workers from corrupting them.
if nvidia_tao_tf1.core.distribution.get_distributor().is_master():
step_counter_hook = tf.estimator.StepCounterHook(
every_n_steps=summary_every_n_steps, output_dir=checkpoint_dir
)
hooks.append(step_counter_hook)
if checkpoint_dir is not None:
if listeners is None:
listeners = []
if model is not None:
keras_checkpoint_listener = nvidia_tao_tf1.core.hooks.KerasCheckpointListener(
model=model,
checkpoint_dir=checkpoint_dir,
after_save_callable=after_save_callable,
max_to_keep=max_ckpt_to_keep,
)
listeners.insert(0, keras_checkpoint_listener)
if not isinstance(checkpoint_n_steps, list):
checkpoint_n_steps = [checkpoint_n_steps]
for n_steps in checkpoint_n_steps:
checkpoint_hook = tf.estimator.CheckpointSaverHook(
checkpoint_dir=checkpoint_dir,
save_steps=n_steps,
listeners=listeners,
scaffold=scaffold,
)
hooks.append(checkpoint_hook)
# Set up the frequent and summary savers.
if summary_every_n_steps > 0:
summary_saver = tf.estimator.SummarySaverHook(
save_steps=summary_every_n_steps,
scaffold=scaffold,
output_dir=checkpoint_dir,
)
hooks.append(summary_saver)
if infrequent_summary_every_n_steps > 0:
infrequent_summary_op = tf.compat.v1.summary.merge_all(
key=INFREQUENT_SUMMARY_KEY
)
if infrequent_summary_op is None:
raise ValueError("Infrequent summaries requested, but None found.")
infrequent_summary_saver = tf.estimator.SummarySaverHook(
save_steps=infrequent_summary_every_n_steps,
output_dir=checkpoint_dir,
summary_op=infrequent_summary_op,
)
hooks.append(infrequent_summary_saver)
# Set up evaluator hook after checkpoint saver hook, so that evaluation is performed
# on the latest saved model.
if evaluator is not None:
if validation_every_n_steps is not None:
hooks.append(ValidationHook(evaluator, validation_every_n_steps))
else:
raise ValueError(
"Specify ``validation_every_n_steps`` if Evaluator is not None"
)
return hooks
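# Editor's note: illustrative sketch of wiring `get_common_training_hooks` into a
# MonitoredTrainingSession, not part of the original module. The step counts and the
# "/results" directory are demonstration assumptions.
def _common_hooks_sketch(loss, keras_model, train_op, scaffold, total_steps):  # pragma: no cover
    hooks = get_common_training_hooks(
        log_tensors={"loss": loss},
        log_every_n_secs=30,
        checkpoint_n_steps=1000,
        model=keras_model,
        last_step=total_steps,
        checkpoint_dir="/results",
        scaffold=scaffold,
        summary_every_n_steps=100,
        infrequent_summary_every_n_steps=0,
    )
    with tf.compat.v1.train.MonitoredTrainingSession(hooks=hooks, scaffold=scaffold) as sess:
        while not sess.should_stop():
            sess.run(train_op)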
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/hooks/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hook for job progress monitoring on clusters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import timedelta
import json
import logging
import os
import time
import tensorflow.compat.v1 as tf
logger = logging.getLogger(__name__)
MONITOR_JSON_FILENAME = "monitor.json"
def write_monitor_json(
save_path, loss_value, current_epoch, max_epoch, time_per_epoch, ETA
):
"""Write the monitor.json file for cluster monitoring purposes.
Args:
save_path (str): Path where monitor.json needs to be saved. Basically the
result directory.
loss_value (float): Current value of loss to be recorder in the monitor.
current_epoch (int): Current epoch.
max_epoch (int): Total number of epochs.
time_per_epoch (float): Time per epoch in seconds.
ETA (float): Estimated time remaining until the end of training, in seconds.
Returns:
monitor_data (dict): The monitor data as a dict.
"""
monitor_data = {
"loss": loss_value,
"cur_epoch": current_epoch,
"max_epoch": max_epoch,
"time_per_epoch": str(timedelta(seconds=time_per_epoch)),
"ETA": str(timedelta(seconds=ETA)),
}
# Save the json file.
filename = os.path.join(save_path, MONITOR_JSON_FILENAME)
try:
with open(filename, "w") as f:
json.dump(monitor_data, f)
except IOError:
# We let this pass because we do not want the json file writing to crash the whole job.
pass
return monitor_data
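# Editor's note: illustrative call to `write_monitor_json` with made-up values, not part of
# the original module. With time_per_epoch=95s and 76 epochs remaining, the json written to
# <save_path>/monitor.json contains "time_per_epoch": "0:01:35" and "ETA": "2:00:20".
def _write_monitor_json_example(save_path):  # pragma: no cover
    return write_monitor_json(
        save_path=save_path,
        loss_value=0.123,
        current_epoch=4,
        max_epoch=80,
        time_per_epoch=95.0,
        ETA=(80 - 4) * 95.0,
    )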
class TaskProgressMonitorHook(tf.estimator.SessionRunHook):
"""Log loss and epochs for monitoring progress of cluster jobs.
Writes the current training progress (current loss, current epoch and
maximum epoch) to a json file.
"""
def __init__(self, loss, save_path, epochs, steps_per_epoch):
"""Initialization.
Args:
loss: Loss tensor.
save_path (str): Absolute save path.
epochs (int): Number of training epochs.
steps_per_epoch (int): Number of steps per epoch.
"""
# Define the tensors to be fetched at every step.
self._fetches = {"loss": loss, "step": tf.train.get_or_create_global_step()}
self.save_path = save_path
self.epochs = epochs
self.steps_per_epoch = steps_per_epoch
# Initialize variables for epoch time calculation.
self.time_per_epoch = 0
self._step_start_time = None
# Closest estimate of the start time, in case starting from mid-epoch.
self._epoch_start_time = time.time()
def before_run(self, run_context):
"""Request loss and global step from the session.
Args:
run_context: A `SessionRunContext` object.
Returns:
A `SessionRunArgs` object.
"""
# Record start time for each step. Use the value later, if this step started an epoch.
self._step_start_time = time.time()
# Assign the tensors to be fetched.
return tf.train.SessionRunArgs(self._fetches)
def after_run(self, run_context, run_values):
"""Write the progress to json-file after each epoch.
Args:
run_context: A `SessionRunContext` object.
run_values: A `SessionRunValues` object. Contains the loss value
requested by before_run().
"""
# Get the global step value.
step = run_values.results["step"]
if (step + 1) % self.steps_per_epoch == 0:
# Last step of an epoch is completed.
epoch_end_time = time.time()
self.time_per_epoch = epoch_end_time - self._epoch_start_time
if step % self.steps_per_epoch == 0:
# First step of a new epoch is completed. Store the time when step was started.
self._epoch_start_time = self._step_start_time
loss_value = run_values.results["loss"]
current_epoch = int(step // self.steps_per_epoch)
monitor_data = write_monitor_json(
save_path=self.save_path,
loss_value=float(loss_value),
current_epoch=current_epoch,
max_epoch=self.epochs,
time_per_epoch=self.time_per_epoch,
ETA=(self.epochs - current_epoch) * self.time_per_epoch,
)
logger.info(
"Epoch %d/%d: loss: %0.5f Time taken: %s ETA: %s"
% (
monitor_data["cur_epoch"],
monitor_data["max_epoch"],
monitor_data["loss"],
monitor_data["time_per_epoch"],
monitor_data["ETA"],
)
)
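# Hedged usage sketch (assumption, not part of the original module): how the hook is
# typically attached to a TF1-style training loop. The loss tensor, save path and
# epoch counts below are illustrative placeholders.
if __name__ == "__main__":
    example_loss = tf.constant(1.0, name="example_loss")  # stands in for the model loss
    progress_hook = TaskProgressMonitorHook(
        loss=example_loss, save_path="/tmp", epochs=10, steps_per_epoch=100
    )
    # The hook can then be passed to an estimator or a monitored session, e.g.
    # tf.estimator.Estimator(...).train(input_fn=..., hooks=[progress_hook]) or
    # tf.train.MonitoredTrainingSession(hooks=[progress_hook]).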
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/hooks/task_progress_monitor_hook.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import tensorflow as tf
from tensorflow.python.training.session_run_hook import SessionRunArgs
logger = logging.getLogger(__name__)
class ValidationHook(tf.estimator.SessionRunHook):
"""Hook to perform validation during training.
    Given an Evaluator and validation_every_n_steps, this hook performs validation after
every n steps.
"""
def __init__(self, evaluator, validation_every_n_steps):
"""Initialize the hook.
Args:
evaluator (Evaluator or list): Object or list of objects that performs evaluation
and returns metrics.
validation_every_n_steps (int): Perform validation every n steps.
"""
if not isinstance(evaluator, list):
evaluator = [evaluator]
for evaluator_object in evaluator:
evaluate_func = getattr(evaluator_object, "evaluate", None)
if not callable(evaluate_func):
raise ValueError(
"Evaluator {} does not have callable evaluate function!".format(
evaluator_object
)
)
self.n_evaluators = len(evaluator)
self._evaluators = evaluator
self._validation_every_n_steps = validation_every_n_steps
self._global_step_tensor = tf.compat.v1.train.get_or_create_global_step()
def before_run(self, run_context):
"""Called before each call to run().
        Requests the global step tensor on each run.
Args:
run_context: A `SessionRunContext` object.
Returns:
A `SessionRunArgs` object.
"""
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
"""Called after each call to run().
        Runs validation every `validation_every_n_steps` global steps.
Args:
run_context: A `SessionRunContext` object.
run_values: A `SessionRunValues` object.
"""
self.global_step = run_values.results
if self.global_step % self._validation_every_n_steps == 0:
for evaluator in self._evaluators:
self.validation_metrics = evaluator.evaluate(
sess=self._raw_session, global_step=self.global_step
)
# print metrics only if something valid is returned
if self.validation_metrics:
logger.info(
"Validation #{}: {}".format(
self.global_step, self.validation_metrics
)
)
def after_create_session(self, session, coord):
"""Called when new TensorFlow session is created.
Get raw session for this hook.
Args:
session: A TensorFlow Session that has been created.
coord: A Coordinator object which keeps track of all threads.
"""
self._raw_session = session
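# Hedged usage sketch (assumption, not part of the original module): a minimal object
# that satisfies the callable-`evaluate` contract checked in __init__. Names below
# are illustrative; a real evaluator would run validation ops and return metrics.
if __name__ == "__main__":
    class _ExampleEvaluator:
        def evaluate(self, sess, global_step):
            # A real implementation would compute metrics on a validation set here.
            return {"example_metric": 0.0}

    validation_hook = ValidationHook(_ExampleEvaluator(), validation_every_n_steps=1000)
    # The hook can then be passed to e.g.
    # tf.estimator.Estimator(...).train(input_fn=..., hooks=[validation_hook]).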
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/hooks/validation_hook.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hook for calculating the training throughput."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import time
import tensorflow.compat.v1 as tf
logger = logging.getLogger(__name__)
class SampleCounterHook(tf.estimator.SessionRunHook):
"""Hook that logs throughput in a tf.Session."""
def __init__(self, batch_size, every_n_steps=25, name=""):
"""Constructor.
Args:
batch_size (int): Number of samples in a minibatch.
every_n_steps (int): Controls how often the hook actually logs the throughput.
name (str): Name for session. Optional default "".
"""
self._batch_size = batch_size
self._every_n_steps = every_n_steps
self._start_time = None
self._step_counter = -1
self._samples_per_second = 0.0
self._name = name
def before_run(self, run_context):
"""Increment internal step counter and reset the timer if necessary.
Args:
run_context: A `SessionRunContext` object.
Returns:
A `SessionRunArgs` object.
"""
self._step_counter += 1
if self._step_counter % self._every_n_steps == 0:
self._start_time = time.time()
def after_run(self, run_context, run_values):
"""Calculate the throughput, if necessary.
Args:
run_context: A `SessionRunContext` object.
run_values: A `SessionRunValues` object.
"""
if (
self._step_counter + 1
) % self._every_n_steps == 0 or self._step_counter == 0:
time_taken = time.time() - self._start_time
self._samples_per_second = (
(self._batch_size * self._every_n_steps) / time_taken
if self._step_counter
else self._batch_size / time_taken
)
logger.info(
"{} Samples / sec: {:.3f}".format(self._name, self._samples_per_second)
)
def end(self, session):
"""Print samples per sec at the end of the run.
        Args:
            session: A `Session` object.
"""
logger.info(
"{} Samples / sec: {:.3f}".format(self._name, self._samples_per_second)
)
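# Hedged usage sketch (assumption, not part of the original module): log training
# throughput every 25 steps for a batch size of 32. The values are illustrative.
if __name__ == "__main__":
    throughput_hook = SampleCounterHook(batch_size=32, every_n_steps=25, name="train")
    # The hook can then be passed alongside other hooks, e.g.
    # tf.estimator.Estimator(...).train(input_fn=..., hooks=[throughput_hook]).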
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/hooks/sample_counter_hook.py
|
"Module containing custom ops for the tao core library."
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/lib/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus model templates for ResNets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow import keras
from nvidia_tao_tf1.core.templates.utils_tf import add_activation
from nvidia_tao_tf1.core.templates.utils_tf import add_dense_head
from nvidia_tao_tf1.core.templates.utils_tf import arg_scope
from nvidia_tao_tf1.core.templates.utils_tf import CNNBlock
from nvidia_tao_tf1.core.templates.utils_tf import get_batchnorm_axis
def ResNet(nlayers,
input_tensor=None,
use_batch_norm=False,
data_format='channels_first',
add_head=False,
head_activation='softmax',
nclasses=None,
kernel_regularizer=None,
bias_regularizer=None,
activation_type='relu',
activation_kwargs=None,
all_projections=True,
freeze_blocks=None,
freeze_bn=False,
use_pooling=False,
use_bias=False):
"""
Construct a fixed-depth vanilla ResNet, based on the architectures from the original paper [1].
Args:
nlayers (int): the number of layers in the desired ResNet (e.g. 18, 34, ..., 152).
input_tensor (tensor): the input tensor.
use_batch_norm (bool): whether batchnorm should be added after each convolution.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
add_head (bool): whether to add the original [1] classification head. Note that if you
don't include the head, the actual number of layers in the model produced by this
            function is `nlayers - 1`.
head_activation (string): Activation function for classification head.
nclasses (int): the number of classes to be added to the classification head. Can be `None`
if unused.
kernel_regularizer: regularizer to apply to kernels.
bias_regularizer: regularizer to apply to biases.
activation_type (str): Type of activation.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
all_projections (bool): whether to implement cnn subblocks with all shortcuts connections
forced as 1x1 convolutional layers as mentioned in [1] to enable full pruning of
ResNets. If set as False, the template instantiated will be the classic ResNet template
as in [1] with shortcut connections as skip connections when there is no stride change
and 1x1 convolutional layers (projection layers) when there is a stride change.
            Note: The classic template cannot be fully pruned. Only the first N-1 layers
            in each ResNet subblock can be pruned; all other layers, including the conv1
            layer, must be added to the excluded-layers list when pruning.
freeze_bn(bool): Whether or not to freeze the BN layers.
freeze_blocks(list): the list of blocks in the model to be frozen.
        use_pooling (bool): whether to use a MaxPooling2D layer after the first conv layer,
            or a stride of 2 for the first convolutional layer of each subblock.
use_bias(bool): Whether or not to use bias for the conv layers.
Returns:
Model: the output model after applying the ResNet on top of input `x`.
[1] Deep Residual Learning for Image Recognition (https://arxiv.org/abs/1512.03385)
"""
if freeze_blocks is None:
freeze_blocks = []
# Determine proper input shape
if data_format == 'channels_first':
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
if input_tensor is None:
inputs = keras.layers.Input(shape=input_shape)
else:
inputs = input_tensor
freeze0 = 0 in freeze_blocks
freeze1 = 1 in freeze_blocks
freeze2 = 2 in freeze_blocks
freeze3 = 3 in freeze_blocks
freeze4 = 4 in freeze_blocks
activation_kwargs = activation_kwargs or {}
x = keras.layers.Conv2D(64, (7, 7),
strides=(2, 2),
padding='same',
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name='conv1',
trainable=not freeze0,
use_bias=use_bias)(inputs)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(axis=get_batchnorm_axis(data_format),
name='bn_conv1')(x, training=False)
else:
x = keras.layers.BatchNormalization(axis=get_batchnorm_axis(data_format),
name='bn_conv1')(x)
x = add_activation(activation_type, **activation_kwargs)(x)
    first_stride = 2  # Stride for the first convolutional subblock.
    last_stride = 1  # Stride for the last convolutional subblock.
if use_pooling:
x = keras.layers.MaxPooling2D(pool_size=(3, 3),
strides=(2, 2), padding='same',
data_format=data_format)(x)
first_stride = 1
last_stride = 2
# Define a block functor which can create blocks.
with arg_scope(
[CNNBlock],
use_batch_norm=use_batch_norm,
all_projections=all_projections,
use_shortcuts=True,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
freeze_bn=freeze_bn,
activation_kwargs={},
use_bias=use_bias):
if nlayers == 10:
x = CNNBlock(repeat=1, stride=first_stride,
subblocks=[(3, 64), (3, 64)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=1, stride=2,
subblocks=[(3, 128), (3, 128)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=1, stride=2,
subblocks=[(3, 256), (3, 256)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=1, stride=last_stride,
subblocks=[(3, 512), (3, 512)],
index=4, freeze_block=freeze4)(x)
elif nlayers == 18:
x = CNNBlock(repeat=2, stride=first_stride,
subblocks=[(3, 64), (3, 64)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=2, stride=2,
subblocks=[(3, 128), (3, 128)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=2, stride=2,
subblocks=[(3, 256), (3, 256)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=2, stride=last_stride,
subblocks=[(3, 512), (3, 512)],
index=4, freeze_block=freeze4)(x)
elif nlayers == 34:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(3, 64), (3, 64)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=4, stride=2,
subblocks=[(3, 128), (3, 128)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=6, stride=2,
subblocks=[(3, 256), (3, 256)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=3, stride=last_stride,
subblocks=[(3, 512), (3, 512)],
index=4, freeze_block=freeze4)(x)
elif nlayers == 50:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(1, 64), (3, 64), (1, 256)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=4, stride=2,
subblocks=[(1, 128), (3, 128), (1, 512)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=6, stride=2,
subblocks=[(1, 256), (3, 256), (1, 1024)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=3, stride=last_stride,
subblocks=[(1, 512), (3, 512), (1, 2048)],
index=4, freeze_block=freeze4)(x)
elif nlayers == 101:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(1, 64), (3, 64), (1, 256)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=4, stride=2,
subblocks=[(1, 128), (3, 128), (1, 512)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=23, stride=2,
subblocks=[(1, 256), (3, 256), (1, 1024)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=3, stride=last_stride,
subblocks=[(1, 512), (3, 512), (1, 2048)],
index=4, freeze_block=freeze4)(x)
elif nlayers == 152:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(1, 64), (3, 64), (1, 256)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=8, stride=2,
subblocks=[(1, 128), (3, 128), (1, 512)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=36, stride=2,
subblocks=[(1, 256), (3, 256), (1, 1024)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=3, stride=last_stride,
subblocks=[(1, 512), (3, 512), (1, 2048)],
index=4, freeze_block=freeze4)(x)
else:
raise NotImplementedError('A resnet with nlayers=%d is not implemented.' % nlayers)
# Add AveragePooling2D layer if use_pooling is enabled after resnet block.
if use_pooling:
x = keras.layers.AveragePooling2D(pool_size=(7, 7),
data_format=data_format,
padding='same')(x)
# Naming model.
model_name = 'resnet%d' % nlayers
if not use_pooling:
model_name += '_nopool'
if use_batch_norm:
model_name += '_bn'
# Set up keras model object.
model = keras.models.Model(inputs=inputs, outputs=x, name=model_name)
# Add a dense head of nclasses if enabled.
if add_head:
model = add_dense_head(model, inputs, nclasses, head_activation)
return model
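# Hedged usage sketch (assumption, not part of the original module): build a ResNet-18
# feature extractor with batch normalization on a channels_first input. The input
# resolution below is illustrative.
if __name__ == "__main__":
    example_input = keras.layers.Input(shape=(3, 224, 224))
    example_model = ResNet(nlayers=18,
                           input_tensor=example_input,
                           use_batch_norm=True,
                           data_format='channels_first',
                           add_head=False)
    example_model.summary()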
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/resnet_tf.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pull Google Open Images pre-trained models from NVidia GitLab."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from keras.layers import AveragePooling2D, Dense, Flatten, Input
from keras.models import Model
from keras.utils.data_utils import get_file
from nvidia_tao_tf1.core.templates import resnet
from nvidia_tao_tf1.core.templates import vgg
BASE_MODEL_PATH = os.getenv("BASE_MODEL_PATH", "")
def add_dense_head(nclasses, base_model, data_format,
kernel_regularizer=None, bias_regularizer=None):
"""Add dense head to the base model."""
output = base_model.output
output_shape = base_model.output.get_shape().as_list()
    # Use average pooling and flatten in place of global average pooling,
    # then add the dense head.
output = AveragePooling2D(pool_size=(output_shape[-2], output_shape[-1]),
data_format=data_format, padding='valid')(output)
output = Flatten()(output)
output = Dense(nclasses, activation='softmax', name='predictions',
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)(output)
final_model = Model(inputs=base_model.input, outputs=output, name=base_model.name)
return final_model
def ResNet(nlayers=18,
input_shape=(3, 224, 224),
add_head=False,
nclasses=1000,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None):
"""Build ResNet based on Pretrained weights."""
if nlayers == 10:
model_name = 'resnet10.h5'
elif nlayers == 18:
model_name = 'resnet18.h5'
elif nlayers == 34:
model_name = 'resnet34.h5'
elif nlayers == 50:
model_name = 'resnet50.h5'
else:
        raise NotImplementedError('There are no pre-trained models for this number of layers')
assert data_format == 'channels_first', \
"Pretrained weights only available for channels_first models."
url = BASE_MODEL_PATH + model_name
input_image = Input(shape=input_shape)
base_model = resnet.ResNet(nlayers=nlayers,
input_tensor=input_image,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
use_batch_norm=True,
activation_type='relu',
all_projections=True,
use_pooling=False)
model_path = get_file(model_name, url, cache_subdir='models')
base_model.load_weights(model_path)
if not add_head:
return base_model
return add_dense_head(nclasses, base_model, data_format,
kernel_regularizer, bias_regularizer)
def VggNet(nlayers=16,
input_shape=(3, 224, 224),
add_head=False,
nclasses=1000,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None):
"""Build VGG based on pretrained weights."""
if nlayers == 16:
model_name = 'vgg16.h5'
elif nlayers == 19:
model_name = 'vgg19.h5'
else:
        raise NotImplementedError('There are no pre-trained models for this number of layers')
assert data_format == 'channels_first', \
"Pretrained weights only available for channels_first models."
url = BASE_MODEL_PATH + model_name
input_image = Input(shape=input_shape)
base_model = vgg.VggNet(nlayers=nlayers,
inputs=input_image,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=True,
activation_type='relu',
use_pooling=False)
model_path = get_file(model_name, url, cache_subdir='models')
base_model.load_weights(model_path)
if not add_head:
return base_model
return add_dense_head(nclasses, base_model, data_format,
kernel_regularizer, bias_regularizer)
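# Hedged usage sketch (assumption, not part of the original module): fetch a
# pre-trained ResNet-18 backbone and attach a 10-class head. BASE_MODEL_PATH must
# point to a location hosting 'resnet18.h5'; the guard below skips the download
# when it is not configured.
if __name__ == "__main__":
    if BASE_MODEL_PATH:
        example_model = ResNet(nlayers=18,
                               input_shape=(3, 224, 224),
                               add_head=True,
                               nclasses=10)
        example_model.summary()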
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/pretrained_models.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Maglev model templates for GoogLeNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import backend as K
from keras.layers import Activation
from keras.layers import AveragePooling2D
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import Dense, Flatten
from keras.layers import Dropout
# from keras.layers import Lambda
from keras.layers import MaxPooling2D
from keras.models import Model
from nvidia_tao_tf1.core.templates.utils import arg_scope
from nvidia_tao_tf1.core.templates.utils import get_batchnorm_axis
from nvidia_tao_tf1.core.templates.utils import InceptionV1Block
def GoogLeNet(inputs, use_batch_norm=True, data_format=None, add_head=False,
nclasses=1000, kernel_regularizer=None, bias_regularizer=None,
activation_type='relu', freeze_blocks=None, freeze_bn=False,
use_bias=True):
"""
Construct GoogLeNet, based on the architectures from the original paper [1].
Args:
inputs (tensor): the input tensor.
        use_batch_norm (bool): whether to use batch normalization or Local Response
            Normalization (LRN).
            If True, batchnorm is added after each convolution.
            If False, LRN is added as defined in paper [1]. Note that LRN is not
            supported for pruning or model export.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
add_head (bool): whether to add the original [1] classification head. Note that if you
            don't include the head, the final max pooling, average pooling, dropout and
            fully-connected classification layers are omitted.
nclasses (int): the number of classes to be added to the classification head. Can be `None`
if unused.
kernel_regularizer: regularizer to apply to kernels.
bias_regularizer: regularizer to apply to biases.
freeze_blocks(list): the blocks in the model to be frozen.
freeze_bn(bool): whether or not to freeze the BN layer in the model.
use_bias(bool): Whether or not to use bias for conv layers.
Returns:
Model: the output model after applying the GoogLeNet on top of input `x`.
[1] Going Deeper with Convolutions, Szegedy, Christian, et. al., Proceedings
of the IEEE Conference on Computer Vision and Pattern Recognition, 2015.
(https://arxiv.org/abs/1409.4842)
"""
if data_format is None:
data_format = K.image_data_format()
if use_batch_norm:
bn_axis = get_batchnorm_axis(data_format)
if freeze_blocks is None:
freeze_blocks = []
x = Conv2D(64,
(7, 7),
strides=(2, 2),
padding='same',
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name='conv1',
trainable=not(0 in freeze_blocks),
use_bias=use_bias)(inputs)
if use_batch_norm:
if freeze_bn:
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x, training=False)
else:
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = Activation(activation_type)(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='pool1')(x)
# we force use_batch_norm to be True in the model builder
# TODO: <vpraveen> Uncomment when export for LRN is supported.
# if not use_batch_norm:
# x = Lambda(lambda y, arguments={'type': 'googlenet_lrn',
# 'depth_radius': 5,
# 'bias': 1.0,
# 'alpha': 0.0001,
# 'beta': 0.75,
# 'name': 'lrn1'}:
# tf.nn.lrn(y, depth_radius=5,
# bias=1.0,
# alpha=0.0001,
# beta=0.75,
# name='lrn1'))(x)
x = Conv2D(64,
(1, 1),
strides=(1, 1),
padding='same',
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name='conv2_reduce',
trainable=not(0 in freeze_blocks),
use_bias=use_bias)(x)
if use_batch_norm:
if freeze_bn:
x = BatchNormalization(axis=bn_axis, name='bn_conv2_reduce')(x, training=False)
else:
x = BatchNormalization(axis=bn_axis, name='bn_conv2_reduce')(x)
x = Activation(activation_type)(x)
x = Conv2D(192,
(3, 3),
strides=(1, 1),
padding='same',
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name='conv2',
trainable=not(0 in freeze_blocks),
use_bias=use_bias)(x)
if use_batch_norm:
if freeze_bn:
x = BatchNormalization(axis=bn_axis, name='bn_conv2')(x, training=False)
else:
x = BatchNormalization(axis=bn_axis, name='bn_conv2')(x)
x = Activation(activation_type)(x)
# # we force use_batch_norm to be True in the model builder
# TODO: <vpraveen> Uncomment when export for LRN is supported.
# if not use_batch_norm:
# x = Lambda(lambda y, arguments={'type': 'googlenet_lrn',
# 'depth_radius': 5,
# 'bias': 1.0,
# 'alpha': 0.0001,
# 'beta': 0.75,
# 'name': 'lrn2'}:
# tf.nn.lrn(y, depth_radius=5,
# bias=1.0,
# alpha=0.0001,
# beta=0.75,
# name='lrn2'))(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='pool2')(x)
# Define a block functor which can create blocks.
with arg_scope([InceptionV1Block],
use_batch_norm=use_batch_norm,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activation_type=activation_type,
freeze_bn=freeze_bn,
use_bias=use_bias):
# Implementing GoogLeNet architecture.
# Inception_3a
x = InceptionV1Block(subblocks=(64, 96, 128, 16, 32, 32),
index='3a',
trainable=not(1 in freeze_blocks))(x)
# Inception_3b
x = InceptionV1Block(subblocks=(128, 128, 192, 32, 96, 64),
index='3b',
trainable=not(2 in freeze_blocks))(x)
# Max Pooling
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='pool3')(x)
# Inception_4a
x = InceptionV1Block(subblocks=(192, 96, 208, 16, 48, 64),
index='4a',
trainable=not(3 in freeze_blocks))(x)
# Inception_4b
x = InceptionV1Block(subblocks=(160, 112, 224, 24, 64, 64),
index='4b',
trainable=not(4 in freeze_blocks))(x)
# Inception_4c
x = InceptionV1Block(subblocks=(128, 128, 256, 24, 64, 64),
index='4c',
trainable=not(5 in freeze_blocks))(x)
# Inception_4d
x = InceptionV1Block(subblocks=(112, 144, 288, 32, 64, 64),
index='4d',
trainable=not(6 in freeze_blocks))(x)
# Inception_4e
x = InceptionV1Block(subblocks=(256, 160, 320, 32, 128, 128),
index='4e',
trainable=not(7 in freeze_blocks))(x)
if add_head:
# Add Max Pooling layer if there is a classification head to be added
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='pool4')(x)
# Inception_5a
x = InceptionV1Block(subblocks=(256, 160, 320, 32, 128, 128),
index='5a',
trainable=not(8 in freeze_blocks))(x)
# Inception_5b
x = InceptionV1Block(subblocks=(384, 192, 384, 48, 128, 128),
index='5b',
trainable=not(9 in freeze_blocks))(x)
if add_head:
# Classification block.
# Add Average Pooling layer if there is a classification head to be added
x = AveragePooling2D(pool_size=(7, 7), strides=(1, 1), padding='same',
data_format=data_format, name='avg_pool')(x)
x = Flatten(name='flatten')(x)
x = Dropout(0.4, noise_shape=None, seed=None)(x)
x = Dense(nclasses, activation='softmax', name='output_fc')(x)
# Naming model.
model_name = 'Googlenet'
if use_batch_norm:
model_name += '_bn'
# Set up keras model object.
model = Model(inputs=inputs, outputs=x, name=model_name)
return model
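# Hedged usage sketch (assumption, not part of the original module): instantiate
# GoogLeNet with batch normalization and a 1000-class head on a channels_first
# input. The input resolution is illustrative.
if __name__ == "__main__":
    from keras.layers import Input
    example_input = Input(shape=(3, 224, 224))
    example_model = GoogLeNet(example_input,
                              use_batch_norm=True,
                              data_format='channels_first',
                              add_head=True,
                              nclasses=1000)
    example_model.summary()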
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/googlenet.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Maglev model templates for VGG16/19."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import backend as K
from keras.layers import Dense, Dropout, Flatten
from keras.layers import MaxPooling2D
from keras.models import Model
from nvidia_tao_tf1.core.templates.utils import arg_scope
from nvidia_tao_tf1.core.templates.utils import CNNBlock
def VggNet(nlayers, inputs, use_batch_norm=False, data_format=None, add_head=False,
nclasses=None, kernel_regularizer=None, bias_regularizer=None, activation_type='relu',
use_pooling=True, freeze_bn=False, freeze_blocks=None, use_bias=True,
dropout=0.5):
"""
Construct a fixed-depth VggNet, based on the architectures from the original paper [1].
Args:
nlayers (int): the number of layers in the desired VGG (e.g. 16, 19).
inputs (tensor): the input tensor.
use_batch_norm (bool): whether batchnorm should be added after each convolution.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
add_head (bool): whether to add the original [1] classification head. Note that if you
don't include the head, the actual number of layers in the model produced by this
function is 'nlayers-3`, as they don't include the last 3 FC layers.
nclasses (int): the number of classes to be added to the classification head. Can be `None`
if unused.
kernel_regularizer: regularizer to apply to kernels.
bias_regularizer: regularizer to apply to biases.
        use_pooling (bool): whether to use a MaxPooling2D layer after the first conv layer,
            or a stride of 2 for the first convolutional layer of each subblock.
freeze_bn(bool): Whether or not to freeze the BN layers.
freeze_blocks(list): the list of blocks in the model to be frozen.
use_bias(bool): whether or not to use bias for the conv layers.
dropout(float): The drop rate for dropout.
Returns:
Model: the output model after applying the VggNet on top of input `x`.
[1] Very Deep Convolutional Networks for Large-Scale Image Recognition
(https://arxiv.org/abs/1409.1556)
"""
if data_format is None:
data_format = K.image_data_format()
if freeze_blocks is None:
freeze_blocks = []
# Perform strided convolutions if pooling disabled.
first_stride = 1
stride = 2
if use_pooling:
# Disable strided convolutions with pooling enabled.
stride = 1
# Define a block functor which can create blocks.
with arg_scope([CNNBlock],
use_batch_norm=use_batch_norm,
use_shortcuts=False,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activation_type=activation_type,
freeze_bn=freeze_bn,
use_bias=use_bias):
# Implementing VGG 16 architecture.
if nlayers == 16:
# Block - 1.
x = CNNBlock(repeat=2, stride=first_stride, subblocks=[(3, 64)], index=1,
freeze_block=(1 in freeze_blocks))(inputs)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block1_pool')(x)
# Block - 2.
x = CNNBlock(repeat=2, stride=stride, subblocks=[(3, 128)], index=2,
freeze_block=(2 in freeze_blocks))(x)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block2_pool')(x)
# Block - 3.
x = CNNBlock(repeat=3, stride=stride, subblocks=[(3, 256)], index=3,
freeze_block=(3 in freeze_blocks))(x)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block3_pool')(x)
# Block - 4.
x = CNNBlock(repeat=3, stride=stride, subblocks=[(3, 512)], index=4,
freeze_block=(4 in freeze_blocks))(x)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block4_pool')(x)
# Block - 5.
x = CNNBlock(repeat=3, stride=stride, subblocks=[(3, 512)], index=5,
freeze_block=(5 in freeze_blocks))(x)
# Implementing VGG 19 architecture.
elif nlayers == 19:
# Block - 1.
x = CNNBlock(repeat=2, stride=first_stride, subblocks=[(3, 64)], index=1,
freeze_block=(1 in freeze_blocks))(inputs)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block1_pool')(x)
# Block - 2.
x = CNNBlock(repeat=2, stride=stride, subblocks=[(3, 128)], index=2,
freeze_block=(2 in freeze_blocks))(x)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block2_pool')(x)
# Block - 3.
x = CNNBlock(repeat=4, stride=stride, subblocks=[(3, 256)], index=3,
freeze_block=(3 in freeze_blocks))(x)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block3_pool')(x)
# Block - 4.
x = CNNBlock(repeat=4, stride=stride, subblocks=[(3, 512)], index=4,
freeze_block=(4 in freeze_blocks))(x)
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block4_pool')(x)
# Block - 5.
x = CNNBlock(repeat=4, stride=stride, subblocks=[(3, 512)], index=5,
freeze_block=(5 in freeze_blocks))(x)
else:
raise NotImplementedError('A VGG with nlayers=%d is not implemented.' % nlayers)
if add_head:
# Add final Max Pooling layer if there are FC layers. Otherwise return the
# feature extractor trunk with a stride of 16
if use_pooling:
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
data_format=data_format, name='block5_pool')(x)
# Classification block.
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu', name='fc1')(x)
if dropout > 0:
x = Dropout(dropout)(x)
x = Dense(4096, activation='relu', name='fc2')(x)
if dropout > 0:
x = Dropout(dropout)(x)
x = Dense(nclasses, activation='softmax', name='output_fc')(x)
# Naming model.
model_name = 'vgg%d' % nlayers
if not use_pooling:
model_name += '_nopool'
if use_batch_norm:
model_name += '_bn'
# Set up keras model object.
model = Model(inputs=inputs, outputs=x, name=model_name)
return model
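# Hedged usage sketch (assumption, not part of the original module): a VGG16 feature
# extractor without the fully-connected head, using max pooling between blocks. The
# input resolution is illustrative.
if __name__ == "__main__":
    from keras.layers import Input
    example_input = Input(shape=(3, 224, 224))
    example_model = VggNet(16, example_input,
                           use_batch_norm=True,
                           data_format='channels_first',
                           add_head=False,
                           use_pooling=True)
    example_model.summary()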
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/vgg.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
from nvidia_tao_tf1.core.models.import_keras import keras as keras_fn
from nvidia_tao_tf1.core.models.templates.utils import count_layers_by_class_name
from nvidia_tao_tf1.core.templates.resnet import ResNet
keras = keras_fn()
topologies = [
# Test the different nlayers
(18, True, 'channels_first', True, False, True),
# (34, True, 'channels_first', True), # Commented because takes ages
# (50, True, 'channels_first', True), # Commented because takes ages
# (101, True, 'channels_first', True), # Commented because takes ages
# (152, True, 'channels_first', True), # Commented because takes ages
# Without BN
(18, False, 'channels_first', True, True, False),
# Without head
(18, False, 'channels_first', False, False, True),
# channels_last:
# With BN, with head
(18, True, 'channels_last', True, False, False),
# Without BN, with head
(18, False, 'channels_last', True, True, True),
# Without BN, without head
(18, False, 'channels_last', False, False, True),
]
@pytest.mark.parametrize("nlayers, use_batch_norm, data_format, add_head,"
"all_projections, use_pooling", topologies)
def test_resnet(nlayers,
use_batch_norm,
data_format,
add_head,
all_projections,
use_pooling,
nclasses=None):
"""Test Resnets for a variety of topologies and parameters."""
if data_format == 'channels_last':
shape = (256, 256, 3)
elif data_format == 'channels_first':
shape = (3, 256, 256)
inputs = keras.layers.Input(shape=shape)
if add_head:
nclasses = 10
model = ResNet(
nlayers,
inputs,
use_batch_norm=use_batch_norm,
data_format=data_format,
add_head=add_head,
all_projections=all_projections,
use_pooling=use_pooling,
nclasses=nclasses)
# Batchnorm check
n_batchnorms = count_layers_by_class_name(model, ["BatchNormalization"])
if use_batch_norm:
assert n_batchnorms > 0
else:
assert n_batchnorms == 0
# Layer count check
n_layers_counted = count_layers_by_class_name(model, ["Conv2D", "Dense"])
expected_nlayers = nlayers if add_head else nlayers - 1
bridge_factor = 8 if all_projections else 4
# Account for bridging dimensional gap with the extra 4 Conv2D blocks.
expected_nlayers += bridge_factor
assert n_layers_counted == expected_nlayers
# Check model output shape
output_shape = model.outputs[0].get_shape()
    # Set expected shape depending on whether or not pooling is used.
expected_shape = (2, 2) if use_pooling else (16, 16)
if add_head:
        assert output_shape[1:] == (nclasses,)
else:
if data_format == 'channels_last':
assert output_shape[1:3] == expected_shape
elif data_format == 'channels_first':
assert output_shape[2:4] == expected_shape
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/test_resnet.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus model templates for HelNets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
from nvidia_tao_tf1.core.decorators.arg_scope import arg_scope
from nvidia_tao_tf1.core.models.templates.qdq_layer import QDQ
from nvidia_tao_tf1.core.models.templates.quantized_conv2d import QuantizedConv2D
from nvidia_tao_tf1.core.models.templates.utils import add_activation
from nvidia_tao_tf1.core.models.templates.utils import CNNBlock
from nvidia_tao_tf1.core.models.templates.utils import get_batchnorm_axis
from nvidia_tao_tf1.core.models.templates.utils import performance_test_model
if os.environ.get("TF_KERAS"):
from tensorflow import keras
else:
import keras
logger = logging.getLogger(__name__)
def HelNet(
nlayers,
inputs,
pooling=False,
use_batch_norm=False,
use_bias=None,
data_format=None,
kernel_regularizer=None,
bias_regularizer=None,
activation_type="relu",
activation_kwargs=None,
first_filter_kernel_size=7,
dilation_rate=(1, 1),
block_repeats=None,
block_widths=(64, 128, 256, 512),
block_strides=(2, 2, 2, 1),
quantize=False,
bitwidth=8,
weights=None,
):
"""
Construct a HelNet with a set amount of layers.
The HelNet family is very similar, and in its convolutional core identical, to the ResNet family
described in [1]. The main differences are: the absence of shortcuts (skip connections); the use
of a different head; and usually one or two changes in the striding. We've also made the second
layer (max pool) optional, though it was standard for ResNets described in the paper [1].
Args:
nlayers (int): the number of layers desired for this HelNet (e.g. 6, 10, ..., 34).
inputs (tensor): the input tensor `x`.
pooling (bool): whether max-pooling with a stride of 2 should be used as the second layer.
If `False`, this stride will be added to the next convolution instead.
use_batch_norm (bool): whether batchnorm should be added after each convolution.
        data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
        kernel_regularizer: regularizer to apply to kernels.
        bias_regularizer: regularizer to apply to biases.
activation_type (str): Type of activation.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
        weights (str): download and load in pretrained weights, e.g. 'imagenet'.
first_filter_kernel_size (int): kernel size of the first filter in network.
dilation_rate (int or (int, int)): An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
block_repeats (tuple of ints): number of times to repeat each convolutional block.
        block_widths (tuple of ints): width, i.e. number of feature maps, in each convolutional
            block in the model.
quantize (bool): Flag for using QuantizedConv2D and ReLU6.
bitwidth (int): number of quantization bits.
        block_strides (tuple of ints): the convolution stride for the first conv of each block.
Returns:
Model: the output model after applying the HelNet on top of input `x`.
[1] Deep Residual Learning for Image Recognition (https://arxiv.org/abs/1512.03385)
"""
if quantize:
if activation_kwargs is None:
activation_kwargs = {"max_value": 6.0}
else:
activation_kwargs.update({"max_value": 6.0})
activation_type = "relu-n"
if use_bias is None:
use_bias = not (use_batch_norm)
if data_format is None:
data_format = keras.backend.image_data_format()
activation_kwargs = activation_kwargs or {}
if block_repeats is None:
if nlayers == 6:
block_repeats = (1, 1, 1, 1)
elif nlayers == 10:
block_repeats = (1, 1, 1, 1)
elif nlayers == 12:
block_repeats = (1, 1, 2, 1)
elif nlayers == 18:
block_repeats = (2, 2, 2, 2)
elif nlayers == 26:
block_repeats = (3, 4, 3, 2)
elif nlayers == 34:
block_repeats = (3, 4, 6, 3)
else:
raise NotImplementedError(
"A Helnet with nlayers=%d is not implemented." % nlayers
)
# Create HelNet-0 model for training diagnostics.
if nlayers == 0:
return performance_test_model(inputs, data_format, activation_type)
if quantize:
x = QuantizedConv2D(
64,
(first_filter_kernel_size, first_filter_kernel_size),
strides=(2, 2),
padding="same",
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
bitwidth=bitwidth,
name="conv1",
)(inputs)
else:
x = keras.layers.Conv2D(
64,
(first_filter_kernel_size, first_filter_kernel_size),
strides=(2, 2),
padding="same",
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name="conv1",
)(inputs)
if use_batch_norm:
x = keras.layers.BatchNormalization(
axis=get_batchnorm_axis(data_format), name="bn_conv1"
)(x)
x = add_activation(activation_type, **activation_kwargs)(x)
if pooling:
if quantize:
if use_batch_norm:
qdq_name = "conv1_bn_act_qdq"
else:
qdq_name = "conv1_act_qdq"
x = QDQ(name=qdq_name)(x)
x = keras.layers.MaxPooling2D(
pool_size=(2, 2), strides=(2, 2), padding="same", data_format=data_format
)(x)
        first_stride = 1
else:
        first_stride = block_strides[0]
# Define a block functor which can create blocks
with arg_scope(
[CNNBlock],
use_batch_norm=use_batch_norm,
use_shortcuts=False,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activation_type=activation_type,
activation_kwargs=activation_kwargs,
dilation_rate=dilation_rate,
use_bias=use_bias,
quantize=quantize,
bitwidth=bitwidth,
):
if nlayers == 6:
x = CNNBlock(
repeat=block_repeats[0],
                stride=first_stride,
subblocks=[(3, block_widths[0])],
index=1,
)(x)
x = CNNBlock(
repeat=block_repeats[1],
stride=block_strides[1],
subblocks=[(3, block_widths[1])],
index=2,
)(x)
x = CNNBlock(
repeat=block_repeats[2],
stride=block_strides[2],
subblocks=[(3, block_widths[2])],
index=3,
)(x)
x = CNNBlock(
repeat=block_repeats[3],
stride=block_strides[3],
subblocks=[(3, block_widths[3])],
index=4,
)(x)
elif nlayers == 10:
x = CNNBlock(
repeat=block_repeats[0],
                stride=first_stride,
subblocks=[(3, block_widths[0])] * 2,
index=1,
)(x)
x = CNNBlock(
repeat=block_repeats[1],
stride=block_strides[1],
subblocks=[(3, block_widths[1])] * 2,
index=2,
)(x)
x = CNNBlock(
repeat=block_repeats[2],
stride=block_strides[2],
subblocks=[(3, block_widths[2])] * 2,
index=3,
)(x)
x = CNNBlock(
repeat=block_repeats[3],
stride=block_strides[3],
subblocks=[(3, block_widths[3])] * 2,
index=4,
)(x)
elif nlayers == 12:
x = CNNBlock(
repeat=block_repeats[0],
                stride=first_stride,
subblocks=[(3, block_widths[0])] * 2,
index=1,
)(x)
x = CNNBlock(
repeat=block_repeats[1],
stride=block_strides[1],
subblocks=[(3, block_widths[1])] * 2,
index=2,
)(x)
x = CNNBlock(
repeat=block_repeats[2],
stride=block_strides[2],
subblocks=[(3, block_widths[2])] * 2,
index=3,
)(x)
x = CNNBlock(
repeat=block_repeats[3],
stride=block_strides[3],
subblocks=[(3, block_widths[3])] * 2,
index=4,
)(x)
elif nlayers == 18:
x = CNNBlock(
repeat=block_repeats[0],
                stride=first_stride,
subblocks=[(3, block_widths[0])] * 2,
index=1,
)(x)
x = CNNBlock(
repeat=block_repeats[1],
stride=block_strides[1],
subblocks=[(3, block_widths[1])] * 2,
index=2,
)(x)
x = CNNBlock(
repeat=block_repeats[2],
stride=block_strides[2],
subblocks=[(3, block_widths[2])] * 2,
index=3,
)(x)
x = CNNBlock(
repeat=block_repeats[3],
stride=block_strides[3],
subblocks=[(3, block_widths[3])] * 2,
index=4,
)(x)
elif nlayers == 26:
x = CNNBlock(
repeat=block_repeats[0],
                stride=first_stride,
subblocks=[(3, block_widths[0])] * 2,
index=1,
)(x)
x = CNNBlock(
repeat=block_repeats[1],
stride=block_strides[1],
subblocks=[(3, block_widths[1])] * 2,
index=2,
)(x)
x = CNNBlock(
repeat=block_repeats[2],
stride=block_strides[2],
subblocks=[(3, block_widths[2])] * 2,
index=3,
)(x)
x = CNNBlock(
repeat=block_repeats[3],
stride=block_strides[3],
subblocks=[(3, block_widths[3])] * 2,
index=4,
)(x)
elif nlayers == 34:
x = CNNBlock(
repeat=block_repeats[0],
                stride=first_stride,
subblocks=[(3, block_widths[0])] * 2,
index=1,
)(x)
x = CNNBlock(
repeat=block_repeats[1],
stride=block_strides[1],
subblocks=[(3, block_widths[1])] * 2,
index=2,
)(x)
x = CNNBlock(
repeat=block_repeats[2],
stride=block_strides[2],
subblocks=[(3, block_widths[2])] * 2,
index=3,
)(x)
x = CNNBlock(
repeat=block_repeats[3],
stride=block_strides[3],
subblocks=[(3, block_widths[3])] * 2,
index=4,
)(x)
else:
raise NotImplementedError(
"A Helnet with nlayers=%d is not implemented." % nlayers
)
model_name = "helnet%d_s16" % nlayers
    if not pooling:
        model_name += "_nopool"
if use_batch_norm:
model_name += "_bn"
model = keras.models.Model(inputs=inputs, outputs=x, name=model_name)
if weights == "imagenet":
logger.warning("Imagenet weights can not be used for production models.")
if nlayers == 18:
if use_batch_norm:
weights_path = keras.utils.data_utils.get_file(
"imagenet_helnet18-bn_weights_20170729.h5",
"https://s3-us-west-2.amazonaws.com/"
"9j2raan2rcev-ai-infra-models/"
"imagenet_helnet18-bn_weights_20170729.h5",
cache_subdir="models",
md5_hash="6a2d59e48d8b9f0b41a2b02a2f3c018e",
)
else:
weights_path = keras.utils.data_utils.get_file(
"imagenet_helnet18-no-bn_weights_20170729.h5",
"https://s3-us-west-2.amazonaws.com/"
"9j2raan2rcev-ai-infra-models/"
"imagenet_helnet18-no-bn_weights_20170729.h5",
cache_subdir="models",
md5_hash="3282b1e5e7f8e769a034103c455968e6",
)
model.load_weights(weights_path, by_name=True)
return model
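# Hedged usage sketch (assumption, not part of the original module): a HelNet-18 trunk
# with batch normalization and no intermediate max pooling, giving the usual stride-16
# feature extractor. The input resolution is illustrative.
if __name__ == "__main__":
    example_input = keras.layers.Input(shape=(3, 544, 960))
    example_model = HelNet(18, example_input,
                           pooling=False,
                           use_batch_norm=True,
                           data_format="channels_first")
    example_model.summary()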
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/helnet.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNet model templates in Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (
Activation,
AveragePooling2D,
BatchNormalization,
Conv2D,
Dense,
Dropout,
Flatten,
Input,
ZeroPadding2D
)
from tensorflow.keras.models import Model
from nvidia_tao_tf1.core.templates.utils_tf import (
block,
CONV_KERNEL_INITIALIZER,
correct_pad,
DENSE_KERNEL_INITIALIZER,
force_stride16,
round_filters,
round_repeats,
swish
)
DEFAULT_BLOCKS_ARGS = (
{'kernel_size': 3, 'repeats': 1, 'filters_in': 32, 'filters_out': 16,
'expand_ratio': 1, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25},
{'kernel_size': 3, 'repeats': 2, 'filters_in': 16, 'filters_out': 24,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 5, 'repeats': 2, 'filters_in': 24, 'filters_out': 40,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 3, 'repeats': 3, 'filters_in': 40, 'filters_out': 80,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 5, 'repeats': 3, 'filters_in': 80, 'filters_out': 112,
'expand_ratio': 6, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25},
{'kernel_size': 5, 'repeats': 4, 'filters_in': 112, 'filters_out': 192,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 3, 'repeats': 1, 'filters_in': 192, 'filters_out': 320,
'expand_ratio': 6, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25}
)
def EfficientNet(width_coefficient,
depth_coefficient,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
activation_fn=swish,
blocks_args=DEFAULT_BLOCKS_ARGS,
model_name='efficientnet',
add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
**kwargs):
"""Instantiates the EfficientNet architecture using given scaling coefficients.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
# Arguments
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections.
depth_divisor: integer, a unit of network width.
activation_fn: activation function.
blocks_args: list of dicts, parameters to construct block modules.
model_name: string, model name.
add_head: whether to include the fully-connected
layer at the top of the network.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `add_head` is False.
            It should have exactly 3 input channels.
classes: optional number of classes to classify images
into, only to be specified if `add_head` is True.
data_format(str): Keras data format.
freeze_bn(bool): Freeze all the BN layers or not.
freeze_blocks(list): Block IDs to be frozen in this model.
use_bias(bool): Use bias or not for Conv layers that are followed by a BN layer.
kernel_regularizer: The kernel regularizer.
bias_regularizer: The bias regularizer.
stride16(bool): Limit the total stride of the model to 16 or not, default is stride 32.
This is used for DetectNet_v2. All other use cases will use stride 32.
# Returns
A Keras model instance.
"""
# activation_fn defaults to swish if it is None or empty string
bn_opt = {
'momentum': 0.99,
'epsilon': 1e-3
}
if activation_fn in [None, ""]:
activation_fn = swish
# old_data_format = K.image_data_format()
assert data_format == 'channels_last'
K.set_image_data_format(data_format)
if freeze_blocks is None:
freeze_blocks = []
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
img_input = Input(tensor=input_tensor, shape=input_shape)
bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
# Build stem
x = img_input
x = ZeroPadding2D(
padding=correct_pad(x, 3),
name='stem_conv_pad',
data_format=data_format,
)(x)
x = Conv2D(
round_filters(32, depth_divisor, width_coefficient),
3,
strides=2,
padding='valid',
use_bias=use_bias,
kernel_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not bool(0 in freeze_blocks),
data_format=data_format,
name='stem_conv'
)(x)
if freeze_bn:
x = BatchNormalization(axis=bn_axis, name='stem_bn')(x, training=False)
else:
x = BatchNormalization(axis=bn_axis, name='stem_bn', **bn_opt)(x)
x = Activation(activation_fn, name='stem_activation')(x)
# Build blocks
blocks_args = deepcopy(list(blocks_args))
# in stride 16 mode, force the last stride 2 to be 1.
if stride16:
force_stride16(blocks_args)
b = 0
blocks = float(sum(args['repeats'] for args in blocks_args))
for (i, args) in enumerate(blocks_args):
assert args['repeats'] > 0
# Update block input and output filters based on depth multiplier.
args['filters_in'] = round_filters(args['filters_in'], depth_divisor, width_coefficient)
args['filters_out'] = round_filters(args['filters_out'], depth_divisor, width_coefficient)
for j in range(round_repeats(args.pop('repeats'), depth_coefficient)):
# The first block needs to take care of stride and filter size increase.
if j > 0:
args['strides'] = 1
args['filters_in'] = args['filters_out']
x = block(
x, activation_fn, drop_connect_rate * b / blocks,
freeze=bool((i + 1) in freeze_blocks),
freeze_bn=freeze_bn,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
name='block{}{}_'.format(i + 1, chr(j + 97)),
**args)
b += 1
# Build top
x = Conv2D(
round_filters(1280, depth_divisor, width_coefficient),
1,
padding='same',
use_bias=use_bias,
kernel_initializer=CONV_KERNEL_INITIALIZER,
trainable=not bool((len(blocks_args) + 1) in freeze_blocks),
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
name='top_conv'
)(x)
if freeze_bn:
x = BatchNormalization(axis=bn_axis, name='top_bn')(x, training=False)
else:
x = BatchNormalization(axis=bn_axis, name='top_bn', **bn_opt)(x)
x = Activation(activation_fn, name='top_activation')(x)
if add_head:
# global pool as: avg pool + flatten for pruning support
output_shape = x.get_shape().as_list()
if data_format == 'channels_first':
pool_size = (output_shape[-2], output_shape[-1])
else:
pool_size = (output_shape[-3], output_shape[-2])
x = AveragePooling2D(
pool_size=pool_size, name='avg_pool',
data_format=data_format, padding='valid'
)(x)
x = Flatten(name='flatten')(x)
if dropout_rate > 0:
x = Dropout(dropout_rate, name='top_dropout')(x)
# head will always not be frozen
# set the name to 'predictions' to align with that in add_dense_head()
x = Dense(
classes,
activation='softmax',
kernel_initializer=DENSE_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name='predictions'
)(x)
# Create model.
model = Model(img_input, x, name=model_name)
# restore previous data format
# K.set_image_data_format(old_data_format)
return model
def EfficientNetB0(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B0."""
return EfficientNet(1.0, 1.0, 0.2,
drop_connect_rate=0,
model_name='efficientnet-b0',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
                        activation_fn=activation_type,
**kwargs)
def EfficientNetB1(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B1."""
return EfficientNet(1.0, 1.1, 0.2,
model_name='efficientnet-b1',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB2(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B2."""
return EfficientNet(1.1, 1.2, 0.3,
model_name='efficientnet-b2',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB3(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B3."""
return EfficientNet(1.2, 1.4, 0.3,
model_name='efficientnet-b3',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB4(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B4."""
return EfficientNet(1.4, 1.8, 0.4,
model_name='efficientnet-b4',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB5(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B5."""
return EfficientNet(1.6, 2.2, 0.4,
model_name='efficientnet-b5',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB6(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B6."""
return EfficientNet(1.8, 2.6, 0.5,
model_name='efficientnet-b6',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB7(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B7."""
return EfficientNet(2.0, 3.1, 0.5,
model_name='efficientnet-b7',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
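# Usage sketch (illustrative): a minimal way to instantiate one of the builders
# above. It assumes a channels_first RGB input of 224x224; with no input_tensor
# given, the base builder creates its own Input layer and returns a Keras Model
# ending in a softmax head.
if __name__ == "__main__":
    demo_model = EfficientNetB0(
        add_head=True,
        input_shape=(3, 224, 224),
        classes=1000,
        data_format="channels_first",
    )
    demo_model.summary()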
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/efficientnet_tf.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import keras
import pytest
from nvidia_tao_tf1.core.models.templates.utils import count_layers_by_class_name
from nvidia_tao_tf1.core.templates.mobilenet import MobileNet, MobileNetV2
MOBILENET_NUM_CONV_LAYERS = [14, 14]
MOBILENET_NUM_DENSE_LAYERS = [0, 1]
MOBILENET_NUM_DEPTHWISE_LAYERS = [13, 13]
MOBILENETV2_NUM_CONV_LAYERS = [35, 35]
MOBILENETV2_NUM_DENSE_LAYERS = [0, 1]
MOBILENETV2_NUM_DEPTHWISE_LAYERS = [17, 17]
topologies = [
# Test the different nlayers
(256, True, 'channels_first', True, 32),
(256, False, 'channels_first', True, 32),
(256, False, 'channels_first', False, 32),
(256, True, 'channels_last', True, 16),
(256, False, 'channels_last', True, 16),
(256, False, 'channels_last', False, 16),
(512, True, 'channels_first', True, 32),
(512, False, 'channels_first', True, 32),
(512, False, 'channels_first', False, 32),
(512, True, 'channels_last', True, 16),
(512, False, 'channels_last', True, 16),
(512, False, 'channels_last', False, 16),
(224, True, 'channels_first', True, 32),
(224, False, 'channels_first', True, 32),
(224, False, 'channels_first', False, 32),
(224, True, 'channels_last', True, 16),
(224, False, 'channels_last', True, 16),
(224, False, 'channels_last', False, 16),
]
def _compute_output_size(size, stride):
for _ in range(4):
size = int(math.ceil(size / 2.0))
if stride == 32:
size = int(math.ceil(size / 2.0))
return size
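# Illustrative check of the helper above: an input of 224 is ceil-halved four
# times (224 -> 112 -> 56 -> 28 -> 14); stride 32 applies one more halving,
# giving the familiar 224 / 32 = 7 feature-map size.
assert _compute_output_size(224, 32) == 7
assert _compute_output_size(224, 16) == 14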
@pytest.mark.parametrize("input_size, use_batch_norm, data_format, add_head, stride", topologies)
def test_mobilenet_v1(input_size, use_batch_norm, data_format, add_head, stride, nclasses=None):
"""Test MobileNet V1 for a variety of instantiation parameter combinations."""
# Set channel format.
if data_format == 'channels_last':
shape = (input_size, input_size, 3)
elif data_format == 'channels_first':
shape = (3, input_size, input_size)
# Define a keras input layer for the network
inputs = keras.layers.Input(shape=shape)
# Add 10 class dense head if needed.
if add_head:
nclasses = 10
# Instantiate model.
model = MobileNet(inputs,
use_batch_norm=use_batch_norm,
data_format=data_format,
add_head=add_head,
stride=stride,
activation_type='relu',
nclasses=nclasses)
# Batchnorm check.
n_batchnorms = count_layers_by_class_name(model, ["BatchNormalization"])
if use_batch_norm:
assert n_batchnorms > 0
else:
assert n_batchnorms == 0
# Layer count check.
n_conv_layers_counted = count_layers_by_class_name(model, ["Conv2D"])
n_dense_layers_counted = count_layers_by_class_name(model, ["Dense"])
n_depthiwise_conv_2d_layers_counted = count_layers_by_class_name(model, ['DepthwiseConv2D'])
# Setting expected number of conv layers.
if stride == 32:
if add_head:
expected_conv_layers = MOBILENET_NUM_CONV_LAYERS[1]
expected_dense_layers = MOBILENET_NUM_DENSE_LAYERS[1]
expected_depthwise_conv_2d_layers = MOBILENET_NUM_DEPTHWISE_LAYERS[1]
else:
expected_conv_layers = MOBILENET_NUM_CONV_LAYERS[0]
expected_dense_layers = MOBILENET_NUM_DENSE_LAYERS[0]
expected_depthwise_conv_2d_layers = MOBILENET_NUM_DEPTHWISE_LAYERS[0]
# Check number of layers in the instantiated model.
assert n_dense_layers_counted == expected_dense_layers
assert n_conv_layers_counted == expected_conv_layers
assert n_depthiwise_conv_2d_layers_counted == expected_depthwise_conv_2d_layers
# Check model output shape.
output_shape = tuple(model.outputs[0].get_shape().as_list())
    # Set expected shape depending on whether or not the dense head is added.
if add_head:
assert output_shape[1:] == (nclasses,)
else:
_output_sized_expected = _compute_output_size(input_size, stride)
expected_shape = (_output_sized_expected, _output_sized_expected)
if data_format == 'channels_last':
assert output_shape[1:3] == expected_shape
elif data_format == 'channels_first':
assert output_shape[2:4] == expected_shape
# Check the name of the instantiated model.
assert "mobilenet" in model.name
if use_batch_norm:
assert "_bn" in model.name
@pytest.mark.parametrize("input_size, use_batch_norm, data_format, add_head, stride", topologies)
def test_mobilenet_v2(input_size, use_batch_norm, data_format, add_head, stride, nclasses=None):
"""Test MobileNet V2 for a variety of instantiation parameter combinations."""
# Set channel format.
if data_format == 'channels_last':
shape = (input_size, input_size, 3)
elif data_format == 'channels_first':
shape = (3, input_size, input_size)
# Define a keras input layer for the network
inputs = keras.layers.Input(shape=shape)
# Add 10 class dense head if needed.
if add_head:
nclasses = 10
# Instantiate model.
model = MobileNetV2(inputs,
use_batch_norm=use_batch_norm,
data_format=data_format,
add_head=add_head,
stride=stride,
activation_type='relu',
nclasses=nclasses)
# Batchnorm check.
n_batchnorms = count_layers_by_class_name(model, ["BatchNormalization"])
if use_batch_norm:
assert n_batchnorms > 0
else:
assert n_batchnorms == 0
# Layer count check.
n_conv_layers_counted = count_layers_by_class_name(model, ["Conv2D"])
n_dense_layers_counted = count_layers_by_class_name(model, ["Dense"])
n_depthiwise_conv_2d_layers_counted = count_layers_by_class_name(model, ['DepthwiseConv2D'])
# Setting expected number of conv layers.
if stride == 32:
if add_head:
expected_conv_layers = MOBILENETV2_NUM_CONV_LAYERS[1]
expected_dense_layers = MOBILENETV2_NUM_DENSE_LAYERS[1]
expected_depthwise_conv_2d_layers = MOBILENETV2_NUM_DEPTHWISE_LAYERS[1]
else:
expected_conv_layers = MOBILENETV2_NUM_CONV_LAYERS[0]
expected_dense_layers = MOBILENETV2_NUM_DENSE_LAYERS[0]
expected_depthwise_conv_2d_layers = MOBILENETV2_NUM_DEPTHWISE_LAYERS[0]
# Check number of layers in the instantiated model.
assert n_dense_layers_counted == expected_dense_layers
assert n_conv_layers_counted == expected_conv_layers
assert n_depthiwise_conv_2d_layers_counted == expected_depthwise_conv_2d_layers
# Check model output shape.
output_shape = tuple(model.outputs[0].get_shape().as_list())
    # Set expected shape depending on whether or not the dense head is added.
if add_head:
assert output_shape[1:] == (nclasses,)
else:
_output_sized_expected = _compute_output_size(input_size, stride)
expected_shape = (_output_sized_expected, _output_sized_expected)
if data_format == 'channels_last':
assert output_shape[1:3] == expected_shape
elif data_format == 'channels_first':
assert output_shape[2:4] == expected_shape
# Check the name of the instantiated model.
assert "mobilenet_v2" in model.name
if use_batch_norm:
assert "_bn" in model.name
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/test_mobilenet.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SqueezeNet Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import backend
from keras import layers
from keras import models
from nvidia_tao_tf1.core.templates.utils import arg_scope
from nvidia_tao_tf1.core.templates.utils import fire_module
def SqueezeNet(inputs=None,
input_shape=None,
dropout=1e-3,
add_head=False,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
nclasses=1000,
freeze_blocks=None,
skip=False):
"""
The squeeze net architecture.
For details, see https://arxiv.org/pdf/1602.07360.pdf
Args:
inputs(tensor): Input tensor.
input_shape(tuple, None): Shape of the input tensor, can be None.
dropout(float): Dropout ratio.
        add_head(bool): Whether or not to add the ImageNet classification head
            (conv10 + global average pooling + softmax).
data_format(str): Data format, can be channels_first or channels_last.
kernel_regularizer: Kernel regularizer applied to the model.
bias_regularizer: Bias regularizer applied to the model.
nclasses(int): Number of classes the output will be classified into.
        freeze_blocks(list): the list of blocks to be frozen in the model.
        skip(bool): Whether to add residual (skip) connections around some of the fire modules.
    Returns:
        The SqueezeNet model as a Keras Model instance.
"""
if freeze_blocks is None:
freeze_blocks = []
if input_shape is None:
if data_format == 'channels_first':
input_shape = (3, 224, 224)
else:
input_shape = (224, 224, 3)
if inputs is None:
img_input = layers.Input(shape=input_shape, name="Input")
else:
if not backend.is_keras_tensor(inputs):
img_input = layers.Input(tensor=inputs, shape=input_shape, name="Input")
else:
img_input = inputs
x = layers.Conv2D(96,
kernel_size=(7, 7),
strides=(2, 2),
padding='same',
name='conv1',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
trainable=not(0 in freeze_blocks))(img_input)
x = layers.Activation('relu', name='conv1_relu')(x)
x = layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1',
data_format=data_format, padding='same')(x)
with arg_scope([fire_module],
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format):
x = fire_module(x, 2, 16, 64, trainable=not(1 in freeze_blocks))
if skip:
x = layers.add([x, fire_module(x, 3, 16, 64,
trainable=not(2 in freeze_blocks))])
else:
x = fire_module(x, 3, 16, 64, trainable=not(2 in freeze_blocks))
x = fire_module(x, 4, 32, 128, trainable=not(3 in freeze_blocks))
x = layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool4',
data_format=data_format,
padding='same')(x)
if skip:
x = layers.add([x, fire_module(x, 5, 32, 128, trainable=not(4 in freeze_blocks))])
else:
x = fire_module(x, 5, 32, 128, trainable=not(4 in freeze_blocks))
x = fire_module(x, 6, 48, 192, trainable=not(5 in freeze_blocks))
if skip:
x = layers.add([x, fire_module(x, 7, 48, 192, trainable=not(6 in freeze_blocks))])
else:
x = fire_module(x, 7, 48, 192, trainable=not(6 in freeze_blocks))
x = fire_module(x, 8, 64, 256, trainable=not(7 in freeze_blocks))
x = layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool8',
data_format=data_format,
padding='same')(x)
if skip:
x = layers.add([x, fire_module(x, 9, 64, 256, trainable=not(8 in freeze_blocks))])
else:
x = fire_module(x, 9, 64, 256, trainable=not(8 in freeze_blocks))
if add_head:
x = layers.Dropout(rate=dropout, name='fire9_dropout')(x)
x = layers.Conv2D(nclasses,
kernel_size=(1, 1),
padding='same',
name='conv10',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format)(x)
x = layers.Activation('relu', name='conv10_relu')(x)
x = layers.GlobalAveragePooling2D(data_format=data_format, name='pool10')(x)
x = layers.Activation("softmax", name='output')(x)
return models.Model(img_input, x)
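# Usage sketch (illustrative): builds the classification variant of SqueezeNet
# on a channels_first input; with inputs=None the function creates its own
# Input layer from input_shape.
if __name__ == "__main__":
    demo_model = SqueezeNet(input_shape=(3, 224, 224),
                            add_head=True,
                            data_format='channels_first',
                            nclasses=1000)
    demo_model.summary()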
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/squeezenet.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
import pytest
from nvidia_tao_tf1.core.models.templates.utils import count_layers_by_class_name
from nvidia_tao_tf1.core.templates.vgg import VggNet
topologies = [
# Test the different nlayers
(16, True, 'channels_first', True, True),
# Without BN
(19, False, 'channels_first', True, False),
# Without head
(16, False, 'channels_first', False, True),
# channels_last:
# With BN, with head
(16, True, 'channels_last', True, False),
# Without BN, with head
(19, False, 'channels_last', True, True),
# Without BN, without head
(19, False, 'channels_last', False, True),
]
@pytest.mark.parametrize("nlayers, use_batch_norm, data_format, add_head,"
"use_pooling", topologies)
def test_vggnet(nlayers, use_batch_norm, data_format, add_head, use_pooling,
nclasses=None):
"""Test Resnets for a variety of topologies and parameters."""
# Set channel format.
if data_format == 'channels_last':
shape = (256, 256, 3)
elif data_format == 'channels_first':
shape = (3, 256, 256)
# Define supported counts.
supported_counts = [16, 19]
inputs = keras.layers.Input(shape=shape)
# Add 10 class dense head if needed.
if add_head:
nclasses = 10
# Instantiate model.
if nlayers not in supported_counts:
with pytest.raises(NotImplementedError):
model = VggNet(nlayers, inputs,
use_batch_norm=use_batch_norm,
data_format=data_format,
add_head=add_head,
use_pooling=use_pooling,
activation_type='relu',
nclasses=nclasses)
else:
model = VggNet(nlayers, inputs,
use_batch_norm=use_batch_norm,
data_format=data_format,
add_head=add_head,
use_pooling=use_pooling,
activation_type='relu',
nclasses=nclasses)
# Batchnorm check.
n_batchnorms = count_layers_by_class_name(model, ["BatchNormalization"])
if use_batch_norm:
assert n_batchnorms > 0
else:
assert n_batchnorms == 0
# Layer count check.
n_conv_layers_counted = count_layers_by_class_name(model, ["Conv2D"])
n_dense_layers_counted = count_layers_by_class_name(model, ["Dense"])
expected_conv_layers = nlayers-3
expected_dense_layers = 0
if add_head:
expected_dense_layers = 3
assert n_dense_layers_counted == expected_dense_layers
assert n_conv_layers_counted == expected_conv_layers
# Check model output shape.
output_shape = model.outputs[0].get_shape()
    # Set expected shape depending on whether or not the dense head is added.
expected_shape = (16, 16)
if add_head:
        assert output_shape[1:] == (nclasses,)
else:
if data_format == 'channels_last':
assert output_shape[1:3] == expected_shape
elif data_format == 'channels_first':
assert output_shape[2:4] == expected_shape
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/test_vgg.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit model templates."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.utils import get_custom_objects
from nvidia_tao_tf1.core.templates import alexnet
from nvidia_tao_tf1.core.templates import googlenet
from nvidia_tao_tf1.core.templates import mobilenet
from nvidia_tao_tf1.core.templates import resnet
from nvidia_tao_tf1.core.templates import squeezenet
from nvidia_tao_tf1.core.templates import utils
from nvidia_tao_tf1.core.templates import vgg
from nvidia_tao_tf1.core.templates.utils import swish
__all__ = ('alexnet', 'googlenet', 'mobilenet', 'resnet', 'squeezenet', 'utils', 'vgg')
get_custom_objects()["swish"] = swish
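# Illustrative note: registering "swish" in the Keras custom-object map lets
# models saved with the swish activation be reloaded without passing
# custom_objects explicitly, e.g. (hypothetical file name)
#   keras.models.load_model("efficientnet_b0.h5")
# instead of
#   keras.models.load_model("efficientnet_b0.h5", custom_objects={"swish": swish})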
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNet model templates in Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
from keras import backend as K
from keras.layers import (
Activation,
AveragePooling2D,
BatchNormalization,
Conv2D,
Dense,
Dropout,
Flatten,
Input,
ZeroPadding2D
)
from keras.models import Model
from nvidia_tao_tf1.core.templates.utils import (
block,
CONV_KERNEL_INITIALIZER,
correct_pad,
DENSE_KERNEL_INITIALIZER,
force_stride16,
round_filters,
round_repeats,
swish
)
DEFAULT_BLOCKS_ARGS = (
{'kernel_size': 3, 'repeats': 1, 'filters_in': 32, 'filters_out': 16,
'expand_ratio': 1, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25},
{'kernel_size': 3, 'repeats': 2, 'filters_in': 16, 'filters_out': 24,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 5, 'repeats': 2, 'filters_in': 24, 'filters_out': 40,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 3, 'repeats': 3, 'filters_in': 40, 'filters_out': 80,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 5, 'repeats': 3, 'filters_in': 80, 'filters_out': 112,
'expand_ratio': 6, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25},
{'kernel_size': 5, 'repeats': 4, 'filters_in': 112, 'filters_out': 192,
'expand_ratio': 6, 'id_skip': True, 'strides': 2, 'se_ratio': 0.25},
{'kernel_size': 3, 'repeats': 1, 'filters_in': 192, 'filters_out': 320,
'expand_ratio': 6, 'id_skip': True, 'strides': 1, 'se_ratio': 0.25}
)
def EfficientNet(width_coefficient,
depth_coefficient,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
activation_fn=swish,
blocks_args=DEFAULT_BLOCKS_ARGS,
model_name='efficientnet',
add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
**kwargs):
"""Instantiates the EfficientNet architecture using given scaling coefficients.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
# Arguments
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections.
depth_divisor: integer, a unit of network width.
activation_fn: activation function.
blocks_args: list of dicts, parameters to construct block modules.
model_name: string, model name.
add_head: whether to include the fully-connected
layer at the top of the network.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `add_head` is False.
            It should have exactly 3 input channels.
classes: optional number of classes to classify images
into, only to be specified if `add_head` is True.
data_format(str): Keras data format.
freeze_bn(bool): Freeze all the BN layers or not.
freeze_blocks(list): Block IDs to be frozen in this model.
use_bias(bool): Use bias or not for Conv layers that are followed by a BN layer.
kernel_regularizer: The kernel regularizer.
bias_regularizer: The bias regularizer.
stride16(bool): Limit the total stride of the model to 16 or not, default is stride 32.
This is used for DetectNet_v2. All other use cases will use stride 32.
# Returns
A Keras model instance.
"""
# activation_fn defaults to swish if it is None or empty string
if activation_fn in [None, ""]:
activation_fn = swish
old_data_format = K.image_data_format()
K.set_image_data_format(data_format)
if freeze_blocks is None:
freeze_blocks = []
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
# Build stem
x = img_input
x = ZeroPadding2D(
padding=correct_pad(x, 3),
name='stem_conv_pad'
)(x)
x = Conv2D(
round_filters(32, depth_divisor, width_coefficient),
3,
strides=2,
padding='valid',
use_bias=use_bias,
kernel_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not bool(0 in freeze_blocks),
name='stem_conv'
)(x)
if freeze_bn:
x = BatchNormalization(axis=bn_axis, name='stem_bn')(x, training=False)
else:
x = BatchNormalization(axis=bn_axis, name='stem_bn')(x)
x = Activation(activation_fn, name='stem_activation')(x)
# Build blocks
blocks_args = deepcopy(list(blocks_args))
# in stride 16 mode, force the last stride 2 to be 1.
if stride16:
force_stride16(blocks_args)
b = 0
blocks = float(sum(args['repeats'] for args in blocks_args))
for (i, args) in enumerate(blocks_args):
assert args['repeats'] > 0
# Update block input and output filters based on depth multiplier.
args['filters_in'] = round_filters(args['filters_in'], depth_divisor, width_coefficient)
args['filters_out'] = round_filters(args['filters_out'], depth_divisor, width_coefficient)
for j in range(round_repeats(args.pop('repeats'), depth_coefficient)):
# The first block needs to take care of stride and filter size increase.
if j > 0:
args['strides'] = 1
args['filters_in'] = args['filters_out']
x = block(
x, activation_fn, drop_connect_rate * b / blocks,
freeze=bool((i + 1) in freeze_blocks),
freeze_bn=freeze_bn,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name='block{}{}_'.format(i + 1, chr(j + 97)),
**args)
b += 1
# Build top
x = Conv2D(
round_filters(1280, depth_divisor, width_coefficient),
1,
padding='same',
use_bias=use_bias,
kernel_initializer=CONV_KERNEL_INITIALIZER,
trainable=not bool((len(blocks_args) + 1) in freeze_blocks),
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name='top_conv'
)(x)
if freeze_bn:
x = BatchNormalization(axis=bn_axis, name='top_bn')(x, training=False)
else:
x = BatchNormalization(axis=bn_axis, name='top_bn')(x)
x = Activation(activation_fn, name='top_activation')(x)
if add_head:
# global pool as: avg pool + flatten for pruning support
output_shape = x.get_shape().as_list()
if data_format == 'channels_first':
pool_size = (output_shape[-2], output_shape[-1])
else:
pool_size = (output_shape[-3], output_shape[-2])
x = AveragePooling2D(
pool_size=pool_size, name='avg_pool',
data_format=data_format, padding='valid'
)(x)
x = Flatten(name='flatten')(x)
if dropout_rate > 0:
x = Dropout(dropout_rate, name='top_dropout')(x)
        # the head is never frozen
# set the name to 'predictions' to align with that in add_dense_head()
x = Dense(
classes,
activation='softmax',
kernel_initializer=DENSE_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name='predictions'
)(x)
# Create model.
model = Model(img_input, x, name=model_name)
# restore previous data format
K.set_image_data_format(old_data_format)
return model
def EfficientNetB0(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B0."""
return EfficientNet(1.0, 1.0, 0.2,
model_name='efficientnet-b0',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
                        activation_fn=activation_type,
**kwargs)
def EfficientNetB1(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B1."""
return EfficientNet(1.0, 1.1, 0.2,
model_name='efficientnet-b1',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB2(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B2."""
return EfficientNet(1.1, 1.2, 0.3,
model_name='efficientnet-b2',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB3(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B3."""
return EfficientNet(1.2, 1.4, 0.3,
model_name='efficientnet-b3',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB4(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B4."""
return EfficientNet(1.4, 1.8, 0.4,
model_name='efficientnet-b4',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB5(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B5."""
return EfficientNet(1.6, 2.2, 0.4,
model_name='efficientnet-b5',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB6(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B6."""
return EfficientNet(1.8, 2.6, 0.5,
model_name='efficientnet-b6',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
def EfficientNetB7(add_head=True,
input_tensor=None,
input_shape=None,
classes=1000,
data_format="channels_first",
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
stride16=False,
activation_type=None,
**kwargs):
"""EfficientNet B7."""
return EfficientNet(2.0, 3.1, 0.5,
model_name='efficientnet-b7',
add_head=add_head,
input_tensor=input_tensor,
input_shape=input_shape,
classes=classes,
data_format=data_format,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
stride16=stride16,
activation_fn=activation_type,
**kwargs)
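# Usage sketch (illustrative): the headless, stride-16 configuration mentioned
# in the docstring (e.g. as a DetectNet_v2 feature extractor), with the stem
# and first block frozen. The input resolution below is only an example; the
# base builder creates its own Input layer from input_shape.
if __name__ == "__main__":
    backbone = EfficientNetB1(
        add_head=False,
        input_shape=(3, 544, 960),
        data_format="channels_first",
        stride16=True,
        freeze_blocks=[0, 1],
        freeze_bn=True,
    )
    backbone.summary()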
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/efficientnet.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CSPDarkNet models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import backend as K
from keras import layers
from keras import models
from nvidia_tao_tf1.core.templates.utils import _mish_conv
from nvidia_tao_tf1.core.templates.utils import arg_scope
def CSPDarkNet(nlayers,
input_tensor=None,
input_shape=None,
add_head=False,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
nclasses=1000,
use_batch_norm=True,
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
force_relu=False,
activation="leaky_relu"):
"""
    The CSPDarkNet model architecture.
Args:
nlayers(int): 19 or 53.
input_tensor(tensor): Input tensor.
input_shape(tuple, None): Shape of the input tensor, can be None.
add_head(bool): Whether or not to add the ImageNet head. If not, will add dense head.
data_format(str): Data format, can be channels_first or channels_last.
kernel_regularizer: Kernel regularizer applied to the model.
bias_regularizer: Bias regularizer applied to the model.
nclasses(int): Number of classes the output will be classified into.
use_batch_norm(bool): Whether or not to use the BN layer.
freeze_bn(bool): Whether or not to freeze the BN layers.
freeze_blocks(list): the list of blocks in the model to be frozen.
use_bias(bool): Whether or not use bias for the conv layer
that is immediately before the BN layers.
force_relu(bool): Whether to use ReLU instead of Mish
activation(str): Activation type.
    Returns:
        The CSPDarkNet model as a Keras Model instance.
"""
if freeze_blocks is None:
freeze_blocks = []
if input_shape is None:
if data_format == 'channels_first':
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
concat_axis = 1 if data_format == 'channels_first' else -1
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor)
else:
img_input = input_tensor
with arg_scope([_mish_conv],
use_batch_norm=use_batch_norm,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
padding='same',
freeze_bn=freeze_bn,
use_bias=use_bias,
force_relu=force_relu,
activation=activation):
x = _mish_conv(img_input, filters=32, kernel=3, strides=1,
name='conv1', trainable=not(0 in freeze_blocks))
if nlayers == 53:
x = _mish_conv(x, filters=64, kernel=3, strides=2, name='conv2',
trainable=not(1 in freeze_blocks))
z = _mish_conv(x, filters=64, kernel=1, strides=1, name='conv2_1',
trainable=not(1 in freeze_blocks))
x = _mish_conv(x, filters=64, kernel=1, strides=1, name='conv2_2',
trainable=not(1 in freeze_blocks))
y = _mish_conv(x, filters=32, kernel=1, strides=1, name='b1_conv1_1',
trainable=not(1 in freeze_blocks))
y = _mish_conv(y, filters=64, kernel=3, strides=1, name='b1_conv1_2',
trainable=not(1 in freeze_blocks))
x = layers.Add(name='b1_add1')([x, y])
x = _mish_conv(x, filters=64, kernel=1, strides=1, name='b1_partial_trans',
trainable=not(1 in freeze_blocks))
x = layers.Concatenate(axis=concat_axis, name='b1_concat')([x, z])
x = _mish_conv(x, filters=64, kernel=1, strides=1, name='b1_final_trans',
trainable=not(1 in freeze_blocks))
x = _mish_conv(x, filters=128, kernel=3, strides=2, name='conv3',
trainable=not(2 in freeze_blocks))
z = _mish_conv(x, filters=64, kernel=1, strides=1, name='conv3_1',
trainable=not(2 in freeze_blocks))
x = _mish_conv(x, filters=64, kernel=1, strides=1, name='conv3_2',
trainable=not(2 in freeze_blocks))
for i in range(2):
y = _mish_conv(x, filters=64, kernel=1, strides=1,
name='b2_conv{}_1'.format(i+1), trainable=not(2 in freeze_blocks))
y = _mish_conv(y, filters=64, kernel=3, strides=1,
name='b2_conv{}_2'.format(i+1), trainable=not(2 in freeze_blocks))
x = layers.Add(name='b2_add{}'.format(i+1))([x, y])
x = _mish_conv(x, filters=64, kernel=1, strides=1, name='b2_partial_trans',
trainable=not(2 in freeze_blocks))
x = layers.Concatenate(axis=concat_axis, name='b2_concat')([x, z])
x = _mish_conv(x, filters=128, kernel=1, strides=1, name='b2_final_trans',
trainable=not(2 in freeze_blocks))
x = _mish_conv(x, filters=256, kernel=3, strides=2, name='conv4',
trainable=not(3 in freeze_blocks))
z = _mish_conv(x, filters=128, kernel=1, strides=1, name='conv4_1',
trainable=not(3 in freeze_blocks))
x = _mish_conv(x, filters=128, kernel=1, strides=1, name='conv4_2',
trainable=not(3 in freeze_blocks))
for i in range(8):
y = _mish_conv(x, filters=128, kernel=1, strides=1,
name='b3_conv{}_1'.format(i+1), trainable=not(3 in freeze_blocks))
y = _mish_conv(y, filters=128, kernel=3, strides=1,
name='b3_conv{}_2'.format(i+1), trainable=not(3 in freeze_blocks))
x = layers.Add(name='b3_add{}'.format(i+1))([x, y])
x = _mish_conv(x, filters=128, kernel=1, strides=1, name='b3_partial_trans',
trainable=not(3 in freeze_blocks))
x = layers.Concatenate(axis=concat_axis, name='b3_concat')([x, z])
x = _mish_conv(x, filters=256, kernel=1, strides=1, name='b3_final_trans',
trainable=not(3 in freeze_blocks))
x = _mish_conv(x, filters=512, kernel=3, strides=2, name='conv5',
trainable=not(4 in freeze_blocks))
z = _mish_conv(x, filters=256, kernel=1, strides=1, name='conv5_1',
trainable=not(4 in freeze_blocks))
x = _mish_conv(x, filters=256, kernel=1, strides=1, name='conv5_2',
trainable=not(4 in freeze_blocks))
for i in range(8):
y = _mish_conv(x, filters=256, kernel=1, strides=1,
name='b4_conv{}_1'.format(i+1), trainable=not(4 in freeze_blocks))
y = _mish_conv(y, filters=256, kernel=3, strides=1,
name='b4_conv{}_2'.format(i+1), trainable=not(4 in freeze_blocks))
x = layers.Add(name='b4_add{}'.format(i+1))([x, y])
x = _mish_conv(x, filters=256, kernel=1, strides=1, name='b4_partial_trans',
trainable=not(4 in freeze_blocks))
x = layers.Concatenate(axis=concat_axis, name='b4_concat')([x, z])
x = _mish_conv(x, filters=512, kernel=1, strides=1, name='b4_final_trans',
trainable=not(4 in freeze_blocks))
x = _mish_conv(x, filters=1024, kernel=3, strides=2, name='conv6',
trainable=not(5 in freeze_blocks))
z = _mish_conv(x, filters=512, kernel=1, strides=1, name='conv6_1',
trainable=not(5 in freeze_blocks))
x = _mish_conv(x, filters=512, kernel=1, strides=1, name='conv6_2',
trainable=not(5 in freeze_blocks))
for i in range(4):
y = _mish_conv(x, filters=512, kernel=1, strides=1,
name='b5_conv{}_1'.format(i+1), trainable=not(5 in freeze_blocks))
y = _mish_conv(y, filters=512, kernel=3, strides=1,
name='b5_conv{}_2'.format(i+1), trainable=not(5 in freeze_blocks))
x = layers.Add(name='b5_add{}'.format(i+1))([x, y])
x = _mish_conv(x, filters=512, kernel=1, strides=1, name='b5_partial_trans',
trainable=not(5 in freeze_blocks))
x = layers.Concatenate(axis=concat_axis, name='b5_concat')([x, z])
x = _mish_conv(x, filters=1024, kernel=1, strides=1, name='b5_final_trans',
trainable=not(5 in freeze_blocks))
elif nlayers == 19:
x = layers.MaxPooling2D(pool_size=2, strides=2, data_format=data_format,
padding='same', name='maxpool_1')(x)
x = _mish_conv(x, filters=64, kernel=3, strides=1, name='b1_conv1',
trainable=not(1 in freeze_blocks))
x = layers.MaxPooling2D(pool_size=2, strides=2, data_format=data_format,
padding='same', name='maxpool_2')(x)
x = _mish_conv(x, filters=128, kernel=3, strides=1,
name='b2_conv1', trainable=not(2 in freeze_blocks))
z = _mish_conv(x, filters=64, kernel=1, strides=1, name='b2_part1',
trainable=not(2 in freeze_blocks))
x = _mish_conv(x, filters=64, kernel=1, strides=1, name='b2_part2',
trainable=not(2 in freeze_blocks))
x = _mish_conv(x, filters=64, kernel=1, strides=1,
name='b2_conv2', trainable=not(2 in freeze_blocks))
x = _mish_conv(x, filters=64, kernel=3, strides=1,
name='b2_conv3', trainable=not(2 in freeze_blocks))
x = _mish_conv(x, filters=64, kernel=1, strides=1, name='b2_partial_trans',
trainable=not(2 in freeze_blocks))
x = layers.Concatenate(axis=concat_axis, name='b2_concat')([x, z])
x = _mish_conv(x, filters=128, kernel=1, strides=1, name='b2_final_trans',
trainable=not(2 in freeze_blocks))
x = layers.MaxPooling2D(pool_size=2, strides=2, data_format=data_format,
padding='same', name='maxpool_3')(x)
x = _mish_conv(x, filters=256, kernel=3, strides=1,
name='b3_conv1', trainable=not(3 in freeze_blocks))
z = _mish_conv(x, filters=128, kernel=1, strides=1, name='b3_part1',
trainable=not(3 in freeze_blocks))
x = _mish_conv(x, filters=128, kernel=1, strides=1, name='b3_part2',
trainable=not(3 in freeze_blocks))
x = _mish_conv(x, filters=128, kernel=1, strides=1,
name='b3_conv2', trainable=not(3 in freeze_blocks))
x = _mish_conv(x, filters=128, kernel=3, strides=1,
name='b3_conv3', trainable=not(3 in freeze_blocks))
x = _mish_conv(x, filters=128, kernel=1, strides=1, name='b3_partial_trans',
trainable=not(3 in freeze_blocks))
x = layers.Concatenate(axis=concat_axis, name='b3_concat')([x, z])
x = _mish_conv(x, filters=256, kernel=1, strides=1, name='b3_final_trans',
trainable=not(3 in freeze_blocks))
x = layers.MaxPooling2D(pool_size=2, strides=2, data_format=data_format,
padding='same', name='maxpool_4')(x)
x = _mish_conv(x, filters=512, kernel=3, strides=1,
name='b4_conv1', trainable=not(4 in freeze_blocks))
z = _mish_conv(x, filters=256, kernel=1, strides=1, name='b4_part1',
trainable=not(4 in freeze_blocks))
x = _mish_conv(x, filters=256, kernel=1, strides=1, name='b4_part2',
trainable=not(4 in freeze_blocks))
x = _mish_conv(x, filters=256, kernel=1, strides=1,
name='b4_conv2', trainable=not(4 in freeze_blocks))
x = _mish_conv(x, filters=256, kernel=3, strides=1,
name='b4_conv3', trainable=not(4 in freeze_blocks))
x = _mish_conv(x, filters=256, kernel=1, strides=1,
name='b4_conv4', trainable=not(4 in freeze_blocks))
x = _mish_conv(x, filters=256, kernel=3, strides=1,
name='b4_conv5', trainable=not(4 in freeze_blocks))
x = _mish_conv(x, filters=256, kernel=1, strides=1, name='b4_partial_trans',
trainable=not(4 in freeze_blocks))
x = layers.Concatenate(axis=concat_axis, name='b4_concat')([x, z])
x = _mish_conv(x, filters=512, kernel=1, strides=1, name='b4_final_trans',
trainable=not(4 in freeze_blocks))
x = layers.MaxPooling2D(pool_size=2, strides=2, data_format=data_format,
padding='same', name='maxpool_5')(x)
x = _mish_conv(x, filters=1024, kernel=3, strides=1,
name='b5_conv1', trainable=not(5 in freeze_blocks))
z = _mish_conv(x, filters=512, kernel=1, strides=1, name='b5_part1',
trainable=not(5 in freeze_blocks))
x = _mish_conv(x, filters=512, kernel=1, strides=1, name='b5_part2',
trainable=not(5 in freeze_blocks))
x = _mish_conv(x, filters=512, kernel=1, strides=1,
name='b5_conv2', trainable=not(5 in freeze_blocks))
x = _mish_conv(x, filters=512, kernel=3, strides=1,
name='b5_conv3', trainable=not(5 in freeze_blocks))
x = _mish_conv(x, filters=512, kernel=1, strides=1,
name='b5_conv4', trainable=not(5 in freeze_blocks))
x = _mish_conv(x, filters=512, kernel=3, strides=1,
name='b5_conv5', trainable=not(5 in freeze_blocks))
x = _mish_conv(x, filters=512, kernel=1, strides=1, name='b5_partial_trans',
trainable=not(5 in freeze_blocks))
x = layers.Concatenate(axis=concat_axis, name='b5_concat')([x, z])
x = _mish_conv(x, filters=1024, kernel=1, strides=1, name='b5_final_trans',
trainable=not(5 in freeze_blocks))
else:
raise NotImplementedError('A CSPDarkNet with nlayers=%d is not implemented.' % nlayers)
    # Optionally add the ImageNet classification head (global average pooling + softmax dense).
if add_head:
x = layers.GlobalAveragePooling2D(data_format=data_format, name='avgpool')(x)
x = layers.Dense(nclasses, activation='softmax', name='predictions',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)(x)
# Create model.
model_name = 'cspdarknet%d' % nlayers
if use_batch_norm:
model_name += '_bn'
if add_head:
model_name += '_add_head'
model = models.Model(img_input, x, name=model_name)
return model
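# Usage sketch (illustrative): builds a headless CSPDarkNet-53 backbone (the
# feature extractor used by YOLOv4-style detectors) on a channels_first input
# with free spatial dimensions.
if __name__ == "__main__":
    backbone = CSPDarkNet(53,
                          input_shape=(3, None, None),
                          add_head=False,
                          data_format='channels_first')
    backbone.summary()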
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/cspdarknet.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DarkNet models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import backend as K
from keras import layers
from keras import models
from nvidia_tao_tf1.core.templates.utils import _leaky_conv
from nvidia_tao_tf1.core.templates.utils import arg_scope
def DarkNet(nlayers,
input_tensor=None,
input_shape=None,
alpha=0.1,
add_head=False,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
nclasses=1000,
use_batch_norm=True,
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
force_relu=False):
"""
The DarkNet model architecture.
Args:
nlayers(int): 19 or 53.
input_tensor(tensor): Input tensor.
input_shape(tuple, None): Shape of the input tensor, can be None.
alpha(float): The leaky rate for Leaky ReLU.
add_head(bool): Whether or not to add the ImageNet head. If not, will add dense head.
data_format(str): Data format, can be channels_first or channels_last.
kernel_regularizer: Kernel regularizer applied to the model.
bias_regularizer: Bias regularizer applied to the model.
nclasses(int): Number of classes the output will be classified into.
use_batch_norm(bool): Whether or not to use the BN layer.
freeze_bn(bool): Whether or not to freeze the BN layers.
freeze_blocks(list): the list of blocks in the model to be frozen.
use_bias(bool): Whether or not use bias for the conv layer
that is immediately before the BN layers.
force_relu(bool): Whether to use ReLU instead of LeakyReLU
    Returns:
        The DarkNet model as a Keras Model instance.
"""
if freeze_blocks is None:
freeze_blocks = []
if input_shape is None:
if data_format == 'channels_first':
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor)
else:
img_input = input_tensor
with arg_scope([_leaky_conv],
use_batch_norm=use_batch_norm,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
alpha=alpha,
padding='same',
freeze_bn=freeze_bn,
use_bias=use_bias,
force_relu=force_relu):
x = _leaky_conv(img_input, filters=32, kernel=3, strides=1,
name='conv1', trainable=not(0 in freeze_blocks))
if nlayers == 53:
x = _leaky_conv(x, filters=64, kernel=3, strides=2, name='conv2',
trainable=not(1 in freeze_blocks))
y = _leaky_conv(x, filters=32, kernel=1, strides=1, name='b1_conv1_1',
trainable=not(1 in freeze_blocks))
y = _leaky_conv(y, filters=64, kernel=3, strides=1, name='b1_conv1_2',
trainable=not(1 in freeze_blocks))
x = layers.Add(name='b1_add1')([x, y])
x = _leaky_conv(x, filters=128, kernel=3, strides=2, name='conv3',
trainable=not(2 in freeze_blocks))
for i in range(2):
y = _leaky_conv(x, filters=64, kernel=1, strides=1,
name='b2_conv{}_1'.format(i+1), trainable=not(2 in freeze_blocks))
y = _leaky_conv(y, filters=128, kernel=3, strides=1,
name='b2_conv{}_2'.format(i+1), trainable=not(2 in freeze_blocks))
x = layers.Add(name='b2_add{}'.format(i+1))([x, y])
x = _leaky_conv(x, filters=256, kernel=3, strides=2, name='conv4',
trainable=not(3 in freeze_blocks))
for i in range(8):
y = _leaky_conv(x, filters=128, kernel=1, strides=1,
name='b3_conv{}_1'.format(i+1), trainable=not(3 in freeze_blocks))
y = _leaky_conv(y, filters=256, kernel=3, strides=1,
name='b3_conv{}_2'.format(i+1), trainable=not(3 in freeze_blocks))
x = layers.Add(name='b3_add{}'.format(i+1))([x, y])
x = _leaky_conv(x, filters=512, kernel=3, strides=2, name='conv5',
trainable=not(4 in freeze_blocks))
for i in range(8):
y = _leaky_conv(x, filters=256, kernel=1, strides=1,
name='b4_conv{}_1'.format(i+1), trainable=not(4 in freeze_blocks))
y = _leaky_conv(y, filters=512, kernel=3, strides=1,
name='b4_conv{}_2'.format(i+1), trainable=not(4 in freeze_blocks))
x = layers.Add(name='b4_add{}'.format(i+1))([x, y])
x = _leaky_conv(x, filters=1024, kernel=3, strides=2, name='conv6',
trainable=not(5 in freeze_blocks))
for i in range(4):
y = _leaky_conv(x, filters=512, kernel=1, strides=1,
name='b5_conv{}_1'.format(i+1), trainable=not(5 in freeze_blocks))
y = _leaky_conv(y, filters=1024, kernel=3, strides=1,
name='b5_conv{}_2'.format(i+1), trainable=not(5 in freeze_blocks))
x = layers.Add(name='b5_add{}'.format(i+1))([x, y])
elif nlayers == 19:
x = layers.MaxPooling2D(pool_size=2, strides=2, data_format=data_format,
padding='same', name='maxpool_1')(x)
x = _leaky_conv(x, filters=64, kernel=3, strides=1, name='b1_conv1',
trainable=not(1 in freeze_blocks))
x = layers.MaxPooling2D(pool_size=2, strides=2, data_format=data_format,
padding='same', name='maxpool_2')(x)
x = _leaky_conv(x, filters=128, kernel=3, strides=1,
name='b2_conv1', trainable=not(2 in freeze_blocks))
x = _leaky_conv(x, filters=64, kernel=1, strides=1,
name='b2_conv2', trainable=not(2 in freeze_blocks))
x = _leaky_conv(x, filters=128, kernel=3, strides=1,
name='b2_conv3', trainable=not(2 in freeze_blocks))
x = layers.MaxPooling2D(pool_size=2, strides=2, data_format=data_format,
padding='same', name='maxpool_3')(x)
x = _leaky_conv(x, filters=256, kernel=3, strides=1,
name='b3_conv1', trainable=not(3 in freeze_blocks))
x = _leaky_conv(x, filters=128, kernel=1, strides=1,
name='b3_conv2', trainable=not(3 in freeze_blocks))
x = _leaky_conv(x, filters=256, kernel=3, strides=1,
name='b3_conv3', trainable=not(3 in freeze_blocks))
x = layers.MaxPooling2D(pool_size=2, strides=2, data_format=data_format,
padding='same', name='maxpool_4')(x)
x = _leaky_conv(x, filters=512, kernel=3, strides=1,
name='b4_conv1', trainable=not(4 in freeze_blocks))
x = _leaky_conv(x, filters=256, kernel=1, strides=1,
name='b4_conv2', trainable=not(4 in freeze_blocks))
x = _leaky_conv(x, filters=512, kernel=3, strides=1,
name='b4_conv3', trainable=not(4 in freeze_blocks))
x = _leaky_conv(x, filters=256, kernel=1, strides=1,
name='b4_conv4', trainable=not(4 in freeze_blocks))
x = _leaky_conv(x, filters=512, kernel=3, strides=1,
name='b4_conv5', trainable=not(4 in freeze_blocks))
x = layers.MaxPooling2D(pool_size=2, strides=2, data_format=data_format,
padding='same', name='maxpool_5')(x)
x = _leaky_conv(x, filters=1024, kernel=3, strides=1,
name='b5_conv1', trainable=not(5 in freeze_blocks))
x = _leaky_conv(x, filters=512, kernel=1, strides=1,
name='b5_conv2', trainable=not(5 in freeze_blocks))
x = _leaky_conv(x, filters=1024, kernel=3, strides=1,
name='b5_conv3', trainable=not(5 in freeze_blocks))
x = _leaky_conv(x, filters=512, kernel=1, strides=1,
name='b5_conv4', trainable=not(5 in freeze_blocks))
x = _leaky_conv(x, filters=1024, kernel=3, strides=1,
name='b5_conv5', trainable=not(5 in freeze_blocks))
else:
raise NotImplementedError('A DarkNet with nlayers=%d is not implemented.' % nlayers)
    # Optionally add the ImageNet classification head (global average pooling + softmax dense).
if add_head:
x = layers.GlobalAveragePooling2D(data_format=data_format, name='avgpool')(x)
x = layers.Dense(nclasses, activation='softmax', name='predictions',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)(x)
# Create model.
model_name = 'darknet%d' % nlayers
if use_batch_norm:
model_name += '_bn'
if add_head:
model_name += '_add_head'
model = models.Model(img_input, x, name=model_name)
return model
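# Usage sketch (illustrative): builds the DarkNet-19 ImageNet classifier;
# add_head=True appends global average pooling and a softmax Dense layer on
# top of the backbone.
if __name__ == "__main__":
    demo_model = DarkNet(19,
                         input_shape=(3, 224, 224),
                         add_head=True,
                         data_format='channels_first',
                         nclasses=1000)
    demo_model.summary()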
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/darknet.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileNet V1 and V2 models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import backend
from keras import layers
from keras import models
from nvidia_tao_tf1.core.templates.utils import _conv_block, _depthwise_conv_block, \
_inverted_res_block, _make_divisible
from nvidia_tao_tf1.core.templates.utils import arg_scope
def MobileNet(inputs,
input_shape=None,
alpha=1.0,
depth_multiplier=1,
dropout=1e-3,
stride=32,
add_head=True,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
nclasses=1000,
use_batch_norm=True,
activation_type='relu',
freeze_bn=False,
freeze_blocks=None,
use_bias=False):
"""
The MobileNet model architecture.
Args:
inputs(tensor): Input tensor.
input_shape(tuple, None): Shape of the input tensor, can be None.
alpha(float): The alpha parameter, defaults to 1.0.
depth_multiplier(int): Depth multiplier for Depthwise Conv, defaults to 1.
dropout(float): Dropout ratio.
stride(int): The total stride of this model.
add_head(bool): Whether or not to add the ImageNet head. If not, will add dense head.
data_format(str): Data format, can be channels_first or channels_last.
kernel_regularizer: Kernel regularizer applied to the model.
bias_regularizer: Bias regularizer applied to the model.
nclasses(int): Number of classes the output will be classified into.
use_batch_norm(bool): Whether or not to use the BN layer.
activation_type(str): Activation type, can be relu or relu6.
freeze_bn(bool): Whether or not to freeze the BN layers.
freeze_blocks(list): the list of blocks in the model to be frozen.
use_bias(bool): Whether or not use bias for the conv layer
that is immediately before the BN layers.
    Returns:
        The MobileNet model as a Keras Model instance.
"""
# Determine proper input shape and default size.
assert stride in [16, 32], (
"Only stride 16 and 32 are supported, got {}".format(stride)
)
old_data_format = backend.image_data_format()
backend.set_image_data_format(data_format)
if freeze_blocks is None:
freeze_blocks = []
if input_shape is None:
if backend.image_data_format() == 'channels_first':
input_shape = (3, 224, 224)
else:
input_shape = (224, 224, 3)
if inputs is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(inputs):
img_input = layers.Input(tensor=inputs, shape=input_shape)
else:
img_input = inputs
with arg_scope([_conv_block, _depthwise_conv_block],
use_batch_norm=use_batch_norm,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activation_type=activation_type,
freeze_bn=freeze_bn,
use_bias=use_bias):
x = _conv_block(img_input, 32, alpha, strides=(2, 2),
trainable=not(0 in freeze_blocks))
x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1,
trainable=not(1 in freeze_blocks))
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier,
strides=(2, 2), block_id=2,
trainable=not(2 in freeze_blocks))
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3,
trainable=not(3 in freeze_blocks))
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier,
strides=(2, 2), block_id=4,
trainable=not(4 in freeze_blocks))
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5,
trainable=not(5 in freeze_blocks))
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier,
strides=(2, 2), block_id=6,
trainable=not(6 in freeze_blocks))
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7,
trainable=not(7 in freeze_blocks))
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8,
trainable=not(8 in freeze_blocks))
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9,
trainable=not(9 in freeze_blocks))
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10,
trainable=not(10 in freeze_blocks))
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11,
trainable=not(11 in freeze_blocks))
# make it a network with a stride of 32, otherwise, the stride is 16.
if stride == 32:
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier,
strides=(2, 2), block_id=12,
trainable=not(12 in freeze_blocks))
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13,
trainable=not(13 in freeze_blocks))
if add_head:
x = layers.AveragePooling2D(pool_size=(7, 7),
data_format=data_format, padding='valid')(x)
x = layers.Flatten(name='flatten_1')(x)
x = layers.Dropout(dropout, name='dropout')(x)
x = layers.Dense(nclasses, activation='softmax', name='predictions',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)(x)
# Create model.
model_name = 'mobilenet'
if use_batch_norm:
model_name += '_bn'
if add_head:
model_name += '_add_head'
model = models.Model(img_input, x, name=model_name)
backend.set_image_data_format(old_data_format)
return model
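# Illustrative usage sketch (not part of the original template; the `l2`
# regularizer and input resolution below are assumptions): building a
# MobileNet v1 ImageNet classifier with the standalone `keras` package
# used by this module.
#
#   from keras import layers, regularizers
#   inputs = layers.Input(shape=(3, 224, 224))
#   model = MobileNet(inputs,
#                     alpha=1.0,
#                     stride=32,
#                     add_head=True,
#                     nclasses=1000,
#                     kernel_regularizer=regularizers.l2(1e-4))
#   model.summary()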
def MobileNetV2(inputs,
input_shape=None,
alpha=1.0,
depth_multiplier=1,
stride=32,
add_head=True,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
use_batch_norm=True,
activation_type='relu',
all_projections=False,
nclasses=1000,
freeze_bn=False,
freeze_blocks=None,
use_bias=False):
"""
The MobileNet V2 model architecture.
Args:
inputs(tensor): Input tensor.
input_shape(tuple, None): Shape of the input tensor, can be None.
alpha(float): The alpha parameter, defaults to 1.0.
depth_multiplier(int): Depth multiplier for Depthwise Conv, defaults to 1.
stride(int): The total stride of this model.
        add_head(bool): Whether or not to add the ImageNet classification head (average
            pooling, flatten and dense softmax layers). If False, the model ends at the
            last convolutional feature map.
data_format(str): Data format, can be channels_first or channels_last.
kernel_regularizer: Kernel regularizer applied to the model.
bias_regularizer: Bias regularizer applied to the model.
nclasses(int): Number of classes the output will be classified into.
use_batch_norm(bool): Whether or not to use the BN layer.
activation_type(str): Activation type, can be relu or relu6.
freeze_bn(bool): Whether or not to freeze the BN layers.
        freeze_blocks(list): The list of blocks in the model to be frozen.
        all_projections(bool): Whether or not to use projection layers for all the shortcut
            connections in the inverted residual blocks.
        use_bias(bool): Whether or not to use bias for the conv layer
            that is immediately before the BN layers.
Returns:
        The Keras model.
"""
assert stride in [16, 32], (
"Only stride 16 and 32 are supported, got {}".format(stride)
)
old_data_format = backend.image_data_format()
backend.set_image_data_format(data_format)
if freeze_blocks is None:
freeze_blocks = []
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
if input_shape is None:
if backend.image_data_format() == 'channels_first':
input_shape = (3, 224, 224)
else:
input_shape = (224, 224, 3)
if inputs is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(inputs):
img_input = layers.Input(tensor=inputs, shape=input_shape)
else:
img_input = inputs
first_block_filters = _make_divisible(32 * alpha, 8)
# Use explicit padding.
x = layers.ZeroPadding2D((1, 1), name='conv1_pad')(img_input)
x = layers.Conv2D(first_block_filters,
kernel_size=3,
strides=(2, 2),
padding='valid',
use_bias=use_bias,
name='conv1',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not(0 in freeze_blocks))(x)
if use_batch_norm:
if freeze_bn:
x = layers.BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name='bn_conv1')(x, training=False)
else:
x = layers.BatchNormalization(axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name='bn_conv1')(x)
if activation_type == 'relu6':
x = layers.ReLU(6., name='re_lu_0')(x)
else:
x = layers.ReLU(name='re_lu_0')(x)
with arg_scope([_inverted_res_block],
use_batch_norm=use_batch_norm,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activation_type=activation_type,
all_projections=all_projections,
use_bias=use_bias,
freeze_bn=freeze_bn):
x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,
expansion=1, block_id=0,
trainable=not(1 in freeze_blocks))
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,
expansion=6, block_id=1,
trainable=not(2 in freeze_blocks))
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,
expansion=6, block_id=2,
trainable=not(3 in freeze_blocks))
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,
expansion=6, block_id=3,
trainable=not(4 in freeze_blocks))
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=4,
trainable=not(5 in freeze_blocks))
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=5,
trainable=not(6 in freeze_blocks))
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=2,
expansion=6, block_id=6,
trainable=not(7 in freeze_blocks))
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
expansion=6, block_id=7,
trainable=not(8 in freeze_blocks))
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
expansion=6, block_id=8,
trainable=not(9 in freeze_blocks))
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
expansion=6, block_id=9,
trainable=not(10 in freeze_blocks))
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
expansion=6, block_id=10,
trainable=not(11 in freeze_blocks))
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
expansion=6, block_id=11,
trainable=not(12 in freeze_blocks))
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
expansion=6, block_id=12,
trainable=not(13 in freeze_blocks))
if stride == 32:
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=2,
expansion=6, block_id=13,
trainable=not(14 in freeze_blocks))
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1,
expansion=6, block_id=14,
trainable=not(15 in freeze_blocks))
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1,
expansion=6, block_id=15,
trainable=not(16 in freeze_blocks))
x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1,
expansion=6, block_id=16,
trainable=not(17 in freeze_blocks))
# no alpha applied to last conv as stated in the paper:
# if the width multiplier is greater than 1 we
# increase the number of output channels
if alpha > 1.0:
last_block_filters = _make_divisible(1280 * alpha, 8)
else:
last_block_filters = 1280
x = layers.Conv2D(last_block_filters,
kernel_size=1,
use_bias=use_bias,
name='conv_1',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not(18 in freeze_blocks))(x)
if use_batch_norm:
if freeze_bn:
x = layers.BatchNormalization(epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
name='conv_1_bn')(x, training=False)
else:
x = layers.BatchNormalization(epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
name='conv_1_bn')(x)
if activation_type == 'relu6':
x = layers.ReLU(6., name='re_lu_head')(x)
else:
x = layers.ReLU(name='re_lu_head')(x)
if add_head:
x = layers.AveragePooling2D(pool_size=(7, 7),
data_format=data_format,
padding='valid')(x)
x = layers.Flatten(name='flatten_1')(x)
x = layers.Dense(nclasses,
activation='softmax',
name='predictions',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)(x)
# Create model.
model_name = 'mobilenet_v2'
if use_batch_norm:
model_name += '_bn'
if add_head:
model_name += '_add_head'
model = models.Model(img_input, x, name=model_name)
backend.set_image_data_format(old_data_format)
return model
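# Illustrative usage sketch (the input resolution is an assumption): using
# MobileNetV2 as a stride-16 feature extractor without the classification
# head, e.g. as a detection backbone.
#
#   from keras import layers
#   inputs = layers.Input(shape=(3, 544, 960))
#   backbone = MobileNetV2(inputs,
#                          stride=16,
#                          add_head=False,
#                          all_projections=True,   # keep shortcuts prunable
#                          freeze_blocks=[0, 1])   # freeze the stem conv and first block
#   features = backbone.output                     # last convolutional feature map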
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/mobilenet.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shufflenet Encoder model template class."""
import keras
from keras import backend as K
from keras.engine.topology import get_source_inputs
from keras.layers import Activation, Add, Concatenate, Input
from keras.layers import AveragePooling2D, BatchNormalization, Conv2D, MaxPooling2D
from keras.layers import DepthwiseConv2D
from keras.models import Model
from keras_applications.imagenet_utils import _obtain_input_shape
import numpy as np
K.set_image_data_format('channels_first')
def ShuffleNet(include_top=False, input_tensor=None, scale_factor=1.0, pooling='max',
input_shape=(224, 224, 3), groups=1, load_model=None, bottleneck_ratio=0.25,
classes=1000):
"""ShuffleNet implementation.
ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices
Xiangyu Zhang, Xinyu Zhou, Mengxiao Lin, Jian Sun
https://arxiv.org/pdf/1707.01083.pdf
    Note that only the TensorFlow backend is currently supported. This
    implementation uses the `channels_first` data format, which is set
    explicitly at the top of this module.
Args:
        include_top: bool(False)
whether to include the fully-connected layer at the top of the network.
input_tensor:
optional Keras tensor (i.e. output of `layers.Input()`) to use as
image input for the model.
scale_factor:
scales the number of output channels
        input_shape:
            optional shape tuple of the input tensor, e.g. (3, 224, 224) for `channels_first`
pooling:
Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
groups: int
number of groups per channel
num_shuffle_units: list([3,7,3])
number of stages (list length) and the number of shufflenet units in a
stage beginning with stage 2 because stage 1 is fixed
e.g. idx 0 contains 3 + 1 (first shuffle unit in each stage differs)
shufflenet units for stage 2 idx 1 contains 7 + 1 Shufflenet Units
for stage 3 and idx 2 contains 3 + 1 Shufflenet Units
bottleneck_ratio:
bottleneck ratio implies the ratio of bottleneck channels to output channels.
For example, bottleneck ratio = 1 : 4 means the output feature map is 4 times
the width of the bottleneck feature map.
classes: int(1000)
number of classes to predict
"""
if K.backend() != 'tensorflow':
raise RuntimeError('Only TensorFlow backend is currently supported, '
                           'as other backends do not support the required operations.')
num_shuffle_units = [3, 7, 3]
name = "ShuffleNet_%.2gX_g%d_br_%.2g_%s" % (scale_factor, groups, bottleneck_ratio,
"".join([str(x) for x in num_shuffle_units]))
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=28,
require_flatten=include_top,
data_format='channels_first')
out_dim_stage_two = {1: 144, 2: 200, 3: 240, 4: 272, 8: 384}
if groups not in out_dim_stage_two:
raise ValueError("Invalid number of groups.")
if pooling not in ['max', 'avg']:
raise ValueError("Invalid value for pooling.")
if not (float(scale_factor) * 4).is_integer():
        raise ValueError("Invalid value for scale_factor: it must be a multiple of 0.25.")
exp = np.insert(np.arange(0, len(num_shuffle_units), dtype=np.float32), 0, 0)
out_channels_in_stage = 2 ** exp
out_channels_in_stage *= out_dim_stage_two[groups] # calculate output channels for each stage
out_channels_in_stage[0] = 24 # first stage has always 24 output channels
out_channels_in_stage *= scale_factor
out_channels_in_stage = out_channels_in_stage.astype(int)
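    # Worked example (groups=3, scale_factor=1.0, num_shuffle_units=[3, 7, 3]):
    # exp = [0, 0, 1, 2] -> 2 ** exp = [1, 1, 2, 4] -> * 240 = [240, 240, 480, 960]
    # -> first entry fixed to 24 -> out_channels_in_stage = [24, 240, 480, 960].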
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# create shufflenet architecture
x = Conv2D(filters=out_channels_in_stage[0], kernel_size=(3, 3), padding='same',
use_bias=False, strides=(2, 2), activation="relu", name="conv1",
data_format='channels_first')(img_input)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same',
name="maxpool1", data_format='channels_first')(x)
# create stages containing shufflenet units beginning at stage 2
for stage in range(0, len(num_shuffle_units)):
repeat = num_shuffle_units[stage]
x = _block(x, out_channels_in_stage, repeat=repeat,
bottleneck_ratio=bottleneck_ratio,
groups=groups, stage=stage + 2)
x = keras.layers.Conv2D(filters=classes, kernel_size=(1, 1), name="score_fr",
data_format="channels_first")(x)
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
model = Model(inputs=inputs, outputs=x, name=name)
if load_model is not None:
        model.load_weights(load_model, by_name=True)
return model
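# Illustrative usage sketch (not part of the original module): a ShuffleNet 1x
# with 3 groups on a channels-first 224x224 input.
#
#   shufflenet = ShuffleNet(input_shape=(3, 224, 224),
#                           scale_factor=1.0,
#                           groups=3,
#                           bottleneck_ratio=0.25,
#                           classes=1000)
#   shufflenet.summary()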
def _block(x, channel_map, bottleneck_ratio, repeat=1, groups=1, stage=1):
"""Creates a bottleneck block containing `repeat + 1` shuffle units.
Args:
x:
            Input tensor with `channels_first` data format
channel_map: list
list containing the number of output channels for a stage
repeat: int(1)
number of repetitions for a shuffle unit with stride 1
groups: int(1)
number of groups per channel
bottleneck_ratio: float
bottleneck ratio implies the ratio of bottleneck channels to output channels.
For example, bottleneck ratio = 1 : 4 means the output feature map is 4 times
the width of the bottleneck feature map.
stage: int(1)
stage number
"""
x = _shuffle_unit(x, in_channels=channel_map[stage - 2],
out_channels=channel_map[stage - 1], strides=2,
groups=groups, bottleneck_ratio=bottleneck_ratio,
stage=stage, block=1)
for i in range(1, repeat + 1):
x = _shuffle_unit(x, in_channels=channel_map[stage - 1],
out_channels=channel_map[stage - 1], strides=1,
groups=groups, bottleneck_ratio=bottleneck_ratio,
stage=stage, block=(i + 1))
return x
def _shuffle_unit(inputs, in_channels, out_channels, groups, bottleneck_ratio,
strides=2, stage=1, block=1):
"""Creates a shuffleunit.
Args:
inputs:
            Input tensor with `channels_first` data format
in_channels:
number of input channels
out_channels:
number of output channels
strides:
An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
groups: int(1)
number of groups per channel
bottleneck_ratio: float
bottleneck ratio implies the ratio of bottleneck channels to output channels.
For example, bottleneck ratio = 1 : 4 means the output feature map is 4 times
the width of the bottleneck feature map.
stage: int(1)
stage number
block: int(1)
block number
"""
if K.image_data_format() == 'channels_last':
bn_axis = -1
else:
bn_axis = 1
prefix = 'stage%d/block%d' % (stage, block)
# default: 1/4 of the output channel of a ShuffleNet Unit
bottleneck_channels = int(out_channels * bottleneck_ratio)
groups = (1 if stage == 2 and block == 1 else groups)
x = _group_conv(inputs, in_channels, out_channels=bottleneck_channels,
groups=(1 if stage == 2 and block == 1 else groups),
name='%s/1x1_gconv_1' % prefix)
x = BatchNormalization(axis=bn_axis, name='%s/bn_gconv_1' % prefix)(x)
x = Activation('relu', name='%s/relu_gconv_1' % prefix)(x)
x = ChannelShuffle(groups=groups)(x)
x = DepthwiseConv2D(kernel_size=(3, 3), padding="same", use_bias=False,
strides=strides, data_format="channels_first",
name='%s/1x1_dwconv_1' % prefix)(x)
x = BatchNormalization(axis=bn_axis, name='%s/bn_dwconv_1' % prefix)(x)
x = _group_conv(x, bottleneck_channels, out_channels=out_channels
if strides == 1 else out_channels - in_channels,
groups=groups, name='%s/1x1_gconv_2' % prefix)
x = BatchNormalization(axis=bn_axis, name='%s/bn_gconv_2' % prefix)(x)
if strides < 2:
ret = Add(name='%s/add' % prefix)([x, inputs])
else:
avg = AveragePooling2D(pool_size=3, strides=2, padding='same',
name='%s/avg_pool' % prefix)(inputs)
ret = Concatenate(bn_axis, name='%s/concat' % prefix)([x, avg])
ret = Activation('relu', name='%s/relu_out' % prefix)(ret)
return ret
class GroupLayer(keras.layers.Layer):
"""Group Layer Class."""
def __init__(self, offset=0, ig=0, **kwargs):
"""Init function.
Args:
offset (int): Offset to sample the input.
ig (int): Number of input channels per groups.
"""
self.ig = ig
self.offset = offset
super(GroupLayer, self).__init__(**kwargs)
def call(self, inputs):
"""Function to construct the input."""
return inputs[:, self.offset: self.offset + self.ig, :, :]
class ChannelShuffle(keras.layers.Layer):
"""Channel Shuffle Class."""
def __init__(self, groups=1, **kwargs):
"""Init function.
Args:
groups (int): No. of groups for the group convolution.
"""
self.groups = groups
super(ChannelShuffle, self).__init__(**kwargs)
def call(self, inputs):
"""Function to Shuffle the channels in the input."""
x = K.permute_dimensions(inputs, (0, 2, 3, 1)) # Made tensor channel last
height, width, in_channels = x.shape.as_list()[1:]
channels_per_group = in_channels // self.groups
x = K.reshape(x, [-1, height, width, self.groups, channels_per_group])
x = K.permute_dimensions(x, (0, 1, 2, 4, 3)) # transpose
x = K.reshape(x, [-1, height, width, in_channels]) # bs x h x w x c
x = K.permute_dimensions(x, (0, 3, 1, 2))
return x
def _group_conv(x, in_channels, out_channels, groups, kernel=1, stride=1, name=''):
"""Grouped convolution.
Args:
x:
            Input tensor with `channels_first` data format
in_channels:
number of input channels
out_channels:
number of output channels
groups:
number of groups per channel
kernel: int(1)
An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
stride: int(1)
An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for all spatial dimensions.
name: str
A string to specifies the layer name
"""
if groups == 1:
return Conv2D(filters=out_channels, kernel_size=kernel, padding='same',
use_bias=False, strides=stride,
name=name, data_format="channels_first")(x)
    # number of input channels per group
    ig = in_channels // groups
    assert out_channels % groups == 0
    # number of output channels per group
    og = out_channels // groups
    group_list = []
    for i in range(groups):
        offset = i * ig
        group = GroupLayer(offset=offset, ig=ig)(x)
        group_list.append(Conv2D(filters=og, kernel_size=kernel, padding='same',
                                 use_bias=False, strides=stride,
                                 name='%s_/g%d' % (name, i),
                                 data_format="channels_first")(group))
    return Concatenate(axis=1, name='%s/concat' % name)(group_list)
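# Channel bookkeeping sketch for _group_conv (illustrative, hypothetical sizes):
# a 240-channel input with groups=3 is sliced into 3 groups of ig = 80 channels,
# each group is convolved to og = out_channels // 3 feature maps, and the
# per-group outputs are concatenated back along the channel axis.
#
#   y = _group_conv(x, in_channels=240, out_channels=240, groups=3,
#                   name='stage3/block2/1x1_gconv_1')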
def channel_shuffle(x, groups):
"""Shuffle the Channels by grouping.
Args:
x:
            Input tensor with `channels_first` data format
groups: int
number of groups per channel
Returns:
channel shuffled output tensor
Examples:
Example for a 1D Array with 3 groups
>>> d = np.array([0,1,2,3,4,5,6,7,8])
>>> x = np.reshape(d, (3,3))
>>> x = np.transpose(x, [1,0])
>>> x = np.reshape(x, (9,))
'[0 1 2 3 4 5 6 7 8] --> [0 3 6 1 4 7 2 5 8]'
"""
x = K.permute_dimensions(x, (0, 2, 3, 1)) # Made tensor channel last
height, width, in_channels = x.shape.as_list()[1:]
channels_per_group = in_channels // groups
x = K.reshape(x, [-1, height, width, groups, channels_per_group])
x = K.permute_dimensions(x, (0, 1, 2, 4, 3)) # transpose
x = K.reshape(x, [-1, height, width, in_channels]) # bs x h x w x c
x = K.permute_dimensions(x, (0, 3, 1, 2))
return x
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/shufflenet.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus model templates for ResNets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from nvidia_tao_tf1.core.models.import_keras import keras as keras_fn
from nvidia_tao_tf1.core.templates.utils import add_activation
from nvidia_tao_tf1.core.templates.utils import add_dense_head
from nvidia_tao_tf1.core.templates.utils import arg_scope
from nvidia_tao_tf1.core.templates.utils import CNNBlock
from nvidia_tao_tf1.core.templates.utils import get_batchnorm_axis
keras = keras_fn()
K = keras.backend
def ResNet(nlayers,
input_tensor=None,
use_batch_norm=False,
data_format=None,
add_head=False,
head_activation='softmax',
nclasses=None,
kernel_regularizer=None,
bias_regularizer=None,
activation_type='relu',
activation_kwargs=None,
all_projections=False,
freeze_blocks=None,
freeze_bn=False,
use_pooling=True,
use_bias=True):
"""
Construct a fixed-depth vanilla ResNet, based on the architectures from the original paper [1].
Args:
nlayers (int): the number of layers in the desired ResNet (e.g. 18, 34, ..., 152).
        input_tensor (tensor): the input tensor.
use_batch_norm (bool): whether batchnorm should be added after each convolution.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
        add_head (bool): whether to add the original [1] classification head. Note that if you
            don't include the head, the actual number of layers in the model produced by this
            function is `nlayers - 1`.
head_activation (string): Activation function for classification head.
nclasses (int): the number of classes to be added to the classification head. Can be `None`
if unused.
kernel_regularizer: regularizer to apply to kernels.
bias_regularizer: regularizer to apply to biases.
activation_type (str): Type of activation.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
all_projections (bool): whether to implement cnn subblocks with all shortcuts connections
forced as 1x1 convolutional layers as mentioned in [1] to enable full pruning of
ResNets. If set as False, the template instantiated will be the classic ResNet template
as in [1] with shortcut connections as skip connections when there is no stride change
and 1x1 convolutional layers (projection layers) when there is a stride change.
            Note: The classic template cannot be fully pruned. Only the first N-1 layers in
            each ResNet subblock can be pruned; all other layers, including the conv1 layer,
            must be added to the excluded-layers list when pruning.
freeze_bn(bool): Whether or not to freeze the BN layers.
freeze_blocks(list): the list of blocks in the model to be frozen.
use_pooling (bool): whether to use MaxPooling2D layer after first conv layer or use a
stride of 2 for first convolutional layer in subblock
use_bias(bool): Whether or not to use bias for the conv layers.
Returns:
Model: the output model after applying the ResNet on top of input `x`.
[1] Deep Residual Learning for Image Recognition (https://arxiv.org/abs/1512.03385)
"""
if freeze_blocks is None:
freeze_blocks = []
# Determine proper input shape
if K.image_data_format() == 'channels_first':
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
if input_tensor is None:
inputs = keras.layers.Input(shape=input_shape)
else:
inputs = input_tensor
if not os.environ.get("TF_KERAS", "0") == "1":
if not K.is_keras_tensor(input_tensor):
inputs = keras.layers.Input(tensor=input_tensor, shape=input_shape)
freeze0 = 0 in freeze_blocks
freeze1 = 1 in freeze_blocks
freeze2 = 2 in freeze_blocks
freeze3 = 3 in freeze_blocks
freeze4 = 4 in freeze_blocks
if data_format is None:
data_format = K.image_data_format()
activation_kwargs = activation_kwargs or {}
x = keras.layers.Conv2D(
64, (7, 7),
strides=(2, 2),
padding='same',
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name='conv1',
trainable=not freeze0,
use_bias=use_bias)(inputs)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
axis=get_batchnorm_axis(data_format),
name='bn_conv1')(x, training=False)
else:
x = keras.layers.BatchNormalization(
axis=get_batchnorm_axis(data_format),
name='bn_conv1')(x)
x = add_activation(activation_type, **activation_kwargs)(x)
first_stride = 2 # Setting stride 1st convolutional subblock.
last_stride = 1 # Setting stride last convolutional subblock.
if use_pooling:
x = keras.layers.MaxPooling2D(
pool_size=(3, 3), strides=(2, 2), padding='same', data_format=data_format)(x)
first_stride = 1
last_stride = 2
# Define a block functor which can create blocks.
with arg_scope(
[CNNBlock],
use_batch_norm=use_batch_norm,
all_projections=all_projections,
use_shortcuts=True,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
freeze_bn=freeze_bn,
activation_kwargs={},
use_bias=use_bias):
if nlayers == 10:
x = CNNBlock(repeat=1, stride=first_stride,
subblocks=[(3, 64), (3, 64)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=1, stride=2,
subblocks=[(3, 128), (3, 128)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=1, stride=2,
subblocks=[(3, 256), (3, 256)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=1, stride=last_stride,
subblocks=[(3, 512), (3, 512)],
index=4, freeze_block=freeze4)(x)
elif nlayers == 18:
x = CNNBlock(repeat=2, stride=first_stride,
subblocks=[(3, 64), (3, 64)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=2, stride=2,
subblocks=[(3, 128), (3, 128)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=2, stride=2,
subblocks=[(3, 256), (3, 256)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=2, stride=last_stride,
subblocks=[(3, 512), (3, 512)],
index=4, freeze_block=freeze4)(x)
elif nlayers == 34:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(3, 64), (3, 64)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=4, stride=2,
subblocks=[(3, 128), (3, 128)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=6, stride=2,
subblocks=[(3, 256), (3, 256)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=3, stride=last_stride,
subblocks=[(3, 512), (3, 512)],
index=4, freeze_block=freeze4)(x)
elif nlayers == 50:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(1, 64), (3, 64), (1, 256)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=4, stride=2,
subblocks=[(1, 128), (3, 128), (1, 512)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=6, stride=2,
subblocks=[(1, 256), (3, 256), (1, 1024)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=3, stride=last_stride,
subblocks=[(1, 512), (3, 512), (1, 2048)],
index=4, freeze_block=freeze4)(x)
elif nlayers == 101:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(1, 64), (3, 64), (1, 256)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=4, stride=2,
subblocks=[(1, 128), (3, 128), (1, 512)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=23, stride=2,
subblocks=[(1, 256), (3, 256), (1, 1024)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=3, stride=last_stride,
subblocks=[(1, 512), (3, 512), (1, 2048)],
index=4, freeze_block=freeze4)(x)
elif nlayers == 152:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(1, 64), (3, 64), (1, 256)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=8, stride=2,
subblocks=[(1, 128), (3, 128), (1, 512)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=36, stride=2,
subblocks=[(1, 256), (3, 256), (1, 1024)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=3, stride=last_stride,
subblocks=[(1, 512), (3, 512), (1, 2048)],
index=4, freeze_block=freeze4)(x)
elif nlayers == 200:
x = CNNBlock(repeat=3, stride=first_stride,
subblocks=[(1, 64), (3, 64), (1, 256)],
index=1, freeze_block=freeze1)(x)
x = CNNBlock(repeat=24, stride=2,
subblocks=[(1, 128), (3, 128), (1, 512)],
index=2, freeze_block=freeze2)(x)
x = CNNBlock(repeat=36, stride=2,
subblocks=[(1, 256), (3, 256), (1, 1024)],
index=3, freeze_block=freeze3)(x)
x = CNNBlock(repeat=3, stride=last_stride,
subblocks=[(1, 512), (3, 512), (1, 2048)],
index=4, freeze_block=freeze4)(x)
else:
raise NotImplementedError('A resnet with nlayers=%d is not implemented.' % nlayers)
# Add keras.layers.AveragePooling2D layer if use_pooling is enabled after resnet block.
if use_pooling:
x = keras.layers.AveragePooling2D(
pool_size=(7, 7),
data_format=data_format, padding='same')(x)
# Naming model.
model_name = 'resnet%d' % nlayers
if not use_pooling:
model_name += '_nopool'
if use_batch_norm:
model_name += '_bn'
# Set up keras model object.
model = keras.models.Model(inputs=inputs, outputs=x, name=model_name)
# Add a dense head of nclasses if enabled.
if add_head:
model = add_dense_head(model, inputs, nclasses, head_activation)
return model
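# Illustrative usage sketch (the input resolution is an assumption): a prunable
# ResNet-18 feature extractor without pooling or classification head.
#
#   inputs = keras.layers.Input(shape=(3, 544, 960))
#   resnet18 = ResNet(nlayers=18,
#                     input_tensor=inputs,
#                     use_batch_norm=True,
#                     all_projections=True,   # 1x1 projections on every shortcut
#                     use_pooling=False,      # stride-2 convs instead of max pooling
#                     add_head=False)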
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/resnet.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA utilities for tf model templates."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import inspect
import math
import os
import re
import threading
import tensorflow as tf
from nvidia_tao_tf1.core.models.import_keras import keras as keras_fn
from nvidia_tao_tf1.cv.yolo_v4.layers.split import Split
keras = keras_fn()
bn_axis_map = {'channels_last': 3, 'channels_first': 1}
SUBBLOCK_IDS = ['1x1', '3x3_reduce', '3x3', '5x5_reduce', '5x5', 'pool', 'pool_proj']
_ARGSTACK = [{}]
_DECORATED_OPS = {}
def _get_arg_stack():
if _ARGSTACK:
return _ARGSTACK
_ARGSTACK.append({})
return _ARGSTACK
def _current_arg_scope():
stack = _get_arg_stack()
return stack[-1]
def _key_op(op):
return getattr(op, "_key_op", str(op))
def _name_op(op):
return (op.__module__, op.__name__)
def _kwarg_names(func):
kwargs_length = len(func.__defaults__) if func.__defaults__ else 0
return func.__code__.co_varnames[-kwargs_length : func.__code__.co_argcount]
def _add_op(op):
key_op = _key_op(op)
if key_op not in _DECORATED_OPS:
_DECORATED_OPS[key_op] = _kwarg_names(op)
def get_batchnorm_axis(data_format):
"""Convert a data_format string to the correct index in a 4 dimensional tensor.
Args:
data_format (str): either 'channels_last' or 'channels_first'.
Returns:
int: the axis corresponding to the `data_format`.
"""
return bn_axis_map[data_format]
def has_arg_scope(func):
"""Check whether a func has been decorated with @add_arg_scope or not.
Args:
func: function to check.
Returns:
a boolean.
"""
return _key_op(func) in _DECORATED_OPS
@contextlib.contextmanager
def arg_scope(list_ops_or_scope, **kwargs):
"""Store the default arguments for the given set of list_ops.
For usage, please see examples at top of the file.
Args:
list_ops_or_scope: List or tuple of operations to set argument scope for or
a dictionary containing the current scope. When list_ops_or_scope is a
dict, kwargs must be empty. When list_ops_or_scope is a list or tuple,
        then every op in it needs to be decorated with @add_arg_scope to work.
**kwargs: keyword=value that will define the defaults for each op in
list_ops. All the ops need to accept the given set of arguments.
Yields:
the current_scope, which is a dictionary of {op: {arg: value}}
Raises:
TypeError: if list_ops is not a list or a tuple.
        ValueError: if any op in list_ops has not been decorated with @add_arg_scope.
"""
if isinstance(list_ops_or_scope, dict):
# Assumes that list_ops_or_scope is a scope that is being reused.
if kwargs:
            raise ValueError(
                "When attempting to re-use a scope by supplying a "
                "dictionary, kwargs must be empty."
)
current_scope = list_ops_or_scope.copy()
try:
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
else:
# Assumes that list_ops_or_scope is a list/tuple of ops with kwargs.
if not isinstance(list_ops_or_scope, (list, tuple)):
            raise TypeError(
                "list_ops_or_scope must either be a list/tuple or a reused "
                "scope (i.e. dict)."
)
try:
current_scope = _current_arg_scope().copy()
for op in list_ops_or_scope:
if inspect.isclass(op):
# If we decorated a class, use the scope on the initializer
op = op.__init__
key_op = _key_op(op)
if not has_arg_scope(op):
raise ValueError(
"%s::%s is not decorated with @add_arg_scope" % _name_op(op)
)
if key_op in current_scope:
current_kwargs = current_scope[key_op].copy()
current_kwargs.update(kwargs)
current_scope[key_op] = current_kwargs
else:
current_scope[key_op] = kwargs.copy()
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
def add_arg_scope(func):
"""Decorate a function with args so it can be used within an arg_scope.
Args:
func: function to decorate.
Returns:
        The decorated function func_with_args().
"""
@functools.wraps(func)
def func_with_args(*args, **kwargs):
current_scope = _current_arg_scope()
current_args = kwargs
key_func = _key_op(func)
if key_func in current_scope:
current_args = current_scope[key_func].copy()
current_args.update(kwargs)
return func(*args, **current_args)
_add_op(func)
setattr(func_with_args, "_key_op", _key_op(func))
setattr(func_with_args, "__doc__", func.__doc__)
return func_with_args
def arg_scoped_arguments(func):
    """Return the list of kwargs that arg_scope can set for a func.
Args:
func: function which has been decorated with @add_arg_scope.
Returns:
a list of kwargs names.
"""
assert has_arg_scope(func)
return _DECORATED_OPS[_key_op(func)]
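# Illustrative usage sketch for arg_scope / add_arg_scope (the helper `my_conv`
# and its arguments are hypothetical):
#
#   @add_arg_scope
#   def my_conv(x, filters, kernel_regularizer=None, use_batch_norm=True):
#       ...
#
#   with arg_scope([my_conv], kernel_regularizer=some_l2, use_batch_norm=False):
#       y = my_conv(x, 64)                         # picks up the scoped defaults
#       z = my_conv(y, 128, use_batch_norm=True)   # explicit kwargs still win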
class subblock_ids(object):
    """An operator to get the index of a subblock; overloads the [] operation."""
def __getitem__(self, key):
"""
Generate a subblock ID and return.
Args:
key (int): an index used to generate the subblock ID.
"""
cur = key
subblock_id = ''
while cur >= 0:
ch = chr(ord('a') + cur % 26)
subblock_id = ch + subblock_id
cur = cur // 26 - 1
return subblock_id
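# Example of the generated IDs (illustrative): an instance of subblock_ids maps
# 0 -> 'a', 1 -> 'b', ..., 25 -> 'z', 26 -> 'aa', 27 -> 'ab', mirroring
# spreadsheet-style column naming.
#
#   ids = subblock_ids()
#   assert ids[0] == 'a' and ids[26] == 'aa'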
class InceptionV1Block(object):
    """A functor for creating an Inception v1 block of layers."""
@add_arg_scope
def __init__(self,
use_batch_norm,
data_format,
kernel_regularizer,
bias_regularizer,
subblocks,
index,
freeze_bn=False,
activation_type='relu',
use_bias=True,
trainable=True,
use_td=False):
"""
Initialization of the block functor object.
Args:
use_batch_norm (bool): whether batchnorm should be added after each convolution.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
kernel_regularizer (float): regularizer to apply to kernels.
bias_regularizer (float): regularizer to apply to biases.
            subblocks (tuple): A tuple of size 6, defining the number of feature maps for
                the subblocks in an Inception block.
                For GoogLeNet from "Going deeper with convolutions" by Szegedy, Christian, et al.,
                Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2015
Inception_3a: (64, 96, 128, 16, 32, 32)
Defines Inception block with following parallel branches
1) 64 outputs from 1x1 convolutions
2.1) 96 outputs from 1x1 convolutions --> 2.2) 128 outputs from 3x3 convolutions
3.1) 16 outputs from 1x1 convolutions --> 3.2) 32 outputs from 5x5 convolutions
4.1) Max pooling with 3x3 pooling size --> 4.2) 32 outputs from 1x1 convolutions
the outputs of 1, 2.2, 3.2, and 4.2 are concatenated to produce final output.
index (int): the index of the block to be created.
activation_type (str): activation function type.
freeze_bn(bool): Whether or not to freeze the BN layer.
use_bias(bool): Whether or not to use bias for Conv/Dense, etc.
trainable(bool): Whether or not to set the weights to be trainable.
use_td(bool): Whether or not to wrap the layers into a TimeDistributed layer.
This is useful in FasterRCNN.
"""
self.use_batch_norm = use_batch_norm
self.data_format = data_format
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.activation_type = activation_type
self.subblocks = subblocks
self.index = index
self.name = 'inception_%s' % index
self.freeze_bn = freeze_bn
self.use_bias = use_bias
self.trainable = trainable
self.use_td = use_td
def __call__(self, x):
"""Build the block.
Args:
x (tensor): input tensor.
Returns:
tensor: the output tensor after applying the block on top of input `x`.
"""
x = self._subblocks(x, name_prefix=self.name)
return x
def _subblocks(self, x, name_prefix=None):
"""
Stack several convolutions in a specific sequence given by a list of subblocks.
Args:
x (tensor): the input tensor.
name_prefix (str): name prefix for all the layers created in this function.
Returns:
            tensor: the output tensor after applying the Inception block on top of input `x`.
"""
nblocks = len(self.subblocks)
        if nblocks != 6:
            print("Inception V1 block must have 6 subblocks")
            return x
if self.use_batch_norm:
bn_axis = get_batchnorm_axis(self.data_format)
# First branch is 1x1 conv with padding = 0, and stride = 1
layer = keras.layers.Conv2D(
self.subblocks[0],
(1, 1),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%s_%s' % (name_prefix, SUBBLOCK_IDS[0]),
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x1 = layer(x)
if self.use_batch_norm:
_name = '%s_%s_bn' % (name_prefix, SUBBLOCK_IDS[0])
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
x1 = layer(x1, training=False)
else:
x1 = layer(x1)
x1 = keras.layers.Activation(self.activation_type)(x1)
# Second branch is 1x1 conv with padding = 0, and stride = 1
layer = keras.layers.Conv2D(
self.subblocks[1],
(1, 1),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%s_%s' % (name_prefix, SUBBLOCK_IDS[1]),
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x2 = layer(x)
if self.use_batch_norm:
_name = '%s_%s_bn' % (name_prefix, SUBBLOCK_IDS[1])
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
x2 = layer(x2, training=False)
else:
x2 = layer(x2)
x2 = keras.layers.Activation(self.activation_type)(x2)
        # 3x3 conv (stride 1, same padding) completing the second branch
layer = keras.layers.Conv2D(
self.subblocks[2],
(3, 3),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%s_%s' % (name_prefix, SUBBLOCK_IDS[2]),
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x2 = layer(x2)
if self.use_batch_norm:
_name = '%s_%s_bn' % (name_prefix, SUBBLOCK_IDS[2])
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
x2 = layer(x2, training=False)
else:
x2 = layer(x2)
x2 = keras.layers.Activation(self.activation_type)(x2)
# Third branch is 1x1 conv with stride = 1
layer = keras.layers.Conv2D(
self.subblocks[3],
(1, 1),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%s_%s' % (name_prefix, SUBBLOCK_IDS[3]),
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x3 = layer(x)
if self.use_batch_norm:
_name = '%s_%s_bn' % (name_prefix, SUBBLOCK_IDS[3])
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
x3 = layer(x3, training=False)
else:
x3 = layer(x3)
x3 = keras.layers.Activation(self.activation_type)(x3)
        # 5x5 conv (stride 1, same padding) completing the third branch
layer = keras.layers.Conv2D(
self.subblocks[4],
(5, 5),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%s_%s' % (name_prefix, SUBBLOCK_IDS[4]),
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x3 = layer(x3)
if self.use_batch_norm:
_name = '%s_%s_bn' % (name_prefix, SUBBLOCK_IDS[4])
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
x3 = layer(x3, training=False)
else:
x3 = layer(x3)
x3 = keras.layers.Activation(self.activation_type)(x3)
# Fourth branch is max pool stride = 1, and a 1x1 conv
layer = keras.layers.MaxPooling2D(
pool_size=(3, 3),
strides=(1, 1),
padding='same',
data_format=self.data_format,
name='%s_%s' % (name_prefix, SUBBLOCK_IDS[5]))
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x4 = layer(x)
layer = keras.layers.Conv2D(
self.subblocks[5],
(1, 1),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%s_%s' % (name_prefix, SUBBLOCK_IDS[6]),
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x4 = layer(x4)
if self.use_batch_norm:
_name = '%s_%s_bn' % (name_prefix, SUBBLOCK_IDS[6])
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
x4 = layer(x4, training=False)
else:
x4 = layer(x4)
x4 = keras.layers.Activation(self.activation_type)(x4)
if self.data_format == 'channels_first':
concat_axis = 1
if self.use_td:
concat_axis += 1
else:
concat_axis = -1
layer = keras.layers.Concatenate(axis=concat_axis, name='%s_output' % (name_prefix))
x = layer([x1, x2, x3, x4])
return x
def update_config(model, inputs, config, name_pattern=None):
"""
Update the configuration of an existing model.
Note that the input tensors to apply the new model to must be different
from those of the original model. This is because when Keras
clones a model it retains the original input layer and adds an extra one
on top.
In order to update the configuration of only certain layers,
a name pattern (regular expression) may be provided.
Args:
        model (Model): the model whose layer configuration is to be updated.
inputs (tensors): the tensor to apply the new model to.
config (dict): dictionary of layer attributes to update.
name_pattern (str): pattern to match layers against. Those that
do not match will not be updated.
"""
# Loop through all layers and update those that have a regularizer.
for layer in model.layers:
if name_pattern is None or re.match(name_pattern, layer.name):
for name, value in config.items():
if hasattr(layer, name):
setattr(layer, name, value)
new_model = model # clone_model(model, [inputs])
new_model.set_weights(model.get_weights())
return new_model
def update_regularizers(model, inputs, kernel_regularizer, bias_regularizer, name_pattern=None):
"""
Update the weight decay regularizers of an existing model.
Note that the input tensors to apply the new model to must be different
from those of the original model. This is because when Keras
clones a model it retains the original input layer and adds an extra one
on top.
In order to update the regularizers of only certain layers,
a name pattern (regular expression) may be provided.
Args:
model (Model): the model to update the regularizers of.
inputs (tensors): the tensor to apply the new model to.
kernel_regularizer (object): regularizer to apply to kernels.
bias_regularizer (object): regularizer to apply to biases.
name_pattern (str): pattern to match layers against. Those that
do not match will not be updated.
"""
config = {'bias_regularizer': bias_regularizer,
'kernel_regularizer': kernel_regularizer}
return update_config(model, inputs, config, name_pattern)
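# Illustrative usage sketch (the regularizer value and name pattern are
# assumptions): apply a new L2 kernel regularizer to every layer whose name
# starts with "conv", leaving biases unregularized.
#
#   new_model = update_regularizers(model,
#                                   inputs=model.inputs,
#                                   kernel_regularizer=keras.regularizers.l2(1e-4),
#                                   bias_regularizer=None,
#                                   name_pattern=r'conv.*')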
@add_arg_scope
def _conv_block(inputs, filters, alpha, kernel=(3, 3),
strides=(1, 1), kernel_regularizer=None,
bias_regularizer=None, use_batch_norm=True,
activation_type='relu', data_format='channels_first',
freeze_bn=False, trainable=True,
use_bias=False):
"""
Construct a conv block to be used in MobileNet.
Args:
inputs(tensor): The input tensor.
filters(int): The number of filters.
alpha(float): The alpha parameter for MobileNet to control the final number of filters.
kernel(int, tuple): The kernel size, can be a int or a tuple.
strides(int, tuple): The strides.
kernel_regularizer: Kernel regularizer to be applied to the block.
bias_regularizer: Bias regularizer to be applied to the block.
use_batch_norm(bool): Whether or not to use batch normalization layer.
activation_type(str): Activation type, can be relu or relu6.
data_format(str): Data format for Keras, can be channels_first or channels_last.
freeze_bn(bool): Whether or not to freeze the BN layer.
trainable(bool): Make the conv layer trainable or not.
        use_bias(bool): Whether or not to use bias for the conv layer
that is immediately before the BN layers.
Returns:
The output tensor of this block.
"""
channel_axis = get_batchnorm_axis(data_format)
filters = int(filters * alpha)
# Use explicit padding here to avoid TF asymmetric padding.
# This will be fused into Conv layer, and TRT inference is faster than TF asymmetric padding.
# For accuracy, we found they are almost the same for the two padding styles.
x = keras.layers.ZeroPadding2D(padding=(1, 1), name='conv1_pad')(inputs)
x = keras.layers.Conv2D(
filters,
kernel,
padding='valid',
use_bias=use_bias,
strides=strides,
name='conv1',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(axis=channel_axis,
name='conv1_bn')(x, training=False)
else:
x = keras.layers.BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
if activation_type == 'relu6':
x = keras.layers.ReLU(6., name='conv_block_relu6')(x)
else:
x = keras.layers.ReLU(name='conv_block_relu')(x)
return x
@add_arg_scope
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
depth_multiplier=1, strides=(1, 1),
block_id=1, kernel_regularizer=None,
bias_regularizer=None, use_batch_norm=True,
activation_type='relu', data_format='channels_first',
freeze_bn=False, trainable=True,
use_bias=False):
"""
Depthwise conv block as building blocks for MobileNet.
Args:
inputs(tensor): The input tensor.
        pointwise_conv_filters(int): The number of pointwise conv filters.
        alpha(float): The alpha parameter for MobileNet.
        depth_multiplier(int): The depth multiplier (default: 1).
strides(int, tuple): The strides, can be a int or a tuple.
block_id(int): The block_id, used to name the blocks.
kernel_regularizer: The kernel regularizer.
bias_regularizer: The bias regularizer.
use_batch_norm(bool): Whether or not to use batch normalization layer.
activation_type(str): Activation type, can be relu or relu6.
data_format(str): Data format for Keras, can be channels_first or channels_last.
freeze_bn(bool): Whether or not to freeze the BN layer.
trainable(bool): Make the conv layer trainable or not.
        use_bias(bool): Whether or not to use bias for the conv layer
that is immediately before the BN layers.
Returns:
The output tensor.
"""
channel_axis = get_batchnorm_axis(data_format)
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
# Also use explicit padding here to avoid TF style padding.
x = keras.layers.ZeroPadding2D((1, 1), name='conv_pad_%d' % block_id)(inputs)
x = keras.layers.DepthwiseConv2D(
(3, 3),
padding='valid',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=use_bias,
name='conv_dw_%d' % block_id,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
axis=channel_axis,
name='conv_dw_%d_bn' % block_id)(x, training=False)
else:
x = keras.layers.BatchNormalization(axis=channel_axis,
name='conv_dw_%d_bn' % block_id)(x)
if activation_type == 'relu6':
x = keras.layers.ReLU(6., name='conv_dw_%d_relu6' % block_id)(x)
else:
x = keras.layers.ReLU(name='conv_dw_%d_relu' % block_id)(x)
x = keras.layers.Conv2D(
pointwise_conv_filters,
(1, 1),
padding='valid',
use_bias=use_bias,
strides=(1, 1),
name='conv_pw_%d' % block_id,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
axis=channel_axis,
name='conv_pw_%d_bn' % block_id)(x, training=False)
else:
x = keras.layers.BatchNormalization(
axis=channel_axis,
name='conv_pw_%d_bn' % block_id)(x)
if activation_type == 'relu6':
x = keras.layers.ReLU(6., name='conv_pw_relu6_%d' % block_id)(x)
else:
x = keras.layers.ReLU(name='conv_pw_relu_%d' % block_id)(x)
return x
@add_arg_scope
def _leaky_conv(inputs, filters, alpha=0.1, kernel=(3, 3),
strides=(1, 1), kernel_regularizer=None,
bias_regularizer=None, use_batch_norm=True,
padding='same', data_format='channels_first',
freeze_bn=False, trainable=True, force_relu=False,
use_bias=False, name='conv1', use_td=False):
"""
Construct a leaky relu conv block to be used in DarkNet.
Args:
inputs(tensor): The input tensor.
filters(int): The number of filters.
alpha(float): leaky rate for LeakyReLU
kernel(int, tuple): The kernel size, can be a int or a tuple.
strides(int, tuple): The strides.
padding(str): same or valid.
kernel_regularizer: Kernel regularizer to be applied to the block.
bias_regularizer: Bias regularizer to be applied to the block.
use_batch_norm(bool): Whether or not to use batch normalization layer.
data_format(str): Data format for Keras, can be channels_first or channels_last.
freeze_bn(bool): Whether or not to freeze the BN layer.
trainable(bool): Make the conv layer trainable or not.
        force_relu(bool): Whether to force ReLU activation instead of LeakyReLU.
        use_bias(bool): Whether or not to use bias for the conv layer
that is immediately before the BN layers.
name(str): name of the layer.
use_td(bool): use TimeDistributed wrapper or not, default is False.
Returns:
The output tensor of this block.
"""
channel_axis = get_batchnorm_axis(data_format)
_layer = keras.layers.Conv2D(
filters,
kernel,
strides=strides,
padding=padding,
data_format=data_format,
activation=None,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_bias=use_bias,
trainable=trainable,
name=name)
if use_td:
_layer = keras.layers.TimeDistributed(_layer)
x = _layer(inputs)
if use_batch_norm:
_layer = keras.layers.BatchNormalization(axis=channel_axis, name=name+'_bn')
if use_td:
_layer = keras.layers.TimeDistributed(_layer)
if freeze_bn:
x = _layer(x, training=False)
else:
x = _layer(x)
if force_relu:
# still use _lrelu as name
x = keras.layers.ReLU(name=name+'_lrelu')(x)
else:
x = keras.layers.LeakyReLU(alpha=alpha, name=name+'_lrelu')(x)
return x
@add_arg_scope
def _mish_conv(inputs, filters, kernel=(3, 3),
strides=(1, 1), kernel_regularizer=None,
bias_regularizer=None, use_batch_norm=True,
padding='same', data_format='channels_first',
freeze_bn=False, trainable=True, force_relu=False,
use_bias=False, name='conv1', use_td=False,
activation="leaky_relu"):
"""
Construct a mish conv block to be used in DarkNet.
Args:
inputs(tensor): The input tensor.
filters(int): The number of filters.
kernel(int, tuple): The kernel size, can be a int or a tuple.
strides(int, tuple): The strides.
padding(str): same or valid.
kernel_regularizer: Kernel regularizer to be applied to the block.
bias_regularizer: Bias regularizer to be applied to the block.
use_batch_norm(bool): Whether or not to use batch normalization layer.
data_format(str): Data format for Keras, can be channels_first or channels_last.
freeze_bn(bool): Whether or not to freeze the BN layer.
trainable(bool): Make the conv layer trainable or not.
force_relu(bool): Whether to use ReLU instead of Mish
        use_bias(bool): Whether or not to use bias for the conv layer
that is immediately before the BN layers.
name(str): name of the layer.
use_td(bool): use TimeDistributed wrapper or not, default is False.
activation(str): Activation type.
Returns:
The output tensor of this block.
"""
channel_axis = get_batchnorm_axis(data_format)
_layer = keras.layers.Conv2D(filters,
kernel,
strides=strides,
padding=padding,
data_format=data_format,
activation=None,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_bias=use_bias,
trainable=trainable,
name=name)
if use_td:
_layer = keras.layers.TimeDistributed(_layer)
x = _layer(inputs)
if use_batch_norm:
_layer = keras.layers.BatchNormalization(
axis=channel_axis, name=name+'_bn'
)
if use_td:
_layer = keras.layers.TimeDistributed(_layer)
if freeze_bn:
x = _layer(x, training=False)
else:
x = _layer(x)
if force_relu:
# TODO(@zhimengf): This should be deprecated in the future
# Use the general yolov4_config.activation parameter instead
# For now, let's keep it for backward compatibility of spec
x = keras.layers.ReLU(name=name+'_mish')(x)
elif activation == "mish":
x = keras.layers.Activation(mish, name=name+'_mish')(x)
elif activation == "relu":
# still use _mish as name
x = keras.layers.ReLU(name=name+'_mish')(x)
else:
# default case: LeakyReLU
x = keras.layers.LeakyReLU(alpha=0.1, name=name+'_mish')(x)
return x
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
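# Worked examples for _make_divisible with divisor=8:
#   _make_divisible(32 * 0.75, 8) -> 24   (already divisible, kept as-is)
#   _make_divisible(17, 8)        -> 16   (rounded to the nearest multiple of 8)
#   _make_divisible(7, 8)         -> 8    (never drops below min_value=divisor)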
@add_arg_scope
def _inverted_res_block(inputs, expansion, stride, alpha, filters,
block_id, kernel_regularizer=None, bias_regularizer=None,
use_batch_norm=True, activation_type='relu',
data_format='channels_first', all_projections=True,
trainable=True, freeze_bn=False,
use_bias=False):
"""
Inverted residual block as building blocks for MobileNet V2.
Args:
inputs(tensor): Input tensor.
expansion(float): Expansion factor of the filter numbers.
stride(int, tuple): Stride of this block.
alpha(float): alpha parameter.
filters(int): Number of filters.
block_id(int): block id for this block, as a name.
kernel_regularizer: Kernel regularizer to be applied.
bias_regularizer: Bias regularizer to be applied.
use_batch_norm(bool): Whether or not to use BN layers.
activation_type(str): Activation type, can be relu or relu6.
data_format(str): Data format, can be channels_first or channels_last.
all_projections(bool): Whether to use all projection layers to replace the shortcuts.
freeze_bn(bool): Whether or not to freeze the BN layer.
trainable(bool): Make the conv layer trainable or not.
        use_bias(bool): Whether or not to use bias for the conv layer
that is immediately before the BN layers.
Returns:
The output tensor.
"""
channel_axis = get_batchnorm_axis(data_format)
in_channels = inputs._keras_shape[channel_axis]
pointwise_conv_filters = int(filters * alpha)
pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
x = inputs
prefix = 'block_{}_'.format(block_id)
if block_id:
# Expand
x = keras.layers.Conv2D(
expansion * in_channels,
kernel_size=1,
padding='valid',
use_bias=use_bias,
activation=None,
name=prefix + 'expand',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
name=prefix + 'expand_bn')(x, training=False)
else:
x = keras.layers.BatchNormalization(
epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
name=prefix + 'expand_bn')(x)
if activation_type == 'relu6':
x = keras.layers.ReLU(6., name='re_lu_%d' % (block_id + 1))(x)
else:
x = keras.layers.ReLU(name='re_lu_%d' % (block_id + 1))(x)
else:
prefix = 'expanded_conv_'
# Depthwise
# Use explicit padding
x = keras.layers.ZeroPadding2D((1, 1), name=prefix + 'depthwise_pad')(x)
x = keras.layers.DepthwiseConv2D(
kernel_size=3,
strides=stride,
activation=None,
use_bias=use_bias,
padding='valid',
name=prefix + 'depthwise',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
name=prefix + 'depthwise_bn')(x, training=False)
else:
x = keras.layers.BatchNormalization(
epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
name=prefix + 'depthwise_bn')(x)
if activation_type == 'relu6':
x = keras.layers.ReLU(6., name=prefix + 'relu6')(x)
else:
x = keras.layers.ReLU(name=prefix + 'relu')(x)
# Project
x = keras.layers.Conv2D(
pointwise_filters,
kernel_size=1,
padding='valid',
use_bias=use_bias,
activation=None,
name=prefix + 'project',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'project_bn')(x, training=False)
else:
x = keras.layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'project_bn')(x)
if in_channels == pointwise_filters and stride == 1:
if all_projections:
inputs_projected = keras.layers.Conv2D(
in_channels,
kernel_size=1,
padding='valid',
use_bias=False,
activation=None,
name=prefix + 'projected_inputs',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(inputs)
return keras.layers.Add(name=prefix + 'add')([inputs_projected, x])
return keras.layers.Add(name=prefix + 'add')([inputs, x])
return x
def get_uid(base_name):
"""Return a unique ID."""
get_uid.lock.acquire()
if base_name not in get_uid.seqn:
get_uid.seqn[base_name] = 0
uid = get_uid.seqn[base_name]
get_uid.seqn[base_name] += 1
get_uid.lock.release()
return uid
get_uid.seqn = {}
get_uid.lock = threading.Lock()
def add_activation(activation_type, **kwargs):
"""
Create an activation layer based on activation type and additional arguments.
Note that the needed kwargs depend on the activation type.
Args:
activation_type (str): String to indicate activation type.
kwargs (dict): Additional keyword arguments depending on the activation type.
Returns:
activation_layer (a subclass of keras.layers.Layer): The layer type
depends on activation_type.
"""
if activation_type == 'relu-n':
max_value = kwargs.get('max_value', None)
activation_layer = keras.layers.ReLU(max_value=max_value)
elif activation_type == 'lrelu':
alpha = kwargs['alpha']
activation_layer = keras.layers.LeakyReLU(alpha=alpha)
elif activation_type == 'elu':
alpha = kwargs['alpha']
activation_layer = keras.layers.ELU(alpha=alpha)
else:
activation_layer = keras.layers.Activation(activation_type, **kwargs)
return activation_layer
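# Illustrative sketch: add_activation hides the per-type keyword differences, e.g.
#   add_activation('relu-n', max_value=6.)   # ReLU capped at 6
#   add_activation('lrelu', alpha=0.2)       # LeakyReLU(alpha=0.2)
#   add_activation('elu', alpha=1.0)         # ELU(alpha=1.0)
#   add_activation('tanh')                   # falls through to keras.layers.Activation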
class CNNBlock(object):
"""A functor for creating a block of layers."""
@add_arg_scope
def __init__(self,
use_batch_norm,
use_shortcuts,
data_format,
kernel_regularizer,
bias_regularizer,
repeat,
stride,
subblocks,
index=None,
activation_type='relu',
freeze_bn=False,
freeze_block=False,
activation_kwargs=None,
dilation_rate=(1, 1),
all_projections=False,
use_bias=True,
use_td=False):
"""
Initialization of the block functor object.
Args:
use_batch_norm (bool): whether batchnorm should be added after each convolution.
use_shortcuts (bool): whether shortcuts should be used. A typical ResNet by definition
uses shortcuts, but these can be toggled off to use the same ResNet topology without
the shortcuts.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
kernel_regularizer (float): regularizer to apply to kernels.
bias_regularizer (float): regularizer to apply to biases.
repeat (int): repeat number.
stride (int): The filter stride to be applied only to the first subblock (typically used
for downsampling). Strides are set to 1 for all layers beyond the first subblock.
subblocks (list of tuples): A list of tuples defining settings for each consecutive
convolution. Example:
`[(3, 64), (3, 64)]`
The two items in each tuple represents the kernel size and the amount of filters in
a convolution, respectively. The convolutions are added in the order of the list.
index (int): the index of the block to be created.
activation_type (str): activation function type.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
dilation_rate (int or (int, int)): An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
            all_projections (bool): A boolean flag to determine whether all shortcut connections
should be implemented as projection layers to facilitate full pruning or not.
use_bias (bool): whether the layer uses a bias vector.
use_td (bool): Whether or not to wrap the layers into a TimeDistributed layer
to make it work for 5D tensors.
"""
self.use_batch_norm = use_batch_norm
self.use_shortcuts = use_shortcuts
self.all_projections = all_projections
self.data_format = data_format
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.activation_type = activation_type
self.activation_kwargs = activation_kwargs or {}
self.dilation_rate = dilation_rate
self.repeat = repeat
self.stride = stride
self.use_bias = use_bias
self.subblocks = subblocks
self.subblock_ids = subblock_ids()
self.freeze_bn = freeze_bn
self.freeze_block = freeze_block
self.use_td = use_td
if index is not None:
self.name = 'block_%d' % index
else:
self.name = 'block_%d' % (get_uid('block') + 1)
def __call__(self, x):
"""Build the block.
Args:
x (tensor): input tensor.
Returns:
tensor: the output tensor after applying the block on top of input `x`.
"""
for i in range(self.repeat):
name = '%s%s_' % (self.name, self.subblock_ids[i])
if i == 0:
# Set the stride only on the first layer.
stride = self.stride
dimension_changed = True
else:
stride = 1
dimension_changed = False
x = self._subblocks(x,
stride,
dimension_changed,
name_prefix=name,
freeze=self.freeze_block,
use_td=self.use_td)
return x
def _subblocks(self, x, stride, dimension_changed, name_prefix=None, freeze=False,
use_td=False):
"""
Stack several convolutions in a specific sequence given by a list of subblocks.
Args:
x (tensor): the input tensor.
stride (int): The filter stride to be applied only to the first subblock (typically used
for downsampling). Strides are set to 1 for all layers beyond the first subblock.
dimension_changed (bool): This indicates whether the dimension has been changed for this
block. If this is true, then we need to account for the change, or else we will be
unable to re-add the shortcut tensor due to incompatible dimensions. This can be
solved by applying a (1x1) convolution [1]. (The paper also notes the possibility of
zero-padding the shortcut tensor to match any larger output dimension, but this is
not implemented.)
name_prefix (str): name prefix for all the layers created in this function.
freeze (bool): Whether or not to freeze this block.
use_td (bool): Whether or not to wrap layers into a TimeDistributed layer to make it
work for 5D tensors.
Returns:
tensor: the output tensor after applying the ResNet block on top of input `x`.
"""
bn_axis = get_batchnorm_axis(self.data_format)
shortcut = x
nblocks = len(self.subblocks)
for i in range(nblocks):
kernel_size, filters = self.subblocks[i]
if i == 0:
strides = (stride, stride)
else:
strides = (1, 1)
layer = keras.layers.Conv2D(
filters, (kernel_size, kernel_size),
strides=strides,
padding='same',
dilation_rate=self.dilation_rate,
data_format=self.data_format,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%sconv_%d' % (name_prefix, i + 1),
trainable=not freeze)
if use_td:
layer = keras.layers.TimeDistributed(layer)
x = layer(x)
if self.use_batch_norm:
layer = keras.layers.BatchNormalization(
axis=bn_axis,
name='%sbn_%d' % (name_prefix, i + 1)
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
x = layer(x, training=False)
else:
x = layer(x)
if i != nblocks - 1: # All except last conv in block.
x = add_activation(self.activation_type,
name='%s%s_%d' % (name_prefix, self.activation_type, i + 1))(x)
if self.use_shortcuts:
if self.all_projections:
# Implementing shortcut connections as 1x1 projection layers irrespective of
# dimension change.
layer = keras.layers.Conv2D(
filters, (1, 1),
strides=(stride, stride),
data_format=self.data_format,
dilation_rate=self.dilation_rate,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%sconv_shortcut' % name_prefix,
trainable=not freeze)
if use_td:
layer = keras.layers.TimeDistributed(layer)
shortcut = layer(shortcut)
if self.use_batch_norm:
_name = '%sbn_shortcut' % name_prefix
layer = keras.layers.BatchNormalization(axis=bn_axis,
name=_name)
if use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
shortcut = layer(shortcut, training=False)
else:
shortcut = layer(shortcut)
else:
                # Add projection layers to shortcut only if there is a change in dimension.
if dimension_changed: # Dimension changed.
layer = keras.layers.Conv2D(
filters, (1, 1),
strides=(stride, stride),
data_format=self.data_format,
dilation_rate=self.dilation_rate,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%sconv_shortcut' % name_prefix,
trainable=not freeze)
if use_td:
layer = keras.layers.TimeDistributed(layer)
shortcut = layer(shortcut)
if self.use_batch_norm:
_name = '%sbn_shortcut' % name_prefix
layer = keras.layers.BatchNormalization(
axis=bn_axis,
name=_name)
if use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
shortcut = layer(shortcut, training=False)
else:
shortcut = layer(shortcut)
x = keras.layers.add([x, shortcut])
x = add_activation(self.activation_type,
name='%s%s' % (name_prefix, self.activation_type))(x)
return x
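# Illustrative sketch (names are hypothetical): a CNNBlock configured as a two-conv
# ResNet-style block with 64 filters and stride 2, applied to a 4D feature tensor `x`.
#   block = CNNBlock(use_batch_norm=True, use_shortcuts=True,
#                    data_format='channels_first', kernel_regularizer=None,
#                    bias_regularizer=None, repeat=2, stride=2,
#                    subblocks=[(3, 64), (3, 64)], index=1)
#   y = block(x)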
@add_arg_scope
def fire_module(inputs, block_id, squeeze, expand, kernel_regularizer=None,
bias_regularizer=None, data_format='channels_first',
trainable=True):
"""
The squeeze net fire module architecture.
For details, see https://arxiv.org/pdf/1602.07360.pdf
Args:
inputs(tensor): Input tensor.
block_id(int): Block id for current module
squeeze(int): number of filters for squeeze conv layer
expand(int): number of filters for expand conv layers (1x1 and 3x3)
kernel_regularizer: Kernel regularizer applied to the model.
bias_regularizer: Bias regularizer applied to the model.
data_format(str): Data format, can be channels_first or channels_last.
trainable(bool): whether to make the conv layer trainable or not.
Returns:
The output tensor.
"""
concat_axis = 1 if data_format == 'channels_first' else 3
x = keras.layers.Conv2D(
squeeze,
kernel_size=(1, 1),
padding='same',
name='fire' + str(block_id) + '_squeeze_conv',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
trainable=trainable)(inputs)
x = keras.layers.Activation('relu', name='fire' + str(block_id) + '_squeeze')(x)
b_1x1 = keras.layers.Conv2D(
expand,
kernel_size=(1, 1),
padding='same',
name='fire' + str(block_id) + '_expand_conv1x1',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
trainable=trainable)(x)
b_1x1 = keras.layers.Activation('relu', name='fire' + str(block_id) + '_expand_1x1')(b_1x1)
b_3x3 = keras.layers.Conv2D(
expand,
kernel_size=(3, 3),
padding='same',
name='fire' + str(block_id) + '_expand_conv3x3',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
trainable=trainable)(x)
b_3x3 = keras.layers.Activation('relu', name='fire' + str(block_id) + '_expand_3x3')(b_3x3)
return keras.layers.Concatenate(axis=concat_axis, name='fire' + str(block_id))([b_1x1, b_3x3])
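# Illustrative sketch: the fire2 module from the SqueezeNet paper uses 16 squeeze
# filters and 64 expand filters for each of the 1x1 and 3x3 expand branches, e.g.
#   x = fire_module(x, block_id=2, squeeze=16, expand=64)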
def swish(x):
"""Swish activation function.
# Arguments
x: Input tensor.
# Returns
The Swish activation: `x * sigmoid(x)`.
# References
[Searching for Activation Functions](https://arxiv.org/abs/1710.05941)
"""
return x * keras.backend.sigmoid(x)
def mish(x):
"""Mish activation function.
See details: https://arxiv.org/pdf/1908.08681.pdf
Args:
x: input tensor
Returns:
mish(x) = x * tanh(ln(1 + e^x))
"""
return x * tf.math.tanh(tf.math.softplus(x))
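# Illustrative sketch: both swish and mish can be passed as `activation_fn` to the
# EfficientNet-style block() defined below, or wrapped explicitly, e.g.
#   x = keras.layers.Activation(swish, name='swish_act')(x)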
initializer_distribution = "normal"
if os.getenv("TF_KERAS", "0") == "1":
initializer_distribution = "untruncated_normal"
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_out',
# EfficientNet actually uses an untruncated normal distribution for
        # initializing conv layers, but keras.initializers.VarianceScaling uses
# a truncated distribution.
# We decided against a custom initializer for better serializability.
'distribution': initializer_distribution
}
}
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1. / 3.,
'mode': 'fan_out',
'distribution': 'uniform'
}
}
def correct_pad(inputs, kernel_size):
"""Returns a tuple for zero-padding for 2D convolution with downsampling.
# Arguments
        inputs: Input tensor.
kernel_size: An integer or tuple/list of 2 integers.
# Returns
A tuple.
"""
img_dim = 2 if keras.backend.image_data_format() == 'channels_first' else 1
input_size = keras.backend.int_shape(inputs)[img_dim:(img_dim + 2)]
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if input_size[0] is None:
adjust = (1, 1)
else:
adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
correct = (kernel_size[0] // 2, kernel_size[1] // 2)
return ((correct[0] - adjust[0], correct[0]),
(correct[1] - adjust[1], correct[1]))
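# Illustrative sketch (tensor names are hypothetical): for a 3x3 downsampling
# convolution, correct_pad returns asymmetric padding on even spatial sizes and
# symmetric padding on odd ones, e.g.
#   correct_pad(x_224, 3) == ((0, 1), (0, 1))   # 224x224 input
#   correct_pad(x_225, 3) == ((1, 1), (1, 1))   # 225x225 input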
def round_filters(filters, divisor, width_coefficient):
"""Round number of filters based on depth multiplier."""
filters *= width_coefficient
new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats, depth_coefficient):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
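# Illustrative sketch: with a width coefficient of 1.4 and a depth coefficient of
# 1.8, the scaling helpers round to hardware-friendly values, e.g.
#   round_filters(32, 8, 1.4) == 48    # 32 * 1.4 = 44.8, rounded to a multiple of 8
#   round_repeats(2, 1.8) == 4         # ceil(2 * 1.8)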
DEFAULT_DATA_FORMAT = "channels_last" if os.getenv("TF_KERAS", "0") == "1" else "channels_first"
def block(inputs, activation_fn=swish, drop_rate=0., name='',
filters_in=32, filters_out=16, kernel_size=3, strides=1,
expand_ratio=1, se_ratio=0., id_skip=True, freeze=False,
freeze_bn=False, use_td=False, kernel_regularizer=None,
bias_regularizer=None, use_bias=False, data_format=DEFAULT_DATA_FORMAT):
"""A mobile inverted residual block.
# Arguments
inputs: input tensor.
activation_fn: activation function.
drop_rate: float between 0 and 1, fraction of the input units to drop.
name: string, block label.
filters_in: integer, the number of input filters.
filters_out: integer, the number of output filters.
kernel_size: integer, the dimension of the convolution window.
strides: integer, the stride of the convolution.
expand_ratio: integer, scaling coefficient for the input filters.
se_ratio: float between 0 and 1, fraction to squeeze the input filters.
        id_skip: boolean, whether to add the identity skip connection.
freeze(bool): Freeze this block or not.
freeze_bn(bool): Freeze all the BN layers in this block or not.
use_td(bool): Use TimeDistributed wrapper layers for this block or not.
This is used to support 5D input tensors, e.g. in FasterRCNN use case.
kernel_regularizer: The kernel regularizer.
bias_regularizer: The bias regularizer.
use_bias(bool): Use bias or not for Conv layers followed by a BN layer.
# Returns
output tensor for the block.
"""
bn_opt = {
'momentum': 0.99,
'epsilon': 1e-3
}
bn_axis = 3 if keras.backend.image_data_format() == 'channels_last' else 1
# Expansion phase
filters = filters_in * expand_ratio
if expand_ratio != 1:
layer = keras.layers.Conv2D(
filters,
1,
padding='same',
use_bias=use_bias,
kernel_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not freeze,
data_format=data_format,
name=name + 'expand_conv'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
x = layer(inputs)
layer = keras.layers.BatchNormalization(axis=bn_axis, name=name + 'expand_bn',
**bn_opt)
if use_td:
layer = keras.layers.TimeDistributed(layer)
if freeze_bn:
x = layer(x, training=False)
else:
x = layer(x)
x = keras.layers.Activation(activation_fn, name=name + 'expand_activation')(x)
else:
x = inputs
# Depthwise Convolution
if strides == 2:
layer = keras.layers.ZeroPadding2D(
padding=correct_pad(x, kernel_size),
data_format=data_format,
name=name + 'dwconv_pad'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
x = layer(x)
conv_pad = 'valid'
else:
conv_pad = 'same'
layer = keras.layers.DepthwiseConv2D(
kernel_size,
strides=strides,
padding=conv_pad,
use_bias=use_bias,
depthwise_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
trainable=not freeze,
name=name + 'dwconv'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
x = layer(x)
layer = keras.layers.BatchNormalization(axis=bn_axis, name=name + 'bn',
**bn_opt)
if use_td:
layer = keras.layers.TimeDistributed(layer)
if freeze_bn:
x = layer(x, training=False)
else:
x = layer(x)
x = keras.layers.Activation(activation_fn, name=name + 'activation')(x)
# Squeeze and Excitation phase
if 0 < se_ratio <= 1:
filters_se = max(1, int(filters_in * se_ratio))
# Global pooling is needed if we are going to support dynamic
        # input shape (e.g., in FasterRCNN) for this backbone.
# AveragePooling2D requires static input shape, hence cannot work with
# dynamic shapes
if use_td:
# GlobalAveragePooling2D cannot work well with TimeDistributed layer
# because when converted to UFF, GlobalAveragePooling2D becomes Mean
# Op in UFF, and it cannot handle 5D input by itself like Conv2D does.
# So we rely on some manual shape transforms, so it sees 4D input
# (N, R*C, H, W), and reshape back to (N, R, C, 1, 1) after global pooling.
R, C, H, W = x.get_shape().as_list()[1:]
assert None not in (R, C, H, W), (
"Expect R, C, H, W all not None. While got {}".format((R, C, H, W))
)
            # Another issue concerns pruning. In modulus pruning, a Reshape cannot
            # follow a pruned layer because the channel dimension changes after pruning.
            # In this special case, however, we always reshape to (N, -1, H, W),
            # whether or not the filter count C changes during pruning, so the logic
            # stays correct even when C is changed. For that reason we cannot hard-code
            # the target shape to (R*C, H, W); the target shape is (N, -1, H, W) instead.
se = keras.layers.Reshape((-1, H, W), name=name + 'pre_pool_reshape')(x)
se = keras.layers.GlobalAveragePooling2D(
data_format=data_format, name=name + 'se_squeeze')(se)
layer = keras.layers.Reshape((R, -1, 1, 1), name=name + 'post_pool_reshape')
se = layer(se)
else:
se = keras.layers.GlobalAveragePooling2D(
data_format=data_format, name=name + 'se_squeeze')(x)
# _, cc = se.get_shape()
se_shape = (1, 1, -1) if data_format == 'channels_last' else (-1, 1, 1)
se = keras.layers.Reshape(se_shape, name=name + 'se_reshape')(se)
layer = keras.layers.Conv2D(
filters_se,
1,
padding='same',
activation=activation_fn,
kernel_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not freeze,
data_format=data_format,
name=name + 'se_reduce'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
se = layer(se)
layer = keras.layers.Conv2D(
filters,
1,
padding='same',
activation='sigmoid',
kernel_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
trainable=not freeze,
name=name + 'se_expand'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
se = layer(se)
x = keras.layers.Multiply(name=name + 'se_excite')([x, se])
# Output phase
layer = keras.layers.Conv2D(
filters_out,
1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not freeze,
data_format=data_format,
name=name + 'project_conv'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
x = layer(x)
layer = keras.layers.BatchNormalization(axis=bn_axis, name=name + 'project_bn',
**bn_opt)
if use_td:
layer = keras.layers.TimeDistributed(layer)
if freeze_bn:
x = layer(x, training=False)
else:
x = layer(x)
if (id_skip is True and strides == 1 and filters_in == filters_out):
if drop_rate > 0:
layer = keras.layers.Dropout(
drop_rate,
noise_shape=(None, 1, 1, 1),
name=name + 'drop',
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
x = layer(x)
x = keras.layers.Add(name=name + 'add')([x, inputs])
return x
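# Illustrative sketch (names are hypothetical): a stride-1 MBConv block with no
# expansion and squeeze-and-excitation ratio 0.25, roughly mirroring the first
# stage of an EfficientNet-B0 style backbone.
#   x = block(x, activation_fn=swish, name='block1a_', filters_in=32,
#             filters_out=16, kernel_size=3, strides=1, expand_ratio=1,
#             se_ratio=0.25, id_skip=True)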
def force_stride16(block_args):
"""Force the block args to make the model have stride 16."""
last_block = -1
for idx, block in enumerate(block_args):
if block['strides'] == 2:
last_block = idx
assert last_block >= 0, (
"Cannot find stride 2 in the block args."
)
    # Drop the block with the last stride-2 convolution and all following blocks
    # so that the total stride of the backbone stays at 16.
    block_args = block_args[:last_block]
    return block_args
def add_dense_head(model, inputs, nclasses, activation):
"""
    Create a model that stacks a dense head on top of another model. The backbone output is flattened before the dense layer.
Args:
model (Model): the model on top of which the head should be created.
inputs (tensor): the inputs (tensor) to the previously supplied model.
        nclasses (int): the number of outputs of the dense head.
activation (string): activation function to use e.g. 'softmax' or 'linear'.
Returns:
Model: A model with the head stacked on top of the `model` input.
"""
x = model.outputs[0]
head_name = "head_fc%d" % (nclasses)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(nclasses, activation=activation, name=head_name)(x)
model = keras.models.Model(
inputs=inputs, outputs=x, name="%s_fc%d" % (model.name, nclasses)
)
return model
def csp_tiny_block(x, num_filters, name, trainable=True, kernel_regularizer=None,
bias_regularizer=None, data_format="channels_first",
freeze_bn=False, force_relu=False, use_bias=False, use_td=False,
use_batch_norm=True, activation="leaky_relu"):
"""Building block for CSPDarkNet tiny."""
concat_axis = 1 if data_format == "channels_first" else -1
with arg_scope(
[_mish_conv],
use_batch_norm=use_batch_norm,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
padding='same',
freeze_bn=freeze_bn,
use_bias=use_bias,
force_relu=force_relu,
trainable=trainable,
activation=activation
):
x = _mish_conv(x, num_filters, kernel=(3, 3), name=name+"_conv_0")
route = x
x = Split(groups=2, group_id=1, name=name+"_split_0")(x)
x = _mish_conv(x, num_filters // 2, kernel=(3, 3), name=name+"_conv_1")
route_1 = x
x = _mish_conv(x, num_filters // 2, kernel=(3, 3), name=name+"_conv_2")
x = keras.layers.Concatenate(axis=concat_axis, name=name+"_concat_0")([x, route_1])
x = _mish_conv(x, num_filters, kernel=(1, 1), name=name+"_conv_3")
x = keras.layers.Concatenate(axis=concat_axis, name=name+"_concat_1")([route, x])
x = keras.layers.MaxPooling2D(
pool_size=[2, 2], name=name+"_pool_0",
data_format=data_format)(x)
return x
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model template for backbone of YOLOv4-tiny."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import backend as K
from keras.layers import (
Dense,
GlobalAveragePooling2D,
Input
)
from keras.models import Model
from nvidia_tao_tf1.core.templates.utils import _mish_conv
from nvidia_tao_tf1.core.templates.utils import arg_scope, csp_tiny_block
def CSPDarkNetTiny(
input_tensor=None,
input_shape=None,
add_head=False,
data_format='channels_first',
kernel_regularizer=None,
bias_regularizer=None,
nclasses=1000,
use_batch_norm=True,
freeze_bn=False,
freeze_blocks=None,
use_bias=False,
force_relu=False,
activation="leaky_relu"
):
"""
    The CSPDarkNet-tiny backbone architecture used in YOLOv4-tiny.
Reference: https://arxiv.org/abs/2011.08036
"""
if freeze_blocks is None:
freeze_blocks = []
if input_shape is None:
if data_format == 'channels_first':
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor)
else:
img_input = input_tensor
with arg_scope(
[_mish_conv],
use_batch_norm=use_batch_norm,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
padding='same',
freeze_bn=freeze_bn,
use_bias=use_bias,
force_relu=force_relu,
activation=activation
):
x = _mish_conv(img_input, 32, kernel=(3, 3), strides=(2, 2), name="conv_0",
trainable=not(0 in freeze_blocks))
x = _mish_conv(x, 64, kernel=(3, 3), strides=(2, 2), name="conv_1",
trainable=not(1 in freeze_blocks))
x = csp_tiny_block(x, num_filters=64, name="conv_2",
trainable=not(2 in freeze_blocks),
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format, freeze_bn=freeze_bn,
force_relu=force_relu, use_bias=use_bias,
activation=activation,
use_batch_norm=use_batch_norm)
x = csp_tiny_block(x, num_filters=128, name="conv_3",
trainable=not(3 in freeze_blocks),
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format, freeze_bn=freeze_bn,
force_relu=force_relu, use_bias=use_bias,
activation=activation,
use_batch_norm=use_batch_norm)
x = csp_tiny_block(x, num_filters=256, name="conv_4",
trainable=not(4 in freeze_blocks),
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format, freeze_bn=freeze_bn,
force_relu=force_relu, use_bias=use_bias,
activation=activation,
use_batch_norm=use_batch_norm)
x = _mish_conv(x, 512, kernel=(3, 3), name="conv_5",
trainable=not(5 in freeze_blocks),
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format, freeze_bn=freeze_bn,
force_relu=force_relu, use_bias=use_bias)
if add_head:
x = GlobalAveragePooling2D(data_format=data_format, name='avgpool')(x)
x = Dense(nclasses, activation='softmax', name='predictions',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)(x)
# Create model.
model_name = 'cspdarknet_tiny'
if use_batch_norm:
model_name += '_bn'
if add_head:
model_name += '_add_head'
model = Model(img_input, x, name=model_name)
return model
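# Illustrative sketch: build the backbone with a classification head on a fixed
# 416x416 input (the shapes are hypothetical).
#   model = CSPDarkNetTiny(input_shape=(3, 416, 416), add_head=True, nclasses=1000)
#   model.summary()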
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/cspdarknet_tiny.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA utilities for tf model templates."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import inspect
import math
import re
import threading
import tensorflow as tf
from tensorflow import keras
bn_axis_map = {'channels_last': 3, 'channels_first': 1}
SUBBLOCK_IDS = ['1x1', '3x3_reduce', '3x3', '5x5_reduce', '5x5', 'pool', 'pool_proj']
_ARGSTACK = [{}]
_DECORATED_OPS = {}
def _get_arg_stack():
if _ARGSTACK:
return _ARGSTACK
_ARGSTACK.append({})
return _ARGSTACK
def _current_arg_scope():
stack = _get_arg_stack()
return stack[-1]
def _key_op(op):
return getattr(op, "_key_op", str(op))
def _name_op(op):
return (op.__module__, op.__name__)
def _kwarg_names(func):
kwargs_length = len(func.__defaults__) if func.__defaults__ else 0
return func.__code__.co_varnames[-kwargs_length : func.__code__.co_argcount]
def _add_op(op):
key_op = _key_op(op)
if key_op not in _DECORATED_OPS:
_DECORATED_OPS[key_op] = _kwarg_names(op)
@contextlib.contextmanager
def arg_scope(list_ops_or_scope, **kwargs):
"""Store the default arguments for the given set of list_ops.
For usage, please see examples at top of the file.
Args:
list_ops_or_scope: List or tuple of operations to set argument scope for or
a dictionary containing the current scope. When list_ops_or_scope is a
dict, kwargs must be empty. When list_ops_or_scope is a list or tuple,
        then every op in it needs to be decorated with @add_arg_scope to work.
**kwargs: keyword=value that will define the defaults for each op in
list_ops. All the ops need to accept the given set of arguments.
Yields:
the current_scope, which is a dictionary of {op: {arg: value}}
Raises:
TypeError: if list_ops is not a list or a tuple.
        ValueError: if any op in list_ops has not been decorated with @add_arg_scope.
"""
if isinstance(list_ops_or_scope, dict):
# Assumes that list_ops_or_scope is a scope that is being reused.
if kwargs:
raise ValueError(
"When attempting to re-use a scope by suppling a"
"dictionary, kwargs must be empty."
)
current_scope = list_ops_or_scope.copy()
try:
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
else:
# Assumes that list_ops_or_scope is a list/tuple of ops with kwargs.
if not isinstance(list_ops_or_scope, (list, tuple)):
raise TypeError(
"list_ops_or_scope must either be a list/tuple or reused"
"scope (i.e. dict)"
)
try:
current_scope = _current_arg_scope().copy()
for op in list_ops_or_scope:
if inspect.isclass(op):
# If we decorated a class, use the scope on the initializer
op = op.__init__
key_op = _key_op(op)
if not has_arg_scope(op):
raise ValueError(
"%s::%s is not decorated with @add_arg_scope" % _name_op(op)
)
if key_op in current_scope:
current_kwargs = current_scope[key_op].copy()
current_kwargs.update(kwargs)
current_scope[key_op] = current_kwargs
else:
current_scope[key_op] = kwargs.copy()
_get_arg_stack().append(current_scope)
yield current_scope
finally:
_get_arg_stack().pop()
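# Illustrative sketch: any op decorated with @add_arg_scope can pick up shared
# defaults from an enclosing arg_scope, e.g.
#   with arg_scope([_conv_block], use_batch_norm=True, data_format='channels_last'):
#       y = _conv_block(x, filters=32, alpha=1.0)
# which is equivalent to passing those keyword arguments to _conv_block directly.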
def add_arg_scope(func):
"""Decorate a function with args so it can be used within an arg_scope.
Args:
func: function to decorate.
Returns:
        The decorated function func_with_args().
"""
@functools.wraps(func)
def func_with_args(*args, **kwargs):
current_scope = _current_arg_scope()
current_args = kwargs
key_func = _key_op(func)
if key_func in current_scope:
current_args = current_scope[key_func].copy()
current_args.update(kwargs)
return func(*args, **current_args)
_add_op(func)
setattr(func_with_args, "_key_op", _key_op(func))
setattr(func_with_args, "__doc__", func.__doc__)
return func_with_args
def has_arg_scope(func):
"""Check whether a func has been decorated with @add_arg_scope or not.
Args:
func: function to check.
Returns:
a boolean.
"""
return _key_op(func) in _DECORATED_OPS
def arg_scoped_arguments(func):
"""Return the list kwargs that arg_scope can set for a func.
Args:
func: function which has been decorated with @add_arg_scope.
Returns:
a list of kwargs names.
"""
assert has_arg_scope(func)
return _DECORATED_OPS[_key_op(func)]
def add_dense_head(model, inputs, nclasses, activation):
"""
    Create a model that stacks a dense head on top of another model. The backbone output is flattened before the dense layer.
Args:
model (Model): the model on top of which the head should be created.
inputs (tensor): the inputs (tensor) to the previously supplied model.
        nclasses (int): the number of outputs of the dense head.
activation (string): activation function to use e.g. 'softmax' or 'linear'.
Returns:
Model: A model with the head stacked on top of the `model` input.
"""
x = model.outputs[0]
head_name = "head_fc%d" % (nclasses)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(nclasses, activation=activation, name=head_name)(x)
model = keras.models.Model(
inputs=inputs, outputs=x, name="%s_fc%d" % (model.name, nclasses)
)
return model
def get_batchnorm_axis(data_format):
"""Convert a data_format string to the correct index in a 4 dimensional tensor.
Args:
data_format (str): either 'channels_last' or 'channels_first'.
Returns:
int: the axis corresponding to the `data_format`.
"""
return bn_axis_map[data_format]
class subblock_ids(object):
"""A operator to get index of subblock, overload [] operation."""
def __getitem__(self, key):
"""
Generate a subblock ID and return.
Args:
key (int): an index used to generate the subblock ID.
"""
cur = key
subblock_id = ''
while cur >= 0:
ch = chr(ord('a') + cur % 26)
subblock_id = ch + subblock_id
cur = cur // 26 - 1
return subblock_id
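# Illustrative sketch: indices map to spreadsheet-style letter IDs, e.g.
#   subblock_ids()[0] == 'a', subblock_ids()[25] == 'z', subblock_ids()[26] == 'aa'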
class InceptionV1Block(object):
"""A functor for creating a Inception v1 block of layers."""
@add_arg_scope
def __init__(self,
use_batch_norm,
data_format,
kernel_regularizer,
bias_regularizer,
subblocks,
index,
freeze_bn=False,
activation_type='relu',
use_bias=True,
trainable=True,
use_td=False):
"""
Initialization of the block functor object.
Args:
use_batch_norm (bool): whether batchnorm should be added after each convolution.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
kernel_regularizer (float): regularizer to apply to kernels.
bias_regularizer (float): regularizer to apply to biases.
subblocks (tuple): A tuple of size 6, defining number of feature-maps for
                subblocks in an inception block.
                For GoogLeNet from "Going deeper with convolutions" by Szegedy et al.,
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2015
Inception_3a: (64, 96, 128, 16, 32, 32)
Defines Inception block with following parallel branches
1) 64 outputs from 1x1 convolutions
2.1) 96 outputs from 1x1 convolutions --> 2.2) 128 outputs from 3x3 convolutions
3.1) 16 outputs from 1x1 convolutions --> 3.2) 32 outputs from 5x5 convolutions
4.1) Max pooling with 3x3 pooling size --> 4.2) 32 outputs from 1x1 convolutions
the outputs of 1, 2.2, 3.2, and 4.2 are concatenated to produce final output.
index (int): the index of the block to be created.
activation_type (str): activation function type.
freeze_bn(bool): Whether or not to freeze the BN layer.
use_bias(bool): Whether or not to use bias for Conv/Dense, etc.
trainable(bool): Whether or not to set the weights to be trainable.
use_td(bool): Whether or not to wrap the layers into a TimeDistributed layer.
This is useful in FasterRCNN.
"""
self.use_batch_norm = use_batch_norm
self.data_format = data_format
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.activation_type = activation_type
self.subblocks = subblocks
self.index = index
self.name = 'inception_%s' % index
self.freeze_bn = freeze_bn
self.use_bias = use_bias
self.trainable = trainable
self.use_td = use_td
def __call__(self, x):
"""Build the block.
Args:
x (tensor): input tensor.
Returns:
tensor: the output tensor after applying the block on top of input `x`.
"""
x = self._subblocks(x, name_prefix=self.name)
return x
def _subblocks(self, x, name_prefix=None):
"""
Stack several convolutions in a specific sequence given by a list of subblocks.
Args:
x (tensor): the input tensor.
name_prefix (str): name prefix for all the layers created in this function.
Returns:
tensor: the output tensor after applying the ResNet block on top of input `x`.
"""
nblocks = len(self.subblocks)
if(nblocks != 6):
print("Inception V1 block must have 6 subblocks")
return(x)
if self.use_batch_norm:
bn_axis = get_batchnorm_axis(self.data_format)
# First branch is 1x1 conv with padding = 0, and stride = 1
layer = keras.layers.Conv2D(
self.subblocks[0],
(1, 1),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%s_%s' % (name_prefix, SUBBLOCK_IDS[0]),
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x1 = layer(x)
if self.use_batch_norm:
_name = '%s_%s_bn' % (name_prefix, SUBBLOCK_IDS[0])
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
x1 = layer(x1, training=False)
else:
x1 = layer(x1)
x1 = keras.layers.Activation(self.activation_type)(x1)
# Second branch is 1x1 conv with padding = 0, and stride = 1
layer = keras.layers.Conv2D(
self.subblocks[1],
(1, 1),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%s_%s' % (name_prefix, SUBBLOCK_IDS[1]),
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x2 = layer(x)
if self.use_batch_norm:
_name = '%s_%s_bn' % (name_prefix, SUBBLOCK_IDS[1])
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
x2 = layer(x2, training=False)
else:
x2 = layer(x2)
x2 = keras.layers.Activation(self.activation_type)(x2)
        # The 3x3 conv that follows the 1x1 conv in the second branch.
layer = keras.layers.Conv2D(
self.subblocks[2],
(3, 3),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%s_%s' % (name_prefix, SUBBLOCK_IDS[2]),
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x2 = layer(x2)
if self.use_batch_norm:
_name = '%s_%s_bn' % (name_prefix, SUBBLOCK_IDS[2])
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
x2 = layer(x2, training=False)
else:
x2 = layer(x2)
x2 = keras.layers.Activation(self.activation_type)(x2)
# Third branch is 1x1 conv with stride = 1
layer = keras.layers.Conv2D(
self.subblocks[3],
(1, 1),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%s_%s' % (name_prefix, SUBBLOCK_IDS[3]),
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x3 = layer(x)
if self.use_batch_norm:
_name = '%s_%s_bn' % (name_prefix, SUBBLOCK_IDS[3])
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
x3 = layer(x3, training=False)
else:
x3 = layer(x3)
x3 = keras.layers.Activation(self.activation_type)(x3)
        # The 5x5 conv that follows the 1x1 conv in the third branch.
layer = keras.layers.Conv2D(
self.subblocks[4],
(5, 5),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%s_%s' % (name_prefix, SUBBLOCK_IDS[4]),
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x3 = layer(x3)
if self.use_batch_norm:
_name = '%s_%s_bn' % (name_prefix, SUBBLOCK_IDS[4])
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
x3 = layer(x3, training=False)
else:
x3 = layer(x3)
x3 = keras.layers.Activation(self.activation_type)(x3)
# Fourth branch is max pool stride = 1, and a 1x1 conv
layer = keras.layers.MaxPooling2D(
pool_size=(3, 3),
strides=(1, 1),
padding='same',
data_format=self.data_format,
name='%s_%s' % (name_prefix, SUBBLOCK_IDS[5]))
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x4 = layer(x)
layer = keras.layers.Conv2D(
self.subblocks[5],
(1, 1),
strides=(1, 1),
padding='same',
data_format=self.data_format,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%s_%s' % (name_prefix, SUBBLOCK_IDS[6]),
use_bias=self.use_bias,
trainable=self.trainable)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
x4 = layer(x4)
if self.use_batch_norm:
_name = '%s_%s_bn' % (name_prefix, SUBBLOCK_IDS[6])
layer = keras.layers.BatchNormalization(axis=bn_axis, name=_name)
if self.use_td:
layer = keras.layers.TimeDistributed(layer)
if self.freeze_bn:
x4 = layer(x4, training=False)
else:
x4 = layer(x4)
x4 = keras.layers.Activation(self.activation_type)(x4)
if self.data_format == 'channels_first':
concat_axis = 1
if self.use_td:
concat_axis += 1
else:
concat_axis = -1
layer = keras.layers.Concatenate(axis=concat_axis, name='%s_output' % (name_prefix))
x = layer([x1, x2, x3, x4])
return x
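# Illustrative sketch: the inception_3a block from GoogLeNet described in the
# class docstring above.
#   x = InceptionV1Block(use_batch_norm=True, data_format='channels_first',
#                        kernel_regularizer=None, bias_regularizer=None,
#                        subblocks=(64, 96, 128, 16, 32, 32), index='3a')(x)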
def update_config(model, inputs, config, name_pattern=None):
"""
Update the configuration of an existing model.
Note that the input tensors to apply the new model to must be different
from those of the original model. This is because when Keras
clones a model it retains the original input layer and adds an extra one
on top.
In order to update the configuration of only certain layers,
a name pattern (regular expression) may be provided.
Args:
model (Model): the model to update the regularizers of.
inputs (tensors): the tensor to apply the new model to.
config (dict): dictionary of layer attributes to update.
name_pattern (str): pattern to match layers against. Those that
do not match will not be updated.
"""
# Loop through all layers and update those that have a regularizer.
for layer in model.layers:
if name_pattern is None or re.match(name_pattern, layer.name):
for name, value in config.items():
if hasattr(layer, name):
setattr(layer, name, value)
new_model = model # clone_model(model, [inputs])
new_model.set_weights(model.get_weights())
return new_model
def update_regularizers(model, inputs, kernel_regularizer, bias_regularizer, name_pattern=None):
"""
Update the weight decay regularizers of an existing model.
Note that the input tensors to apply the new model to must be different
from those of the original model. This is because when Keras
clones a model it retains the original input layer and adds an extra one
on top.
In order to update the regularizers of only certain layers,
a name pattern (regular expression) may be provided.
Args:
model (Model): the model to update the regularizers of.
inputs (tensors): the tensor to apply the new model to.
kernel_regularizer (object): regularizer to apply to kernels.
bias_regularizer (object): regularizer to apply to biases.
name_pattern (str): pattern to match layers against. Those that
do not match will not be updated.
"""
config = {'bias_regularizer': bias_regularizer,
'kernel_regularizer': kernel_regularizer}
return update_config(model, inputs, config, name_pattern)
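# Illustrative sketch (the pattern and decay factor are hypothetical): apply a new
# L2 weight decay only to layers whose names start with 'block_'.
#   reg = keras.regularizers.l2(1e-4)
#   model = update_regularizers(model, inputs, kernel_regularizer=reg,
#                               bias_regularizer=reg, name_pattern='block_.*')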
@add_arg_scope
def _conv_block(inputs, filters, alpha, kernel=(3, 3),
strides=(1, 1), kernel_regularizer=None,
bias_regularizer=None, use_batch_norm=True,
activation_type='relu', data_format='channels_first',
freeze_bn=False, trainable=True,
use_bias=False):
"""
Construct a conv block to be used in MobileNet.
Args:
inputs(tensor): The input tensor.
filters(int): The number of filters.
alpha(float): The alpha parameter for MobileNet to control the final number of filters.
kernel(int, tuple): The kernel size, can be a int or a tuple.
strides(int, tuple): The strides.
kernel_regularizer: Kernel regularizer to be applied to the block.
bias_regularizer: Bias regularizer to be applied to the block.
use_batch_norm(bool): Whether or not to use batch normalization layer.
activation_type(str): Activation type, can be relu or relu6.
data_format(str): Data format for Keras, can be channels_first or channels_last.
freeze_bn(bool): Whether or not to freeze the BN layer.
trainable(bool): Make the conv layer trainable or not.
        use_bias(bool): Whether or not to use bias for the conv layer
that is immediately before the BN layers.
Returns:
The output tensor of this block.
"""
channel_axis = get_batchnorm_axis(data_format)
filters = int(filters * alpha)
# Use explicit padding here to avoid TF asymmetric padding.
# This will be fused into Conv layer, and TRT inference is faster than TF asymmetric padding.
# For accuracy, we found they are almost the same for the two padding styles.
x = keras.layers.ZeroPadding2D(padding=(1, 1), name='conv1_pad')(inputs)
x = keras.layers.Conv2D(
filters,
kernel,
padding='valid',
use_bias=use_bias,
strides=strides,
name='conv1',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(axis=channel_axis,
name='conv1_bn')(x, training=False)
else:
x = keras.layers.BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
if activation_type == 'relu6':
x = keras.layers.ReLU(6., name='conv_block_relu6')(x)
else:
x = keras.layers.ReLU(name='conv_block_relu')(x)
return x
@add_arg_scope
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
depth_multiplier=1, strides=(1, 1),
block_id=1, kernel_regularizer=None,
bias_regularizer=None, use_batch_norm=True,
activation_type='relu', data_format='channels_first',
freeze_bn=False, trainable=True,
use_bias=False):
"""
    Depthwise conv block, used as the building block for MobileNet.
Args:
inputs(tensor): The input tensor.
pointwise_conv_filters(int): The number of depthwise conv filters.
alpha(float): The alpha parameter for MobileNet.
        depth_multiplier(int): The depth multiplier (default: 1).
strides(int, tuple): The strides, can be a int or a tuple.
block_id(int): The block_id, used to name the blocks.
kernel_regularizer: The kernel regularizer.
bias_regularizer: The bias regularizer.
use_batch_norm(bool): Whether or not to use batch normalization layer.
activation_type(str): Activation type, can be relu or relu6.
data_format(str): Data format for Keras, can be channels_first or channels_last.
freeze_bn(bool): Whether or not to freeze the BN layer.
trainable(bool): Make the conv layer trainable or not.
        use_bias(bool): Whether or not to use bias for the conv layer
that is immediately before the BN layers.
Returns:
The output tensor.
"""
channel_axis = get_batchnorm_axis(data_format)
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
# Also use explicit padding here to avoid TF style padding.
x = keras.layers.ZeroPadding2D((1, 1), name='conv_pad_%d' % block_id)(inputs)
x = keras.layers.DepthwiseConv2D(
(3, 3),
padding='valid',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=use_bias,
name='conv_dw_%d' % block_id,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
axis=channel_axis,
name='conv_dw_%d_bn' % block_id)(x, training=False)
else:
x = keras.layers.BatchNormalization(axis=channel_axis,
name='conv_dw_%d_bn' % block_id)(x)
if activation_type == 'relu6':
x = keras.layers.ReLU(6., name='conv_dw_%d_relu6' % block_id)(x)
else:
x = keras.layers.ReLU(name='conv_dw_%d_relu' % block_id)(x)
x = keras.layers.Conv2D(
pointwise_conv_filters,
(1, 1),
padding='valid',
use_bias=use_bias,
strides=(1, 1),
name='conv_pw_%d' % block_id,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
axis=channel_axis,
name='conv_pw_%d_bn' % block_id)(x, training=False)
else:
x = keras.layers.BatchNormalization(
axis=channel_axis,
name='conv_pw_%d_bn' % block_id)(x)
if activation_type == 'relu6':
x = keras.layers.ReLU(6., name='conv_pw_relu6_%d' % block_id)(x)
else:
x = keras.layers.ReLU(name='conv_pw_relu_%d' % block_id)(x)
return x
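# Illustrative sketch: the MobileNet V1 stem pattern built from these helpers, a
# strided 3x3 conv block followed by the first depthwise-separable block.
#   x = _conv_block(img_input, 32, alpha=1.0, strides=(2, 2))
#   x = _depthwise_conv_block(x, 64, alpha=1.0, block_id=1)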
@add_arg_scope
def _leaky_conv(inputs, filters, alpha=0.1, kernel=(3, 3),
strides=(1, 1), kernel_regularizer=None,
bias_regularizer=None, use_batch_norm=True,
padding='same', data_format='channels_first',
freeze_bn=False, trainable=True,
use_bias=False, name='conv1', use_td=False):
"""
Construct a leaky relu conv block to be used in DarkNet.
Args:
inputs(tensor): The input tensor.
filters(int): The number of filters.
alpha(float): leaky rate for LeakyReLU
kernel(int, tuple): The kernel size, can be a int or a tuple.
strides(int, tuple): The strides.
padding(str): same or valid.
kernel_regularizer: Kernel regularizer to be applied to the block.
bias_regularizer: Bias regularizer to be applied to the block.
use_batch_norm(bool): Whether or not to use batch normalization layer.
data_format(str): Data format for Keras, can be channels_first or channels_last.
freeze_bn(bool): Whether or not to freeze the BN layer.
trainable(bool): Make the conv layer trainable or not.
        use_bias(bool): Whether or not to use bias for the conv layer
that is immediately before the BN layers.
name(str): name of the layer.
use_td(bool): use TimeDistributed wrapper or not, default is False.
Returns:
The output tensor of this block.
"""
channel_axis = get_batchnorm_axis(data_format)
_layer = keras.layers.Conv2D(
filters,
kernel,
strides=strides,
padding=padding,
data_format=data_format,
activation=None,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_bias=use_bias,
trainable=trainable,
name=name)
if use_td:
_layer = keras.layers.TimeDistributed(_layer)
x = _layer(inputs)
if use_batch_norm:
_layer = keras.layers.BatchNormalization(axis=channel_axis, name=name+'_bn')
if use_td:
_layer = keras.layers.TimeDistributed(_layer)
if freeze_bn:
x = _layer(x, training=False)
else:
x = _layer(x)
x = keras.layers.LeakyReLU(alpha=alpha, name=name+'_lrelu')(x)
return x
def _make_divisible(v, divisor, min_value=None):
    """Round `v` to the nearest multiple of `divisor`, without going below `min_value`."""
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
@add_arg_scope
def _inverted_res_block(inputs, expansion, stride, alpha, filters,
block_id, kernel_regularizer=None, bias_regularizer=None,
use_batch_norm=True, activation_type='relu',
data_format='channels_first', all_projections=True,
trainable=True, freeze_bn=False,
use_bias=False):
"""
    Inverted residual block, used as the building block for MobileNet V2.
Args:
inputs(tensor): Input tensor.
expansion(float): Expansion factor of the filter numbers.
stride(int, tuple): Stride of this block.
alpha(float): alpha parameter.
filters(int): Number of filters.
block_id(int): block id for this block, as a name.
kernel_regularizer: Kernel regularizer to be applied.
bias_regularizer: Bias regularizer to be applied.
use_batch_norm(bool): Whether or not to use BN layers.
activation_type(str): Activation type, can be relu or relu6.
data_format(str): Data format, can be channels_first or channels_last.
all_projections(bool): Whether to use all projection layers to replace the shortcuts.
freeze_bn(bool): Whether or not to freeze the BN layer.
trainable(bool): Make the conv layer trainable or not.
        use_bias(bool): Whether or not to use bias for the conv layer
that is immediately before the BN layers.
Returns:
The output tensor.
"""
channel_axis = get_batchnorm_axis(data_format)
in_channels = inputs._keras_shape[channel_axis]
pointwise_conv_filters = int(filters * alpha)
pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
x = inputs
prefix = 'block_{}_'.format(block_id)
if block_id:
# Expand
x = keras.layers.Conv2D(
expansion * in_channels,
kernel_size=1,
padding='valid',
use_bias=use_bias,
activation=None,
name=prefix + 'expand',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
name=prefix + 'expand_bn')(x, training=False)
else:
x = keras.layers.BatchNormalization(
epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
name=prefix + 'expand_bn')(x)
if activation_type == 'relu6':
x = keras.layers.ReLU(6., name='re_lu_%d' % (block_id + 1))(x)
else:
x = keras.layers.ReLU(name='re_lu_%d' % (block_id + 1))(x)
else:
prefix = 'expanded_conv_'
# Depthwise
# Use explicit padding
x = keras.layers.ZeroPadding2D((1, 1), name=prefix + 'depthwise_pad')(x)
x = keras.layers.DepthwiseConv2D(
kernel_size=3,
strides=stride,
activation=None,
use_bias=use_bias,
padding='valid',
name=prefix + 'depthwise',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
name=prefix + 'depthwise_bn')(x, training=False)
else:
x = keras.layers.BatchNormalization(
epsilon=1e-3,
axis=channel_axis,
momentum=0.999,
name=prefix + 'depthwise_bn')(x)
if activation_type == 'relu6':
x = keras.layers.ReLU(6., name=prefix + 'relu6')(x)
else:
x = keras.layers.ReLU(name=prefix + 'relu')(x)
# Project
x = keras.layers.Conv2D(
pointwise_filters,
kernel_size=1,
padding='valid',
use_bias=use_bias,
activation=None,
name=prefix + 'project',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(x)
if use_batch_norm:
if freeze_bn:
x = keras.layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'project_bn')(x, training=False)
else:
x = keras.layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + 'project_bn')(x)
if in_channels == pointwise_filters and stride == 1:
if all_projections:
inputs_projected = keras.layers.Conv2D(
in_channels,
kernel_size=1,
padding='valid',
use_bias=False,
activation=None,
name=prefix + 'projected_inputs',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=trainable)(inputs)
return keras.layers.Add(name=prefix + 'add')([inputs_projected, x])
return keras.layers.Add(name=prefix + 'add')([inputs, x])
return x
def get_uid(base_name):
"""Return a unique ID."""
get_uid.lock.acquire()
if base_name not in get_uid.seqn:
get_uid.seqn[base_name] = 0
uid = get_uid.seqn[base_name]
get_uid.seqn[base_name] += 1
get_uid.lock.release()
return uid
get_uid.seqn = {}
get_uid.lock = threading.Lock()
def add_activation(activation_type, **kwargs):
"""
Create an activation layer based on activation type and additional arguments.
Note that the needed kwargs depend on the activation type.
Args:
activation_type (str): String to indicate activation type.
kwargs (dict): Additional keyword arguments depending on the activation type.
Returns:
activation_layer (a subclass of keras.layers.Layer): The layer type
depends on activation_type.
"""
if activation_type == 'relu-n':
max_value = kwargs.get('max_value', None)
activation_layer = keras.layers.ReLU(max_value=max_value)
elif activation_type == 'lrelu':
alpha = kwargs['alpha']
activation_layer = keras.layers.LeakyReLU(alpha=alpha)
elif activation_type == 'elu':
alpha = kwargs['alpha']
activation_layer = keras.layers.ELU(alpha=alpha)
else:
activation_layer = keras.layers.Activation(activation_type, **kwargs)
return activation_layer
class CNNBlock(object):
"""A functor for creating a block of layers."""
@add_arg_scope
def __init__(self,
use_batch_norm,
use_shortcuts,
data_format,
kernel_regularizer,
bias_regularizer,
repeat,
stride,
subblocks,
index=None,
activation_type='relu',
freeze_bn=False,
freeze_block=False,
activation_kwargs=None,
dilation_rate=(1, 1),
all_projections=False,
use_bias=True):
"""
Initialization of the block functor object.
Args:
use_batch_norm (bool): whether batchnorm should be added after each convolution.
use_shortcuts (bool): whether shortcuts should be used. A typical ResNet by definition
uses shortcuts, but these can be toggled off to use the same ResNet topology without
the shortcuts.
data_format (str): either 'channels_last' (NHWC) or 'channels_first' (NCHW).
kernel_regularizer (float): regularizer to apply to kernels.
bias_regularizer (float): regularizer to apply to biases.
repeat (int): repeat number.
stride (int): The filter stride to be applied only to the first subblock (typically used
for downsampling). Strides are set to 1 for all layers beyond the first subblock.
subblocks (list of tuples): A list of tuples defining settings for each consecutive
convolution. Example:
`[(3, 64), (3, 64)]`
The two items in each tuple represents the kernel size and the amount of filters in
a convolution, respectively. The convolutions are added in the order of the list.
index (int): the index of the block to be created.
activation_type (str): activation function type.
activation_kwargs (dict): Additional activation keyword arguments to be fed to
the add_activation function.
dilation_rate (int or (int, int)): An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
            all_projections (bool): A boolean flag to determine whether all shortcut connections
should be implemented as projection layers to facilitate full pruning or not.
use_bias (bool): whether the layer uses a bias vector.
"""
self.use_batch_norm = use_batch_norm
self.use_shortcuts = use_shortcuts
self.all_projections = all_projections
self.data_format = data_format
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.activation_type = activation_type
self.activation_kwargs = activation_kwargs or {}
self.dilation_rate = dilation_rate
self.repeat = repeat
self.stride = stride
self.use_bias = use_bias
self.subblocks = subblocks
self.subblock_ids = subblock_ids()
self.freeze_bn = freeze_bn
self.freeze_block = freeze_block
if index is not None:
self.name = 'block_%d' % index
else:
self.name = 'block_%d' % (get_uid('block') + 1)
def __call__(self, x):
"""Build the block.
Args:
x (tensor): input tensor.
Returns:
tensor: the output tensor after applying the block on top of input `x`.
"""
for i in range(self.repeat):
name = '%s%s_' % (self.name, self.subblock_ids[i])
if i == 0:
# Set the stride only on the first layer.
stride = self.stride
dimension_changed = True
else:
stride = 1
dimension_changed = False
x = self._subblocks(x,
stride,
dimension_changed,
name_prefix=name,
freeze=self.freeze_block)
return x
def _subblocks(self, x, stride, dimension_changed, name_prefix=None, freeze=False):
"""
Stack several convolutions in a specific sequence given by a list of subblocks.
Args:
x (tensor): the input tensor.
stride (int): The filter stride to be applied only to the first subblock (typically used
for downsampling). Strides are set to 1 for all layers beyond the first subblock.
dimension_changed (bool): This indicates whether the dimension has been changed for this
block. If this is true, then we need to account for the change, or else we will be
unable to re-add the shortcut tensor due to incompatible dimensions. This can be
solved by applying a (1x1) convolution [1]. (The paper also notes the possibility of
zero-padding the shortcut tensor to match any larger output dimension, but this is
not implemented.)
name_prefix (str): name prefix for all the layers created in this function.
freeze (bool): whether to freeze the convolution layers in this block.
Returns:
tensor: the output tensor after applying the ResNet block on top of input `x`.
"""
bn_axis = get_batchnorm_axis(self.data_format)
shortcut = x
nblocks = len(self.subblocks)
for i in range(nblocks):
kernel_size, filters = self.subblocks[i]
if i == 0:
strides = (stride, stride)
else:
strides = (1, 1)
x = keras.layers.Conv2D(
filters, (kernel_size, kernel_size),
strides=strides,
padding='same',
dilation_rate=self.dilation_rate,
data_format=self.data_format,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%sconv_%d' % (name_prefix, i + 1),
trainable=not freeze)(x)
if self.use_batch_norm:
if self.freeze_bn:
x = keras.layers.BatchNormalization(
axis=bn_axis,
name='%sbn_%d' % (name_prefix, i + 1))(x, training=False)
else:
x = keras.layers.BatchNormalization(
axis=bn_axis, name='%sbn_%d' % (name_prefix, i + 1))(x)
if i != nblocks - 1: # All except last conv in block.
x = add_activation(self.activation_type,
name='%s%s_%d' % (name_prefix, self.activation_type, i + 1))(x)
if self.use_shortcuts:
if self.all_projections:
# Implementing shortcut connections as 1x1 projection layers irrespective of
# dimension change.
shortcut = keras.layers.Conv2D(
filters, (1, 1),
strides=(stride, stride),
data_format=self.data_format,
dilation_rate=self.dilation_rate,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%sconv_shortcut' % name_prefix,
trainable=not freeze)(shortcut)
if self.use_batch_norm:
if self.freeze_bn:
_name = '%sbn_shortcut' % name_prefix
shortcut = keras.layers.BatchNormalization(
axis=bn_axis,
name=_name)(shortcut, training=False)
else:
shortcut = keras.layers.BatchNormalization(
axis=bn_axis, name='%sbn_shortcut' % name_prefix)(shortcut)
else:
# Add projection layers to shortcut only if there is a change in dimension.
if dimension_changed: # Dimension changed.
shortcut = keras.layers.Conv2D(
filters, (1, 1),
strides=(stride, stride),
data_format=self.data_format,
dilation_rate=self.dilation_rate,
use_bias=self.use_bias,
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
name='%sconv_shortcut' % name_prefix,
trainable=not freeze)(shortcut)
if self.use_batch_norm:
if self.freeze_bn:
shortcut = keras.layers.BatchNormalization(
axis=bn_axis, name='%sbn_shortcut' % name_prefix)(shortcut,
training=False)
else:
shortcut = keras.layers.BatchNormalization(
axis=bn_axis, name='%sbn_shortcut' % name_prefix)(shortcut)
x = keras.layers.add([x, shortcut])
x = add_activation(self.activation_type,
name='%s%s' % (name_prefix, self.activation_type))(x)
return x
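# Usage sketch (added for illustration, not part of the original module): a two-convolution
# residual block with downsampling applied to a channels_first feature map. All
# hyper-parameter values below are illustrative only.
def _example_cnn_block():
    inputs = keras.layers.Input(shape=(64, 32, 32))
    block_fn = CNNBlock(use_batch_norm=True,
                        use_shortcuts=True,
                        data_format='channels_first',
                        kernel_regularizer=None,
                        bias_regularizer=None,
                        repeat=2,
                        stride=2,
                        subblocks=[(3, 64), (3, 64)],
                        index=1)
    outputs = block_fn(inputs)
    return keras.models.Model(inputs=inputs, outputs=outputs)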
@add_arg_scope
def fire_module(inputs, block_id, squeeze, expand, kernel_regularizer=None,
bias_regularizer=None, data_format='channels_first',
trainable=True):
"""
The squeeze net fire module architecture.
For details, see https://arxiv.org/pdf/1602.07360.pdf
Args:
inputs(tensor): Input tensor.
block_id(int): Block id for the current module.
squeeze(int): Number of filters for the squeeze conv layer.
expand(int): Number of filters for the expand conv layers (1x1 and 3x3).
kernel_regularizer: Kernel regularizer applied to the model.
bias_regularizer: Bias regularizer applied to the model.
data_format(str): Data format, can be 'channels_first' or 'channels_last'.
trainable(bool): Whether to make the conv layers trainable or not.
Returns:
The output tensor.
"""
concat_axis = 1 if data_format == 'channels_first' else 3
x = keras.layers.Conv2D(
squeeze,
kernel_size=(1, 1),
padding='same',
name='fire' + str(block_id) + '_squeeze_conv',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
trainable=trainable)(inputs)
x = keras.layers.Activation('relu', name='fire' + str(block_id) + '_squeeze')(x)
b_1x1 = keras.layers.Conv2D(
expand,
kernel_size=(1, 1),
padding='same',
name='fire' + str(block_id) + '_expand_conv1x1',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
trainable=trainable)(x)
b_1x1 = keras.layers.Activation('relu', name='fire' + str(block_id) + '_expand_1x1')(b_1x1)
b_3x3 = keras.layers.Conv2D(
expand,
kernel_size=(3, 3),
padding='same',
name='fire' + str(block_id) + '_expand_conv3x3',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
trainable=trainable)(x)
b_3x3 = keras.layers.Activation('relu', name='fire' + str(block_id) + '_expand_3x3')(b_3x3)
return keras.layers.Concatenate(axis=concat_axis, name='fire' + str(block_id))([b_1x1, b_3x3])
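# Usage sketch (added for illustration, not part of the original module): one SqueezeNet fire
# module on a channels_first feature map; the filter counts are illustrative only.
def _example_fire_module():
    inputs = keras.layers.Input(shape=(96, 55, 55))
    outputs = fire_module(inputs, block_id=2, squeeze=16, expand=64,
                          data_format='channels_first')
    return keras.models.Model(inputs=inputs, outputs=outputs)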
def swish(x):
"""Swish activation function.
# Arguments
x: Input tensor.
# Returns
The Swish activation: `x * sigmoid(x)`.
# References
[Searching for Activation Functions](https://arxiv.org/abs/1710.05941)
"""
return x * keras.backend.sigmoid(x)
def mish(x):
"""Mish activation function.
See details: https://arxiv.org/pdf/1908.08681.pdf
Args:
x: input tensor
Returns:
mish(x) = x * tanh(ln(1 + e^x))
"""
return x * tf.math.tanh(tf.math.softplus(x))
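# Usage sketch (added for illustration, not part of the original module): swish and mish are
# plain tensor functions, so they can be wrapped in a keras Activation layer directly.
def _example_custom_activations():
    swish_layer = keras.layers.Activation(swish, name='swish_example')
    mish_layer = keras.layers.Activation(mish, name='mish_example')
    return swish_layer, mish_layer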
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_out',
# EfficientNet actually uses an untruncated normal distribution for
# initializing conv layers, but keras.initializers.VarianceScaling uses
# a truncated distribution.
# We decided against a custom initializer for better serializability.
'distribution': 'untruncated_normal'
}
}
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1. / 3.,
'mode': 'fan_out',
'distribution': 'uniform'
}
}
def correct_pad(inputs, kernel_size):
"""Returns a tuple for zero-padding for 2D convolution with downsampling.
# Arguments
inputs: Input tensor whose spatial dimensions determine the padding.
kernel_size: An integer or tuple/list of 2 integers.
# Returns
A padding tuple of the form ((pad_top, pad_bottom), (pad_left, pad_right)).
"""
img_dim = 2 if keras.backend.image_data_format() == 'channels_first' else 1
input_size = keras.backend.int_shape(inputs)[img_dim:(img_dim + 2)]
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if input_size[0] is None:
adjust = (1, 1)
else:
adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
correct = (kernel_size[0] // 2, kernel_size[1] // 2)
return ((correct[0] - adjust[0], correct[0]),
(correct[1] - adjust[1], correct[1]))
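# Worked example (added for illustration, not part of the original source), assuming the
# default 'channels_last' image data format: for a 224x224 input and kernel_size=3,
# adjust is (1, 1) and correct is (1, 1), so the returned padding is ((0, 1), (0, 1));
# an odd-sized 225x225 input instead yields ((1, 1), (1, 1)).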
def round_filters(filters, divisor, width_coefficient):
"""Round number of filters based on depth multiplier."""
filters *= width_coefficient
new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats, depth_coefficient):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
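# Worked example (added for illustration, not part of the original source): with divisor=8 and
# width_coefficient=1.2, 32 filters scale to 38.4 and snap to 40, the nearest multiple of 8
# that is not more than 10% below the scaled value; 4 repeats with depth_coefficient=1.4
# become ceil(5.6) = 6.
def _example_rounding():
    assert round_filters(32, divisor=8, width_coefficient=1.2) == 40
    assert round_repeats(4, depth_coefficient=1.4) == 6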
def block(inputs, activation_fn=swish, drop_rate=0., name='',
filters_in=32, filters_out=16, kernel_size=3, strides=1,
expand_ratio=1, se_ratio=0., id_skip=True, freeze=False,
freeze_bn=False, use_td=False, kernel_regularizer=None,
bias_regularizer=None, use_bias=False, data_format='channels_last'):
"""A mobile inverted residual block.
# Arguments
inputs: input tensor.
activation_fn: activation function.
drop_rate: float between 0 and 1, fraction of the input units to drop.
name: string, block label.
filters_in: integer, the number of input filters.
filters_out: integer, the number of output filters.
kernel_size: integer, the dimension of the convolution window.
strides: integer, the stride of the convolution.
expand_ratio: integer, scaling coefficient for the input filters.
se_ratio: float between 0 and 1, fraction to squeeze the input filters.
id_skip: boolean.
freeze(bool): Freeze this block or not.
freeze_bn(bool): Freeze all the BN layers in this block or not.
use_td(bool): Use TimeDistributed wrapper layers for this block or not.
This is used to support 5D input tensors, e.g. in FasterRCNN use case.
kernel_regularizer: The kernel regularizer.
bias_regularizer: The bias regularizer.
use_bias(bool): Use bias or not for Conv layers followed by a BN layer.
data_format(str): Data format, can be 'channels_first' or 'channels_last'.
# Returns
output tensor for the block.
"""
bn_opt = {
'momentum': 0.99,
'epsilon': 1e-3
}
bn_axis = 3 if keras.backend.image_data_format() == 'channels_last' else 1
# Expansion phase
filters = filters_in * expand_ratio
if expand_ratio != 1:
layer = keras.layers.Conv2D(
filters,
1,
padding='same',
use_bias=use_bias,
kernel_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not freeze,
data_format=data_format,
name=name + 'expand_conv'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
x = layer(inputs)
layer = keras.layers.BatchNormalization(axis=bn_axis, name=name + 'expand_bn',
**bn_opt)
if use_td:
layer = keras.layers.TimeDistributed(layer)
if freeze_bn:
x = layer(x, training=False)
else:
x = layer(x)
x = keras.layers.Activation(activation_fn, name=name + 'expand_activation')(x)
else:
x = inputs
# Depthwise Convolution
if strides == 2:
layer = keras.layers.ZeroPadding2D(
padding=correct_pad(x, kernel_size),
data_format=data_format,
name=name + 'dwconv_pad'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
x = layer(x)
conv_pad = 'valid'
else:
conv_pad = 'same'
layer = keras.layers.DepthwiseConv2D(
kernel_size,
strides=strides,
padding=conv_pad,
use_bias=use_bias,
depthwise_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
trainable=not freeze,
name=name + 'dwconv'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
x = layer(x)
layer = keras.layers.BatchNormalization(axis=bn_axis, name=name + 'bn',
**bn_opt)
if use_td:
layer = keras.layers.TimeDistributed(layer)
if freeze_bn:
x = layer(x, training=False)
else:
x = layer(x)
x = keras.layers.Activation(activation_fn, name=name + 'activation')(x)
# Squeeze and Excitation phase
if 0 < se_ratio <= 1:
filters_se = max(1, int(filters_in * se_ratio))
# Global pooling is needed if we are going to support dynamic
# input shapes (e.g., in FasterRCNN) for this backbone.
# AveragePooling2D requires a static input shape, hence it cannot work with
# dynamic shapes.
if use_td:
# GlobalAveragePooling2D does not work well under a TimeDistributed wrapper:
# when converted to UFF it becomes a Mean op, which cannot handle 5D input
# by itself the way Conv2D does. So we rely on manual shape transforms: the
# pooling sees a 4D input (N, R*C, H, W), and we reshape back to
# (N, R, C, 1, 1) after global pooling.
R, C, H, W = x.get_shape().as_list()[1:]
assert None not in (R, C, H, W), (
"Expect R, C, H, W all not None. While got {}".format((R, C, H, W))
)
# Another issue relates to pruning: in modulus pruning, a Reshape layer cannot
# follow a pruned layer because the channel dimension changes after pruning.
# We therefore avoid hard-coding the target shape to (R*C, H, W) and reshape to
# (N, -1, H, W) instead, so the logic stays correct whether or not the filter
# count C changes during pruning.
se = keras.layers.Reshape((-1, H, W), name=name + 'pre_pool_reshape')(x)
se = keras.layers.GlobalAveragePooling2D(
data_format=data_format, name=name + 'se_squeeze')(se)
layer = keras.layers.Reshape((R, -1, 1, 1), name=name + 'post_pool_reshape')
se = layer(se)
else:
se = keras.layers.GlobalAveragePooling2D(
data_format=data_format, name=name + 'se_squeeze')(x)
# _, cc = se.get_shape()
se_shape = (1, 1, -1) if data_format == 'channels_last' else (-1, 1, 1)
se = keras.layers.Reshape(se_shape, name=name + 'se_reshape')(se)
layer = keras.layers.Conv2D(
filters_se,
1,
padding='same',
activation=activation_fn,
kernel_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not freeze,
data_format=data_format,
name=name + 'se_reduce'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
se = layer(se)
layer = keras.layers.Conv2D(
filters,
1,
padding='same',
activation='sigmoid',
kernel_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
data_format=data_format,
trainable=not freeze,
name=name + 'se_expand'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
se = layer(se)
x = keras.layers.Multiply(name=name + 'se_excite')([x, se])
# Output phase
layer = keras.layers.Conv2D(
filters_out,
1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not freeze,
data_format=data_format,
name=name + 'project_conv'
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
x = layer(x)
layer = keras.layers.BatchNormalization(axis=bn_axis, name=name + 'project_bn',
**bn_opt)
if use_td:
layer = keras.layers.TimeDistributed(layer)
if freeze_bn:
x = layer(x, training=False)
else:
x = layer(x)
if (id_skip is True and strides == 1 and filters_in == filters_out):
if drop_rate > 0:
layer = keras.layers.Dropout(
drop_rate,
noise_shape=(None, 1, 1, 1),
name=name + 'drop',
)
if use_td:
layer = keras.layers.TimeDistributed(layer)
x = layer(x)
x = keras.layers.Add(name=name + 'add')([x, inputs])
return x
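# Usage sketch (added for illustration, not part of the original module): a single MBConv block
# with expansion ratio 6 and squeeze-excitation, as used in EfficientNet-style backbones.
# All values below are illustrative only and assume a channels_last backend.
def _example_mbconv_block():
    inputs = keras.layers.Input(shape=(112, 112, 32))
    outputs = block(inputs, activation_fn=swish, drop_rate=0.2, name='block1a_',
                    filters_in=32, filters_out=16, kernel_size=3, strides=1,
                    expand_ratio=6, se_ratio=0.25, id_skip=True,
                    data_format='channels_last')
    return keras.models.Model(inputs=inputs, outputs=outputs)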
def force_stride16(block_args):
"""Force the block args to make the model have stride 16."""
last_block = -1
for idx, block in enumerate(block_args):
if block['strides'] == 2:
last_block = idx
assert last_block >= 0, (
"Cannot find stride 2 in the block args."
)
# pop the layer with last stride 2 and following layers
# to keep the total stride of 16
block_args = block_args[:last_block]
return block_args
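# Usage sketch (added for illustration, not part of the original module; assumes force_stride16
# returns the truncated block_args list as above): dropping the last stride-2 stage and
# everything after it caps the backbone's cumulative stride at 16. The dicts are illustrative.
def _example_force_stride16():
    block_args = [
        {'strides': 1, 'filters_out': 16},
        {'strides': 2, 'filters_out': 24},
        {'strides': 2, 'filters_out': 40},
        {'strides': 2, 'filters_out': 80},   # last stride-2 stage and its successors are dropped
        {'strides': 1, 'filters_out': 112},
    ]
    return force_stride16(block_args)  # keeps only the first three entries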
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/utils_tf.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus model templates for alexnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from nvidia_tao_tf1.core.models.import_keras import keras as keras_fn
keras = keras_fn()
K = keras.backend
def AlexNet(input_shape,
nclasses=1000,
data_format=None,
kernel_regularizer=None,
bias_regularizer=None,
add_head=True,
weights=None,
hidden_fc_neurons=4096,
freeze_blocks=None):
"""
Construct AlexNet with/without dense head layers.
Args:
input_shape (tuple): shape of the input image. The shape must be
provided as per the data_format input for the model.
(C, W, H) for channels_first or (W, H, C) for
channels_last, where
C = number of channels,
W = width of image,
H = height of image.
nclasses (int): number of output classes (defaulted to 1000 outputs)
data_format (str): either 'channels_last' or 'channels_first'.
kernel_regularizer (keras.regularizer attribute): Regularization type
for kernels.
keras.regularizer.l1(wd) or,
keras.regularizer.l2(wd) where,
wd = weight_decay.
bias_regularizer (keras.regularizer attribute): Regularization type
for biases.
keras.regularizer.l1(wd) or,
keras.regularizer.l2(wd) where,
wd = weight_decay.
add_head (bool): whether to add the FC layer heads to the model or not.
If 'False', the network will not have the FC-6 to FC-8 dense layers.
If 'True', the network will have the FC layers appended to it.
weights (str): path to the pretrained weights .h5 file.
hidden_fc_neurons (int): number of neurons in hidden fully-connected
layers. The original AlexNet has 4096 of those but a smaller number
can be used to build a more parsimonious model.
freeze_blocks(list): the list of blocks to be frozen in the model.
Returns:
Model: the AlexNet model constructed for the given input shape.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in ["channels_first", "channels_last"]:
raise ValueError("Unsupported data_format (%s)" % data_format)
if freeze_blocks is None:
freeze_blocks = []
# Input layer
input_image = keras.layers.Input(shape=(input_shape))
# Conv block 1
conv1 = keras.layers.Conv2D(
96,
kernel_size=(11, 11),
strides=(4, 4),
data_format=data_format,
name='conv1',
kernel_regularizer=kernel_regularizer,
padding='same',
bias_regularizer=bias_regularizer,
activation='relu',
trainable=not(1 in freeze_blocks))(input_image)
conv1 = keras.layers.MaxPooling2D(
pool_size=(3, 3), strides=(2, 2), padding='same', data_format=data_format,
name='pool1')(conv1)
# Conv block 2
conv2 = keras.layers.Conv2D(
256, (5, 5),
strides=(1, 1),
data_format=data_format,
name='conv2',
kernel_regularizer=kernel_regularizer,
padding='same',
bias_regularizer=bias_regularizer,
activation='relu',
trainable=not(2 in freeze_blocks))(conv1)
conv2 = keras.layers.MaxPooling2D(
pool_size=(3, 3), strides=(2, 2), padding='same', data_format=data_format,
name='pool2')(conv2)
# 'Conv block 3'
conv3 = keras.layers.Conv2D(
384, (3, 3),
strides=(1, 1),
data_format=data_format,
name='conv3',
kernel_regularizer=kernel_regularizer,
padding='same',
bias_regularizer=bias_regularizer,
activation='relu',
trainable=not(3 in freeze_blocks))(conv2)
# 'Conv block 4'
conv4 = keras.layers.Conv2D(
384, (3, 3),
strides=(1, 1),
data_format=data_format,
name='conv4',
kernel_regularizer=kernel_regularizer,
padding='same',
bias_regularizer=bias_regularizer,
activation='relu',
trainable=not(4 in freeze_blocks))(conv3)
# 'Conv block 5'
x = keras.layers.Conv2D(
256, (3, 3),
strides=(1, 1),
data_format=data_format,
name='conv5',
kernel_regularizer=kernel_regularizer,
padding='same',
bias_regularizer=bias_regularizer,
activation='relu',
trainable=not(5 in freeze_blocks))(conv4)
# 'FC Layers'
if add_head:
conv5 = keras.layers.Flatten(name='flatten')(x)
# FC - 6
fc6 = keras.layers.Dense(
hidden_fc_neurons,
name='fc6',
activation='relu',
kernel_initializer='glorot_uniform',
use_bias=True,
bias_initializer='zeros',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not(6 in freeze_blocks))(conv5)
# FC - 7
fc7 = keras.layers.Dense(
hidden_fc_neurons,
name='fc7',
activation='relu',
kernel_initializer='glorot_uniform',
use_bias=True,
bias_initializer='zeros',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not(7 in freeze_blocks))(fc6)
# FC - 8
x = keras.layers.Dense(
nclasses,
activation='softmax',
name='head_fc8',
kernel_initializer='glorot_uniform',
use_bias=True,
bias_initializer='zeros',
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=not(8 in freeze_blocks))(fc7)
# Setting up graph
model = keras.models.Model(inputs=input_image, outputs=x, name='AlexNet')
# Loading pretrained weights if mentioned
if weights is not None:
if os.path.exists(weights):
model.load_weights(weights, by_name=True)
return model
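# Usage sketch (added for illustration, not part of the original module): building AlexNet for a
# 3x227x227 channels_first input with the classification head attached. Values are illustrative.
def _example_build_alexnet():
    return AlexNet(input_shape=(3, 227, 227),
                   nclasses=1000,
                   data_format='channels_first',
                   add_head=True)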
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/templates/alexnet.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus export APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import tempfile
import keras
from nvidia_tao_tf1.core.export._quantized import (
check_for_quantized_layers,
process_quantized_layers,
)
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import graph_io
from tensorflow.python.tools import freeze_graph
from tensorflow.python.training import saver as saver_lib
import uff
from uff.model.utils import convert_to_str
"""Logger for UFF export APIs."""
logger = logging.getLogger(__name__)
def _reload_model_for_inference(model, custom_objects=None):
"""Reload a model specifically for doing inference.
In order to export a model we need to remove training-specific
parts of the graph. For example, BatchNormalization layers
may feature conditional branching to do training and inference
alternately. This confuses the UFF export tool.
NOTE: the current Keras session is cleared in this function.
Do not use this function during training.
Args:
model (Model): Keras model to reload in inference mode.
custom_objects (dict): dictionary mapping names (strings) to custom
classes or functions to be considered during deserialization for export.
Returns:
A model that can be used for inference only.
"""
# Save model to a temp file so we can reload it later.
os_handle, tmp_model_file_name = tempfile.mkstemp(suffix=".h5")
os.close(os_handle)
model.save(tmp_model_file_name)
# Make sure Keras session is clean and tuned for inference.
keras.backend.clear_session()
keras.backend.set_learning_phase(0)
@classmethod
def apply_fused_padding(cls, tf_node, inputs, tf_nodes):
tf_padding = convert_to_str(tf_node.attr["padding"].s)
padding = None
fields = {}
if tf_padding == "SAME":
fields["implicit_padding"] = "same"
elif tf_padding == "VALID":
fields["implicit_padding"] = None
tf_lhs_node = tf_nodes[inputs[0]]
if tf_lhs_node.op == "Pad":
tf_padding_node = tf_nodes[tf_lhs_node.input[1]]
p = cls.convert_tf2numpy_const_node(tf_padding_node)
before, after = p[:, 0].tolist(), p[:, 1].tolist()
if before == after:
padding = before
inputs[0] = tf_lhs_node.input[0]
if tf_nodes[inputs[0]].op == "Identity":
logger.info("Modulus patch identity layer in padding inputs.")
inputs[0] = tf_nodes[inputs[0]].input[0]
else:
raise ValueError("Padding mode %s not supported" % tf_padding)
return inputs, padding, fields
def compose_call(prev_call_method):
def call(self, inputs, training=False):
return prev_call_method(self, inputs, training)
return call
def dropout_patch_call(self, inputs, training=False):
# Just return the input tensor. Keras will map this to ``keras.backend.identity``,
# which the TensorRT 3.0 UFF parser supports.
return inputs
# Patch BatchNormalization and Dropout call methods so they don't create
# the training part of the graph.
prev_batchnorm_call = keras.layers.normalization.BatchNormalization.call
prev_dropout_call = keras.layers.Dropout.call
logger.debug("Patching keras BatchNormalization...")
keras.layers.normalization.BatchNormalization.call = compose_call(
prev_batchnorm_call
)
logger.debug("Patching keras Dropout...")
keras.layers.Dropout.call = dropout_patch_call
logger.debug("Patching UFF TensorFlow converter apply_fused_padding...")
uff.converters.tensorflow.converter.TensorFlowToUFFConverter.apply_fused_padding = (
apply_fused_padding
)
# Reload the model.
model = keras.models.load_model(
tmp_model_file_name, compile=False, custom_objects=custom_objects
)
# Unpatch Keras.
logger.debug("Unpatching keras BatchNormalization layer...")
keras.layers.normalization.BatchNormalization.call = prev_batchnorm_call
logger.debug("Unpatching keras Dropout layer...")
keras.layers.Dropout.call = prev_dropout_call
# Delete temp file.
os.remove(tmp_model_file_name)
return model
def keras_to_pb(model, output_filename, output_node_names, custom_objects=None):
"""Export a Keras model to Protobuf format.
The Protobuf format is a TensorFlow-specific representation
of the model.
NOTE: the current Keras session is cleared in this function.
Do not use this function during training.
Args:
model (Model): Keras model to export.
output_filename (str): file to write exported model to.
output_node_names (list of str): list of model output node names as
returned by model.layers[some_idx].get_output_at(0).name.split(':')[0].
If None, then the model output layers are used as output nodes.
custom_objects (dict): dictionary mapping names (strings) to custom
classes or functions to be considered during deserialization for export.
Returns:
tuple<in_tensor_name(s), out_tensor_name(s), in_tensor_shape(s)>:
in_tensor_name(s): The name(s) of the input nodes. If there is only one name, it will be
returned as a single string, otherwise a list of strings.
out_tensor_name(s): The name(s) of the output nodes. If there is only one name, it will be
returned as a single string, otherwise a list of strings.
in_tensor_shape(s): The shape(s) of the input tensors for this network. If there is only
one input tensor, it will be returned as a single list<int>, otherwise
a list<list<int>>.
"""
model = _reload_model_for_inference(model, custom_objects=custom_objects)
layers_with_external_state_io = [
layer for layer in model.layers if hasattr(layer, "is_stateful")
]
def get_layer_name(layer):
_layer_outputs = layer.get_output_at(0)
if isinstance(_layer_outputs, list):
return [lo.name.split(":")[0] for lo in _layer_outputs]
return _layer_outputs.name.split(":")[0]
# Get names of input and output nodes.
in_tensors = model.inputs
in_tensor_shape = keras.backend.int_shape(in_tensors[0])
in_name = in_tensors[0].op.name
if layers_with_external_state_io:
in_name = [in_name]
in_tensor_shape = [in_tensor_shape]
for layer in layers_with_external_state_io:
if layer.is_stateful:
in_name.append(layer.state_input_name)
else:
# Add feature maps of past frames for stateless models
in_name.extend(layer._past_feature_names)
shape = layer.input_shape
shape = shape if shape[0] is None or isinstance(shape[0], int) else shape[0]
in_tensor_shape.append(shape)
if output_node_names is None:
output_node_names = [t.op.name for t in model.outputs]
# Replace the sliced output node with original output layers. For example, an output node
# named `sliced_output_cov/Sigmoid` will be replaced with `output_cov/Sigmoid`
layer_output_names = [get_layer_name(layer) for layer in model.layers]
original_output_names = []
for name in output_node_names:
# For each sliced output node, search its original node by name and use the original
# node to replace the sliced output node.
if name.startswith("sliced_output_"):
original_output_name_prefix = name.split("/")[0][7:]
original_output_names += [
output_name
for output_name in layer_output_names
if output_name.startswith(original_output_name_prefix)
]
else:
original_output_names.append(name)
output_node_names = original_output_names
# Add output node names for the recurrent layers,
# to handle the state external to TRT model.
for layer in layers_with_external_state_io:
if layer.is_stateful:
temporal_output_node_name = get_layer_name(layer)
else:
temporal_output_node_name = layer.get_input_at(0).name.split(":")[0]
if temporal_output_node_name not in output_node_names:
output_node_names.append(temporal_output_node_name)
# Freeze model.
sess = keras.backend.get_session()
# TensorFlow freeze_graph expects a comma separated string of output node names.
output_node_names_tf = ",".join(output_node_names)
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
# Save the checkpoint file to a temporary location.
os_handle, tmp_ckpt_file_name = tempfile.mkstemp(suffix=".ckpt")
os.close(os_handle)
checkpoint_path = saver.save(sess, tmp_ckpt_file_name)
graph_io.write_graph(sess.graph, ".", output_filename)
freeze_graph.freeze_graph(
input_graph=output_filename,
input_saver="",
input_binary=False,
input_checkpoint=checkpoint_path,
output_node_names=output_node_names_tf,
restore_op_name="save/restore_all",
filename_tensor_name="save/Const:0",
output_graph=output_filename,
clear_devices=False,
initializer_nodes="",
)
# Clean up.
os.remove(tmp_ckpt_file_name)
return in_name, output_node_names, in_tensor_shape
def pb_to_uff(input_filename, output_filename, out_names, text=False, quiet=True):
"""Convert a TensorFlow model to UFF.
The input model needs to be passed as a frozen Protobuf file.
The export UFF model may be parsed and optimized by TensorRT.
Args:
input_filename (str): path to protobuf file.
output_filename (str): file to write exported model to.
out_names (list of str): list of the names of the output nodes.
text (boolean): whether to save .pbtxt file.
quiet (boolean): whether to enable quiet mode.
"""
uff.from_tensorflow_frozen_model(
input_filename,
out_names,
output_filename=output_filename,
text=text,
quiet=quiet,
)
def keras_to_uff(model, output_filename, output_node_names=None, custom_objects=None):
"""Export a Keras model to UFF format.
UFF stands for Universal Framework Format and is an NVIDIA
TensorRT file format for storing a neural network's topology and
weights.
NOTE: the current Keras session is cleared in this function.
Do not use this function during training.
Args:
model (Model): Keras model to export.
output_filename (str): file to write exported model to.
output_node_names (list of str): list of model output node names as
returned by model.layers[some_idx].get_output_at(0).name.split(':')[0].
If not provided, then the last layer is assumed to be the output node.
custom_objects (dict): dictionary mapping names (strings) to custom
classes or functions to be considered during deserialization for export.
Returns:
tuple<in_tensor_name(s), out_tensor_name(s), in_tensor_shape(s)>:
in_tensor_name(s): The name(s) of the input nodes. If there is only one name, it will be
returned as a single string, otherwise a list of strings.
out_tensor_name(s): The name(s) of the output nodes. If there is only one name, it will be
returned as a single string, otherwise a list of strings.
in_tensor_shape(s): The shape(s) of the input tensors for this network. If there is only
one input tensor, it will be returned as a single list<int>, otherwise
a list<list<int>>.
These must be passed to the TensorRT optimization tool to identify input and output blobs.
"""
# First, convert model to a temporary TensorFlow Protobuf.
if check_for_quantized_layers(model):
calib_json = output_filename + ".json"
model, _ = process_quantized_layers(model, "uff", calib_json=calib_json)
os_handle, tmp_pb_file_name = tempfile.mkstemp(suffix=".pb")
os.close(os_handle)
in_tensor_name, out_tensor_names, in_tensor_shapes = keras_to_pb(
model, tmp_pb_file_name, output_node_names, custom_objects=custom_objects
)
# Second, convert protobuf to UFF.
pb_to_uff(tmp_pb_file_name, output_filename, out_tensor_names)
# Clean up.
os.remove(tmp_pb_file_name)
# Return a string instead of a list if there is only one output node.
if len(out_tensor_names) == 1:
out_tensor_names = out_tensor_names[0]
return in_tensor_name, out_tensor_names, in_tensor_shapes
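# Usage sketch (added for illustration, not part of the original module): exporting a small
# Keras model to UFF. The model and file name below are illustrative only; running this
# requires the uff package and a TensorFlow 1.x environment.
def _example_keras_to_uff_export():
    inputs = keras.layers.Input(shape=(64, 64, 3))
    outputs = keras.layers.Conv2D(8, (3, 3), padding='same', name='conv_out')(inputs)
    model = keras.models.Model(inputs=inputs, outputs=outputs)
    in_name, out_names, in_shapes = keras_to_uff(model, 'example_model.uff')
    return in_name, out_names, in_shapes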
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/export/_uff.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus export APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
from nvidia_tao_tf1.core.export import caffe
from nvidia_tao_tf1.core.export._onnx import keras_to_onnx
from nvidia_tao_tf1.core.export._uff import keras_to_pb, keras_to_uff
from nvidia_tao_tf1.core.export.caffe import keras_to_caffe
from nvidia_tao_tf1.core.export.data import TensorFile
# The definitions below lazily call functions that depend on TensorRT.
# TensorRT currently has a bug where it takes up memory upon importing, so we want to defer the
# import of tensorrt to when it is actually used.
# TODO(xiangbok): Remove lazy calling when fixed in TensorRT (bugfix not yet in release).
class LazyModuleMethodCall(object):
def __init__(self, name, attr):
self._name = name
self._attr = attr
def __call__(self, *args, **kwargs):
module = importlib.import_module(name=self._name)
return getattr(module, self._attr)(*args, **kwargs)
keras_to_tensorrt = LazyModuleMethodCall(
"nvidia_tao_tf1.core.export._tensorrt", "keras_to_tensorrt"
)
load_tensorrt_engine = LazyModuleMethodCall(
"nvidia_tao_tf1.core.export._tensorrt", "load_tensorrt_engine"
)
tf_to_tensorrt = LazyModuleMethodCall("nvidia_tao_tf1.core.export._tensorrt", "tf_to_tensorrt")
__all__ = (
"caffe",
"keras_to_caffe",
"keras_to_onnx",
"keras_to_pb",
"keras_to_tensorrt",
"keras_to_uff",
"load_tensorrt_engine",
"TensorFile",
"tf_to_tensorrt",
)
|
tao_tensorflow1_backend-main
|
nvidia_tao_tf1/core/export/__init__.py
|