# MIT License
#
# Copyright (c) Microsoft
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Copyright (c) [2025] [Microsoft]
# SPDX-License-Identifier: MIT

import torch
import torch.nn as nn

from . import SparseTensor
from . import DEBUG

__all__ = [
    'SparseGroupNorm',
    'SparseLayerNorm',
    'SparseGroupNorm32',
    'SparseLayerNorm32',
]


class SparseGroupNorm(nn.GroupNorm):
    """GroupNorm applied independently to the active features of each sample in a SparseTensor batch."""

    def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
        super(SparseGroupNorm, self).__init__(num_groups, num_channels, eps, affine)

    def forward(self, input: SparseTensor) -> SparseTensor:
        nfeats = torch.zeros_like(input.feats)
        for k in range(input.shape[0]):
            if DEBUG:
                assert (input.coords[input.layout[k], 0] == k).all(), "SparseGroupNorm: batch index mismatch"
            # Gather this sample's features (N_k, C) and reshape to (1, C, N_k)
            # so nn.GroupNorm treats the active voxels as the spatial dimension.
            bfeats = input.feats[input.layout[k]]
            bfeats = bfeats.permute(1, 0).reshape(1, input.shape[1], -1)
            bfeats = super().forward(bfeats)
            bfeats = bfeats.reshape(input.shape[1], -1).permute(1, 0)
            nfeats[input.layout[k]] = bfeats
        return input.replace(nfeats)


class SparseLayerNorm(nn.LayerNorm):
    """LayerNorm over the channel dimension of a SparseTensor's features."""

    def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
        super(SparseLayerNorm, self).__init__(normalized_shape, eps, elementwise_affine)

    def forward(self, input: SparseTensor) -> SparseTensor:
        nfeats = torch.zeros_like(input.feats)
        for k in range(input.shape[0]):
            # nn.LayerNorm normalizes over the trailing (channel) dimension, so the
            # (N_k, C) feature block of each sample can be passed to it directly;
            # no permutation to (1, C, N_k) is needed here.
            bfeats = input.feats[input.layout[k]]
            bfeats = super().forward(bfeats)
            nfeats[input.layout[k]] = bfeats
        return input.replace(nfeats)


class SparseGroupNorm32(SparseGroupNorm):
    """
    A GroupNorm layer that converts to float32 before the forward pass.
    """
    def forward(self, x: SparseTensor) -> SparseTensor:
        return super().forward(x.float()).type(x.dtype)


class SparseLayerNorm32(SparseLayerNorm):
    """
    A LayerNorm layer that converts to float32 before the forward pass.
    """
    def forward(self, x: SparseTensor) -> SparseTensor:
        return super().forward(x.float()).type(x.dtype)
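

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module): how these layers
# are typically applied to a SparseTensor. It assumes coords of shape (N, 4)
# with the batch index in column 0 and feats of shape (N, C); the SparseTensor
# constructor signature shown below is an assumption and may differ from the
# actual API exposed by this package.
#
#   import torch
#   from . import SparseTensor
#   from .norm import SparseGroupNorm32
#
#   coords = torch.randint(0, 16, (1024, 4), dtype=torch.int32)
#   coords[:, 0] = 0                      # single-sample batch
#   feats = torch.randn(1024, 32, dtype=torch.float16)
#   x = SparseTensor(feats=feats, coords=coords)
#
#   norm = SparseGroupNorm32(num_groups=8, num_channels=32)
#   y = norm(x)                           # normalized in fp32, returned as fp16
# ---------------------------------------------------------------------------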