# Owner(s): ["oncall: distributed"]
from typing import Any, Callable
import torch
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._symbolic_trace import TracingConfig
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
from torch.distributed.fsdp.wrap import always_wrap_policy, ParamExecOrderWrapPolicy
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer0 = torch.nn.Linear(6, 6)
self.layer1 = torch.nn.Linear(6, 6, bias=False)
self.layer2 = torch.nn.Sequential(
torch.nn.Linear(6, 3, bias=False),
torch.nn.ReLU(),
torch.nn.Linear(3, 6, bias=False),
)
self.relu = torch.nn.ReLU()
def forward(self, x: Any, use_all_params: bool = True):
        # Forward execution order: `layer0` -> `layer2` -> `layer1`, which is
        # NOT consistent with the model definition order.
z = self.relu(self.layer0(x))
z = self.relu(self.layer2(z))
if use_all_params:
z = self.relu(self.layer1(z))
return z
def get_input(self, device: torch.device):
return (torch.randn((8, 6)).to(device),)
def get_loss(self, input, output):
return (output - input[0]).sum()
@staticmethod
def wrap(
sharding_strategy: ShardingStrategy,
device: torch.device,
wrap_policy: Callable,
) -> torch.nn.Module:
model = Model()
fsdp_model = FSDP(
model, auto_wrap_policy=wrap_policy, sharding_strategy=sharding_strategy
)
return fsdp_model.to(device)
class TestFSDPExecOrder(FSDPTest):
@property
def device(self):
return torch.device("cuda")
@skip_if_lt_x_gpu(2)
@parametrize(
"sharding_strategy",
[ShardingStrategy.FULL_SHARD, ShardingStrategy.SHARD_GRAD_OP],
)
def test_fsdp_flatten_params_exec_order(
self,
sharding_strategy: ShardingStrategy,
):
"""
Test ``_fsdp_params_exec_order`` with ``ParamExecOrderWrapPolicy``,
after running one iteration of forward and backward pass.
Here ``torch.fx`` is not enabled inside ``ParamExecOrderWrapPolicy``.
"""
wrap_policy = ParamExecOrderWrapPolicy(init_policy=always_wrap_policy)
fsdp_model = Model.wrap(sharding_strategy, self.device, wrap_policy=wrap_policy)
self.assertTrue(fsdp_model._is_param_exec_order_prep_stage())
# run one iteration to record the execution ordering
input = fsdp_model.module.get_input(self.device)
output = fsdp_model(*input)
loss = fsdp_model.module.get_loss(input, output).to(self.device)
loss.backward()
params_list = list(fsdp_model.parameters())
        # Since the forward execution order is NOT consistent with the model
        # definition order, the ordering in `_fsdp_params_exec_order` should
        # differ from that of `named_parameters()`.
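        # Concretely, `parameters()` follows definition order
        # (layer0, layer1, layer2[0], layer2[2] -> indices 0, 1, 2, 3), while
        # the recorded execution order is layer0 -> layer2[0] -> layer2[2] ->
        # layer1, i.e. indices [0, 2, 3, 1].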
self.assertEqual(
fsdp_model._fsdp_params_exec_order,
[params_list[0], params_list[2], params_list[3], params_list[1]],
)
self.assertTrue(fsdp_model._use_param_exec_order_policy())
self.assertTrue(not fsdp_model._is_param_exec_order_prep_stage())
@skip_if_lt_x_gpu(2)
@parametrize(
"sharding_strategy",
[ShardingStrategy.FULL_SHARD, ShardingStrategy.SHARD_GRAD_OP],
)
def test_fsdp_flatten_params_exec_order_symbolic_trace(
self,
sharding_strategy: ShardingStrategy,
):
"""
Tests ``ParamExecOrderWrapPolicy`` with symbolic tracing.
        With symbolic tracing enabled, ``_is_param_exec_order_prep_stage``
        should always be set to False.
"""
wrap_policy = ParamExecOrderWrapPolicy(
init_policy=always_wrap_policy,
tracing_config=TracingConfig(concrete_args={"use_all_params": False}),
)
fsdp_model = Model.wrap(
sharding_strategy,
self.device,
wrap_policy=wrap_policy,
)
params_list = list(fsdp_model.parameters())
        # Since the forward execution order is NOT consistent with the model
        # definition order, the ordering in `_fsdp_params_exec_order` should
        # differ from that of `named_parameters()`.
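        # With `use_all_params=False` fixed as a concrete arg during tracing,
        # `layer1` never runs, so its flattened parameter (index 1) is absent
        # from the recorded order: [0, 2, 3].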
self.assertEqual(
fsdp_model._fsdp_params_exec_order,
[params_list[0], params_list[2], params_list[3]],
)
self.assertTrue(fsdp_model._use_param_exec_order_policy())
self.assertTrue(not fsdp_model._is_param_exec_order_prep_stage())
instantiate_parametrized_tests(TestFSDPExecOrder)
if __name__ == "__main__":
run_tests()
# === end of file: pytorch-master / test/distributed/fsdp/test_fsdp_param_exec_order_wrap.py ===
# Owner(s): ["oncall: distributed"]
import itertools
import sys
from contextlib import suppress
from copy import deepcopy
from functools import partial
from typing import Any, Dict
import torch
import torch.nn as nn
from torch import distributed as dist
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper,
)
from torch.distributed.fsdp import CPUOffload, FullStateDictConfig
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import (
LocalStateDictConfig,
MixedPrecision,
StateDictType,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import (
FullyShardedDataParallel,
)
from torch.distributed.fsdp.shard_utils import _gather_state_dict
from torch.distributed.fsdp.wrap import (
enable_wrap,
transformer_auto_wrap_policy,
wrap,
)
from torch.nn import (
Linear,
Module,
TransformerDecoderLayer,
TransformerEncoderLayer,
)
from torch.nn.parallel import DistributedDataParallel
from torch.optim import SGD
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
CUDAInitMode,
FSDPInitMode,
FSDPTest,
SkipModel,
TransformerWithSharedParams,
_assert_module_states,
_get_state_dict,
_zero_model,
get_full_params,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize,
run_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
INNER_SHAPE = [4, 4]
OUTER_SHAPE = [4, 5]
BUFFER_SHAPE = [5, 5]
NON_ROOT_FSDP_PREFIX = 'non_fsdp_lin'
_UNFLATTENED_STATE_DICT_IMPLS = ["state_dict", "sharded_state_dict"]
_FLATTENED_STATE_DICT_IMPLS = ["local_state_dict"]
_SUPPORTED_STATE_DICT_IMPLS = (
_UNFLATTENED_STATE_DICT_IMPLS + _FLATTENED_STATE_DICT_IMPLS
)
STATE_DICT_MAPPING = {
"state_dict": StateDictType.FULL_STATE_DICT,
"local_state_dict": StateDictType.LOCAL_STATE_DICT,
"sharded_state_dict": StateDictType.SHARDED_STATE_DICT,
}
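# Quick reference for the three state-dict implementations exercised below,
# as observed by the tests in this file:
# - FULL_STATE_DICT ("state_dict"): unflattened, unsharded tensors whose keys
#   match the local (non-FSDP) model.
# - LOCAL_STATE_DICT ("local_state_dict"): the sharded ``flat_param`` tensors,
#   keyed per FSDP instance.
# - SHARDED_STATE_DICT ("sharded_state_dict"): keys match the local model, but
#   values are sharded tensors rather than full tensors.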
class Model(Module):
def __init__(self, wrap_fsdp, register_buffers=False, ignore_inner=False):
super().__init__()
self.inner = Linear(*INNER_SHAPE)
if register_buffers:
self.inner.register_buffer("buffer", torch.randn(BUFFER_SHAPE))
if wrap_fsdp:
self.inner = FSDP(self.inner, ignored_modules=([self.inner] if ignore_inner else []))
self.outer = Linear(*OUTER_SHAPE)
if register_buffers:
self.outer.register_buffer("buffer", torch.randn(BUFFER_SHAPE))
def forward(self, x):
# Forward twice.
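        # Calling ``self.inner`` twice in a single forward exercises reuse of a
        # (possibly FSDP-wrapped) submodule within one iteration.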
i = self.inner(x)
j = self.inner(x)
return self.outer(i + j)
class TestFSDPStateDict(FSDPTest):
@property
def world_size(self):
return 2
def _broadcast_state_dict(self, state_dict):
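        # Broadcast rank 0's (possibly rank-0-only) state dict so that every
        # rank ends up with a copy; non-zero ranks contribute a ``None``
        # placeholder that is overwritten in place.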
olist = [state_dict if self.rank == 0 else None]
dist.broadcast_object_list(olist)
return olist[0]
def _compare_models(self, model, model_new, assert_fn, check_fp16=False):
with FullyShardedDataParallel.summon_full_params(model):
with FullyShardedDataParallel.summon_full_params(model_new):
params = list(model.parameters())
params_new = list(model_new.parameters())
assert_fn(params, params_new)
if check_fp16:
for tensor in model_new.parameters():
self.assertEqual(tensor.dtype, torch.float16)
def _get_simple_nested_model(self, *fsdp_args, wrap=True, checkpoint_wrap=False, **fsdp_kwargs):
if wrap:
lin1 = nn.Linear(10, 10, bias=False).cuda()
lin2 = nn.Linear(10, 10, bias=False).cuda()
if checkpoint_wrap:
lin1 = checkpoint_wrapper(lin1)
lin2 = checkpoint_wrapper(lin2)
seq = nn.Sequential(FSDP(lin1, *fsdp_args, **fsdp_kwargs), lin2)
if checkpoint_wrap:
seq = checkpoint_wrapper(seq)
model = FSDP(seq, *fsdp_args, **fsdp_kwargs)
else:
model = nn.Sequential(
nn.Linear(10, 10, bias=False).cuda(), nn.Linear(10, 10, bias=False).cuda()
)
return model
def _get_simple_model(self, *fsdp_args, checkpoint_wrap=False, **fsdp_kwargs):
lin = nn.Linear(10, 10, bias=False).cuda()
if checkpoint_wrap:
lin = checkpoint_wrapper(lin)
model = FSDP(lin, *fsdp_args, **fsdp_kwargs)
return model
def _get_non_fsdp_root_module(self, *fsdp_args, wrap=True, **fsdp_kwargs):
class FSDPContainer(nn.Module):
def __init__(self, fsdp_1, fsdp_2):
super().__init__()
self.non_fsdp_lin = nn.Linear(10, 10, bias=False).cuda()
self.fsdp_1 = fsdp_1
self.fsdp_2 = fsdp_2
def forward(self, x):
x = self.non_fsdp_lin(x)
x = self.fsdp_1(x)
x = self.fsdp_2(x)
return x
return FSDPContainer(
self._get_simple_nested_model(*fsdp_args, wrap=wrap, **fsdp_kwargs),
self._get_simple_nested_model(*fsdp_args, wrap=wrap, **fsdp_kwargs),
)
def _get_state_dict_mgr(
self,
model: nn.Module,
state_dict_type: str,
state_dict_rank0_and_offload: bool,
):
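        # ``FSDP.state_dict_type`` returns a context manager that switches the
        # state-dict implementation (and its config) for every FSDP module in
        # ``model``. A typical save under it looks like (sketch):
        #   with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT,
        #                             FullStateDictConfig(rank0_only=True,
        #                                                 offload_to_cpu=True)):
        #       sd = model.state_dict()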
_state_dict_type = STATE_DICT_MAPPING[state_dict_type]
if state_dict_type == "state_dict":
config = FullStateDictConfig(
rank0_only=state_dict_rank0_and_offload,
offload_to_cpu=state_dict_rank0_and_offload,
)
else:
config = None
return FSDP.state_dict_type(model, _state_dict_type, config)
def _validate_state_dict_contents(
self, model, fsdp_state_dict, state_dict_rank0_and_offload, ignore_keys=None
):
if state_dict_rank0_and_offload:
if self.rank == 0:
self.assertNotEqual(fsdp_state_dict, {})
for key, tensor in fsdp_state_dict.items():
if ignore_keys and key in ignore_keys:
continue
self.assertEqual(
tensor.device,
torch.device("cpu"),
f"{key} is unexpectedly on device {tensor.device}",
)
else:
# For non-FSDP roots, the non FSDP portion can still have parameters on rank 0,
# so bypass the check for now.
if isinstance(model, FSDP):
self.assertEqual(fsdp_state_dict, {})
@skip_if_lt_x_gpu(2)
@parametrize("checkpoint_wrap", ["first", "second", "both"])
def test_fsdp_state_dict_with_activation_checkpoint(self, checkpoint_wrap):
"""Tests saving the state dict, zeroing a target model's parameters, and
loading the state dict, where the source and target models may have a
checkpoint wrapper."""
for model_call in [
partial(self._get_simple_model),
partial(self._get_simple_nested_model)
]:
model = model_call(checkpoint_wrap=(checkpoint_wrap in ["first", "both"]))
state_dict = _get_state_dict(model, False, False)
# Possibly wrap new model in activation checkpoint wrapper to test save/
# load with this wrapper
model_new = model_call(checkpoint_wrap=(checkpoint_wrap in ["second", "both"]))
_zero_model(model_new)
self._compare_models(model, model_new, self.assertNotEqual)
# Would fail if checkpoint_wrapper did not correctly implement state_dict pre/post hooks
model_new.load_state_dict(state_dict, strict=True)
self._compare_models(model, model_new, self.assertEqual)
@skip_if_lt_x_gpu(2)
def test_state_dict_rank0_offload_save_load_flow(self):
"""Tests saving a model checkpoint only on rank 0 and loading it only
on rank 0 with ``sync_module_states=True`` to emulate the workflow to
avoid redundant CPU memory usage."""
auto_wrap_policy = partial(
transformer_auto_wrap_policy,
transformer_layer_cls={TransformerEncoderLayer, TransformerDecoderLayer},
)
fsdp_kwargs = {"auto_wrap_policy": auto_wrap_policy}
fsdp_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_BEFORE,
fsdp_kwargs,
)
# Force model parameters and buffers to be nonzero
with FSDP.summon_full_params(fsdp_model):
for tensor in itertools.chain(fsdp_model.parameters(), fsdp_model.buffers()):
if torch.count_nonzero(tensor) == 0:
with torch.no_grad():
tensor.add_(torch.tensor(1, dtype=tensor.dtype, device=tensor.device))
with self._get_state_dict_mgr(fsdp_model, "state_dict", True):
state_dict = deepcopy(_get_state_dict(fsdp_model))
# Initialize a non-wrapped model on all ranks
new_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.NO_FSDP,
CUDAInitMode.CUDA_BEFORE,
)
_zero_model(new_model, zero_buffers=True)
# Only load the checkpoint on rank 0
if self.rank == 0:
new_model.load_state_dict(state_dict, strict=True)
_assert_module_states(
new_model,
process_group=self.process_group,
assert_fn=self.assertNotEqual,
)
# Broadcast the module states from rank 0 with `sync_module_states=True`
new_fsdp_model = FSDP(
new_model,
device_id=torch.cuda.current_device(),
auto_wrap_policy=auto_wrap_policy,
sync_module_states=True,
)
# Check FSDP models are equal across ranks
with FSDP.summon_full_params(new_fsdp_model):
_assert_module_states(
new_fsdp_model,
process_group=self.process_group,
assert_fn=self.assertEqual,
)
# Check FSDP models correctly loaded the checkpoint
with FullyShardedDataParallel.summon_full_params(fsdp_model):
with FullyShardedDataParallel.summon_full_params(new_fsdp_model):
params = list(fsdp_model.parameters())
params_new = list(new_fsdp_model.parameters())
self.assertEqual(params, params_new)
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _SUPPORTED_STATE_DICT_IMPLS)
@parametrize(
"cpu_offload",
[CPUOffload(offload_params=True), CPUOffload(offload_params=False)],
)
@parametrize("fp16", [True, False])
@parametrize("state_dict_rank0_and_offload", [True, False])
def test_basic_save_and_load_state_dict(
self, state_dict_type, cpu_offload, fp16, state_dict_rank0_and_offload
):
"""
        Tests that we can save a state_dict and load it into a blank model
        under various configs (such as fp16 and CPU offload) and that the
        parameters match as expected afterwards.
"""
if state_dict_rank0_and_offload and state_dict_type != "state_dict":
return
for model_call in [
partial(self._get_non_fsdp_root_module, cpu_offload=cpu_offload),
partial(self._get_simple_nested_model, cpu_offload=cpu_offload),
partial(self._get_simple_model, cpu_offload=cpu_offload),
]:
model = model_call()
ctx = self._get_state_dict_mgr(
model, state_dict_type, state_dict_rank0_and_offload
)
with ctx:
fsdp_state_dict = _get_state_dict(
model, cpu_offload.offload_params, fp16
)
ignore_keys = [k for k in fsdp_state_dict.keys() if NON_ROOT_FSDP_PREFIX in k]
self._validate_state_dict_contents(
model, fsdp_state_dict, state_dict_rank0_and_offload, ignore_keys=ignore_keys,
)
if fp16:
                # Verify that the saved tensors are fp16
for tensor in fsdp_state_dict.values():
self.assertEqual(tensor.dtype, torch.float16)
model_new = model_call()
if not cpu_offload.offload_params:
model_new = model_new.cuda()
if fp16:
model_new.half()
# zero the model to ensure parameters are different.
_zero_model(model_new)
self._compare_models(model, model_new, self.assertNotEqual)
# Verify parameters are the same in the new model.
if state_dict_rank0_and_offload:
# Broadcast the state dict and move it back to GPU in
# preparation for loading.
if not isinstance(model, FSDP):
# Move everything to CPU to avoid running into
# https://github.com/pytorch/pytorch/issues/77113, some params
# will still be on GPU for non FSDP root modules.
for k in fsdp_state_dict.keys():
fsdp_state_dict[k] = fsdp_state_dict[k].cpu()
fsdp_state_dict = self._broadcast_state_dict(fsdp_state_dict)
for key in fsdp_state_dict.keys():
fsdp_state_dict[key] = fsdp_state_dict[key].cuda()
with FSDP.state_dict_type(model_new, STATE_DICT_MAPPING[state_dict_type]):
model_new.load_state_dict(fsdp_state_dict, strict=True)
self._compare_models(model, model_new, self.assertEqual, check_fp16=fp16)
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _SUPPORTED_STATE_DICT_IMPLS)
@parametrize("mixed_precision", [True, False])
@parametrize("state_dict_rank0_and_offload", [True, False])
def test_save_and_load_after_forward_state_dict(
self, state_dict_type, mixed_precision, state_dict_rank0_and_offload
):
"""
Test that saving after some training results in params being updated as
expected.
"""
if state_dict_rank0_and_offload and state_dict_type != "state_dict":
return
torch.cuda.set_device(self.rank)
mixed_precision = (
MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float16,
buffer_dtype=torch.float16,
)
if mixed_precision
else None
)
model = self._get_simple_nested_model(mixed_precision=mixed_precision)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
initial_params = get_full_params(model)
for _ in range(6):
inp = torch.randn(1, 10, device=torch.cuda.current_device())
output = model(*inp)
loss = output.sum()
expected_dtype = torch.float32 if mixed_precision is None else torch.float16
self.assertEqual(expected_dtype, loss.dtype)
loss.backward()
optim.step()
trained_params = get_full_params(model)
        # Ensure some training occurred
self.assertNotEqual(initial_params, trained_params)
# Save a copy of the state_dict
fsd_mgr = self._get_state_dict_mgr(
model, state_dict_type, state_dict_rank0_and_offload
)
with fsd_mgr:
state_dict = model.state_dict()
if state_dict_type == "state_dict":
state_dict = {k: v.clone() for k, v in state_dict.items()}
else:
for sharded_tensor in state_dict.values():
shard = sharded_tensor._local_shards[0]
shard.tensor = shard.tensor.clone().detach_()
self._validate_state_dict_contents(model, state_dict, state_dict_rank0_and_offload)
_zero_model(model)
# Ensure checkpointed params have the full param dtype
for tensor in state_dict.values():
self.assertEqual(tensor.dtype, torch.float32)
# Load state_dict into zeroed model
if state_dict_rank0_and_offload:
# Broadcast the state dict and move it back to GPU in
# preparation for loading.
state_dict = self._broadcast_state_dict(state_dict)
for key in state_dict.keys():
state_dict[key] = state_dict[key].cuda()
with FSDP.state_dict_type(model, STATE_DICT_MAPPING[state_dict_type]):
model.load_state_dict(state_dict, strict=True)
loaded_params = get_full_params(model)
self.assertEqual(loaded_params, trained_params)
def _initialize_model(
self,
wrap_fsdp: bool,
wrap_ddp: bool = True,
register_buffers: bool = False,
):
# keep everything deterministic for input data
torch.manual_seed(0)
model = Model(wrap_fsdp, register_buffers=register_buffers).cuda()
if wrap_fsdp:
model = FSDP(model)
elif wrap_ddp:
model = DistributedDataParallel(model, device_ids=[self.rank])
return model
@staticmethod
def _state_dict(model: Module, state_dict_type: str):
try:
enum_val = STATE_DICT_MAPPING[state_dict_type]
except KeyError:
raise ValueError(f"No state_dict type for {state_dict_type}")
with FSDP.state_dict_type(model, enum_val):
return model.state_dict()
@staticmethod
def _load_state_dict(
model: Module, state_dict_type: str, state_dict: Dict[str, Any]
):
try:
enum_val = STATE_DICT_MAPPING[state_dict_type]
except KeyError:
raise ValueError(f"No state_dict for {state_dict_type}")
with FSDP.state_dict_type(model, enum_val):
return model.load_state_dict(state_dict, strict=True)
def _dist_train(self, wrap_fsdp: bool, state_dict_type: str = ""):
# TODO: Move this test to common_fsdp.
model = self._initialize_model(wrap_fsdp)
optim = SGD(model.parameters(), lr=0.1)
in_data = torch.rand(64, 4, requires_grad=True, device=torch.device("cuda"))
for _ in range(3):
out = model(in_data)
out.sum().backward()
optim.step()
optim.zero_grad()
if wrap_fsdp:
blank_model = FSDP(Model(True).cuda())
_zero_model(blank_model)
state_dict = self._state_dict(model, state_dict_type)
self._load_state_dict(blank_model, state_dict_type, state_dict)
return get_full_params(blank_model)
else:
return list(model.parameters())
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _SUPPORTED_STATE_DICT_IMPLS)
def test_state_dict_save_load_flow(self, state_dict_type):
fsdp_params = self._dist_train(wrap_fsdp=True, state_dict_type=state_dict_type)
ddp_params = self._dist_train(wrap_fsdp=False)
self.assertEqual(ddp_params, fsdp_params)
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _SUPPORTED_STATE_DICT_IMPLS)
def test_fsdp_state_dict_keys(self, state_dict_type):
state_dict = self._state_dict(self._initialize_model(True), state_dict_type)
if state_dict_type == "local_state_dict":
self.assertEqual(set(["flat_param", "inner.flat_param"]), state_dict.keys())
elif state_dict_type in ("state_dict", "sharded_state_dict"):
# Keys should match local model.
local_model = self._initialize_model(wrap_fsdp=False, wrap_ddp=False)
local_keys = local_model.state_dict().keys()
self.assertEqual(state_dict.keys(), local_keys)
else:
raise NotImplementedError(f"No test for {state_dict_type}!")
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _UNFLATTENED_STATE_DICT_IMPLS)
@parametrize("state_dict_rank0_and_offload", [True, False])
@parametrize("fsdp_root", [True, False])
def test_state_dict_load_into_local_module(
self, state_dict_type, state_dict_rank0_and_offload, fsdp_root,
):
"""
Tests that FSDP's state_dict can be loaded into a local model.
"""
if state_dict_rank0_and_offload and state_dict_type != "state_dict":
return
if not fsdp_root:
model = self._get_non_fsdp_root_module()
else:
model = self._initialize_model(wrap_fsdp=True, register_buffers=True)
optim = SGD(model.parameters(), lr=0.1)
if not fsdp_root:
in_data = torch.randn(1, 10, requires_grad=True, device=torch.device("cuda"))
else:
in_data = torch.rand(64, 4, requires_grad=True, device=torch.device("cuda"))
for _ in range(3):
out = model(in_data)
out.sum().backward()
optim.step()
optim.zero_grad()
with FullyShardedDataParallel.summon_full_params(model):
fsdp_params = deepcopy(list(model.parameters()))
# get FSDP state_dict. Note that by default we return full_state_dict.
sd_mgr = self._get_state_dict_mgr(
model, state_dict_type, state_dict_rank0_and_offload
)
with sd_mgr:
fsdp_state_dict = model.state_dict()
ignore_keys = [k for k in fsdp_state_dict.keys() if NON_ROOT_FSDP_PREFIX in k]
self._validate_state_dict_contents(
model, fsdp_state_dict, state_dict_rank0_and_offload, ignore_keys=ignore_keys,
)
# Create zeroed local model
if not fsdp_root:
blank_local_model = self._get_non_fsdp_root_module(wrap=False)
else:
blank_local_model = self._initialize_model(
wrap_fsdp=False, wrap_ddp=False, register_buffers=True
)
# Nothing should be FSDP
for mod in blank_local_model.modules():
self.assertFalse(isinstance(mod, FSDP))
for param in blank_local_model.parameters():
with torch.no_grad():
param.zero_()
fsdp_state_dict = _gather_state_dict(fsdp_state_dict)
# Load fsdp's full state dict into the local and verify params are as
# expected.
if state_dict_rank0_and_offload:
# Broadcast + CUDA state_dict
if not isinstance(model, FSDP):
# Some portions of the model on rank 0 might not be on CPU,
# move everything to CPU to avoid running into
# https://github.com/pytorch/pytorch/issues/77113.
for k, t in fsdp_state_dict.items():
if t.device != torch.device("cpu"):
fsdp_state_dict[k] = t.cpu()
fsdp_state_dict = self._broadcast_state_dict(fsdp_state_dict)
for key in fsdp_state_dict.keys():
fsdp_state_dict[key] = fsdp_state_dict[key].cuda()
# if self.rank == 0:
blank_local_model.load_state_dict(fsdp_state_dict, strict=True)
local_params = list(blank_local_model.parameters())
for fsdp_param, local_param in zip(fsdp_params, local_params):
self.assertEqual(fsdp_param, local_param)
@skip_if_lt_x_gpu(2)
@parametrize("state_dict_type", _SUPPORTED_STATE_DICT_IMPLS)
@parametrize("double_nest", [True])
def test_state_dict_skip_module(self, state_dict_type, double_nest):
torch.cuda.set_device(self.rank)
def _create_module(wrap_fsdp=True):
LINEAR_SKIP = "linear_skip"
ctx = enable_wrap(wrapper_cls=FSDP) if wrap_fsdp else suppress()
with ctx:
module = SkipModel(double_nest=double_nest)
# Full name of linear_skip param tensors in SkipModel, as would be
# stored in checkpoint.
linear_skip_tensor_names = [
k
for k in dict(module.named_parameters()).keys()
if LINEAR_SKIP in k
]
                # Detach ``linear_skip`` so that FSDP wrapping skips it
linear_skip = getattr(module, LINEAR_SKIP)
delattr(module, LINEAR_SKIP)
# Wrap FSDP
fsdp = wrap(module)
# reattach
setattr(module, LINEAR_SKIP, linear_skip)
return fsdp, linear_skip_tensor_names
fsdp, linear_skip_tensor_names = _create_module()
# Run a forward pass
inp = torch.randn((1, 10), device=torch.cuda.current_device())
loss = fsdp(inp)
loss.sum().backward()
with FSDP.state_dict_type(fsdp, STATE_DICT_MAPPING[state_dict_type]):
state_dict = fsdp.state_dict()
if self.rank == 0 and state_dict_type != "local_state_dict":
sd_keys = list(state_dict.keys())
expected = list(SkipModel(double_nest=False).state_dict().keys())
self.assertEqual(sorted(sd_keys), sorted(expected))
# TODO: parameters in linear_skip_tensor_names should not be handled
# by FSDP.state_dict(). Have a check once this is implemented in
# FSDP.state_dict().
# Check that it can be loaded into FSDP.
new_fsdp, _ = _create_module()
_zero_model(new_fsdp)
for (p1, p2) in zip(fsdp.parameters(), new_fsdp.parameters()):
self.assertNotEqual(p1, p2)
with FSDP.state_dict_type(new_fsdp, STATE_DICT_MAPPING[state_dict_type]):
if state_dict_type != "local_state_dict":
# FlatParameter has not supported deepcopy yet.
state_dict = deepcopy(state_dict)
new_fsdp.load_state_dict(state_dict, strict=True)
for (p1, p2) in zip(fsdp.parameters(), new_fsdp.parameters()):
self.assertEqual(p1, p2)
# Test that the checkpoint can be loaded into a local model.
local, _ = _create_module(wrap_fsdp=False)
for param in local.parameters():
with torch.no_grad():
param.zero_()
with fsdp.summon_full_params(fsdp):
for (p1, p2) in zip(fsdp.parameters(), local.parameters()):
self.assertNotEqual(p1, p2)
if state_dict_type == "local_state_dict":
return
state_dict = _gather_state_dict(state_dict)
with fsdp.summon_full_params(fsdp):
if self.rank == 0:
local.load_state_dict(state_dict, strict=True)
for (p1, p2) in zip(fsdp.parameters(), local.parameters()):
self.assertEqual(p1, p2)
@skip_if_lt_x_gpu(2)
def test_wrong_state_dict_config(self):
model = FSDP(Model(wrap_fsdp=True).cuda())
with self.assertRaisesRegex(RuntimeError, "Expected state_dict_config of type"):
with model.state_dict_type(
model, StateDictType.FULL_STATE_DICT, LocalStateDictConfig()
):
pass
@skip_if_lt_x_gpu(2)
@parametrize("prefix", [True, False])
@parametrize("ignore_inner", [True, False])
def test_state_dict_with_ignored_modules(self, prefix, ignore_inner):
# Initialize an FSDP-wrapped model with an ignored module that includes
# both parameters and a buffer
model = Model(wrap_fsdp=True, register_buffers=True, ignore_inner=ignore_inner).cuda()
ignored_modules = [model.outer]
ignored_tensor_to_tensor_name = {
model.outer.bias: "outer.bias",
model.outer.weight: "outer.weight",
}
if ignore_inner:
ignored_tensor_to_tensor_name = {
**ignored_tensor_to_tensor_name,
model.inner.bias: "inner.bias",
model.inner.weight: "inner.weight",
}
# Note that when model.inner is not ignored this test also ensures
# non-ignored buffers are not cloned.
buffer_to_buffer_name = {
model.inner.buffer: "inner.buffer", model.outer.buffer: "outer.buffer",
}
fsdp_model = FSDP(model, ignored_modules=ignored_modules)
prefix_str = "foo." if prefix else ""
with FSDP.state_dict_type(fsdp_model, StateDictType.FULL_STATE_DICT):
sd1 = fsdp_model.state_dict(prefix=prefix_str)
with FSDP.summon_full_params(fsdp_model):
fsdp_params = deepcopy(list(fsdp_model.parameters()))
# Check that the ignored parameters and all buffers are not cloned
for tensor, tensor_name in {
**ignored_tensor_to_tensor_name,
**buffer_to_buffer_name,
}.items():
prefixed_tensor_name = f"{prefix_str}{tensor_name}"
self.assertTrue(prefixed_tensor_name in sd1)
self.assertEqual(tensor.data_ptr(), sd1[prefixed_tensor_name].data_ptr(), f"{prefixed_tensor_name}")
# Check that the state dict can be loaded into a non-wrapped version of
# the model
nonwrapped_model = Model(wrap_fsdp=False, register_buffers=True).cuda()
for param in nonwrapped_model.parameters():
with torch.no_grad():
param.zero_()
to_load = {k[len(prefix_str):] : v for k, v in sd1.items()}
nonwrapped_model.load_state_dict(to_load, strict=True)
local_params = list(nonwrapped_model.parameters())
for fsdp_param, local_param in zip(fsdp_params, local_params):
self.assertEqual(fsdp_param, local_param)
# Check that if we save a state dict again, the ignored parameters and
# buffer still have the same data pointer
with FSDP.state_dict_type(fsdp_model, StateDictType.FULL_STATE_DICT):
sd2 = fsdp_model.state_dict(prefix=prefix_str)
for tensor, tensor_name in {
**ignored_tensor_to_tensor_name,
**buffer_to_buffer_name,
}.items():
prefixed_tensor_name = f"{prefix_str}{tensor_name}"
self.assertTrue(prefixed_tensor_name in sd2)
self.assertEqual(tensor.data_ptr(), sd2[prefixed_tensor_name].data_ptr())
self.assertEqual(sd1[prefixed_tensor_name].data_ptr(), sd2[prefixed_tensor_name].data_ptr())
@skip_if_lt_x_gpu(2)
def test_state_dict_type(self):
module = SkipModel(double_nest=True)
with enable_wrap(wrapper_cls=FSDP):
fsdp = wrap(module)
with FSDP.state_dict_type(fsdp, StateDictType.LOCAL_STATE_DICT):
pass
for module in FSDP.fsdp_modules(fsdp):
self.assertEqual(module._state_dict_type, StateDictType.FULL_STATE_DICT)
instantiate_parametrized_tests(TestFSDPStateDict)
if __name__ == "__main__":
run_tests()
# === end of file: pytorch-master / test/distributed/fsdp/test_fsdp_state_dict.py ===
# Owner(s): ["oncall: distributed"]
import sys
import time
from statistics import mean
from unittest.mock import patch
import torch
import torch.nn as nn
from torch import distributed as dist
from torch.cuda import Event
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
FSDPTest,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
get_cycles_per_ms,
run_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class Layer(nn.Module):
def __init__(self, compute_cycles, has_params: bool):
super().__init__()
self.sleep_cycles = compute_cycles
self.optional_param = None
if has_params:
self.optional_param = nn.Parameter(torch.rand(1))
def forward(self, x):
# Get 2 events.
self.e1 = Event(enable_timing=True)
self.e2 = Event(enable_timing=True)
# Record the fake forward compute time.
self.e1.record()
if self.sleep_cycles > 0:
torch.cuda._sleep(self.sleep_cycles)
if self.optional_param is not None:
x = x + self.optional_param # force the param to be part of the graph
self.e2.record()
return x
def get_time(self):
# return the recorded duration.
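        # Note: ``elapsed_time`` is only valid once both events have completed;
        # callers synchronize with the GPU (e.g. via ``out.item()``) before
        # reading these timings.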
return self.e1.elapsed_time(self.e2)
def _create_model(compute_cycles, has_params: bool):
model = FSDP(
nn.Sequential(
FSDP(Layer(compute_cycles, has_params)),
FSDP(Layer(compute_cycles, has_params)),
FSDP(Layer(compute_cycles, has_params)),
FSDP(Layer(compute_cycles, has_params)),
)
).cuda()
return model
class Min10:
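    # Tracks the 10 smallest samples seen so far and averages them, which
    # filters out iterations inflated by unrelated system activity.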
def __init__(self):
self.data = []
def add(self, new_data):
if len(self.data) < 10:
self.data.append(new_data)
else:
self.data = sorted(self.data)
if new_data < self.data[-1]:
self.data[-1] = new_data
def avg(self):
return mean(self.data)
class TestForwardOverlapWorldSizeOne(FSDPTest):
@property
def world_size(self):
return 1
def _dist_train(self):
rank = self.rank
world_size = self.world_size
# Save the original torch.distributed._all_gather_base function since we will
# patch it to include an artificial delay.
orig_all_gather = torch.distributed._all_gather_base
def run(compute_cycles, all_gather_cycles):
has_params = all_gather_cycles > 0
model = _create_model(compute_cycles, has_params)
            # Get the input and set its requires_grad to True because we have
            # fake compute in the forward pass.
batch = torch.rand(1).cuda()
batch.requires_grad = True
            # We run 20 iterations but only collect timing data from the
            # smallest 10 data points because nondeterministic system events
            # can disturb the timing.
cpu_iter = Min10()
cpu_wait = Min10()
gpu_compute = Min10()
gpu_total = Min10()
for _ in range(20):
# Get two events for measuring the overall time.
e1 = Event(enable_timing=True)
e2 = Event(enable_timing=True)
cpu_start = time.process_time()
all_gather_called = False
def _delayed_all_gather(*args, **kwargs):
nonlocal all_gather_called
all_gather_called = True
torch.cuda._sleep(all_gather_cycles)
assert orig_all_gather
return orig_all_gather(*args, **kwargs)
# forward pass
#
# Even though both e1 & e2 are on the compute stream, since
# compute depends on all_gather, e2-e1 includes all_gather time.
e1.record()
with patch("torch.distributed._all_gather_base", _delayed_all_gather):
out = model(batch)
if has_params and world_size > 1:
self.assertTrue(all_gather_called)
else:
self.assertFalse(all_gather_called)
e2.record()
# backward pass
out.backward()
model.zero_grad(set_to_none=True)
cpu_iter_time = time.process_time() - cpu_start
# wait for gpu
out.item()
cpu_wait_for_gpu_time = time.process_time() - cpu_start - cpu_iter_time
# get sum of the compute time
times = []
for mod in model.modules():
if not isinstance(mod, Layer):
continue
times.append(mod.get_time())
# get gpu compute + all_gather time
overall_gpu_time = e1.elapsed_time(e2)
cpu_iter.add(cpu_iter_time)
cpu_wait.add(cpu_wait_for_gpu_time)
gpu_compute.add(sum(times))
gpu_total.add(overall_gpu_time)
del model
return {
"cpu_iter": cpu_iter.avg(),
"cpu_wait": cpu_wait.avg(),
"gpu_compute": gpu_compute.avg(),
"gpu_total": gpu_total.avg(),
}
sleep_cycles = int(100 * get_cycles_per_ms())
e1 = run(0, 0) # no compute, no all-gather
e2 = run(0, sleep_cycles) # no compute, only all-gather
e3 = run(sleep_cycles, 0) # only compute, no all-gather
e4 = run(sleep_cycles, sleep_cycles) # both compute and all-gather
debug_string = f"\nrank{rank}:\n e1: {e1}\n e2: {e2}\n e3: {e3}\n e4: {e4}"
print(debug_string)
# Check the cpu/gpu timing. CPU should run ahead of GPU. Therefore, cpu-gpu
# wait should be long, except when there is no real work on GPU.
#
# If the assertions fail below, we likely have a cpu-gpu wait in the forward/backward pass.
# e4["cpu_iter"] may not be short as cpu may take some time to queue both compute and all-gather.
short = [
e1["cpu_iter"],
e2["cpu_iter"],
e3["cpu_iter"],
e1["cpu_wait"],
]
long = [e3["cpu_wait"], e4["cpu_wait"]]
if world_size == 1:
short.append(e2["cpu_wait"]) # all gather should not be happening.
else:
long.append(
e2["cpu_wait"]
) # all gather should happen and prolong the cpu-gpu wait.
for s in short:
for l in long:
                # 10X longer is a safe margin, since the GPU work timing is
                # around 100X that of the CPU.
self.assertTrue(s * 10 < l)
# Check the GPU timing.
short = [e1["gpu_compute"], e1["gpu_total"], e2["gpu_compute"]]
long = [e3["gpu_compute"], e3["gpu_total"], e4["gpu_compute"], e4["gpu_total"]]
if world_size == 1:
short.append(e2["gpu_total"]) # all gather should not be happening.
else:
long.append(
e2["gpu_total"]
) # all gather should happen and prolong the cpu-gpu wait.
for s in short:
for l in long:
# 10X longer is a safe margin, since the time is around 100X longer
# when there is work on GPU vs. no work.
self.assertTrue(s * 10 < l)
# Check the GPU overlapping when there is all-gather.
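        # If compute and all-gather ran serially, e4's total GPU time would be
        # roughly compute_only + all_gather_only; requiring the serial sum to
        # exceed 1.1x the measured total implies at least ~10% of the work
        # overlapped.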
if world_size > 1:
compute_only = e3["gpu_compute"]
all_gather_only = e2["gpu_total"]
both = e4["gpu_total"]
self.assertTrue(compute_only + all_gather_only > 1.1 * both)
@skip_if_lt_x_gpu(2)
def test_forward_overlap(self):
self._dist_train()
class TestForwardOverlapWorldSizeTwo(TestForwardOverlapWorldSizeOne):
@property
def world_size(self):
return 2
if __name__ == "__main__":
run_tests()
# === end of file: pytorch-master / test/distributed/fsdp/test_fsdp_overlap.py ===
# Owner(s): ["oncall: distributed"]
import contextlib
from copy import deepcopy
from functools import partial
import torch
import torch.nn as nn
from torch.distributed.fsdp.fully_sharded_data_parallel import (
FullyShardedDataParallel as FSDP,
CPUOffload,
)
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper,
)
from torch.testing._internal.common_distributed import (
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_fsdp import (
FSDPTest,
_maybe_wrap_fsdp,
)
from torch.testing._internal.common_utils import (
run_tests,
parametrize,
instantiate_parametrized_tests,
)
from torch.utils.checkpoint import checkpoint
_save_on_cpu_called = False
def get_patched_save_on_cpu():
orig_save_on_cpu = torch.distributed.algorithms._checkpoint.checkpoint_wrapper.save_on_cpu
def patched_save_on_cpu(*args, **kwargs):
global _save_on_cpu_called
_save_on_cpu_called = True
return orig_save_on_cpu(*args, **kwargs)
return patched_save_on_cpu
@contextlib.contextmanager
def patch_save_on_cpu(new_save_on_cpu):
orig_save_on_cpu = torch.distributed.algorithms._checkpoint.checkpoint_wrapper.save_on_cpu
torch.distributed.algorithms._checkpoint.checkpoint_wrapper.save_on_cpu = new_save_on_cpu
try:
yield
finally:
torch.distributed.algorithms._checkpoint.checkpoint_wrapper.save_on_cpu = orig_save_on_cpu
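# The two helpers above monkey-patch ``save_on_cpu`` inside the
# ``checkpoint_wrapper`` module so that the tests can observe, via the
# ``_save_on_cpu_called`` flag, whether activation offloading to CPU was
# actually triggered, while still delegating to the original implementation.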
class TestFSDPCheckpoint(FSDPTest):
class SequentialModule(nn.Module):
def __init__(
self,
checkpoint_layer=False,
offload_activations=False,
wrap_fsdp=False,
*fsdp_args,
**fsdp_kwargs,
):
torch.manual_seed(0)
torch.cuda.manual_seed(0)
super().__init__()
l1 = nn.Linear(3, 3).cuda()
l2 = nn.Linear(3, 3).cuda()
l3 = nn.Linear(3, 3).cuda()
if checkpoint_layer:
ckpt_wrapper = partial(
checkpoint_wrapper, offload_to_cpu=offload_activations
)
l1 = ckpt_wrapper(l1)
l2 = ckpt_wrapper(l2)
l3 = ckpt_wrapper(l3)
fsdp_wrapper = partial(
_maybe_wrap_fsdp, wrap_fsdp=wrap_fsdp, *fsdp_args, **fsdp_kwargs
)
self.ffn = nn.Sequential(
fsdp_wrapper(l1),
fsdp_wrapper(l2),
fsdp_wrapper(l3),
)
def forward(self, x):
return self.ffn(x)
def _verify_parity(self, losses, outputs, models):
assert losses
assert outputs
assert models
for (l, o) in zip(losses[1:], outputs[1:]):
self.assertEqual(losses[0], l)
self.assertEqual(outputs[0], o)
# Verify grads
ref_model = models[0]
ref_grads = [p.grad for p in ref_model.parameters()]
for m in models[1:]:
grads = [p.grad for p in m.parameters()]
for ref_g, g in zip(ref_grads, grads):
self.assertEqual(ref_g, g)
@skip_if_lt_x_gpu(2)
@parametrize(
"cpu_offload",
[CPUOffload(offload_params=True), CPUOffload(offload_params=False)],
)
@parametrize("offload_activations", [True, False])
def test_checkpoint_fsdp_wrapping(self, cpu_offload, offload_activations):
# Test checkpoint(FSDP(layer1), FSDP(layer2), ....)
ckpt_sequential_wrapped_fsdp = checkpoint_wrapper(
TestFSDPCheckpoint.SequentialModule(
wrap_fsdp=True, cpu_offload=cpu_offload
),
offload_to_cpu=offload_activations,
)
# Test FSDP(checkpoint(layer1)), FSDP(checkpoint(layer2)), ....
inner_ckpt = TestFSDPCheckpoint.SequentialModule(
checkpoint_layer=True,
offload_activations=offload_activations,
wrap_fsdp=True,
cpu_offload=cpu_offload,
)
baseline = TestFSDPCheckpoint.SequentialModule(
wrap_fsdp=True, cpu_offload=cpu_offload
)
# note that reentrant-based checkpointing requires inputs to have grad
# flag set.
inp = torch.randn(10, 3, device=torch.cuda.current_device(), requires_grad=True)
global _save_on_cpu_called
models = [ckpt_sequential_wrapped_fsdp, inner_ckpt, baseline]
with patch_save_on_cpu(get_patched_save_on_cpu()):
for i in range(2):
losses = []
outputs = []
for m in models:
check_offload = m != baseline and i == 0 and offload_activations
if check_offload:
self.assertFalse(_save_on_cpu_called)
out = m(inp)
if check_offload:
self.assertTrue(_save_on_cpu_called)
_save_on_cpu_called = False
loss = out.sum()
loss.backward()
losses.append(loss)
outputs.append(out)
self._verify_parity(losses, outputs, models)
@skip_if_lt_x_gpu(2)
@parametrize(
"cpu_offload",
[CPUOffload(offload_params=True), CPUOffload(offload_params=False)],
)
@parametrize("offload_activations", [True, False])
def test_basic_checkpoint_end_to_end(self, cpu_offload, offload_activations):
global _save_on_cpu_called
with patch_save_on_cpu(get_patched_save_on_cpu()):
seq = TestFSDPCheckpoint.SequentialModule().to(torch.cuda.current_device())
# Runs FSDP with no checkpointing
fsdp_only_seq = FSDP(deepcopy(seq), cpu_offload=cpu_offload)
# Runs checkpoint-wrapped FSDP
checkpointed_fsdp = checkpoint_wrapper(
FSDP(deepcopy(seq), cpu_offload=cpu_offload),
offload_to_cpu=offload_activations,
)
# Runs FSDP-wrapped checkpointed module
fsdp_wrapped_checkpoint = FSDP(
checkpoint_wrapper(deepcopy(seq), offload_to_cpu=offload_activations),
cpu_offload=cpu_offload,
)
# Runs FSDP with manual calls to checkpoint.
fsdp_call_checkpoint = FSDP(deepcopy(seq), cpu_offload=cpu_offload)
# note that reentrant-based checkpointing requires inputs to have grad
# flag set.
inp = torch.randn(10, 3, device=torch.cuda.current_device(), requires_grad=True)
models = [
fsdp_only_seq,
checkpointed_fsdp,
fsdp_wrapped_checkpoint,
fsdp_call_checkpoint,
]
# Ensure _save_on_cpu is not yet called
self.assertFalse(_save_on_cpu_called)
for i in range(6):
losses = []
outputs = []
for m in models:
check_offload = m != fsdp_only_seq and i == 0 and offload_activations
if m == fsdp_call_checkpoint:
# _save_on_cpu should not be called yet
self.assertFalse(_save_on_cpu_called)
offload_ctx = (
get_patched_save_on_cpu()(pin_memory=True)
if offload_activations
else contextlib.suppress()
)
with offload_ctx:
out = checkpoint(m, inp)
else:
# _save_on_cpu should not be called yet
self.assertFalse(_save_on_cpu_called)
out = m(inp)
if check_offload:
self.assertTrue(_save_on_cpu_called)
loss = out.sum()
loss.backward()
losses.append(loss)
outputs.append(out)
_save_on_cpu_called = False
self._verify_parity(losses, outputs, models)
instantiate_parametrized_tests(TestFSDPCheckpoint)
if __name__ == "__main__":
run_tests()
# === end of file: pytorch-master / test/distributed/fsdp/test_fsdp_checkpoint.py ===
# Owner(s): ["oncall: distributed"]
import functools
import os
import tempfile
import unittest
from enum import Enum, auto
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributed.fsdp.fully_sharded_data_parallel import (
BackwardPrefetch,
CPUOffload,
)
from torch.distributed.fsdp.fully_sharded_data_parallel import (
FullyShardedDataParallel as FSDP,
)
from torch.distributed.fsdp.wrap import (
_or_policy,
_wrap_batchnorm_individually,
always_wrap_policy,
enable_wrap,
size_based_auto_wrap_policy,
transformer_auto_wrap_policy,
wrap,
)
from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
CUDAInitMode,
DummyProcessGroup,
FSDPInitMode,
FSDPTest,
TransformerWithSharedParams,
_maybe_cuda,
)
from torch.testing._internal.common_utils import (
FILE_SCHEMA,
TestCase,
find_free_port,
instantiate_parametrized_tests,
parametrize,
run_tests,
)
class BatchNormNet(nn.Module):
def __init__(self):
super().__init__()
self.lin = nn.Linear(10, 10, bias=False)
self.bn1 = nn.BatchNorm1d(10)
self.bn2 = nn.BatchNorm2d(10)
self.bn3 = nn.BatchNorm3d(10)
self.sync_bn = nn.SyncBatchNorm(10)
class WrapMethod(Enum):
FSDP_CTOR = auto()
# FSDP_CTOR is the supported way forward, but keep WRAP_API in case we miss
# any use cases and fix them to work with FSDP_CTOR over time.
WRAP_API = auto()
class TestFSDPWrap(FSDPTest):
"""
Tests main API for wrapping FSDP, which is to pass auto_wrap_policy into
FSDP constructor.
"""
def setUp(self) -> None:
super().setUp()
class NestedSequentialModel:
@staticmethod
def get_model(cuda=True):
sequential = nn.Sequential(
nn.Linear(5, 5),
nn.Linear(5, 5),
nn.Sequential(nn.Linear(5, 5), nn.Linear(5, 5)),
)
if cuda:
sequential = sequential.cuda()
return sequential
@staticmethod
def verify_model_all_wrapped(cls, model):
cls.assertTrue(isinstance(model, FSDP))
cls.assertTrue(isinstance(model.module[0], FSDP))
cls.assertTrue(isinstance(model.module[1], FSDP))
cls.assertTrue(isinstance(model.module[2], FSDP))
cls.assertTrue(isinstance(model.module[2].module[0], FSDP))
cls.assertTrue(isinstance(model.module[2].module[1], FSDP))
@staticmethod
def verify_model(cls, model):
cls.assertTrue(isinstance(model, FSDP))
cls.assertTrue(isinstance(model.module[0], nn.Linear))
cls.assertTrue(isinstance(model.module[1], nn.Linear))
cls.assertTrue(isinstance(model.module[2], FSDP))
# following modules were not wrapped by the policy.
cls.assertTrue(isinstance(model.module[2].module[0], nn.Linear))
cls.assertTrue(isinstance(model.module[2].module[1], nn.Linear))
def _get_linear(self, fin, fout):
return nn.Linear(fin, fout, bias=False)
def _get_already_wrapped_fsdp(
self, cuda_init_mode=CUDAInitMode.CUDA_BEFORE, nested=False
) -> FSDP:
fn_self = self
class MyModel(nn.Module):
def __init__(self, nested):
super().__init__()
# TODO: test the various init modes.
move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE
# if nested=True, the FSDP module will be nested one layer deep
# and we should pick that up.
if nested:
self.lin1 = nn.Sequential(
_maybe_cuda(fn_self._get_linear(1, 1), move_to_cuda),
FSDP(_maybe_cuda(fn_self._get_linear(1, 1), move_to_cuda)),
)
else:
self.lin1 = FSDP(
_maybe_cuda(fn_self._get_linear(1, 1), move_to_cuda)
)
self.lin2 = FSDP(_maybe_cuda(fn_self._get_linear(1, 1), move_to_cuda))
self.lin3 = FSDP(_maybe_cuda(fn_self._get_linear(1, 1), move_to_cuda))
def forward(self, input: torch.Tensor) -> torch.Tensor:
return self.lin3(self.lin2(self.lin1(input)))
model = MyModel(nested=nested)
return model
@skip_if_lt_x_gpu(2)
@parametrize("nested", [True, False])
@parametrize("cuda_init_mode", [CUDAInitMode.CUDA_AFTER, CUDAInitMode.CUDA_BEFORE])
def test_error_already_wrapped(self, nested, cuda_init_mode):
"""
Test that an error is raised if we attempt to wrap when submodules are
already FSDP.
"""
wrapped_fsdp = self._get_already_wrapped_fsdp(nested=nested, cuda_init_mode=cuda_init_mode)
if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
wrapped_fsdp = wrapped_fsdp.cuda()
with self.assertRaisesRegex(ValueError, "to NOT be FullyShardedDataParallel"):
mod = FSDP(wrapped_fsdp, auto_wrap_policy=size_based_auto_wrap_policy)
@skip_if_lt_x_gpu(2)
@parametrize("use_or_policy", [True, False])
def test_wrap_batchnorm_individually(self, use_or_policy):
def never_wrap_policy(*args, **kwargs):
return False
policy = (
functools.partial(
_or_policy,
policies=[never_wrap_policy, _wrap_batchnorm_individually]
) if use_or_policy else _wrap_batchnorm_individually
)
model = BatchNormNet()
fsdp = FSDP(model, auto_wrap_policy=policy)
# Batchnorms should be wrapped
for layer in [fsdp.bn1, fsdp.bn2, fsdp.bn3, fsdp.sync_bn]:
self.assertTrue(isinstance(layer, FSDP))
self.assertFalse(isinstance(fsdp.lin, FSDP))
@skip_if_lt_x_gpu(2)
def test_bn_always_wrapped_individually(self):
"""
Ensures that by using _or_policy with _wrap_batchnorm_individually, even
if the other policy results in a module containing a BN unit being
wrapped, the contained BN unit will still be individually wrapped.
"""
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.bn_container = BatchNormNet()
def wrap_bn_container(module, recurse, *args, **kwargs):
if recurse:
return True
return isinstance(module, BatchNormNet)
my_policy = functools.partial(
_or_policy,
policies=[wrap_bn_container, _wrap_batchnorm_individually]
)
mod = MyModule()
fsdp = FSDP(mod, auto_wrap_policy=my_policy)
# Wrapping should be FSDP(FSDP(BatchNormNet(FSDP(BN))))
# and not FSDP(FSDP(BatchNormNet(BN))) (in the latter the inner
# BN is not individually wrapped.)
for bn in [
fsdp.bn_container.bn1,
fsdp.bn_container.bn2,
fsdp.bn_container.bn3,
fsdp.bn_container.sync_bn
]:
self.assertTrue(isinstance(bn, FSDP))
# if we just wrapped BN container, individual batchnorms are not
# wrapped.
mod = MyModule()
fsdp = FSDP(mod, auto_wrap_policy=wrap_bn_container)
self.assertTrue(isinstance(mod.bn_container, FSDP))
for bn in [
fsdp.bn_container.bn1,
fsdp.bn_container.bn2,
fsdp.bn_container.bn3,
fsdp.bn_container.sync_bn
]:
self.assertFalse(isinstance(bn, FSDP))
@skip_if_lt_x_gpu(2)
@parametrize(
"cpu_offload",
[CPUOffload(offload_params=False), CPUOffload(offload_params=True)]
)
@parametrize(
"backward_prefetch",
[BackwardPrefetch.BACKWARD_POST, BackwardPrefetch.BACKWARD_PRE]
)
@parametrize("forward_prefetch", [True, False])
@parametrize(
"cuda_init_mode",
[CUDAInitMode.CUDA_AFTER, CUDAInitMode.CUDA_BEFORE]
)
def test_main_wrap_api(self, cpu_offload, backward_prefetch, forward_prefetch, cuda_init_mode):
if cuda_init_mode == CUDAInitMode.CUDA_AFTER and cpu_offload.offload_params:
            # CPU offload together with moving to CUDA after init is a known
            # unsupported combination, so skip it.
return
move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE
class Nested(nn.Module):
def __init__(self):
super().__init__()
self.nested_lin = _maybe_cuda(nn.Linear(1, 1, bias=False), move_to_cuda)
def forward(self, input):
return self.nested_lin(input)
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.lin1 = _maybe_cuda(nn.Linear(1, 1, bias=False), move_to_cuda)
self.lin2 = _maybe_cuda(nn.Linear(1, 1, bias=False), move_to_cuda)
self.lin3 = _maybe_cuda(nn.Linear(1, 1, bias=False), move_to_cuda)
self.lin4 = Nested()
def forward(self, input):
return self.lin4(self.lin3(self.lin2(self.lin1(input))))
model = MyModel()
wrapped_model = FSDP(
model,
auto_wrap_policy=functools.partial(
size_based_auto_wrap_policy,
min_num_params=0, # wrap all modules
),
cpu_offload=cpu_offload,
backward_prefetch=backward_prefetch,
forward_prefetch=forward_prefetch,
)
if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
wrapped_model = wrapped_model.cuda()
modules_in_fsdp_graph_order = [
wrapped_model.module.lin1,
wrapped_model.module.lin2,
wrapped_model.module.lin3,
wrapped_model.module.lin4.module.nested_lin,
wrapped_model.module.lin4,
wrapped_model
]
for module in modules_in_fsdp_graph_order:
self.assertTrue(isinstance(module, FSDP))
self._check_cpu_offload(module, cpu_offload)
self._check_backward_prefetch(module, backward_prefetch)
self._check_forward_prefetch(module, forward_prefetch)
# Run model a few times for sanity check.
optim = torch.optim.SGD(wrapped_model.parameters(), lr=1e-2, momentum=0.9)
inp = torch.ones(1).cuda()
for _ in range(6):
optim.zero_grad()
loss = wrapped_model(inp).sum()
loss.backward()
optim.step()
# Since we ran with backward prefetch, verify backward prefetch related
# data.
for i, module in enumerate(modules_in_fsdp_graph_order):
self.assertEqual(i, module._my_fsdp_idx_in_graph)
self.assertTrue(
module._fsdp_graph_order == modules_in_fsdp_graph_order
)
class TestAutoWrap(TestCase):
def setUp(self) -> None:
super().setUp()
# For all the tests here, we use a fake group
self.process_group = DummyProcessGroup(rank=0, size=1)
@unittest.skipIf(torch.cuda.device_count() < 2, "Requires at least 2 GPUs")
@parametrize("wrap_method", [WrapMethod.FSDP_CTOR, WrapMethod.WRAP_API])
def test_wrap(self, wrap_method):
if wrap_method == WrapMethod.WRAP_API:
with enable_wrap(wrapper_cls=FSDP, process_group=self.process_group):
layer = wrap(nn.Linear(5, 5))
else:
assert wrap_method == WrapMethod.FSDP_CTOR
layer = FSDP(
nn.Linear(5, 5),
process_group=self.process_group,
auto_wrap_policy=functools.partial(size_based_auto_wrap_policy, min_num_params=1)
)
self.assertTrue(isinstance(layer, FSDP))
self.assertEqual(layer.rank, self.process_group.rank())
self.assertEqual(layer.world_size, self.process_group.size())
@unittest.skipIf(torch.cuda.device_count() < 2, "Requires at least 2 GPUs")
def test_wrap_disabled_outside_context(self):
pg = self.process_group
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.lin = wrap(nn.Linear(5, 5), process_group=pg)
model = MyModel()
with enable_wrap(wrapper_cls=FSDP, process_group=pg):
model = wrap(model)
self.assertTrue(isinstance(model, FSDP))
self.assertFalse(isinstance(model.lin, FSDP))
self.assertTrue(isinstance(model.lin, nn.Linear))
@unittest.skipIf(torch.cuda.device_count() < 2, "Requires at least 2 GPUs")
def test_wrap_override_defaults(self):
new_process_group = DummyProcessGroup(rank=0, size=2)
with enable_wrap(wrapper_cls=FSDP, process_group=self.process_group):
layer = wrap(nn.Linear(5, 5), process_group=new_process_group)
self.assertTrue(isinstance(layer, FSDP))
self.assertTrue(layer.process_group is new_process_group)
self.assertEqual(layer.rank, 0)
self.assertEqual(layer.world_size, 2)
@unittest.skipIf(not torch.cuda.is_available(), "Test Requires CUDA")
def test_always_wrap(self):
"""
Test to ensure that if `always_wrap_policy` is
passed into FSDP, all submodules are wrapped.
"""
seq = TestFSDPWrap.NestedSequentialModel.get_model(cuda=True)
model = FSDP(seq, process_group=self.process_group, auto_wrap_policy=always_wrap_policy)
TestFSDPWrap.NestedSequentialModel.verify_model_all_wrapped(self, model)
@unittest.skipIf(torch.cuda.device_count() < 2, "Requires at least 2 GPUs")
def test_transformer_auto_wrap_policy(self):
"""Tests the ``transformer_auto_wrap_policy``."""
auto_wrap_policy = functools.partial(
transformer_auto_wrap_policy,
transformer_layer_cls={TransformerEncoderLayer, TransformerDecoderLayer},
)
fsdp_kwargs = {"auto_wrap_policy": auto_wrap_policy}
fsdp_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
CUDAInitMode.CUDA_BEFORE,
fsdp_kwargs,
)
modules = list(fsdp_model.modules())
encoder_layers = set(fsdp_model.module.transformer.encoder.layers)
decoder_layers = set(fsdp_model.module.transformer.decoder.layers)
for module in modules:
if module is fsdp_model or module in encoder_layers or module in decoder_layers:
self.assertTrue(isinstance(module, FSDP))
else:
self.assertFalse(isinstance(module, FSDP))
@unittest.skipIf(torch.cuda.device_count() < 2, "Requires at least 2 GPUs")
def test_auto_wrap_api(self):
"""
        Test to ensure that with auto wrap, child modules are wrapped correctly
        based on min_num_params. A single ``nn.Linear(5, 5)`` does not exceed
        the bucket size, but two of them combined do.
"""
sequential = TestFSDPWrap.NestedSequentialModel.get_model(cuda=False)
my_auto_wrap_policy = functools.partial(
size_based_auto_wrap_policy, min_num_params=40
)
model = FSDP(
sequential,
process_group=self.process_group,
auto_wrap_policy=my_auto_wrap_policy
)
TestFSDPWrap.NestedSequentialModel.verify_model(self, model)
@unittest.skipIf(torch.cuda.device_count() < 2, "Requires at least 2 GPUs")
def test_auto_wrap_preset_exclude_wrap(self):
"""
        Test to ensure excluded modules are not wrapped, regardless of whether
        the total param size exceeds min_num_params. The
        size_based_auto_wrap_policy excludes wrapping for {nn.ModuleList,
        nn.ModuleDict}.
"""
sequential = nn.ModuleList([nn.Linear(5, 5), nn.Linear(5, 5)])
my_auto_wrap_policy = functools.partial(
size_based_auto_wrap_policy, min_num_params=40
)
model = FSDP(
sequential,
process_group=self.process_group,
auto_wrap_policy=my_auto_wrap_policy
)
self.assertTrue(isinstance(model, FSDP))
self.assertTrue(isinstance(model[0], nn.Linear))
self.assertTrue(isinstance(model[1], nn.Linear))
@unittest.skipIf(torch.cuda.device_count() < 2, "Requires at least 2 GPUs")
def test_auto_wrap_preset_exclude_wrap_include_children(self):
"""
Test to ensure excluded modules are not wrapped, but children are if param size is greater than
min_num_params
"""
sequential = nn.ModuleList([nn.Linear(10, 10)])
my_auto_wrap_policy = functools.partial(
size_based_auto_wrap_policy, min_num_params=40
)
model = FSDP(sequential, process_group=self.process_group, auto_wrap_policy=my_auto_wrap_policy)
self.assertTrue(isinstance(model, FSDP))
self.assertTrue(isinstance(model[0], FSDP))
@unittest.skipIf(torch.cuda.device_count() < 2, "Requires at least 2 GPUs")
def test_auto_wrap_preset_force_leaf(self):
"""
Test to ensure force-leaf modules are not wrapped, and children are not wrapped. The
size_based_auto_wrap_policy forces leaf modules of type {nn.MultiheadAttention} to not be wrapped
"""
sequential = nn.Sequential(nn.Linear(10, 10), nn.MultiheadAttention(100, 1))
my_auto_wrap_policy = functools.partial(
size_based_auto_wrap_policy, min_num_params=40
)
model = FSDP(sequential, process_group=self.process_group, auto_wrap_policy=my_auto_wrap_policy)
self.assertTrue(isinstance(model.module[0], FSDP))
# Assert children of multihead attention are not wrapped
self.assertTrue(isinstance(model.module[1], nn.MultiheadAttention))
self.assertTrue(isinstance(model.module[1].out_proj, nn.Linear))
@unittest.skipIf(torch.cuda.device_count() < 2, "Requires at least 2 GPUs")
def test_auto_wrap_preset_force_leaf_custom(self):
"""
Test to ensure force-leaf modules are not wrapped.
"""
my_auto_wrap_policy = functools.partial(
size_based_auto_wrap_policy,
min_num_params=40,
force_leaf_modules=size_based_auto_wrap_policy.FORCE_LEAF_MODULES.union(
{nn.Linear}
),
)
sequential = nn.Sequential(
nn.Linear(10, 10), nn.ModuleList([nn.Linear(10, 10)])
)
model = FSDP(sequential, process_group=self.process_group, auto_wrap_policy=my_auto_wrap_policy)
# Model was wrapped in FSDP as no inner modules were wrapped.
self.assertTrue(isinstance(model, FSDP))
self.assertTrue(isinstance(model.module[0], nn.Linear))
self.assertTrue(isinstance(model.module[1], nn.ModuleList))
@unittest.skipIf(not torch.cuda.is_available(), "Test Requires CUDA")
@parametrize("cuda_init_mode", [CUDAInitMode.CUDA_BEFORE, CUDAInitMode.CUDA_AFTER])
@parametrize(
"cpu_offload",
[CPUOffload(offload_params=False), CPUOffload(offload_params=True)]
)
@parametrize("use_device_id", [True, False])
def test_auto_wrap_smoke_test(self, cuda_init_mode, cpu_offload, use_device_id):
# CPU offload and CUDA after don't work together as expected.
if (
cpu_offload.offload_params and cuda_init_mode == CUDAInitMode.CUDA_AFTER
):
return
device = torch.device("cuda")
torch.cuda.set_device(0)
device_id = (
torch.device("cuda", torch.cuda.current_device()) if use_device_id else None
)
        # Use a random port in case the next test runs quickly; reusing the same port would cause a conflict.
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(find_free_port())
file_name = tempfile.NamedTemporaryFile(delete=False).name
torch.distributed.init_process_group(
backend="nccl",
init_method=f"{FILE_SCHEMA}_{file_name}",
rank=0,
world_size=1,
)
        # NOTE: We move the model to CUDA after initializing FSDP to simulate real use
        # cases where the full model cannot be loaded onto the GPU, but its shards can.
cuda_after_init = cuda_init_mode == CUDAInitMode.CUDA_AFTER
try:
sequential = TestFSDPWrap.NestedSequentialModel.get_model(cuda=(not cuda_after_init))
my_auto_wrap_policy = functools.partial(
size_based_auto_wrap_policy, min_num_params=40
)
model = FSDP(
sequential, cpu_offload=cpu_offload, auto_wrap_policy=my_auto_wrap_policy, device_id=device_id
)
TestFSDPWrap.NestedSequentialModel.verify_model(self, model)
if cuda_after_init:
model = model.cuda()
input = torch.rand((1, 5), dtype=torch.float).to(device)
output = model(input)
loss = F.mse_loss(input, output)
loss.backward()
finally:
torch.distributed.destroy_process_group()
try:
os.remove(file_name)
except FileNotFoundError:
pass
@unittest.skipIf(torch.cuda.device_count() < 2, "Requires at least 2 GPUs")
@parametrize("wrap_method", [WrapMethod.FSDP_CTOR, WrapMethod.WRAP_API])
def test_always_wrap_with_ignored_modules(self, wrap_method: WrapMethod):
sequential = TestFSDPWrap.NestedSequentialModel.get_model(cuda=False)
ignored_modules = [sequential[1], sequential[2][0]]
fsdp_kwargs = {
"process_group": self.process_group,
"auto_wrap_policy": always_wrap_policy,
"ignored_modules": ignored_modules,
}
if wrap_method == WrapMethod.FSDP_CTOR:
model = FSDP(sequential, **fsdp_kwargs)
elif wrap_method == WrapMethod.WRAP_API:
with enable_wrap(wrapper_cls=FSDP, **fsdp_kwargs):
model = wrap(sequential)
else:
assert 0, f"Unsupported wrap method: {wrap_method}"
# All non-ignored modules should be wrapped with FSDP
self.assertTrue(isinstance(model, FSDP))
self.assertTrue(isinstance(model.module[0], FSDP))
self.assertTrue(isinstance(model.module[1], nn.Linear))
self.assertTrue(isinstance(model.module[2], FSDP))
self.assertTrue(isinstance(model.module[2].module[0], nn.Linear))
self.assertTrue(isinstance(model.module[2].module[1], FSDP))
@unittest.skipIf(torch.cuda.device_count() < 2, "Requires at least 2 GPUs")
@parametrize("wrap_method", [WrapMethod.FSDP_CTOR, WrapMethod.WRAP_API])
def test_auto_wrap_with_ignored_modules(self, wrap_method: WrapMethod):
sequential = TestFSDPWrap.NestedSequentialModel.get_model(cuda=False)
ignored_modules = [sequential[1], sequential[2][0]]
my_auto_wrap_policy = functools.partial(
size_based_auto_wrap_policy, min_num_params=40,
)
fsdp_kwargs = {
"process_group": self.process_group,
"auto_wrap_policy": my_auto_wrap_policy,
"ignored_modules": ignored_modules,
}
if wrap_method == WrapMethod.FSDP_CTOR:
model = FSDP(sequential, **fsdp_kwargs)
elif wrap_method == WrapMethod.WRAP_API:
with enable_wrap(wrapper_cls=FSDP, **fsdp_kwargs):
model = wrap(sequential)
else:
assert 0, f"Unsupported wrap method: {wrap_method}"
        # Since the 2nd linear (`sequential[1]`) is ignored, the wrapping
        # policy no longer exceeds the parameter threshold before reaching the
        # inner sequential (`sequential[2]`); hence, it flattens
        # `sequential[0]` and `sequential[2][1]` into `model` and leaves
        # `sequential[1]` and `sequential[2][0]` as-is since they are ignored.
self.assertTrue(isinstance(model, FSDP))
self.assertTrue(isinstance(model.module[0], nn.Linear))
self.assertTrue(isinstance(model.module[1], nn.Linear))
self.assertTrue(isinstance(model.module[2], nn.Sequential))
self.assertTrue(isinstance(model.module[2][0], nn.Linear))
self.assertTrue(isinstance(model.module[2][1], nn.Linear))
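# Hedged sketch (not part of the original test suite): the wrapping decisions
# above can be reasoned about by counting only the parameters the policy
# "sees", i.e. excluding ignored modules, and comparing that count against
# ``min_num_params=40``. The real policy internals may differ; this helper is
# only an illustration of the threshold comparison.
def _visible_numel(module, ignored_modules):
    """Count the parameters the size-based policy would consider (non-ignored)."""
    ignored_ids = {id(p) for m in ignored_modules for p in m.parameters()}
    return sum(p.numel() for p in module.parameters() if id(p) not in ignored_ids)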
instantiate_parametrized_tests(TestFSDPWrap)
instantiate_parametrized_tests(TestAutoWrap)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_wrap.py
|
# Owner(s): ["oncall: distributed"]
import sys
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import distributed as dist
from torch.distributed.algorithms._comm_hooks import default_hooks
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import MixedPrecision
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
from torch.testing._internal.common_distributed import (
requires_nccl,
requires_nccl_version,
sandcastle_skip_if,
skip_if_lt_x_gpu,
skip_if_rocm,
)
from torch.testing._internal.common_fsdp import FSDPTest
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
run_tests,
)
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
# bfloat16 is only supported by CUDA 11+
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and torch.version.cuda is not None
and int(torch.version.cuda.split('.')[0]) >= 11)
class Net(nn.Module):
def __init__(self, has_wrapping, sharding_strategy, mixed_precision=None):
# to ensure determinism
torch.manual_seed(0)
torch.cuda.manual_seed(0)
super().__init__()
if has_wrapping:
self.net = FSDP(nn.Sequential(
nn.Linear(8, 16),
nn.ReLU(),
FSDP(
nn.Linear(16, 8),
device_id=torch.cuda.current_device(),
sharding_strategy=sharding_strategy,
mixed_precision=mixed_precision,
)
),
device_id=torch.cuda.current_device(),
sharding_strategy=sharding_strategy,
mixed_precision=mixed_precision,
)
else:
self.net = nn.Sequential(
nn.Linear(8, 16),
nn.ReLU(),
nn.Linear(16, 8)
)
self.out = nn.Linear(8, 4)
def forward(self, x):
return self.out(F.relu(self.net(x)))
class DummyState(object):
__slots__ = [
"process_group"
]
def __init__(self, process_group):
self.process_group = process_group
class DummyHook(object):
def dummy_hook(self, state: DummyState, grad: torch.Tensor):
pass
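# Hedged sketch (illustrative only, not used by the tests): a hook with the
# same ``(state, grad)`` shape as ``DummyHook.dummy_hook`` above that
# all-reduces and averages the gradient in place, which is what the default
# hook is expected to do. The exact contract FSDP requires from a registered
# hook may differ from this simplified version.
def _allreduce_avg_hook(state: DummyState, grad: torch.Tensor) -> None:
    group = state.process_group
    dist.all_reduce(grad, group=group)
    grad.div_(dist.get_world_size(group))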
class TestCommunicationHooks(FSDPTest):
@skip_if_lt_x_gpu(2)
@parametrize(
"sharding_strategy",
[
ShardingStrategy.NO_SHARD
])
def test_default_communication_hook_behavior(
self,
sharding_strategy: Optional[ShardingStrategy]
):
"""
Tests FSDP's default communication hook's behavior and correctness.
Arguments:
sharding_strategy (Optional[ShardingStrategy]): Configures the FSDP algorithm.
"""
m = torch.nn.Linear(1, 5, bias=False)
inpt = torch.tensor([self.rank]).float().cuda(self.rank)
net_default_hook = FSDP(
m,
device_id=torch.cuda.current_device(),
sharding_strategy=sharding_strategy
).to(self.rank)
# Check that default hook is set to `all_reduce`
for entry in FSDP.fsdp_modules(net_default_hook):
self.assertEqual(entry.communication_hook, default_hooks.allreduce_hook)
for _ in range(4):
# Clear gradients
net_default_hook.zero_grad()
loss = net_default_hook(inpt).sum()
loss.backward()
            # Each worker contributes a local weight gradient equal to its rank;
            # the default all-reduce hook then averages these across ranks.
grad = net_default_hook.params[0].grad
expected_grad = (
sum(i for i in range(dist.get_world_size())) / dist.get_world_size()
)
# Verify default hook produces expected gradients
self.assertEqual(
grad[0].item(),
expected_grad,
msg=f"Expected hook grad of {expected_grad} but got {grad[0].item()}")
def _get_submodules(self, fsdp_net):
return [
submodule for submodule in FSDP.fsdp_modules(fsdp_net)
if not submodule.check_is_root()
]
def _init_model(self, core, sharding_strategy, mixed_precision=None):
device = torch.device("cuda")
return FSDP(
core,
device_id=torch.cuda.current_device(),
sharding_strategy=sharding_strategy,
mixed_precision=mixed_precision,
).to(device)
@skip_if_lt_x_gpu(2)
@parametrize("has_wrapping", [True, False])
@parametrize(
"sharding_strategy",
[
ShardingStrategy.NO_SHARD,
ShardingStrategy.FULL_SHARD,
ShardingStrategy.SHARD_GRAD_OP
])
def test_default_communication_hook_initialization(
self,
has_wrapping: bool,
sharding_strategy: Optional[ShardingStrategy]
):
"""
Tests FSDP's communication hook interface behavior.
Arguments:
has_wrapping (bool): Configures wrapping of a module.
sharding_strategy (Optional[ShardingStrategy]): Configures the FSDP algorithm.
"""
# Initialize a model
fsdp_model_with_hook = self._init_model(
Net(has_wrapping=has_wrapping, sharding_strategy=sharding_strategy),
sharding_strategy=sharding_strategy
)
dummy_state = DummyState(process_group=None)
# FSDP currently supports communication hooks for a NO_SHARD strategy
# Check that a `NotImplementedError` is raised for other strategies
if sharding_strategy != ShardingStrategy.NO_SHARD:
# Check that default hook is set to None
for entry in FSDP.fsdp_modules(fsdp_model_with_hook):
self.assertIsNone(entry.communication_hook)
self.assertIsNone(entry.communication_hook_state)
with self.assertRaisesRegex(
NotImplementedError,
'^Communication hooks are currently only available for a NO_SHARD strategy.$'
):
fsdp_model_with_hook.register_comm_hook(dummy_state, DummyHook.dummy_hook)
else:
# Check that default hook is set to `all_reduce`
for entry in FSDP.fsdp_modules(fsdp_model_with_hook):
self.assertEqual(entry.communication_hook, default_hooks.allreduce_hook)
dummy_state = DummyState(process_group=None)
fsdp_model_with_hook.register_comm_hook(
dummy_state,
DummyHook.dummy_hook
)
# Check that we can't register comm hook twice
with self.assertRaisesRegex(AssertionError, '^communication hook can be only registered once$'):
fsdp_model_with_hook.register_comm_hook(
dummy_state,
DummyHook.dummy_hook
)
# Check dummy hook was registered for the root and all submodules if any
for entry in FSDP.fsdp_modules(fsdp_model_with_hook):
self.assertEqual(
entry.communication_hook,
DummyHook.dummy_hook
)
self.assertEqual(
entry.communication_hook_state,
dummy_state
)
@skip_if_lt_x_gpu(2)
@parametrize(
"sharding_strategy",
[
ShardingStrategy.NO_SHARD
])
def test_registering_hook_non_root(
self,
sharding_strategy: Optional[ShardingStrategy]
):
"""
        Tests FSDP's communication hook registration for submodules.
        Makes sure a hook cannot be registered on non-root submodules.
Currently tests only ``NO_SHARD`` strategy.
Arguments:
sharding_strategy (Optional[ShardingStrategy]): Configures the FSDP algorithm.
"""
fsdp_model_with_hook = self._init_model(
Net(has_wrapping=True, sharding_strategy=sharding_strategy),
sharding_strategy=sharding_strategy
)
dummy_state = DummyState(process_group=None)
# Creating a list of non-root submodules to test
submodules = self._get_submodules(fsdp_model_with_hook)
# Check that assertion is raised for registering a comm hook on a non-root
with self.assertRaisesRegex(AssertionError, '^register_comm_hook can only be called on a root instance.$'):
submodules[1].register_comm_hook(dummy_state, DummyHook.dummy_hook)
@skip_if_lt_x_gpu(2)
@parametrize(
"sharding_strategy",
[
ShardingStrategy.NO_SHARD
])
def test_registering_hook_submodules(
self,
sharding_strategy: Optional[ShardingStrategy]
):
"""
        Tests FSDP's communication hook registration for submodules.
        Checks the behavior if a hook was registered for a non-root submodule.
Currently tests only ``NO_SHARD`` strategy.
Arguments:
sharding_strategy (Optional[ShardingStrategy]): Configures the FSDP algorithm.
"""
fsdp_model_with_hook = self._init_model(
Net(has_wrapping=True, sharding_strategy=sharding_strategy),
sharding_strategy=sharding_strategy
)
dummy_state = DummyState(process_group=None)
submodules = self._get_submodules(fsdp_model_with_hook)
# Simulate a registration of a hook on a submodule
submodules[1]._hook_registered = True
        # Check that an error is raised when some of the submodules already have a hook registered
with self.assertRaisesRegex(AssertionError, '^communication hook can be only registered once$'):
fsdp_model_with_hook.register_comm_hook(dummy_state, DummyHook.dummy_hook)
# Reinitialize the model
fsdp_model_with_hook = self._init_model(
Net(has_wrapping=True, sharding_strategy=sharding_strategy),
sharding_strategy=sharding_strategy
)
submodules = self._get_submodules(fsdp_model_with_hook)
submodules[1].communication_hook = DummyHook.dummy_hook
        # Check that an error is raised when some of the submodules have a non-default hook assigned
with self.assertRaisesRegex(
AssertionError,
f'^communication hook should be default, but it is {submodules[1].communication_hook.__name__} instead$'
):
fsdp_model_with_hook.register_comm_hook(
dummy_state,
DummyHook.dummy_hook
)
def _check_low_precision_hook(self, state, hook, sharding_strategy, dtype, has_wrapping):
# keep everything deterministic for input data
torch.manual_seed(0)
torch.cuda.manual_seed(0)
fsdp_with_hook = self._init_model(
Net(has_wrapping=has_wrapping, sharding_strategy=sharding_strategy),
sharding_strategy=sharding_strategy
)
fsdp_with_hook.register_comm_hook(state, hook)
mp_only_grad = MixedPrecision(reduce_dtype=dtype)
fsdp_with_mp = self._init_model(
Net(has_wrapping=has_wrapping, sharding_strategy=sharding_strategy, mixed_precision=mp_only_grad),
sharding_strategy=sharding_strategy,
mixed_precision=mp_only_grad
)
optim_hook = torch.optim.SGD(fsdp_with_hook.parameters(), lr=0.1)
optim_mp = torch.optim.SGD(fsdp_with_mp.parameters(), lr=0.1)
in_data = torch.rand(16, 8).cuda()
fsdp_with_hook.train()
fsdp_with_mp.train()
loss_hook = fsdp_with_hook(in_data).sum()
loss_mp = fsdp_with_mp(in_data).sum()
loss_hook.backward()
# Make sure grads were cast to the parameter's precision
self.assertEqual(fsdp_with_hook.params[0].dtype, state.parameter_type)
loss_mp.backward()
optim_hook.step()
optim_mp.step()
dist.barrier()
for hook_param, mp_param in zip(fsdp_with_hook.parameters(), fsdp_with_mp.parameters()):
self.assertEqual(hook_param.grad, mp_param.grad)
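        # In short: running with the low-precision compress hook should be
        # numerically equivalent to running the same model with
        # ``MixedPrecision(reduce_dtype=dtype)``, so the gradients are compared
        # parameter by parameter after one identical forward/backward/step.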
@requires_nccl()
@skip_if_lt_x_gpu(2)
@parametrize("has_wrapping", [True, False])
@parametrize(
"sharding_strategy",
[
ShardingStrategy.NO_SHARD
])
def test_fp16_hook(
self,
has_wrapping: bool,
sharding_strategy: Optional[ShardingStrategy]
):
state = default_hooks.LowPrecisionState(process_group=None)
hook = default_hooks.fp16_compress_hook
self._check_low_precision_hook(state, hook, sharding_strategy, torch.float16, has_wrapping)
@requires_nccl()
@requires_nccl_version((2, 10), "Need NCCL 2.10+ for BF16_COMPRESS")
@sandcastle_skip_if(
not BFLOAT16_AVAILABLE,
"BFloat16 is only supported by CUDA 11+",
)
@skip_if_lt_x_gpu(2)
@skip_if_rocm
@parametrize("has_wrapping", [True, False])
@parametrize(
"sharding_strategy",
[
ShardingStrategy.NO_SHARD
])
def test_bf16_hook(
self,
has_wrapping: bool,
sharding_strategy: Optional[ShardingStrategy]
):
state = default_hooks.LowPrecisionState(process_group=None)
hook = default_hooks.bf16_compress_hook
self._check_low_precision_hook(state, hook, sharding_strategy, torch.bfloat16, has_wrapping)
instantiate_parametrized_tests(TestCommunicationHooks)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/fsdp/test_fsdp_comm_hooks.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.distributed as dist
from torch.distributed._shard.partial_tensor import (
_PartialTensor,
)
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
EnumerableShardingSpec,
ShardMetadata,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
TEST_GPU_NUM
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
_chunk_sharding_specs_list_for_test,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestPartialTensorReshard(ShardedTensorTestBase):
def _run_partial_tensor_n_reshard(
self, reshard_spec, input_size, world_size, reduce_op, dtype=torch.float, pg=None
):
results_compare = []
local_result = []
pg = pg if pg is not None else dist.distributed_c10d._get_default_group()
for rank in range(pg.size()):
torch.manual_seed(rank)
results = []
for _ in range(world_size):
tensor = torch.rand(*input_size, dtype=dtype).cuda(self.rank)
results.append(tensor)
if self.rank % pg.size() == rank:
local_result.append(tensor.clone().detach())
results_compare.append(torch.cat(results))
            partial_tensor = _PartialTensor(
                torch.cat(local_result), pg, reduce_op=reduce_op
            )
            local_sharded_result = partial_tensor.reshard(reshard_spec)
local_shards = local_sharded_result.local_shards()
results_compare = torch.stack(results_compare)
if reduce_op == dist.ReduceOp.SUM:
results_compare = torch.sum(results_compare, dim=0)
else:
results_compare = torch.max(results_compare, dim=0).values
rank_idx = None
for idx, placement in enumerate(reshard_spec.placements):
if placement.rank() == self.rank % pg.size():
rank_idx = idx
local_result_compare = results_compare.chunk(pg.size())[rank_idx]
self.assertEqual(1, len(local_shards))
self.assertEqual(local_shards[0].tensor, local_result_compare)
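    # What the helper above exercises: a ``_PartialTensor`` holds each rank's
    # local contribution; ``reshard(spec)`` reduces those contributions across
    # the group (SUM or MAX here) and redistributes the reduced tensor
    # according to ``spec``, so every rank ends up with exactly one local
    # shard of the reduced result.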
def _reshard_spec_for_subgroup(self, rank):
if rank in [0, 1]:
return ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
],
)
else:
return ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:2",
"rank:1/cuda:3",
],
)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_partial_tensor_reshard(self):
specs = _chunk_sharding_specs_list_for_test([0], seed=7)
spec = specs[0]
self._run_partial_tensor_n_reshard(spec, [13, 21], 4, dist.ReduceOp.SUM)
self._run_partial_tensor_n_reshard(spec, [12, 22], 4, dist.ReduceOp.MAX)
self._run_partial_tensor_n_reshard(spec, [13, 21], 3, dist.ReduceOp.SUM)
self._run_partial_tensor_n_reshard(spec, [17, 21], 2, dist.ReduceOp.MAX)
sub_pgs = [dist.new_group([0, 1]), dist.new_group([2, 3])]
pg = sub_pgs[self.rank // 2]
spec = self._reshard_spec_for_subgroup(self.rank)
self._run_partial_tensor_n_reshard(spec, [12, 22], 4, dist.ReduceOp.MAX, pg=pg)
self._run_partial_tensor_n_reshard(spec, [13, 22], 3, dist.ReduceOp.SUM, pg=pg)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_partial_tensor_reshard_errors(self):
enumerable_sharding_spec = EnumerableShardingSpec(
[
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
]
)
with self.assertRaisesRegex(
NotImplementedError, "Only ChunkShardingSpec supported for reshard."
):
self._run_partial_tensor_n_reshard(
enumerable_sharding_spec, [13, 21], 4, dist.ReduceOp.SUM
)
self._run_partial_tensor_n_reshard(
enumerable_sharding_spec, [12, 22], 4, dist.ReduceOp.MAX
)
specs = _chunk_sharding_specs_list_for_test([0], seed=7)
spec = specs[0]
with self.assertRaisesRegex(
NotImplementedError, "Only real partial tensor supported for reshard."
):
self._run_partial_tensor_n_reshard(
spec, [13, 21], 4, dist.ReduceOp.SUM, dtype=torch.cfloat
)
self._run_partial_tensor_n_reshard(
spec, [12, 22], 4, dist.ReduceOp.MAX, dtype=torch.cfloat
)
class TestPartialTensorOps(ShardedTensorTestBase):
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_transpose(self):
partial_tensor = _PartialTensor(torch.rand(5, 10))
partial_tensor = partial_tensor.transpose(0, 1)
self.assertEqual(partial_tensor.size(), torch.Size((10, 5)))
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_cat(self):
t1 = torch.rand(5, 10)
t2 = torch.rand(3, 10)
t3 = torch.rand(4, 10)
partial_tensors = [_PartialTensor(t1), _PartialTensor(t2), _PartialTensor(t3)]
partial_concat = torch.cat(partial_tensors)
local_concat = torch.cat([t1, t2, t3])
self.assertEqual(local_concat.size(), partial_concat.size())
# Test dim kwarg
t1 = torch.rand(5, 10)
t2 = torch.rand(5, 12)
t3 = torch.rand(5, 11)
partial_tensors = [_PartialTensor(t1), _PartialTensor(t2), _PartialTensor(t3)]
partial_concat = torch.cat(partial_tensors, dim=1)
local_concat = torch.cat([t1, t2, t3], dim=1)
self.assertEqual(local_concat.size(), partial_concat.size())
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_cat_errors(self):
with self.assertRaisesRegex(
RuntimeError, 'All inputs need to be an instance of _PartialTensor'
):
torch.cat([_PartialTensor(torch.rand(10)), torch.rand(10)])
with self.assertRaisesRegex(
RuntimeError, 'reduce_ops need to be the same'
):
torch.cat([_PartialTensor(torch.rand(10)), _PartialTensor(torch.rand(10), reduce_op=dist.ReduceOp.MAX)])
with self.assertRaisesRegex(
RuntimeError, '"out" kwarg is not supported'
):
torch.cat([_PartialTensor(torch.rand(10)), _PartialTensor(torch.rand(10))], out=torch.rand(10))
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/_shard/test_partial_tensor.py
|
# Owner(s): ["oncall: distributed"]
import io
import torch
import torch.distributed._shard.sharded_tensor as sharded_tensor
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed._shard import _shard_tensor
from torch.distributed._shard.replicated_tensor import ReplicatedTensor
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
gen_binary_op_func
)
from torch.testing._internal.distributed._shard.sharded_tensor import TEST_GPU_NUM
class TestReplicatedTensor(ShardedTensorTestBase):
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_replicated_tensor_basics(self):
local_tensor = torch.ones(3, 3, device=f"cuda:{self.rank}") * 4
replica_tensor = ReplicatedTensor(local_tensor)
        # validate that it's a replicated tensor by checking values on all ranks
validated = replica_tensor.validate()
self.assertEqual(validated, True)
res = replica_tensor + 2
self.assertIsInstance(res, torch.Tensor)
self.assertNotIsInstance(res, ReplicatedTensor)
self.assertEqual(res, torch.ones(3, 3) * 6)
        # modify the local tensor on one rank and check that validation raises
if self.rank == 2:
local_tensor += 3
with self.assertRaisesRegex(ValueError, 'have different values'):
replica_tensor.validate()
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_replicated_tensor_inter_op_replicated_tensor(self):
local_tensor = torch.ones(3, 3, device=f"cuda:{self.rank}")
replica_tensor1 = ReplicatedTensor(local_tensor * 4)
replica_tensor2 = ReplicatedTensor(local_tensor * 6)
new_tensor = replica_tensor1 * replica_tensor2
self.assertIsInstance(new_tensor, ReplicatedTensor)
self.assertEqual(new_tensor, torch.ones(3, 3) * 24)
# test replicated tensor inter-op with different pgs
new_pg = dist.new_group(ranks=[1, 2, 3])
replica_tensor_new_group = ReplicatedTensor(local_tensor * 3, process_group=new_pg)
with self.assertRaisesRegex(RuntimeError, 'must be in the same'):
replica_tensor_new_group * replica_tensor1
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_replicated_tensor_inter_op_tensor(self):
local_tensor = torch.ones(3, 3, device=f"cuda:{self.rank}") * 4
replica_tensor = ReplicatedTensor(local_tensor)
local_rand_tensor = torch.randn(3, 3, device=f"cuda:{self.rank}")
new_tensor = replica_tensor + local_rand_tensor
self.assertIsInstance(new_tensor, torch.Tensor)
self.assertNotIsInstance(new_tensor, ReplicatedTensor)
self.assertEqual(new_tensor, local_tensor + local_rand_tensor)
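    # Propagation rules exercised above, as these tests understand them:
    # ReplicatedTensor op ReplicatedTensor stays a ReplicatedTensor, while
    # ReplicatedTensor op plain torch.Tensor falls back to an ordinary
    # torch.Tensor, since the result is no longer guaranteed to be identical
    # on every rank.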
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_replicated_tensor_inter_op_sharded_tensor(self):
torch.manual_seed(self.rank)
local_tensor1 = torch.rand(12, 3, device=f"cuda:{self.rank}") * 4
local_tensor2 = torch.ones(12, 3, device=f"cuda:{self.rank}") * 4
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = _shard_tensor(local_tensor1, spec, src_rank=0)
replica_tensor = ReplicatedTensor(local_tensor2)
ops = ["torch.add", "torch.sub", "torch.mul", "torch.div", "+", "-", "*", "/"]
for op in ops:
binary_op = gen_binary_op_func(op)
res = binary_op(st, replica_tensor)
self.assertIsInstance(res, sharded_tensor.ShardedTensor)
self.assertNotIsInstance(res, ReplicatedTensor)
output = torch.empty((12, 3), device=self.rank) if self.rank == 0 else None
res.gather(dst=0, out=output)
if self.rank == 0:
local_output = binary_op(local_tensor1, local_tensor2)
self.assertEqual(output, local_output)
# reflective
reflect_res = binary_op(replica_tensor, st)
self.assertIsInstance(reflect_res, sharded_tensor.ShardedTensor)
self.assertNotIsInstance(reflect_res, ReplicatedTensor)
reflect_output = torch.empty((12, 3), device=self.rank) if self.rank == 0 else None
reflect_res.gather(dst=0, out=reflect_output)
if self.rank == 0:
reflect_local_output = binary_op(local_tensor2, local_tensor1)
self.assertEqual(reflect_output, reflect_local_output)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_replicated_tensor_implicit_broadcasting(self):
        # seed deterministically (the seed is rank-dependent)
torch.manual_seed(self.rank)
# test implicit broadcasting
local_tensor1 = torch.rand(12, 3, device=f"cuda:{self.rank}") * 4
        # we use size (3,) to trigger the implicit broadcasting logic;
        # the test would fail if implicit broadcasting did not happen.
local_tensor2 = torch.ones(3, device=f"cuda:{self.rank}")
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = _shard_tensor(local_tensor1, spec, src_rank=0)
replica_tensor = ReplicatedTensor(local_tensor2)
ops = ["torch.add", "torch.sub", "torch.mul", "torch.div", "+", "-", "*", "/"]
for op in ops:
binary_op = gen_binary_op_func(op)
            # the replicated tensor should be broadcast automatically
res = binary_op(st, replica_tensor)
self.assertIsInstance(res, sharded_tensor.ShardedTensor)
output = torch.empty((12, 3), device=self.rank) if self.rank == 0 else None
res.gather(dst=0, out=output)
if self.rank == 0:
local_output = binary_op(local_tensor1, local_tensor2)
self.assertEqual(output, local_output)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_replicated_tensor_inter_op_sharded_tensor_errors(self):
local_tensor = torch.ones(3, 3, device=f"cuda:{self.rank}") * 4
replica_tensor = ReplicatedTensor(local_tensor)
torch.manual_seed(self.rank)
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st1 = sharded_tensor.rand(spec, (20, 3, 3))
st2 = sharded_tensor.rand(spec, (30, 3, 3))
with self.assertRaisesRegex(RuntimeError, 'Implicit broadcasting'):
st1 + st2
with self.assertRaisesRegex(RuntimeError, 'not supported for ShardedTensor'):
st1 % replica_tensor
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_with_ddp(self):
# Test Replicated params for DDP
replica_tensor = ReplicatedTensor(torch.rand(4, 8, device=self.rank))
model = torch.nn.Linear(8, 2).cuda(self.rank)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
ddp = DDP(model)
# Test module.parameters.
params = list(ddp.parameters())
self.assertEqual(2, len(params))
self.assertEqual(ddp.module.weight, params[0])
self.assertEqual(ddp.module.bias, params[1])
params = list(model.parameters())
self.assertEqual(2, len(params))
self.assertEqual(model.weight, params[0])
self.assertEqual(model.bias, params[1])
# Validate output
out = ddp(replica_tensor)
self.assertIsInstance(out, ReplicatedTensor)
# Test backward and optimizer.
# Validate backward.
out.sum().backward()
self.assertIsNotNone(model.weight.grad)
self.assertIsNotNone(model.bias.grad)
self.assertIsNotNone(ddp.module.weight.grad)
self.assertIsNotNone(ddp.module.bias.grad)
original_params = []
for param_group in optim.param_groups:
for original_param in param_group['params']:
self.assertIsNotNone(original_param.grad)
original_params.append(original_param)
self.assertEqual(model.weight.grad, original_params[0].grad)
self.assertEqual(model.bias.grad, original_params[1].grad)
self.assertEqual(model.weight.grad, ddp.module.weight.grad)
self.assertEqual(model.bias.grad, ddp.module.bias.grad)
# Validate optimizer.
optim.step()
self.assertEqual(model.weight, ddp.module.weight)
self.assertEqual(model.weight, original_params[0])
self.assertEqual(model.bias, ddp.module.bias)
self.assertEqual(model.bias, original_params[1])
# Validate zero_grad
optim.zero_grad()
self.assertEqual(model.weight.grad, torch.zeros_like(model.weight.grad))
self.assertEqual(model.weight.grad, ddp.module.weight.grad)
self.assertEqual(model.weight.grad, original_params[0].grad)
self.assertEqual(model.bias.grad, torch.zeros_like(model.bias.grad))
self.assertEqual(model.bias.grad, ddp.module.bias.grad)
self.assertEqual(model.bias.grad, original_params[1].grad)
# Validate zero_grad set_to_none
optim.zero_grad(set_to_none=True)
self.assertIsNone(model.weight.grad)
self.assertEqual(model.weight.grad, ddp.module.weight.grad)
self.assertEqual(model.weight.grad, original_params[0].grad)
self.assertIsNone(model.bias.grad)
self.assertEqual(model.bias.grad, ddp.module.bias.grad)
self.assertEqual(model.bias.grad, original_params[1].grad)
# Multiple forward passes.
for _ in range(5):
out = ddp(replica_tensor)
self.assertIsInstance(out, ReplicatedTensor)
# Test with context manager.
from torch.nn.parallel._replicated_tensor_ddp_utils import _ddp_replicated_tensor
with _ddp_replicated_tensor(False):
for _ in range(5):
with _ddp_replicated_tensor(True):
ddp = DDP(model)
out = ddp(replica_tensor)
self.assertIsInstance(out, ReplicatedTensor)
# Test save and load.
with _ddp_replicated_tensor(False):
ddp = DDP(model)
expected_state_dict = ddp.state_dict()
buffer = io.BytesIO()
torch.save(ddp, buffer)
buffer.seek(0)
obj = torch.load(buffer)
self.assertEqual(expected_state_dict, obj.state_dict())
with _ddp_replicated_tensor(True):
ddp = DDP(model)
buffer = io.BytesIO()
torch.save(ddp, buffer)
buffer.seek(0)
obj = torch.load(buffer)
self.assertEqual(expected_state_dict, obj.state_dict())
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_unsqueeze(self):
local_tensor = torch.rand(3, 3, device=self.rank)
replicated_tensor = ReplicatedTensor(local_tensor)
unsqueezed_replicated_tensor = replicated_tensor.unsqueeze(0)
unsqueezed_local_tensor = local_tensor.unsqueeze(0)
self.assertIsInstance(unsqueezed_replicated_tensor, ReplicatedTensor)
self.assertIsInstance(torch.unsqueeze(replicated_tensor, 0), ReplicatedTensor)
self.assertEqual(unsqueezed_local_tensor, unsqueezed_replicated_tensor)
self.assertEqual(torch.unsqueeze(replicated_tensor, 0), unsqueezed_replicated_tensor)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_getitem(self):
local_tensor = torch.rand(3, 3, device=self.rank)
replicated_tensor = ReplicatedTensor(local_tensor)
replicated_tensor_view = replicated_tensor[0]
local_tensor_view = local_tensor[0]
self.assertIsInstance(replicated_tensor_view, ReplicatedTensor)
self.assertEqual(local_tensor_view, replicated_tensor_view)
|
pytorch-master
|
test/distributed/_shard/test_replicated_tensor.py
|
# Owner(s): ["oncall: distributed"]
import sys
import copy
import torch
import torch.nn as nn
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.distributed._shard import shard_module
from torch.distributed._shard.sharding_plan import ShardingPlan
from torch.distributed._shard.sharder import Sharder
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.distributed._shard.sharded_tensor import ShardedTensor
from torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
# a simple embedding bag collection implementation
class CustomEmbeddingBagCollection(nn.Module):
def __init__(self, num_bags, num_embeddings_per_bag, num_dims):
super().__init__()
self.num_bags = num_bags
self.embedding_bags: nn.ModuleDict = nn.ModuleDict()
for i in range(num_bags):
self.embedding_bags[f"embedding_bag_{i}"] = nn.EmbeddingBag(
num_embeddings_per_bag,
num_dims,
mode="sum")
def forward(self, inputs):
outputs = []
for bag in self.embedding_bags.values():
outputs.append(bag(inputs))
return torch.cat(outputs)
# a simple sharded version of EBC
class CustomShardedEBC(nn.Module):
def __init__(self, ebc, split_idx, specs):
super().__init__()
self.split_idx = split_idx
row_spec, col_spec = specs
        # create embedding bags based on the spec
self.embedding_bags: nn.ModuleDict = nn.ModuleDict()
assert self.split_idx < ebc.num_bags
for i in range(ebc.num_bags):
bag_key = f"embedding_bag_{i}"
if i < self.split_idx:
shard_module(ebc, plan=ShardingPlan(plan={f"embedding_bags.{bag_key}.weight": row_spec}))
else:
shard_module(ebc, plan=ShardingPlan(plan={f"embedding_bags.{bag_key}.weight": col_spec}))
self.embedding_bags[bag_key] = ebc.embedding_bags[bag_key]
class CustomSharder(Sharder):
def __init__(self, devices, split_sharding_idx):
self.devices = devices
self.split_sharding_idx = split_sharding_idx
self.rowwise_spec = ChunkShardingSpec(dim=0, placements=devices)
self.colwise_spec = ChunkShardingSpec(dim=1, placements=devices)
def shard(self, ebc: nn.Module) -> nn.Module:
if not isinstance(ebc, CustomEmbeddingBagCollection):
raise RuntimeError("The custom sharder only supports CustomEmbeddingBagCollection")
return CustomShardedEBC(ebc, self.split_sharding_idx, (self.rowwise_spec, self.colwise_spec))
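# Hedged usage sketch (mirrors the test below, not used by it): the custom
# sharder is attached to a module path through a ``ShardingPlan``; bags with
# index below ``split_sharding_idx`` receive the row-wise spec and the rest
# the column-wise spec.
def _example_custom_sharder_plan(num_gpus: int = 2) -> ShardingPlan:
    sharder = CustomSharder(
        devices=[f"rank:{i}/cuda:{i}" for i in range(num_gpus)],
        split_sharding_idx=num_gpus // 2,
    )
    return ShardingPlan(plan={"ebc": sharder})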
class TestCustomSharder(ShardedTensorTestBase):
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_custom_sharder(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.ebc = CustomEmbeddingBagCollection(10, 10, 8)
def forward(self, inputs):
return self.ebc(inputs)
custom_sharder = CustomSharder(
devices=[f"rank:{i}/cuda:{i}" for i in range(TEST_GPU_NUM)],
split_sharding_idx=TEST_GPU_NUM // 2
)
sharding_plan = ShardingPlan(
plan={
"ebc": custom_sharder,
})
local_model = MyModule().cuda(self.rank)
sharded_model = copy.deepcopy(local_model)
# shard the module with the provided sharding plan
shard_module(sharded_model, sharding_plan)
        # check to make sure the module has already been sharded
emb_bags = sharded_model.ebc.embedding_bags
self.assertTrue(isinstance(emb_bags["embedding_bag_0"].weight, ShardedTensor))
self.assertTrue(isinstance(emb_bags["embedding_bag_9"].weight, ShardedTensor))
self.assertEqual(emb_bags["embedding_bag_0"].weight.sharding_spec(), custom_sharder.rowwise_spec)
self.assertEqual(emb_bags["embedding_bag_9"].weight.sharding_spec(), custom_sharder.colwise_spec)
# make sure we can run sharded computation and compare outputs
# with the local model version
input = torch.arange(8).reshape((2, 4)).cuda(self.rank)
local_output = local_model(input)
sharded_output = sharded_model(input)
self.assertEqual(local_output, sharded_output)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_custom_sharder_errors(self):
custom_sharder = CustomSharder(
devices=[f"rank:{i}/cuda:{i}" for i in range(TEST_GPU_NUM)],
split_sharding_idx=TEST_GPU_NUM // 2
)
sharding_plan = ShardingPlan(
plan={
"": custom_sharder,
})
sharded_model = CustomEmbeddingBagCollection(10, 10, 8).cuda(self.rank)
with self.assertRaisesRegex(
KeyError, "path must not be empty for custom sharder!"
):
# shard the module with the provided sharding plan
shard_module(sharded_model, sharding_plan)
        # test a conflicting sharding plan
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:0", "rank:1/cuda:1"])
sharding_plan = ShardingPlan(
plan={
"embedding_bags.embedding_bag_0.weight": spec,
"embedding_bags": custom_sharder,
})
with self.assertRaisesRegex(
RuntimeError, "should not conflict with the submodule tree"
):
# shard the module with the provided sharding plan
shard_module(sharded_model, sharding_plan)
|
pytorch-master
|
test/distributed/_shard/test_sharder.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
from torch.distributed._shard.sharded_tensor import (
Shard,
ShardMetadata,
ShardedTensor,
ShardedTensorMetadata,
)
from torch.distributed._shard.sharded_tensor.metadata import TensorProperties
from torch.testing._internal.common_utils import (
TestCase,
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.distributed._shard.checkpoint.utils import find_state_dict_object
from torch.distributed._shard.checkpoint.metadata import MetadataIndex
from torch.testing._internal.distributed.distributed_utils import (
with_fake_comms
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
def create_sharded_tensor(rank, world_size, shards_per_rank):
shards_metadata = []
local_shards = []
for idx in range(0, world_size * shards_per_rank):
shard_rank = idx // shards_per_rank
shard_md = ShardMetadata(shard_offsets=[idx * 8], shard_sizes=[8], placement=f"rank:{shard_rank}/cpu")
shards_metadata.append(shard_md)
if shard_rank == rank:
shard = Shard.from_tensor_and_offsets(
torch.rand(*shard_md.shard_sizes),
shard_offsets=shard_md.shard_offsets,
rank=rank
)
local_shards.append(shard)
sharded_tensor_md = ShardedTensorMetadata(
shards_metadata=shards_metadata,
size=torch.Size([8 * len(shards_metadata)]),
tensor_properties=TensorProperties.create_from_tensor(torch.zeros(1))
)
return ShardedTensor._init_from_local_shards_and_global_metadata(
local_shards=local_shards,
sharded_tensor_metadata=sharded_tensor_md
)
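# Hedged illustration (not used by the tests): with ``world_size=2`` and
# ``shards_per_rank=3`` the helper above lays out 6 shards of length 8 at
# offsets 0, 8, 16, 24, 32 and 40, and rank 0 holds the first three of them.
def _expected_shard_offsets(world_size, shards_per_rank):
    return [idx * 8 for idx in range(world_size * shards_per_rank)]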
class TestMedatadaIndex(TestCase):
def test_init_convert_offset(self):
a = MetadataIndex("foo", [1, 2])
b = MetadataIndex("foo", torch.Size([1, 2]))
self.assertEqual(a, b)
def test_index_hint_ignored_on_equals(self):
a = MetadataIndex("foo")
b = MetadataIndex("foo", index=99)
self.assertEqual(a, b)
def test_index_hint_ignored_on_hash(self):
a = MetadataIndex("foo")
b = MetadataIndex("foo", index=99)
self.assertEqual(hash(a), hash(b))
def test_flat_data(self):
state_dict = {
"a": torch.rand(10),
"b": [1, 2, 3],
}
a = find_state_dict_object(state_dict, MetadataIndex("a"))
self.assertEqual(a, state_dict["a"])
a = find_state_dict_object(state_dict, MetadataIndex("a", index=99))
self.assertEqual(a, state_dict["a"])
b = find_state_dict_object(state_dict, MetadataIndex("b"))
self.assertEqual(b, state_dict["b"])
b = find_state_dict_object(state_dict, MetadataIndex("b", index=1))
self.assertEqual(b, state_dict["b"])
with self.assertRaisesRegex(ValueError, "FQN"):
find_state_dict_object(state_dict, MetadataIndex("c"))
with self.assertRaisesRegex(ValueError, "ShardedTensor"):
find_state_dict_object(state_dict, MetadataIndex("a", [0]))
with self.assertRaisesRegex(ValueError, "ShardedTensor"):
find_state_dict_object(state_dict, MetadataIndex("b", [1]))
@with_fake_comms(rank=0, world_size=2)
def test_sharded_tensor_lookup(self):
st = create_sharded_tensor(rank=0, world_size=2, shards_per_rank=3)
state_dict = {"st": st}
obj = find_state_dict_object(state_dict, MetadataIndex("st", [8]))
self.assertEqual(obj, st.local_shards()[1].tensor)
# good hint
obj = find_state_dict_object(state_dict, MetadataIndex("st", [8], index=1))
self.assertEqual(obj, st.local_shards()[1].tensor)
# bad hint
obj = find_state_dict_object(state_dict, MetadataIndex("st", [8], index=2))
self.assertEqual(obj, st.local_shards()[1].tensor)
# broken hint
obj = find_state_dict_object(state_dict, MetadataIndex("st", [8], index=99))
self.assertEqual(obj, st.local_shards()[1].tensor)
with self.assertRaisesRegex(ValueError, "no offset was provided"):
find_state_dict_object(state_dict, MetadataIndex("st"))
with self.assertRaisesRegex(ValueError, "Could not find shard"):
find_state_dict_object(state_dict, MetadataIndex("st", [1]))
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/_shard/checkpoint/test_utils.py
|
# Owner(s): ["oncall: distributed"]
import sys
import os
import shutil
import tempfile
from typing import Dict
import torch
import torch.distributed as dist
from torch.distributed._shard import sharded_tensor
from torch.distributed._shard.sharded_tensor import ShardedTensor, state_dict_hook
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
EnumerableShardingSpec,
ShardingSpec,
ShardMetadata,
)
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
MyShardedModel1
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.distributed._shard.checkpoint import (
FileSystemReader,
FileSystemWriter,
load_state_dict,
save_state_dict,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
def assert_state_dict_equal(
self: TestCase,
state_dict_1: Dict[str, torch.Tensor],
state_dict_2: Dict[str, torch.Tensor],
) -> bool:
self.assertEqual(
len(state_dict_1), len(state_dict_2), "state_dict must be the same size"
)
self.assertEqual(
set(state_dict_1.keys()),
set(state_dict_2.keys()),
"state_dict keys do not match",
)
for key, value_1 in state_dict_1.items():
value_2 = state_dict_2[key]
if isinstance(value_1, ShardedTensor):
for local_shard_1, local_shard_2 in zip(
value_1.local_shards(), value_2.local_shards()
):
self.assertTrue(
                    torch.equal(local_shard_1.tensor, local_shard_2.tensor),
f"Key {key}'s shard does not match",
)
elif isinstance(value_1, torch.Tensor):
self.assertTrue(
torch.equal(value_1, value_2), f"Key {key}'s tensor does not match"
)
return True
class MyTestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear_1 = torch.nn.Linear(5, 5)
self.linear_2 = torch.nn.Linear(5, 1)
self.emb = torch.nn.EmbeddingBag(5, 10)
# The ShardedModels are borrowed from test/distributed/_sharded_tensor/test_sharded_tensor.py
class MyShardedModel3(torch.nn.Module):
def __init__(
self,
spec: ShardingSpec,
) -> None:
super(MyShardedModel3, self).__init__()
self.sharded_tensor: ShardedTensor = sharded_tensor.rand(
spec, 10, 20, init_rrefs=False
)
class TestDistributedStateDictSaveLoad(TestCase):
def test_read_write_only_tensor(self) -> None:
with tempfile.TemporaryDirectory() as path:
state_dict_to_save = MyTestModule().state_dict()
fs_writer = FileSystemWriter(path=path)
save_state_dict(state_dict=state_dict_to_save, storage_writer=fs_writer, no_dist=True)
state_dict_to_load_to = MyTestModule().state_dict()
with self.assertRaises(AssertionError):
assert_state_dict_equal(self, state_dict_to_load_to, state_dict_to_save)
# Load from file without any resharding
fs_reader = FileSystemReader(path=path)
load_state_dict(state_dict=state_dict_to_load_to, storage_reader=fs_reader, no_dist=True)
assert_state_dict_equal(self, state_dict_to_load_to, state_dict_to_save)
class TestDistributedStateDictSaveLoadWithSharedTensor(ShardedTensorTestBase):
@property
def world_size(self) -> int:
return 2
@with_comms(init_rpc=False, backend="gloo")
def test_read_write_shard_tensor(self) -> None:
paths = [tempfile.mkdtemp()]
dist.broadcast_object_list(paths)
path = paths[0]
# pyre-fixme [28]: Unexpected keyword argument `dim` to call `dist._sharding_spec.api.ChunkShardingSpec.__init__`.
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0",
"rank:1",
],
)
model_to_save = MyShardedModel1(spec, init_rrefs=False)
# Test save
model_to_save._register_state_dict_hook(state_dict_hook)
state_dict_to_save = model_to_save.state_dict()
fs_writer = FileSystemWriter(path=path)
save_state_dict(state_dict=state_dict_to_save, storage_writer=fs_writer)
dist.barrier()
# Create a new model
model_to_load = MyShardedModel1(spec, init_rrefs=False)
# This is not the correct hook for loading the state dict
# model_to_load._register_load_state_dict_pre_hook(pre_load_state_dict_hook, True)
model_to_load._register_state_dict_hook(state_dict_hook)
state_dict_to_load_to = model_to_load.state_dict()
dist.barrier()
with self.assertRaises(AssertionError):
assert_state_dict_equal(self, state_dict_to_load_to, state_dict_to_save)
# Test load.
fs_reader = FileSystemReader(path=path)
load_state_dict(state_dict=state_dict_to_load_to, storage_reader=fs_reader)
assert_state_dict_equal(self, state_dict_to_load_to, state_dict_to_save)
dist.barrier()
class TestDistributedReshardOnLoad(ShardedTensorTestBase):
@property
def world_size(self) -> int:
return 2
def get_file_path(self) -> str:
paths = [tempfile.mkdtemp()] if dist.get_rank() == 0 else [None]
dist.broadcast_object_list(paths)
return paths[0]
def load_tensor(self, tensor: ShardedTensor) -> torch.Tensor:
res = torch.zeros(tensor.shape, device="cpu") if dist.get_rank() == 0 else None
tensor.gather(out=res)
return res
@with_comms(init_rpc=False, backend="gloo")
def test_load_with_different_shard_plan(self) -> None:
path = self.get_file_path()
# We hardcode the assumption of how many shards are around
self.assertEqual(self.world_size, dist.get_world_size())
specs = [
# pyre-fixme [28]: Unexpected keyword argument `dim` to call `dist._sharding_spec.api.ChunkShardingSpec.__init__`.
ChunkShardingSpec(
dim=0,
placements=[
"rank:0",
"rank:1",
],
),
# pyre-fixme [28]: Unexpected keyword argument `dim` to call `dist._sharding_spec.api.ChunkShardingSpec.__init__`.
ChunkShardingSpec(
dim=0,
placements=[
"rank:0",
"rank:1",
"rank:1",
"rank:0",
],
),
# This requires the tensors to be [10, 20]
EnumerableShardingSpec(
shards=[
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[2, 20],
placement="rank:0",
),
ShardMetadata(
shard_offsets=[2, 0],
shard_sizes=[1, 20],
placement="rank:1",
),
ShardMetadata(
shard_offsets=[3, 0],
shard_sizes=[3, 20],
placement="rank:0",
),
ShardMetadata(
shard_offsets=[6, 0],
shard_sizes=[3, 20],
placement="rank:1",
),
ShardMetadata(
shard_offsets=[9, 0],
shard_sizes=[1, 20],
placement="rank:0",
),
]
),
# This requires the tensors to be [10, 20]
EnumerableShardingSpec(
shards=[
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[8, 20],
placement="rank:1",
),
ShardMetadata(
shard_offsets=[8, 0],
shard_sizes=[2, 20],
placement="rank:0",
),
]
),
]
for s0 in specs:
for s1 in specs:
if s0 == s1:
continue
if dist.get_rank() == 0:
shutil.rmtree(path, ignore_errors=True)
os.makedirs(path)
dist.barrier()
model_to_save = MyShardedModel3(s0)
model_to_save._register_state_dict_hook(state_dict_hook)
state_dict_to_save = model_to_save.state_dict()
fs_writer = FileSystemWriter(path=path)
save_state_dict(state_dict=state_dict_to_save, storage_writer=fs_writer)
dist.barrier()
model_to_load = MyShardedModel3(s1)
model_to_load._register_state_dict_hook(state_dict_hook)
state_dict_to_load_to = model_to_load.state_dict()
dist.barrier()
fs_reader = FileSystemReader(path=path)
load_state_dict(
state_dict=state_dict_to_load_to, storage_reader=fs_reader
)
dist.barrier()
store_tensor = self.load_tensor(model_to_save.sharded_tensor)
dist.barrier()
load_tensor = self.load_tensor(model_to_load.sharded_tensor)
if dist.get_rank() == 0:
self.assertTrue(
torch.allclose(store_tensor, load_tensor), msg=f"{s0} vs {s1}"
)
@with_comms(init_rpc=False, backend="gloo")
def test_load_rowwise_to_colwise(self) -> None:
path = self.get_file_path()
self.assertEqual(self.world_size, dist.get_world_size())
# pyre-fixme [28]: Unexpected keyword argument `dim` to call `dist._sharding_spec.api.ChunkShardingSpec.__init__`.
src_spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0",
"rank:1",
],
)
# pyre-fixme [28]: Unexpected keyword argument `dim` to call `dist._sharding_spec.api.ChunkShardingSpec.__init__`.
dst_spec = ChunkShardingSpec(
dim=1,
placements=[
"rank:0",
"rank:1",
],
)
if dist.get_rank() == 0:
shutil.rmtree(path, ignore_errors=True)
os.makedirs(path)
model_to_save = MyShardedModel3(src_spec).cuda(dist.get_rank())
model_to_save._register_state_dict_hook(state_dict_hook)
state_dict_to_save = model_to_save.state_dict()
fs_writer = FileSystemWriter(path=path)
save_state_dict(state_dict=state_dict_to_save, storage_writer=fs_writer)
model_to_load = MyShardedModel3(dst_spec).cuda(dist.get_rank())
model_to_load._register_state_dict_hook(state_dict_hook)
state_dict_to_load_to = model_to_load.state_dict()
fs_reader = FileSystemReader(path=path)
load_state_dict(state_dict=state_dict_to_load_to, storage_reader=fs_reader)
        # We can't compare the two ShardedTensors directly since each has a different
        # sharding spec, so gather both to rank 0 and compare the full tensors there.
store_tensor = self.load_tensor(model_to_save.sharded_tensor)
load_tensor = self.load_tensor(model_to_load.sharded_tensor)
if dist.get_rank() == 0:
self.assertTrue(torch.allclose(store_tensor, load_tensor))
@with_comms(init_rpc=False, backend="gloo")
def test_save_load_bytes(self) -> None:
path = self.get_file_path()
state_dict_to_save = {
'bytes0': [1],
'bytes1': 'string'
}
fs_writer = FileSystemWriter(path=path)
save_state_dict(state_dict=state_dict_to_save, storage_writer=fs_writer)
state_dict_to_load = {
'bytes0': [2],
'bytes1': 'other'
}
fs_reader = FileSystemReader(path=path)
load_state_dict(state_dict=state_dict_to_load, storage_reader=fs_reader)
self.assertEqual([1], state_dict_to_load['bytes0'])
self.assertEqual('string', state_dict_to_load['bytes1'])
@with_comms(init_rpc=False, backend="gloo")
def test_switch_between_sharded_tensor_to_tensor(self) -> None:
path = self.get_file_path()
tensor_size = 32
specs = [
ChunkShardingSpec(
dim=0,
placements=[
"rank:0",
"rank:1",
],
),
ChunkShardingSpec(
dim=0,
placements=[
"rank:0",
"rank:1",
"rank:1",
"rank:0",
],
),
EnumerableShardingSpec(
shards=[
ShardMetadata(
shard_offsets=[0],
shard_sizes=[8],
placement="rank:1",
),
ShardMetadata(
shard_offsets=[8],
shard_sizes=[tensor_size - 8],
placement="rank:0",
),
]
),
EnumerableShardingSpec(
shards=[
ShardMetadata(
shard_offsets=[0],
shard_sizes=[10],
placement="rank:0",
),
ShardMetadata(
shard_offsets=[10],
shard_sizes=[tensor_size - 10],
placement="rank:1",
),
]
),
]
for save_spec in specs:
for load_spec in specs:
save_dict = {
'sharded': sharded_tensor.rand(save_spec, tensor_size),
'replicated': torch.rand(tensor_size, device=f"cpu:{self.rank}")
}
fs_writer = FileSystemWriter(path=path)
save_state_dict(state_dict=save_dict, storage_writer=fs_writer)
                # "Freaky Friday" the tensors: swap their roles so the sharded entry is
                # loaded into a plain tensor and the plain entry into a sharded tensor.
load_dict = {
'sharded': torch.zeros(tensor_size, device=f"cpu:{self.rank}"),
'replicated': sharded_tensor.zeros(load_spec, tensor_size)
}
fs_reader = FileSystemReader(path=path)
load_state_dict(state_dict=load_dict, storage_reader=fs_reader)
save_dict_sharded = self.load_tensor(save_dict['sharded'])
load_dict_replicated = self.load_tensor(load_dict['replicated'])
if dist.get_rank() == 0:
self.assertTrue(
torch.allclose(save_dict_sharded, load_dict['sharded']),
f"save-spec {save_spec} load-spec {load_spec}"
)
self.assertTrue(
torch.allclose(save_dict['replicated'], load_dict_replicated),
f"save-spec {save_spec} load-spec {load_spec}"
)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/_shard/checkpoint/test_file_system_checkpoint_cpu.py
|
# Owner(s): ["oncall: distributed"]
import sys
import os
import shutil
import tempfile
from typing import Dict
import torch
import torch.distributed as dist
from torch.distributed._shard import sharded_tensor
from torch.distributed._shard.sharded_tensor import ShardedTensor, state_dict_hook
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
EnumerableShardingSpec,
ShardingSpec,
ShardMetadata,
)
from torch.testing._internal.common_distributed import requires_nccl, skip_if_lt_x_gpu
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
MyShardedModel1
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.distributed._shard.checkpoint import (
FileSystemReader,
FileSystemWriter,
load_state_dict,
save_state_dict,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
def assert_state_dict_equal(
self: TestCase,
state_dict_1: Dict[str, torch.Tensor],
state_dict_2: Dict[str, torch.Tensor],
) -> bool:
self.assertEqual(
len(state_dict_1), len(state_dict_2), "state_dict must be the same size"
)
self.assertEqual(
set(state_dict_1.keys()),
set(state_dict_2.keys()),
"state_dict keys do not match",
)
for key, value_1 in state_dict_1.items():
value_2 = state_dict_2[key]
if isinstance(value_1, ShardedTensor):
for local_shard_1, local_shard_2 in zip(
value_1.local_shards(), value_2.local_shards()
):
self.assertTrue(
                    torch.equal(local_shard_1.tensor, local_shard_2.tensor),
f"Key {key}'s shard does not match",
)
elif isinstance(value_1, torch.Tensor):
self.assertTrue(
torch.equal(value_1, value_2), f"Key {key}'s tensor does not match"
)
return True
class MyTestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear_1 = torch.nn.Linear(5, 5)
self.linear_2 = torch.nn.Linear(5, 1)
self.emb = torch.nn.EmbeddingBag(5, 10)
# The ShardedModels are borrowed from test/distributed/_sharded_tensor/test_sharded_tensor.py
class MyShardedModel3(torch.nn.Module):
def __init__(
self,
spec: ShardingSpec,
) -> None:
super(MyShardedModel3, self).__init__()
self.sharded_tensor: ShardedTensor = sharded_tensor.rand(
spec, 10, 20, init_rrefs=False
)
class TestDistributedStateDictSaveLoad(TestCase):
def test_read_write_only_tensor(self) -> None:
with tempfile.TemporaryDirectory() as path:
state_dict_to_save = MyTestModule().state_dict()
fs_writer = FileSystemWriter(path=path)
save_state_dict(state_dict=state_dict_to_save, storage_writer=fs_writer, no_dist=True)
state_dict_to_load_to = MyTestModule().state_dict()
with self.assertRaises(AssertionError):
assert_state_dict_equal(self, state_dict_to_load_to, state_dict_to_save)
# Load from file without any resharding
fs_reader = FileSystemReader(path=path)
load_state_dict(state_dict=state_dict_to_load_to, storage_reader=fs_reader, no_dist=True)
assert_state_dict_equal(self, state_dict_to_load_to, state_dict_to_save)
class TestDistributedStateDictSaveLoadWithSharedTensor(ShardedTensorTestBase):
@property
def world_size(self) -> int:
return 2
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(2)
@requires_nccl()
def test_read_write_shard_tensor(self) -> None:
paths = [tempfile.mkdtemp()]
dist.broadcast_object_list(paths)
path = paths[0]
# pyre-fixme [28]: Unexpected keyword argument `dim` to call `dist._sharding_spec.api.ChunkShardingSpec.__init__`.
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
],
)
model_to_save = MyShardedModel1(spec, init_rrefs=False)
# Test save
model_to_save._register_state_dict_hook(state_dict_hook)
state_dict_to_save = model_to_save.state_dict()
fs_writer = FileSystemWriter(path=path)
save_state_dict(state_dict=state_dict_to_save, storage_writer=fs_writer)
dist.barrier()
# Create a new model
model_to_load = MyShardedModel1(spec, init_rrefs=False)
# This is not the correct hook for loading the state dict
# model_to_load._register_load_state_dict_pre_hook(pre_load_state_dict_hook, True)
model_to_load._register_state_dict_hook(state_dict_hook)
state_dict_to_load_to = model_to_load.state_dict()
dist.barrier()
with self.assertRaises(AssertionError):
assert_state_dict_equal(self, state_dict_to_load_to, state_dict_to_save)
# Test load.
fs_reader = FileSystemReader(path=path)
load_state_dict(state_dict=state_dict_to_load_to, storage_reader=fs_reader)
assert_state_dict_equal(self, state_dict_to_load_to, state_dict_to_save)
dist.barrier()
class TestDistributedReshardOnLoad(ShardedTensorTestBase):
@property
def world_size(self) -> int:
return 2
def get_file_path(self) -> str:
paths = [tempfile.mkdtemp()] if dist.get_rank() == 0 else [None]
dist.broadcast_object_list(paths)
return paths[0]
def load_tensor(self, tensor: ShardedTensor) -> torch.Tensor:
res = torch.zeros(tensor.shape, device="cuda:0") if dist.get_rank() == 0 else None
tensor.gather(out=res)
return res
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(2)
@requires_nccl()
def test_load_with_different_shard_plan(self) -> None:
path = self.get_file_path()
# We hardcode the assumption of how many shards are around
self.assertEqual(self.world_size, dist.get_world_size())
specs = [
# pyre-fixme [28]: Unexpected keyword argument `dim` to call `dist._sharding_spec.api.ChunkShardingSpec.__init__`.
ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
],
),
# pyre-fixme [28]: Unexpected keyword argument `dim` to call `dist._sharding_spec.api.ChunkShardingSpec.__init__`.
ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:1/cuda:1",
"rank:0/cuda:0",
],
),
# This requires the tensors to be [10, 20]
EnumerableShardingSpec(
shards=[
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[2, 20],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[2, 0],
shard_sizes=[1, 20],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[3, 0],
shard_sizes=[3, 20],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[6, 0],
shard_sizes=[3, 20],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[9, 0],
shard_sizes=[1, 20],
placement="rank:0/cuda:0",
),
]
),
# This requires the tensors to be [10, 20]
EnumerableShardingSpec(
shards=[
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[8, 20],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[8, 0],
shard_sizes=[2, 20],
placement="rank:0/cuda:0",
),
]
),
]
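# Save under spec s0 and reload under a different spec s1; the gathered tensors must match afterwards.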
for s0 in specs:
for s1 in specs:
if s0 == s1:
continue
if dist.get_rank() == 0:
shutil.rmtree(path, ignore_errors=True)
os.makedirs(path)
dist.barrier()
model_to_save = MyShardedModel3(s0)
model_to_save._register_state_dict_hook(state_dict_hook)
state_dict_to_save = model_to_save.state_dict()
fs_writer = FileSystemWriter(path=path)
save_state_dict(state_dict=state_dict_to_save, storage_writer=fs_writer)
dist.barrier()
model_to_load = MyShardedModel3(s1)
model_to_load._register_state_dict_hook(state_dict_hook)
state_dict_to_load_to = model_to_load.state_dict()
dist.barrier()
fs_reader = FileSystemReader(path=path)
load_state_dict(
state_dict=state_dict_to_load_to, storage_reader=fs_reader
)
dist.barrier()
store_tensor = self.load_tensor(model_to_save.sharded_tensor)
dist.barrier()
load_tensor = self.load_tensor(model_to_load.sharded_tensor)
if dist.get_rank() == 0:
self.assertTrue(
torch.allclose(store_tensor, load_tensor), msg=f"{s0} vs {s1}"
)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(2)
@requires_nccl()
def test_load_rowwise_to_colwise(self) -> None:
path = self.get_file_path()
self.assertEqual(self.world_size, dist.get_world_size())
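# Save with a row-wise (dim=0) chunk spec and reload into a column-wise (dim=1) chunk spec.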
# pyre-fixme [28]: Unexpected keyword argument `dim` to call `dist._sharding_spec.api.ChunkShardingSpec.__init__`.
src_spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
],
)
# pyre-fixme [28]: Unexpected keyword argument `dim` to call `dist._sharding_spec.api.ChunkShardingSpec.__init__`.
dst_spec = ChunkShardingSpec(
dim=1,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
],
)
if dist.get_rank() == 0:
shutil.rmtree(path, ignore_errors=True)
os.makedirs(path)
model_to_save = MyShardedModel3(src_spec).cuda(dist.get_rank())
model_to_save._register_state_dict_hook(state_dict_hook)
state_dict_to_save = model_to_save.state_dict()
fs_writer = FileSystemWriter(path=path)
save_state_dict(state_dict=state_dict_to_save, storage_writer=fs_writer)
model_to_load = MyShardedModel3(dst_spec).cuda(dist.get_rank())
model_to_load._register_state_dict_hook(state_dict_hook)
state_dict_to_load_to = model_to_load.state_dict()
fs_reader = FileSystemReader(path=path)
load_state_dict(state_dict=state_dict_to_load_to, storage_reader=fs_reader)
# We can't use torch.allclose since each ST has a different sharding spec
store_tensor = self.load_tensor(model_to_save.sharded_tensor)
load_tensor = self.load_tensor(model_to_load.sharded_tensor)
if dist.get_rank() == 0:
self.assertTrue(torch.allclose(store_tensor, load_tensor))
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(2)
@requires_nccl()
def test_save_load_bytes(self) -> None:
path = self.get_file_path()
state_dict_to_save = {
'bytes0': [1],
'bytes1': 'string'
}
fs_writer = FileSystemWriter(path=path)
save_state_dict(state_dict=state_dict_to_save, storage_writer=fs_writer)
state_dict_to_load = {
'bytes0': [2],
'bytes1': 'other'
}
fs_reader = FileSystemReader(path=path)
load_state_dict(state_dict=state_dict_to_load, storage_reader=fs_reader)
self.assertEqual([1], state_dict_to_load['bytes0'])
self.assertEqual('string', state_dict_to_load['bytes1'])
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(2)
@requires_nccl()
def test_switch_between_sharded_tensor_to_tensor(self) -> None:
path = self.get_file_path()
tensor_size = 32
specs = [
ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
],
),
ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:1/cuda:1",
"rank:0/cuda:0",
],
),
EnumerableShardingSpec(
shards=[
ShardMetadata(
shard_offsets=[0],
shard_sizes=[8],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[8],
shard_sizes=[tensor_size - 8],
placement="rank:0/cuda:0",
),
]
),
EnumerableShardingSpec(
shards=[
ShardMetadata(
shard_offsets=[0],
shard_sizes=[10],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[10],
shard_sizes=[tensor_size - 10],
placement="rank:1/cuda:1",
),
]
),
]
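# For each pair of specs, save one sharded and one replicated tensor, then load them back with the two kinds swapped.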
for save_spec in specs:
for load_spec in specs:
save_dict = {
'sharded': sharded_tensor.rand(save_spec, tensor_size),
'replicated': torch.rand(tensor_size, device=self.rank)
}
fs_writer = FileSystemWriter(path=path)
save_state_dict(state_dict=save_dict, storage_writer=fs_writer)
# "Freaky Friday" the tensors: swap which entry is sharded and which is a plain tensor between save and load
load_dict = {
'sharded': torch.zeros(tensor_size, device=self.rank),
'replicated': sharded_tensor.zeros(load_spec, tensor_size)
}
fs_reader = FileSystemReader(path=path)
load_state_dict(state_dict=load_dict, storage_reader=fs_reader)
save_dict_sharded = self.load_tensor(save_dict['sharded'])
load_dict_replicated = self.load_tensor(load_dict['replicated'])
if dist.get_rank() == 0:
self.assertTrue(
torch.allclose(save_dict_sharded, load_dict['sharded']),
f"save-spec {save_spec} load-spec {load_spec}"
)
self.assertTrue(
torch.allclose(save_dict['replicated'], load_dict_replicated),
f"save-spec {save_spec} load-spec {load_spec}"
)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/_shard/checkpoint/test_file_system_checkpoint.py
|
# Owner(s): ["oncall: distributed"]
import random
import sys
from typing import Optional, List, Union, cast
from torch.distributed._shard.checkpoint import (
StorageReader,
StorageWriter,
CheckpointException,
load_state_dict,
save_state_dict,
)
import torch
import torch.distributed as dist
import torch.nn
import torch.futures
from torch.futures import Future
from torch.testing._internal.common_utils import TestCase
from torch.distributed._shard.checkpoint.resharding import (
_prepare_sharded_tensor_write,
_create_storage_key
)
from torch.distributed._shard import sharded_tensor
from torch.distributed._shard.checkpoint.state_dict_saver import (
_prepare,
)
from torch.distributed._shard.checkpoint.metadata import (
Metadata,
BytesReadRequest,
BytesWriteRequest,
MetadataIndex,
TensorReadRequest,
TensorWriteRequest,
)
from torch.distributed._shard.sharded_tensor import (
state_dict_hook,
ShardedTensor,
)
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.sharded: ShardedTensor = sharded_tensor.zeros(self.spec(), 4, 4)
self.regular = torch.nn.Parameter(torch.ones(4, 4))
self.extra_sharded: Optional[ShardedTensor] = None
self.extra_param: Optional[torch.nn.Parameter] = None
self._register_state_dict_hook(state_dict_hook)
def spec(self) -> ChunkShardingSpec:
# pyre-fixme [28]: Unexpected keyword argument `dim` to call `dist._sharding_spec.api.ChunkShardingSpec.__init__`.
return ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
],
)
class TestDistributedCheckpointing(ShardedTensorTestBase):
@property
def world_size(self) -> int:
return 2
def gen_metadata(self) -> Metadata:
module = TestModule()
# compute the default saved metadata (must pass write_replicated_data=True or we get incomplete metadata)
metadata, _, _ = _prepare(module.state_dict(), True)
# _prepare only produces the metadata locally; broadcast rank 0's copy so every rank sees the same Metadata
metadata = [metadata]
dist.broadcast_object_list(metadata)
return metadata[0]
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(2)
@requires_nccl()
def test_tensor_metadata_with_missing_rank_spec(self) -> None:
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:1/cuda:1",
],
)
st = sharded_tensor.zeros(spec, 4, 4, dtype=torch.float64)
mapping = dict()
(_, md, storage_md) = _prepare_sharded_tensor_write("fqn", st, "tensor", mapping)
self.assertEqual(1, len(storage_md))
self.assertEqual(1, len(mapping))
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(2)
@requires_nccl()
def test_storage_key_mapping(self) -> None:
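# Rank 0 (the coordinator) also writes the replicated tensor and the bytes entry; every rank writes its own shard.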
device = f"cuda:{dist.get_rank()}"
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
],
)
state_dict = {
'sharded': sharded_tensor.rand(spec, (10, 10, )),
'replicated': torch.rand(4, device=device),
'bytes': [1, 2, 3, 4],
}
metadata, bytes_reqs, tensor_reqs = _prepare(state_dict, write_replicated_data=self.rank == 0)
if self.rank == 0:
self.assertEqual(1, len(bytes_reqs))
self.assertEqual(2, len(tensor_reqs))
self.assertTrue('bytes' in metadata.state_dict_metadata)
self.assertTrue(MetadataIndex('bytes') in metadata.storage_data)
# tensor ordering is unspecified
if len(tensor_reqs[0].tensor.size()) == 1:
replicated = tensor_reqs[0]
shard = tensor_reqs[1]
else:
replicated = tensor_reqs[1]
shard = tensor_reqs[0]
self.assertTrue('replicated' in metadata.state_dict_metadata)
storage_key = MetadataIndex('replicated', torch.Size([0]))
self.assertTrue(storage_key in metadata.storage_data)
self.assertEqual(metadata.storage_data[storage_key], replicated.storage_key)
else:
self.assertEqual(0, len(bytes_reqs))
self.assertEqual(1, len(tensor_reqs))
shard = tensor_reqs[0]
local_shard = state_dict["sharded"].local_shards()[0]
self.assertTrue('sharded' in metadata.state_dict_metadata)
storage_key = MetadataIndex('sharded', torch.Size(local_shard.metadata.shard_offsets))
self.assertTrue(storage_key in metadata.storage_data)
self.assertEqual(metadata.storage_data[storage_key], shard.storage_key)
class TestStorageKeys(TestCase):
def test_create_key_handles_collision(self):
keys = dict()
key0 = _create_storage_key(keys, "foo")
key1 = _create_storage_key(keys, "foo")
self.assertNotEqual(key0, key1)
class TestStorageBase:
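# Base class for the faulty storage stubs: fail_conf maps a failure-point name to the ranks that should raise there.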
def __init__(
self,
fail_conf
):
self.fail_conf = fail_conf
self.rank = 0 if not dist.is_initialized() else dist.get_rank()
def _get_ranks(self, name):
return self.fail_conf[name] if name in self.fail_conf else None
def _fail_rank(self, name):
ranks = self._get_ranks(name)
if ranks is not None and self.rank in ranks:
raise ValueError(f"rank fail {self.rank} for {name}")
def _fail_rank_async(self, name):
ranks = self._get_ranks(name)
fut = Future()
if ranks is not None and self.rank in ranks:
fut.set_exception(ValueError(f"async rank fail {self.rank} for {name}"))
else:
fut.set_result(None)
return fut
class FaultyStorageWriter(TestStorageBase, StorageWriter):
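# StorageWriter that raises synchronously or via a failed Future at the configured points of the write path.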
def __init__(
self,
fail_conf
):
super(FaultyStorageWriter, self).__init__(fail_conf)
def prepare(self) -> None:
self._fail_rank("fail_prepare")
def write_bytes(self, requests: List[BytesWriteRequest]) -> Future[None]:
self._fail_rank("fail_write_bytes_on_ranks")
return self._fail_rank_async("fail_write_bytes_on_ranks_async")
def write_tensors(self, requests: List[TensorWriteRequest]) -> Future[None]:
self._fail_rank("fail_write_tensors_on_ranks")
return self._fail_rank_async("fail_write_tensors_on_ranks_async")
def finish(self, metadata: Metadata) -> None:
self._fail_rank("fail_finish")
def prepare_storage(self, storage_writes: List[Union[TensorWriteRequest, BytesWriteRequest]]) -> None:
self._fail_rank("fail_prepare_storage")
class FaultyStorageReader(TestStorageBase, StorageReader):
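# StorageReader that can fail at the configured points and can also corrupt the bytes it returns.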
def __init__(
self,
metadata,
fail_conf
):
super(FaultyStorageReader, self).__init__(fail_conf)
self.metadata = metadata
def read_bytes(self, requests: List[BytesReadRequest]) -> Future[None]:
self._fail_rank("fail_read_bytes")
bad_ranks = self._get_ranks("fail_deser_bytes")
for r in requests:
if bad_ranks is not None and self.rank in bad_ranks:
# this is not "guaranteed" to fail, but hard to beat
rand = random.Random(1237)
r.bytes.write(rand.randbytes(32))
else:
torch.save([1, 2, 3], r.bytes)
return self._fail_rank_async("fail_read_bytes_async")
def read_tensors(self, requests: List[TensorReadRequest]) -> Future[None]:
self._fail_rank("fail_read_tensors")
return self._fail_rank_async("fail_read_tensors_async")
def read_metadata(self) -> Metadata:
self._fail_rank("fail_read_metadata")
return self.metadata
class TestDistributedFailure(ShardedTensorTestBase):
def get_spec(self):
return ChunkShardingSpec(
dim=0,
placements=[
f"rank:{r}/cuda:{r}" for r in range(dist.get_world_size())
]
)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(2)
@requires_nccl()
def test_dummy_writer_works(self) -> None:
state_dict = {
'sharded': sharded_tensor.rand(self.get_spec(), 20, 20),
'replicated': torch.rand(10, 10),
'bytes': [1, 2, 3, 4]
}
save_state_dict(state_dict, FaultyStorageWriter({}))
def _test_dist_failure(self, callback, kwargs):
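# Run the callback; if any rank is configured to fail, expect a CheckpointException and verify the per-rank failures.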
bad_ranks = list(kwargs.values())[0] if len(kwargs) > 0 else []
# Empty bad_ranks means it must work
if len(bad_ranks) == 0:
callback()
else:
with self.assertRaises(CheckpointException) as cm:
callback()
e = cast(CheckpointException, cm.exception)
for rank, wrapped_ex in e.failures.items():
ex = wrapped_ex[0]
self.assertTrue(rank in bad_ranks, msg=f"{rank} failed but was not expected to")
if not kwargs.get("ignore_exception_type", False):
self.assertEqual(ValueError, type(ex), str(ex))
failed_ranks = e.failures.keys()
for rank in bad_ranks:
self.assertTrue(rank in failed_ranks, msg=f"{rank} was supposed to fail but succeeded")
def _test_save(self, state_dict, coordinator=0, **kwargs):
no_dist = not dist.is_initialized()
def _save():
save_state_dict(
state_dict,
storage_writer=FaultyStorageWriter(kwargs),
coordinator_rank=coordinator,
no_dist=no_dist,
)
self._test_dist_failure(_save, kwargs)
def _test_load(self, state_dict, coordinator=0, **kwargs):
no_dist = not dist.is_initialized()
write_replicated = dist.is_initialized() and dist.get_rank() == coordinator
def _load():
metadata, _, _ = _prepare(state_dict, write_replicated)
load_state_dict(
state_dict,
storage_reader=FaultyStorageReader(metadata, kwargs),
coordinator_rank=coordinator,
no_dist=no_dist,
)
self._test_dist_failure(_load, kwargs)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_save_error_handling(self) -> None:
state_dict = {
'sharded': sharded_tensor.rand(self.get_spec(), 20, 20),
'replicated': torch.rand(10, 10),
'bytes': [1, 2, 3, 4]
}
self._test_save(state_dict, fail_prepare=[0])
self._test_save(state_dict, fail_finish=[0])
self._test_save(state_dict, fail_prepare_storage=[0])
self._test_save(state_dict, fail_write_tensors_on_ranks=[1])
self._test_save(state_dict, fail_write_tensors_on_ranks_async=[2])
self._test_save(state_dict, fail_write_bytes_on_ranks=[3])
self._test_save(state_dict, fail_write_bytes_on_ranks_async=[1])
self._test_save(state_dict, fail_write_tensors_on_ranks_async=[1, 3])
self._test_save(state_dict, coordinator=1, fail_prepare=[1])
self._test_save(state_dict, coordinator=1, fail_finish=[1])
def test_save_error_handling_no_dist(self) -> None:
state_dict = {
'replicated': torch.rand(10, 10),
'bytes': [1, 2, 3, 4]
}
self.assertFalse(dist.is_initialized())
self._test_save(state_dict, fail_prepare=[0])
self._test_save(state_dict, fail_finish=[0])
self._test_save(state_dict, fail_prepare_storage=[0])
self._test_save(state_dict, fail_write_tensors_on_ranks=[0])
self._test_save(state_dict, fail_write_tensors_on_ranks_async=[0])
self._test_save(state_dict, fail_write_bytes_on_ranks=[0])
self._test_save(state_dict, fail_write_bytes_on_ranks_async=[0])
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_load_error_handling(self) -> None:
state_dict = {
'sharded': sharded_tensor.rand(self.get_spec(), 20, 20),
'replicated': torch.rand(10, 10),
'bytes': [1, 2, 3, 4]
}
self._test_load(state_dict)
self._test_load(state_dict, fail_read_metadata=[0])
self._test_load(state_dict, fail_read_bytes=[1])
self._test_load(state_dict, fail_read_bytes_async=[2])
self._test_load(state_dict, fail_read_tensors=[3])
self._test_load(state_dict, fail_read_tensors_async=[1])
# We don't want to depend on the actual exception raised by pickle
self._test_load(state_dict, fail_deser_bytes=[2], ignore_exception_type=True)
self._test_load(state_dict, coordinator=1, fail_read_metadata=[3])
self._test_load(state_dict, coordinator=2, fail_read_bytes=[0])
self._test_load(state_dict, coordinator=3, fail_read_tensors_async=[2])
def test_load_error_handling_no_dist(self) -> None:
state_dict = {
'replicated': torch.rand(10, 10),
'bytes': [1, 2, 3, 4]
}
self._test_load(state_dict)
self._test_load(state_dict, fail_read_metadata=[0])
self._test_load(state_dict, fail_read_bytes=[0])
self._test_load(state_dict, fail_read_bytes_async=[0])
self._test_load(state_dict, fail_read_tensors=[0])
self._test_load(state_dict, fail_read_tensors_async=[0])
self._test_load(state_dict, fail_deser_bytes=[0], ignore_exception_type=True)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/_shard/checkpoint/test_checkpoint.py
|
# Owner(s): ["oncall: distributed"]
import sys
from itertools import product
import torch
from torch.distributed._shard import (
sharded_tensor,
_shard_tensor,
)
from torch.distributed._shard.sharding_spec import (
EnumerableShardingSpec,
ShardMetadata,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
_chunk_sharding_specs_list_for_test,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestReshard(ShardedTensorTestBase):
def _run_sharded_tensor_reshard(self, sharding_spec, reshard_spec, input_size):
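# Reshard one ShardedTensor in place and build a reference directly with the target spec, then compare shards and metadata.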
torch.manual_seed(0)
local_tensor = torch.rand(*input_size).cuda(self.rank)
st = _shard_tensor(local_tensor, sharding_spec)
st_compare = _shard_tensor(local_tensor, reshard_spec)
st.reshard(reshard_spec)
self.assertEqual(1, len(st.local_shards()))
self.assertEqual(1, len(st_compare.local_shards()))
st_compare._metadata.shards_metadata.sort(key=lambda metadata: metadata.placement.rank())
self.assertEqual(st._metadata, st_compare._metadata)
self.assertEqual(st.local_tensor(), st_compare.local_tensor())
self.assertEqual(
st.local_shards()[0].metadata, st_compare.local_shards()[0].metadata
)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharded_tensor_reshard(self):
dims = [0, 1]
for sharding_dim, reshard_dim in product(dims, dims):
specs = _chunk_sharding_specs_list_for_test(
[sharding_dim, reshard_dim], seed=5
)
spec, reshard_spec = specs[0], specs[1]
self._run_sharded_tensor_reshard(spec, reshard_spec, [13, 21])
self._run_sharded_tensor_reshard(spec, reshard_spec, [14, 23])
self._run_sharded_tensor_reshard(spec, reshard_spec, [15, 26])
self._run_sharded_tensor_reshard(spec, reshard_spec, [12, 24])
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharded_tensor_reshard_errors(self):
specs = _chunk_sharding_specs_list_for_test([0, 1], seed=6)
spec, reshard_spec = specs[0], specs[1]
enumerable_sharding_spec = EnumerableShardingSpec(
[
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
]
)
st = sharded_tensor.rand(spec, 24, 12)
with self.assertRaisesRegex(
NotImplementedError, "Only ChunkShardingSpec supported for reshard."
):
st.reshard(enumerable_sharding_spec)
st._local_shards = [st.local_shards()[0], st.local_shards()[0]]
with self.assertRaisesRegex(
NotImplementedError, "Only single local shard supported for reshard."
):
st.reshard(reshard_spec)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/_shard/sharded_tensor/test_sharded_tensor_reshard.py
|
# Owner(s): ["oncall: distributed"]
import copy
import math
import io
import itertools
import pickle
import sys
import torch
import torch.distributed as dist
from torch.distributed import rpc
from torch.distributed import distributed_c10d
from torch.distributed._shard import sharded_tensor
from torch.distributed._shard.api import (
shard_parameter,
_shard_tensor,
load_with_process_group,
_collect_local_shard,
_reshard_output,
)
from torch.distributed._shard.sharded_tensor import (
custom_sharded_op_impl,
pre_load_state_dict_hook,
state_dict_hook,
ShardedTensor,
Shard
)
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
EnumerableShardingSpec,
ShardMetadata,
)
from torch.distributed._shard.sharded_tensor.utils import (
_parse_and_validate_remote_device
)
from torch.distributed._shard.sharded_tensor.api import (
TensorProperties,
_create_tensor_from_params,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
tp_transports,
)
from torch.testing._internal.common_utils import (
TestCase,
TEST_WITH_DEV_DBG_ASAN,
run_tests,
sandcastle_skip_if,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
from torch.distributed.remote_device import _remote_device
from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
_chunk_sharding_specs_list_for_test,
MyShardedModel1,
)
if TEST_WITH_DEV_DBG_ASAN:
print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
sys.exit(0)
class TestShardedTensorMetadata(TestCase):
def test_serialize_and_deserialize(self):
shard_metadatas = [
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[0, 5],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:2/cuda:2",
),
ShardMetadata(
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="rank:3/cuda:3",
)
]
dtypes = [
torch.float, torch.double, torch.cfloat, torch.cdouble, torch.half,
torch.bfloat16, torch.uint8, torch.int8, torch.short, torch.int,
torch.long, torch.bool]
layouts = [torch.strided, torch.sparse_coo]
requires_grads = [True, False]
memory_formats = [torch.contiguous_format, torch.channels_last, torch.preserve_format]
pin_memories = [True, False]
for tensor_properties_input in itertools.product(dtypes, layouts, requires_grads, memory_formats, pin_memories):
dtype, layout, requires_grad, memory_format, pin_memory = tensor_properties_input
expected_st_metadata = sharded_tensor.ShardedTensorMetadata(
shard_metadatas,
(10, 10),
TensorProperties(dtype, layout, requires_grad, memory_format, pin_memory)
)
pickled_obj = pickle.dumps(expected_st_metadata)
st_metadata = pickle.loads(pickled_obj)
self.assertEqual(expected_st_metadata, st_metadata)
class TestCreateTensorFromParams(TestCase):
@sandcastle_skip_if(torch.cuda.device_count() < 1, 'CUDA GPU is needed')
def test_empty(self):
expected_dtype = torch.double
tensor_properties = TensorProperties(
dtype=expected_dtype,
layout=torch.strided,
requires_grad=False,
pin_memory=False,
memory_format=torch.contiguous_format)
local_device = torch.device('cuda:0')
local_tensor = _create_tensor_from_params(
5, 10, local_device=local_device, tensor_properties=tensor_properties)
self.assertEqual(local_device, local_tensor.device)
self.assertEqual(expected_dtype, local_tensor.dtype)
self.assertEqual(torch.strided, local_tensor.layout)
self.assertEqual(False, local_tensor.requires_grad)
class TestShardParameter(ShardedTensorTestBase):
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_shard_parameter(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
fc = torch.nn.Linear(12, 12).cuda(self.rank)
weight_og = fc.weight.clone()
shard_parameter(fc, 'weight', spec)
# Verify.
self.assertTrue(isinstance(fc.weight, ShardedTensor))
local_shards = fc.weight.local_shards()
self.assertEqual(1, len(local_shards))
self.assertEqual(torch.Size([3, 12]), local_shards[0].tensor.size())
self.assertEqual(3, local_shards[0].tensor.size(0))
self.assertEqual(12, local_shards[0].tensor.size(1))
self.assertEqual(torch.narrow(weight_og, 0, 3 * self.rank, 3), local_shards[0].tensor)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_shard_parameter_errors(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
fc = torch.nn.Linear(12, 12).cuda(self.rank)
with self.assertRaisesRegex(ValueError, 'does not match with src_rank'):
shard_parameter(fc, 'weight', spec, src_rank=self.rank)
with self.assertRaisesRegex(AttributeError, 'has no attribute'):
shard_parameter(fc, 'foo', spec)
with self.assertRaisesRegex(ValueError, 'Expected Linear.bias to be a Tensor, but found str'):
del fc.bias
fc.bias = "foo"
shard_parameter(fc, 'bias', spec)
with self.assertRaisesRegex(ValueError, 'not a contiguous Tensor'):
fc.bias = torch.rand(10, 10).cuda(self.rank).t()
shard_parameter(fc, 'bias', spec)
spec = ChunkShardingSpec(
dim=0,
placements=[
f"rank:{self.rank}/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
with self.assertRaisesRegex(ValueError, 'does not match with sharding_spec'):
shard_parameter(fc, 'weight', spec)
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
])
with self.assertRaisesRegex(NotImplementedError, 'not implemented yet!'):
shard_parameter(fc, 'weight', spec)
class TestShardTensor(ShardedTensorTestBase):
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_shard_tensor(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
tensor = torch.rand(12, 12).cuda(self.rank)
st = _shard_tensor(tensor, spec)
# Verify.
self.assertTrue(isinstance(st, sharded_tensor.ShardedTensor))
local_shard = st.local_tensor()
self.assertEqual(1, len(st.local_shards()))
self.assertEqual(torch.Size([3, 12]), local_shard.size())
self.assertEqual(torch.narrow(tensor, 0, 3 * self.rank, 3), local_shard)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_shard_tensor_errors(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
tensor = torch.rand(12, 12).cuda(self.rank)
with self.assertRaisesRegex(ValueError, 'does not match with src_rank'):
_shard_tensor(tensor, spec, src_rank=self.rank)
with self.assertRaisesRegex(ValueError, 'not a contiguous Tensor'):
tensor_t = torch.rand(12, 12).cuda(self.rank).t()
_shard_tensor(tensor_t, spec)
spec = ChunkShardingSpec(
dim=0,
placements=[
f"rank:{self.rank}/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
with self.assertRaisesRegex(ValueError, 'does not match with sharding_spec'):
_shard_tensor(tensor, spec)
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
])
with self.assertRaisesRegex(
NotImplementedError, 'not implemented yet!'
):
_shard_tensor(tensor, spec)
class TestModuleHookApi(ShardedTensorTestBase):
class DummyNNModule(torch.nn.Module):
def __init__(self, spec, tensor_size):
super().__init__()
self.st = sharded_tensor.rand(spec, *tensor_size)
def forward(self):
return self.st
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_reshard_output(self):
specs = _chunk_sharding_specs_list_for_test([0, 1], seed=5)
spec, reshard_spec = specs[0], specs[1]
test_module = self.DummyNNModule(spec, [24, 12])
st = test_module()
local_shard = st.local_tensor()
pg = dist.distributed_c10d._get_default_group()
st_compare = ShardedTensor._init_from_local_shards(
copy.deepcopy(st.local_shards()),
st.size(),
process_group=pg,
)
st_compare._sharding_spec = copy.deepcopy(spec)
st_compare.reshard(reshard_spec)
test_module = _reshard_output(test_module, reshard_spec)
st = test_module()
local_shard = st.local_tensor()
local_shard_compare = st_compare.local_tensor()
self.assertEqual(local_shard, local_shard_compare)
self.assertEqual(local_shard.size(0), 24)
self.assertEqual(local_shard.size(1), 3)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_collect_local_shard(self):
specs = _chunk_sharding_specs_list_for_test([0], seed=5)
spec = specs[0]
test_module = self.DummyNNModule(spec, [23, 15])
st = test_module()
local_shard = st.local_tensor()
test_module = _collect_local_shard(test_module)
output = test_module()
self.assertTrue(isinstance(output, torch.Tensor))
self.assertEqual(local_shard, output)
class TestLocalTensor(ShardedTensorTestBase):
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_local_tensor(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.rand(spec, 24, 12)
local_shard = st.local_tensor()
self.assertEqual(torch.Size([6, 12]), local_shard.size())
self.assertEqual(st.local_tensor(), local_shard)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_local_tensor_error(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:1/cuda:1",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:2/cuda:2",
"rank:2/cuda:2",
"rank:3/cuda:3",
"rank:3/cuda:3",
],
)
st = sharded_tensor.rand(spec, 24, 12)
with self.assertRaisesRegex(
NotImplementedError, "Only single local shard is supported."
):
local_shard = st.local_tensor()
class TestShardedTensorChunked(ShardedTensorTestBase):
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharded_tensor_metadata(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
st_metadata = st.metadata()
self.assertEqual(torch.Size([10, 20]), st_metadata.size)
self.assertEqual(torch.Size([10, 20]), st.size())
self.assertEqual(torch.float, st.dtype)
self.assertEqual(torch.strided, st.layout)
self.assertEqual(False, st.requires_grad)
self.assertTrue(st.is_contiguous())
self.assertFalse(st.is_pinned())
st = sharded_tensor.empty(spec, 10, 20, requires_grad=True, init_rrefs=True)
self.assertEqual(True, st.requires_grad)
st = sharded_tensor.empty(spec, 10, 20, dtype=torch.double, init_rrefs=True)
self.assertEqual(torch.double, st.dtype)
# Need CPU for pin_memory
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cpu",
"rank:1/cpu",
"rank:2/cpu",
"rank:3/cpu",
],
)
st = sharded_tensor.empty(spec, 10, 20, pin_memory=True, init_rrefs=True)
self.assertEqual(True, st.is_pinned())
# test read only properties, they're read only as we can't simply change
# the global metadata without changing the underlying shard's properties
with self.assertRaisesRegex(RuntimeError, "torch function '__set__'"):
st.requires_grad = True
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_complete_world_size(self):
for dim in [0, -2]:
spec = ChunkShardingSpec(
dim=dim,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
# Validate local shard.
local_shards = st.local_shards()
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
if self.rank == 3:
self.assertEqual((1, 20), local_shard.size())
else:
self.assertEqual((3, 20), local_shard.size())
# Validate global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([rank * 3, 0], shard_metadata.shard_offsets)
if rank == 3:
self.assertEqual([1, 20], shard_metadata.shard_sizes)
else:
self.assertEqual([3, 20], shard_metadata.shard_sizes)
self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
self.assertEqual(3, len(remote_shards))
for rpc_rank, shards in remote_shards.items():
self.assertEqual(1, len(shards))
for remote_shard in shards:
self.assertEqual(rpc_rank, remote_shard.owner().id)
shard = remote_shard.to_here()
self.assertEqual(f'rank:{rpc_rank}/cuda:{rpc_rank}', str(shard.metadata.placement))
if rpc_rank == 3:
self.assertEqual((1, 20), shard.tensor.size())
else:
self.assertEqual((3, 20), shard.tensor.size())
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_ones(self):
""" Test sharded_tensor.ones(...) """
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 10, 20
st = sharded_tensor.ones(spec, h, w)
# Validate local shard is initialized with torch.ones
local_shards = st.local_shards()
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
# The split along dim 0: ranks 0-2 each get ceil(h/4)=3 rows, rank 3 gets the remaining 1 row
expected_h = 1 if self.rank == 3 else math.ceil(h / 4)
self.assertEqual((expected_h, w), local_shard.size())
self.assertEqual(local_shard, torch.ones(expected_h, w))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_gather_even(self) -> None:
""" Test _sharded_tensor.gather(...) with evenly distributed._shards"""
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 10, 20
st = sharded_tensor.ones(spec, h, w)
full_tensor = None
dst = 1
if self.rank == dst:
full_tensor = torch.zeros(
h,
w,
device=torch.device(f"cuda:{dst}"),
)
st.gather(dst, full_tensor)
if self.rank == dst:
self.assertEqual(full_tensor, torch.ones(h, w))
else:
self.assertIsNone(full_tensor)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_gather_uneven(self) -> None:
""" Test _sharded_tensor.gather(...) with unevenly distributed._shards"""
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:1/cuda:1",
"rank:2/cuda:2",
],
)
h, w = 10, 20
st = sharded_tensor.ones(spec, h, w)
full_tensor = None
dst = 1
if self.rank == dst:
full_tensor = torch.zeros(
h,
w,
device=torch.device(f"cuda:{dst}"),
)
st.gather(dst, full_tensor)
if self.rank == dst:
self.assertEqual(full_tensor, torch.ones(h, w))
else:
self.assertIsNone(full_tensor)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_zeros(self):
""" Test sharded_tensor.zeros(...) """
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 10, 20
st = sharded_tensor.zeros(spec, h, w)
# Validate local shard is initialized with torch.zeros
local_shards = st.local_shards()
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
# The split along dim 0: ranks 0-2 each get ceil(h/4)=3 rows, rank 3 gets the remaining 1 row
expected_h = 1 if self.rank == 3 else math.ceil(h / 4)
self.assertEqual((expected_h, w), local_shard.size())
self.assertEqual(local_shard, torch.zeros(expected_h, w))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_rand(self):
""" Test sharded_tensor.rand(...)/randn(...) """
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 8, 2
seed = 1234
expected_h = 2
expected_device = torch.device(f"cuda:{self.rank}")
dtype = torch.double
torch.manual_seed(seed)
# Test sharded_tensor.rand creation
expected = torch.rand(expected_h, w, device=expected_device, dtype=dtype)
# reset seed to ensure the same random numbers are generated
torch.manual_seed(seed)
st = sharded_tensor.rand(spec, h, w, dtype=dtype)
# Validate local shard is initialized with torch.rand
local_shards = st.local_shards()
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(expected_device, local_shard.device)
self.assertEqual((expected_h, w), local_shard.size())
self.assertEqual(expected, local_shard)
# Test sharded_tensor.randn creation
torch.manual_seed(seed)
expected_randn = torch.randn(expected_h, w, device=expected_device, dtype=dtype)
# reset seed to ensure the same random numbers are generated
torch.manual_seed(seed)
st_randn = sharded_tensor.randn(spec, h, w, dtype=dtype)
# Validate local shard is initialized with torch.randn
local_shards = st_randn.local_shards()
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(expected_device, local_shard.device)
self.assertEqual((expected_h, w), local_shard.size())
self.assertEqual(expected_randn, local_shard)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_full(self):
""" Test sharded_tensor.full(...) """
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 10, 20
fill_value = 1234
st = sharded_tensor.full(spec, size=(h, w), fill_value=fill_value, dtype=torch.int32)
# Validate local shard is initialized with torch.full
local_shards = st.local_shards()
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
# The split along dim 0: ranks 0-2 each get ceil(h/4)=3 rows, rank 3 gets the remaining 1 row
expected_h = 1 if self.rank == 3 else math.ceil(h / 4)
self.assertEqual((expected_h, w), local_shard.size())
self.assertEqual(local_shard,
torch.full(size=(expected_h, w), fill_value=fill_value, dtype=torch.int32))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_like(self):
""" Test tensor like methods, i.e. torch.zeros_like(...), torch.full_like, etc. """
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 8, 8
expected_h = 2
seed = 1234
dtype = torch.double
expected_device = torch.device(f"cuda:{self.rank}")
st = sharded_tensor.rand(spec, (h, w), dtype=dtype)
tensor_like_ops = {
torch.zeros_like: torch.zeros,
torch.ones_like: torch.ones,
torch.rand_like: torch.rand,
torch.randn_like: torch.randn,
torch.empty_like: torch.empty,
torch.full_like: torch.full
}
for op, expect_local_op in tensor_like_ops.items():
if op == torch.full_like:
# handle full/full_like specially since they need an additional fill_value arg
expect_tensor = expect_local_op((expected_h, w), 8.8, device=expected_device, dtype=dtype)
new_op_st = op(st, 8.8, dtype=dtype)
self.assertEqual(new_op_st.local_tensor(), expect_tensor)
elif op == torch.empty_like:
# for empty/empty_like we only compare the shape
expect_tensor = expect_local_op(expected_h, w, device=expected_device, dtype=dtype)
new_op_st = op(st, dtype=dtype)
self.assertEqual(new_op_st.local_tensor().shape, expect_tensor.shape)
else:
torch.manual_seed(seed)
expect_tensor = expect_local_op(expected_h, w, device=expected_device, dtype=dtype)
torch.manual_seed(seed)
new_op_st = op(st, dtype=dtype)
self.assertEqual(new_op_st.local_tensor(), expect_tensor)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_partial_world_size(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
# Validate local shard.
local_shards = st.local_shards()
if self.rank >= 2:
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
self.assertEqual((5, 20), local_shard.size())
else:
self.assertEqual(0, len(local_shards))
# Validate global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(2, len(shards_metadata))
for shard_rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_rank * 5, 0], shard_metadata.shard_offsets)
self.assertEqual([5, 20], shard_metadata.shard_sizes)
self.assertEqual(f'rank:{shard_rank + 2}/cuda:{shard_rank + 2}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
if self.rank >= 2:
self.assertEqual(1, len(remote_shards))
else:
self.assertEqual(2, len(remote_shards))
for rpc_rank, shards in remote_shards.items():
self.assertEqual(1, len(shards))
for remote_shard in shards:
self.assertEqual(rpc_rank, remote_shard.owner().id)
shard = remote_shard.to_here()
self.assertEqual(f'rank:{rpc_rank}/cuda:{rpc_rank}', str(shard.metadata.placement))
self.assertEqual((5, 20), shard.tensor.size())
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_new_group(self):
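# Placements use ranks within the new subgroup [1, 2, 3], so global ranks 2 and 3 end up holding the shards.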
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:1/cuda:2",
"rank:2/cuda:3",
],
)
pg = dist.new_group(ranks=[1, 2, 3])
st = sharded_tensor.empty(spec, 10, 20, process_group=pg, init_rrefs=True)
# Validate local shard.
local_shards = st.local_shards()
if self.rank >= 2:
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
self.assertEqual((5, 20), local_shard.size())
else:
self.assertEqual(0, len(local_shards))
# Validate global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(2, len(shards_metadata))
for shard_rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_rank * 5, 0], shard_metadata.shard_offsets)
self.assertEqual([5, 20], shard_metadata.shard_sizes)
self.assertEqual(f'rank:{shard_rank + 1}/cuda:{shard_rank + 2}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
if self.rank >= 2:
self.assertEqual(1, len(remote_shards))
else:
self.assertEqual(2, len(remote_shards))
for rpc_rank, shards in remote_shards.items():
self.assertEqual(1, len(shards))
for remote_shard in shards:
shard = remote_shard.to_here()
self.assertEqual(rpc_rank, remote_shard.owner().id)
self.assertEqual(f'rank:{rpc_rank - 1}/cuda:{rpc_rank}', str(shard.metadata.placement))
self.assertEqual((5, 20), shard.tensor.size())
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_multiple_local_shards(self):
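# Each rank appears twice in the placement list, so every rank owns two local shards of the 16x20 tensor.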
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.empty(spec, 16, 20, init_rrefs=True)
# Validate local shards.
local_shards = st.local_shards()
self.assertEqual(2, len(local_shards))
for local_shard in local_shards:
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.tensor.device)
self.assertEqual((2, 20), local_shard.tensor.size())
# Validate global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(8, len(shards_metadata))
for shard_idx, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_idx * 2, 0], shard_metadata.shard_offsets)
self.assertEqual([2, 20], shard_metadata.shard_sizes)
self.assertEqual(f'rank:{shard_idx % 4}/cuda:{shard_idx % 4}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
self.assertEqual(3, len(remote_shards))
owners = {}
for rpc_rank, shards in remote_shards.items():
self.assertEqual(2, len(shards))
for remote_shard in shards:
shard = remote_shard.to_here()
self.assertEqual((2, 20), shard.tensor.size())
self.assertEqual(rpc_rank, remote_shard.owner().id)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharding_columns(self):
self.init_pg()
for dim in [1, -1]:
spec = ChunkShardingSpec(
dim=dim,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.empty(spec, 10, 32)
# Validate local shard.
local_shards = st.local_shards()
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
self.assertEqual((10, 8), local_shard.size())
# Validate global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([0, rank * 8], shard_metadata.shard_offsets)
self.assertEqual([10, 8], shard_metadata.shard_sizes)
self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_invalid_sharding(self):
self.init_pg()
with self.assertRaisesRegex(NotImplementedError, 'does not support named dimension'):
spec = ChunkShardingSpec(dim='H', placements=["rank:1/cuda:1"])
sharded_tensor.empty(spec, 10, 20)
for dim in [2, 3, 4, -3, -4, -5]:
spec = ChunkShardingSpec(dim=dim, placements=["rank:1/cuda:1"])
with self.assertRaisesRegex(ValueError, 'Invalid sharding dim'):
sharded_tensor.empty(spec, 10, 20)
spec = ChunkShardingSpec(dim=0, placements=["rank:5/cuda:1"])
with self.assertRaisesRegex(ValueError, 'Invalid rank'):
sharded_tensor.empty(spec, 10, 20)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
st = sharded_tensor.empty(spec, 10, 20)
tensor = torch.empty(10, 20)
with self.assertRaisesRegex(RuntimeError, "not supported yet for ShardedTensor!"):
torch.add(st, tensor)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
with self.assertRaisesRegex(ValueError, 'Only torch.strided layout is currently supported'):
sharded_tensor.empty(spec, 10, 20, layout=torch.sparse_coo)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
with self.assertRaisesRegex(ValueError, 'Only torch.contiguous_format memory_format is currently supported'):
sharded_tensor.empty(spec, 10, 20, memory_format=torch.channels_last)
spec = ChunkShardingSpec(dim=0, placements=["worker0/cuda:1"])
with self.assertRaisesRegex(RuntimeError, 'RPC framework needs to be initialized'):
sharded_tensor.empty(spec, 10, 20)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
with self.assertRaisesRegex(RuntimeError, 'RPC Framework needs to be initialized'):
st = sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
with self.assertRaisesRegex(RuntimeError, 'ShardedTensor created with init_rrefs=False'):
st = sharded_tensor.empty(spec, 10, 20)
st.remote_shards()
self.init_rpc()
spec = ChunkShardingSpec(dim=0, placements=["workerfoo/cuda:1"])
with self.assertRaisesRegex(ValueError, 'Invalid worker name'):
sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_invalid_pg_rpc_ranks(self):
self.init_pg()
# Init RPC with different ranks.
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(_transports=tp_transports())
rpc_backend_options.init_method = f"file://{self.file_name}"
rank = (self.rank + 1) % self.world_size
rpc.init_rpc(
name=f'worker{rank}',
rank=rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
spec = ChunkShardingSpec(dim=0, placements=["rank:1/cuda:1"])
with self.assertRaisesRegex(ValueError, 'Default ProcessGroup and RPC ranks must be the same'):
sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_insufficient_sharding_dims(self):
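# Sharding a tensor with only 2 rows across 4 placements leaves ranks 2 and 3 without a local shard.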
self.init_pg()
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.empty(spec, 2, 20)
# Validate local shard.
local_shards = st.local_shards()
if self.rank <= 1:
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
self.assertEqual((1, 20), local_shard.size())
else:
self.assertEqual(0, len(local_shards))
# Validate global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(2, len(shards_metadata))
for shard_rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_rank, 0], shard_metadata.shard_offsets)
self.assertEqual([1, 20], shard_metadata.shard_sizes)
self.assertEqual(f'rank:{shard_rank}/cuda:{shard_rank}', str(shard_metadata.placement))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharded_tensor_sizes(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
# Test with *args
st = sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
self.assertEqual(torch.Size([10, 20]), st.size())
# Test with single *args
st = sharded_tensor.empty(spec, 10, init_rrefs=True)
self.assertEqual(torch.Size([10]), st.size())
# Test with list
st = sharded_tensor.empty(spec, [10, 20], init_rrefs=True)
self.assertEqual(torch.Size([10, 20]), st.size())
# Test with tuple
st = sharded_tensor.empty(spec, (10, 20), init_rrefs=True)
self.assertEqual(torch.Size([10, 20]), st.size())
# Test with row size
st = sharded_tensor.empty(spec, (10, 20), init_rrefs=True)
self.assertEqual(st.size(0), 10)
# Test with col size
st = sharded_tensor.empty(spec, (10, 20), init_rrefs=True)
self.assertEqual(st.size(1), 20)
# Test with negative indexed size
st = sharded_tensor.empty(spec, (10, 20), init_rrefs=True)
self.assertEqual(st.size(-1), 20)
# Test with dim/ndim
self.assertEqual(st.dim(), 2)
self.assertEqual(st.ndim, 2)
# Test with invalid input
st = sharded_tensor.empty(spec, (10, 20), init_rrefs=True)
with self.assertRaisesRegex(IndexError, 'Dimension out of range'):
st.size(-3)
with self.assertRaisesRegex(IndexError, 'Dimension out of range'):
st.size(2)
with self.assertRaises(TypeError):
st = sharded_tensor.empty(spec, 'foo')
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_state_dict(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
m = MyShardedModel1(spec)
# Test save
m._register_state_dict_hook(state_dict_hook)
buffer = io.BytesIO()
mod_state_dict = m.state_dict()
mod_state_keys = mod_state_dict.keys()
self.assertTrue("sharded_tensor1" in mod_state_keys)
self.assertTrue("submodule.sharded_tensor2" in mod_state_keys)
torch.save(mod_state_dict, buffer)
# Test load.
module_load = MyShardedModel1()
module_load._register_load_state_dict_pre_hook(pre_load_state_dict_hook, True)
buffer.seek(0)
state_dict_deser = torch.load(buffer)
module_load.load_state_dict(state_dict_deser, strict=False)
module_load._register_state_dict_hook(state_dict_hook)
loaded_dict_keys = module_load.state_dict().keys()
self.assertTrue("sharded_tensor1" in loaded_dict_keys)
self.assertTrue("submodule.sharded_tensor2" in loaded_dict_keys)
# Verify after load.
self.assertTrue(torch.equal(m.sharded_tensor1, module_load.sharded_tensor1))
self.assertTrue(torch.equal(m.submodule.sharded_tensor2, module_load.submodule.sharded_tensor2))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_state_dict_new_group(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:0/cuda:2",
"rank:1/cuda:3",
],
)
pg = dist.new_group([2, 3])
m = MyShardedModel1(spec, pg)
# Test save
m._register_state_dict_hook(state_dict_hook)
buffer = io.BytesIO()
torch.save(m.state_dict(), buffer)
# Test load.
module_load = MyShardedModel1(spec=None, group=pg)
module_load._register_load_state_dict_pre_hook(pre_load_state_dict_hook, True)
buffer.seek(0)
with load_with_process_group(pg):
state_dict_deser = torch.load(buffer)
module_load.load_state_dict(state_dict_deser, strict=False)
# Verify after load.
self.assertTrue(torch.equal(m.sharded_tensor1, module_load.sharded_tensor1))
self.assertTrue(torch.equal(m.submodule.sharded_tensor2, module_load.submodule.sharded_tensor2))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_state_dict_no_sharded_tensors(self):
# Verify hooks don't affect modules with no ShardedTensors.
m = torch.nn.Linear(10, 10)
# Test save
state_dict_before = m.state_dict()
m._register_state_dict_hook(state_dict_hook)
buffer = io.BytesIO()
torch.save(m.state_dict(), buffer)
self.assertEqual(state_dict_before, m.state_dict())
# Test load.
module_load = torch.nn.Linear(10, 10)
module_load._register_load_state_dict_pre_hook(pre_load_state_dict_hook, True)
buffer.seek(0)
state_dict_deser = torch.load(buffer)
module_load.load_state_dict(state_dict_deser, strict=False)
# Verify after load.
self.assertEqual(m.weight, module_load.weight)
self.assertEqual(m.bias, module_load.bias)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_load_state_dict_errors(self):
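# Reloading under a process group whose rank or world size differs from save time must raise.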
self.init_rpc()
dist.init_process_group(
backend="nccl",
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}",
)
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
m = MyShardedModel1(spec)
# Test save
m._register_state_dict_hook(state_dict_hook)
buffer = io.BytesIO()
torch.save(m.state_dict(), buffer)
pg = dist.new_group(ranks=[0, 2, 3])
buffer.seek(0)
if self.rank != 0:
with self.assertRaisesRegex(RuntimeError, 'Local rank at save time was'):
with load_with_process_group(pg):
state_dict_deser = torch.load(buffer)
else:
with self.assertRaisesRegex(RuntimeError, 'Local world size at save time was'):
with load_with_process_group(pg):
state_dict_deser = torch.load(buffer)
dist.destroy_process_group()
buffer.seek(0)
with self.assertRaisesRegex(RuntimeError, 'Need to initialize default process group'):
state_dict_deser = torch.load(buffer)
rpc.shutdown()
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_cleanup(self):
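# ShardedTensors are tracked in a module-level map; once they go out of scope the map should be empty again.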
def create_tensors():
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st1 = sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
st2 = sharded_tensor.empty(spec, 10, 20)
create_tensors()
self.assertEqual(0, len(sharded_tensor.api._sharded_tensor_map))
class TestShardedTensorEnumerable(ShardedTensorTestBase):
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharded_tensor_metadata(self):
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[0, 5],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:2/cuda:2",
),
ShardMetadata(
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="rank:3/cuda:3",
)
])
st = sharded_tensor.empty(spec, 10, 10, init_rrefs=True)
st_metadata = st.metadata()
self.assertEqual(torch.Size([10, 10]), st_metadata.size)
self.assertEqual(torch.float, st.dtype)
self.assertEqual(torch.strided, st.layout)
self.assertEqual(False, st.requires_grad)
self.assertTrue(st.is_contiguous())
self.assertFalse(st.is_pinned())
st = sharded_tensor.empty(spec, 10, 10, requires_grad=True, init_rrefs=True)
self.assertEqual(True, st.requires_grad)
st = sharded_tensor.empty(spec, 10, 10, dtype=torch.double, init_rrefs=True)
self.assertEqual(torch.double, st.dtype)
# Need CPU for pin_memory
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cpu",
),
ShardMetadata(
shard_offsets=[0, 5],
shard_sizes=[5, 5],
placement="rank:1/cpu",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:2/cpu",
),
ShardMetadata(
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="rank:3/cpu",
)
])
st = sharded_tensor.empty(spec, 10, 10, pin_memory=True, init_rrefs=True)
self.assertTrue(st.is_pinned())
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_grid_sharding(self):
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[0, 5],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:2/cuda:2",
),
ShardMetadata(
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="rank:3/cuda:3",
)
])
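        # 2x2 grid of 5x5 shards over a 10x10 tensor: rank r owns the block at (r // 2, r % 2).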
st = sharded_tensor.empty(spec, 10, 10, init_rrefs=True)
self.assertEqual((10, 10), st.size())
self.assertEqual(1, len(st.local_shards()))
# Verify local shard.
local_shard = st.local_shards()[0]
self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
self.assertEqual((self.rank // 2 * 5, (self.rank % 2) * 5), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual((rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
self.assertEqual(3, len(remote_shards))
for rpc_rank, shards in remote_shards.items():
self.assertEqual(1, len(shards))
for remote_shard in shards:
self.assertEqual(rpc_rank, remote_shard.owner().id)
shard = remote_shard.to_here()
self.assertEqual((5, 5), shard.tensor.size())
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_ones(self):
""" Test sharded_tensor.ones(...) """
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[0, 5],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:2/cuda:2",
),
ShardMetadata(
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="rank:3/cuda:3",
)
])
st = sharded_tensor.ones(spec, 10, 10, init_rrefs=True)
self.assertEqual((10, 10), st.size())
self.assertEqual(1, len(st.local_shards()))
# Verify local shard is initialized with torch.ones
local_shard = st.local_shards()[0]
self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
self.assertEqual(local_shard.tensor, torch.ones(5, 5))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_gather_even(self) -> None:
""" Test _sharded_tensor.gather(...) with evenly distributed._shards"""
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[0, 5],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:2/cuda:2",
),
ShardMetadata(
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="rank:3/cuda:3",
)
])
h, w = 10, 10
st = sharded_tensor.ones(spec, h, w, init_rrefs=True)
full_tensor = None
dst = 0
if self.rank == dst:
full_tensor = torch.zeros(
h,
w,
device=torch.device(f"cuda:{dst}")
)
st.gather(dst, full_tensor)
if self.rank == dst:
self.assertEqual(full_tensor, torch.ones(h, w))
else:
self.assertIsNone(full_tensor)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_gather_uneven(self) -> None:
""" Test _sharded_tensor.gather(...) with unevenly distributed._shards"""
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[0, 5],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="rank:3/cuda:3",
)
])
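        # Uneven placement: rank 0 owns two shards, ranks 1 and 3 own one each, rank 2 owns none.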
h, w = 10, 10
st = sharded_tensor.ones(spec, h, w, init_rrefs=True)
full_tensor = None
dst = 0
if self.rank == dst:
full_tensor = torch.zeros(
h,
w,
device=torch.device(f"cuda:{dst}")
)
st.gather(dst, full_tensor)
if self.rank == dst:
self.assertEqual(full_tensor, torch.ones(h, w))
else:
self.assertIsNone(full_tensor)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharded_tensor_to_cpu(self):
cpu_spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cpu",
"rank:1/cpu",
"rank:2/cpu",
"rank:3/cpu",
],
)
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 10, 20
gloo_pg = dist.new_group(backend="gloo")
# CPU sharded tensor should return the same instance (no copy)
st_cpu = sharded_tensor.zeros(cpu_spec, h, w, process_group=gloo_pg)
new_st_cpu = st_cpu.cpu()
self.assertTrue(st_cpu is new_st_cpu)
# GPU sharded tensor to cpu
st = sharded_tensor.zeros(spec, h, w)
# test ability to move st to CPU
spec_before_move = st.sharding_spec()
new_st = st.cpu(process_group=gloo_pg)
        # returns a copy of the original st
self.assertFalse(st is new_st)
# check the spec is still ChunkShardingSpec
spec_after_move = new_st.sharding_spec()
self.assertIsInstance(spec_after_move, ChunkShardingSpec)
        # the process group should now be ProcessGroupGloo since the tensor is on CPU
self.assertIsInstance(new_st._process_group, distributed_c10d.ProcessGroupGloo)
        # specs before and after the move should match except for the placement device
self.assertEqual(spec_before_move.dim, spec_after_move.dim)
self.assertEqual(len(spec_before_move.placements), len(spec_after_move.placements))
for i, remote_device_after in enumerate(spec_after_move.placements):
remote_device_before = spec_before_move.placements[i]
self.assertEqual(remote_device_before.rank(), remote_device_after.rank())
self.assertEqual(str(remote_device_after.device()), "cpu")
        # ensure metadata is also changed to CPU
metas = new_st.metadata().shards_metadata
for meta in metas:
self.assertEqual(str(meta.placement.device()), "cpu")
        # Test moving a mixed ShardedTensor (shards on different devices) to CPU
mixed_spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cpu",
"rank:1/cpu",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.zeros(mixed_spec, h, w, process_group=gloo_pg)
new_st = st.cpu()
        # returns a copy of the original st
self.assertFalse(st is new_st)
# check the spec is still ChunkShardingSpec
spec_after_move = new_st.sharding_spec()
self.assertIsInstance(spec_after_move, ChunkShardingSpec)
        # specs before and after the move should match except for the placement device
self.assertEqual(mixed_spec.dim, spec_after_move.dim)
self.assertEqual(len(mixed_spec.placements), len(spec_after_move.placements))
for i, remote_device_after in enumerate(spec_after_move.placements):
remote_device_before = mixed_spec.placements[i]
self.assertEqual(remote_device_before.rank(), remote_device_after.rank())
self.assertEqual(str(remote_device_after.device()), "cpu")
        # ensure metadata is also changed to CPU
metas = new_st.metadata().shards_metadata
for meta in metas:
self.assertEqual(str(meta.placement.device()), "cpu")
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharded_tensor_to_cuda(self):
cpu_spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cpu",
"rank:1/cpu",
"rank:2/cpu",
"rank:3/cpu",
],
)
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 10, 20
        # CUDA sharded tensor should return a new ShardedTensor, but keep the
        # same local shards (no data movement)
st_cuda = sharded_tensor.zeros(spec, h, w)
new_st_cuda = st_cuda.cuda()
self.assertTrue(st_cuda is not new_st_cuda)
self.assertTrue(st_cuda.local_tensor() is new_st_cuda.local_tensor())
gloo_pg = dist.new_group(backend="gloo")
# CPU sharded tensor to GPU
st_cpu = sharded_tensor.zeros(cpu_spec, h, w, process_group=gloo_pg)
# test ability to move st to GPU
spec_before_move = st_cpu.sharding_spec()
new_st_gpu = st_cpu.cuda()
# check the spec is still ChunkShardingSpec
spec_after_move = new_st_gpu.sharding_spec()
self.assertIsInstance(spec_after_move, ChunkShardingSpec)
        # specs before and after the move should match except for the placement device
self.assertEqual(spec_before_move.dim, spec_after_move.dim)
self.assertEqual(len(spec_before_move.placements), len(spec_after_move.placements))
for i, remote_device_after in enumerate(spec_after_move.placements):
remote_device_before = spec_before_move.placements[i]
self.assertEqual(remote_device_before.rank(), remote_device_after.rank())
self.assertEqual(str(remote_device_before.device().type), "cpu")
self.assertEqual(str(remote_device_after.device().type), "cuda")
        # ensure metadata is also changed to GPU
metas = new_st_gpu.metadata().shards_metadata
for meta in metas:
self.assertEqual(str(meta.placement.device().type), "cuda")
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharded_tensor_to_test(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 10, 20
        # Calling .to() with the same dtype and device should return the
        # original ShardedTensor without copying.
        st = sharded_tensor.zeros(spec, h, w)
st_self = st.to(dtype=st.dtype, device="cuda")
self.assertTrue(st_self is st)
# test dtype to
st_16 = st.to(torch.float16)
self.assertFalse(st_16 is st)
self.assertEqual(st_16.dtype, torch.float16)
# test device to
st_cpu = st.to(device=torch.device("cpu"))
self.assertFalse(st_cpu is st)
self.assertEqual(st_cpu.local_tensor().device.type, "cpu")
st_cuda = st_cpu.to(device=torch.device("cuda"))
self.assertEqual(st_cuda.local_tensor().device.type, "cuda")
# non-kwarg device to
st_cuda = st_cpu.to(torch.device("cuda"))
self.assertEqual(st_cuda.local_tensor().device.type, "cuda")
st_cpu = st_cuda.to(torch.device("cpu"))
self.assertEqual(st_cpu.local_tensor().device.type, "cpu")
        # device given as a string
st_cpu = st_cuda.to("cpu")
self.assertEqual(st_cpu.local_tensor().device.type, "cpu")
st_cuda = st_cpu.to("cuda")
self.assertEqual(st_cuda.local_tensor().device.type, "cuda")
        # device given as an int (device index)
st_cpu = st_cuda.to("cpu")
self.assertEqual(st_cpu.local_tensor().device.type, "cpu")
st_cuda = st_cpu.to(self.rank)
self.assertEqual(st_cuda.local_tensor().device.type, "cuda")
        # test conversion using another tensor's dtype and device
cuda_tensor = torch.randn(3, 4, dtype=torch.float16, device="cuda")
st_cuda = st.to(cuda_tensor)
self.assertFalse(st_cuda is st)
self.assertEqual(st_cuda.dtype, torch.float16)
cuda_tensor = torch.randn(3, 4, dtype=torch.float16, device="cuda:2")
st_cuda = st.to(cuda_tensor)
self.assertEqual(st_cuda.dtype, torch.float16)
# test dtype and device together
st_cpu_16 = st.to("cpu", torch.float16)
self.assertEqual(st_cpu_16.dtype, torch.float16)
self.assertEqual(st_cpu_16.local_tensor().device.type, "cpu")
st_cuda_32 = st_cpu_16.to("cuda", torch.float32)
self.assertEqual(st_cuda_32.dtype, torch.float32)
self.assertEqual(st_cuda_32.local_tensor().device.type, "cuda")
        # test passing an additional process group
gloo_pg = dist.new_group(backend="gloo")
st_gloo = st.to(device="cpu", process_group=gloo_pg)
self.assertFalse(st_gloo is st)
self.assertEqual(st_gloo.local_tensor().device.type, "cpu")
self.assertEqual(st_gloo._process_group, gloo_pg)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharded_tensor_device(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 10, 20
        # Create a CUDA sharded tensor and verify its reported device.
        st = sharded_tensor.zeros(spec, h, w)
current_device = torch.device(torch.cuda.current_device())
self.assertEqual(current_device, st.device)
        # after moving to CPU, the device attribute should change
cpu_device = torch.device("cpu")
st_cpu = st.to(device=cpu_device)
self.assertEqual(st_cpu.device, cpu_device)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_uneven_shards(self):
self.init_pg()
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[2, 4],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[0, 4],
shard_sizes=[4, 2],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[2, 0],
shard_sizes=[4, 4],
placement="rank:2/cuda:2",
),
ShardMetadata(
shard_offsets=[4, 4],
shard_sizes=[2, 2],
placement="rank:3/cuda:3",
),
])
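        # Four unequal shards (2x4, 4x2, 4x4, 2x2) that together tile the 6x6 tensor.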
st = sharded_tensor.empty(spec, 6, 6)
self.assertEqual((6, 6), st.size())
self.assertEqual(1, len(st.local_shards()))
def verify_size(rank, tensor_dims):
if rank == 0:
self.assertEqual((2, 4), tensor_dims)
elif rank == 1:
self.assertEqual((4, 2), tensor_dims)
elif rank == 2:
self.assertEqual((4, 4), tensor_dims)
elif rank == 3:
self.assertEqual((2, 2), tensor_dims)
def verify_offsets(rank, offsets):
if rank == 0:
self.assertEqual((0, 0), offsets)
elif rank == 1:
self.assertEqual((0, 4), offsets)
elif rank == 2:
self.assertEqual((2, 0), offsets)
elif rank == 3:
self.assertEqual((4, 4), offsets)
# Verify local shard.
local_shard = st.local_shards()[0]
self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
verify_size(self.rank, local_shard.tensor.size())
# Verify local shard metadata.
verify_offsets(self.rank, local_shard.metadata.shard_offsets)
verify_size(self.rank, local_shard.metadata.shard_sizes)
self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
verify_offsets(rank, shard_metadata.shard_offsets)
verify_size(rank, shard_metadata.shard_sizes)
self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_partial_world_size(self):
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
])
st = sharded_tensor.empty(spec, 10, 5, init_rrefs=True)
self.assertEqual((10, 5), st.size())
if self.rank <= 1:
self.assertEqual(1, len(st.local_shards()))
else:
self.assertEqual(0, len(st.local_shards()))
if self.rank <= 1:
# Verify local shard.
local_shard = st.local_shards()[0]
self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
self.assertEqual((self.rank * 5, 0), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(2, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual((rank * 5, 0), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
if self.rank <= 1:
self.assertEqual(1, len(remote_shards))
else:
self.assertEqual(2, len(remote_shards))
for rpc_rank, shards in remote_shards.items():
self.assertEqual(1, len(shards))
for remote_shard in shards:
self.assertEqual(rpc_rank, remote_shard.owner().id)
shard = remote_shard.to_here()
self.assertEqual((5, 5), shard.tensor.size())
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_new_group(self):
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:1",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:2/cuda:3",
),
])
pg = dist.new_group(ranks=[1, 2, 3])
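        # The spec's ranks are relative to the new group [1, 2, 3]: group rank 0 is
        # global rank 1 (cuda:1) and group rank 2 is global rank 3 (cuda:3).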
st = sharded_tensor.empty(spec, 10, 5, process_group=pg, init_rrefs=True)
self.assertEqual((10, 5), st.size())
if self.rank == 1 or self.rank == 3:
# Verify local shard.
local_shard = st.local_shards()[0]
self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
self.assertEqual((self.rank // 2 * 5, 0), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
self.assertEqual(f'rank:{self.rank - 1}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(2, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual((rank * 5, 0), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
self.assertEqual(f'rank:{rank * 2}/cuda:{rank * 2 + 1}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
if self.rank == 1 or self.rank == 3:
self.assertEqual(1, len(remote_shards))
else:
self.assertEqual(2, len(remote_shards))
owners = {}
for rpc_rank, shards in remote_shards.items():
self.assertEqual(1, len(shards))
for remote_shard in shards:
self.assertEqual(rpc_rank, remote_shard.owner().id)
shard = remote_shard.to_here()
self.assertEqual((5, 5), shard.tensor.size())
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_multiple_local_shards(self):
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[0, 5],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
)
])
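        # Ranks 0 and 1 each own two shards (stacked along dim 0); ranks 2 and 3 own none.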
st = sharded_tensor.empty(spec, 10, 10, init_rrefs=True)
self.assertEqual((10, 10), st.size())
if self.rank <= 1:
self.assertEqual(2, len(st.local_shards()))
# Verify local shards.
for idx, local_shard in enumerate(st.local_shards()):
self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
self.assertEqual((idx * 5, self.rank * 5), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
else:
self.assertEqual(0, len(st.local_shards()))
# Verify global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(4, len(shards_metadata))
for shard_rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual((shard_rank // 2 * 5, (shard_rank % 2) * 5), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
self.assertEqual(f'rank:{shard_rank % 2}/cuda:{shard_rank % 2}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
if self.rank <= 1:
self.assertEqual(1, len(remote_shards))
else:
self.assertEqual(2, len(remote_shards))
owners = {}
for rpc_rank, shards in remote_shards.items():
self.assertEqual(2, len(shards))
for remote_shard in shards:
self.assertEqual(rpc_rank, remote_shard.owner().id)
shard = remote_shard.to_here()
self.assertEqual((5, 5), shard.tensor.size())
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_with_rpc_names(self):
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="worker0/cuda:0",
),
ShardMetadata(
shard_offsets=[0, 5],
shard_sizes=[5, 5],
placement="worker1/cuda:1",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="worker2/cuda:2",
),
ShardMetadata(
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="worker3/cuda:3",
)
])
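        # Placements are addressed by RPC worker name ("workerN") instead of "rank:N".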
st = sharded_tensor.empty(spec, 10, 10, init_rrefs=True)
self.assertEqual((10, 10), st.size())
self.assertEqual(1, len(st.local_shards()))
# Verify local shard.
local_shard = st.local_shards()[0]
self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
self.assertEqual((self.rank // 2 * 5, (self.rank % 2) * 5), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
self.assertEqual(f'worker{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual((rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
self.assertEqual(f'worker{rank}/cuda:{rank}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
self.assertEqual(3, len(remote_shards))
for rpc_rank, shards in remote_shards.items():
self.assertEqual(1, len(shards))
for remote_shard in shards:
self.assertEqual(rpc_rank, remote_shard.owner().id)
shard = remote_shard.to_here()
self.assertEqual((5, 5), shard.tensor.size())
class TestShardedTensorFromLocalTensor(ShardedTensorTestBase):
def _generate_st_from_chunk_local_tensor(self, st_size, sharding_spec):
tensor_meta = sharding_spec.build_metadata(st_size, TensorProperties())
pg = dist.distributed_c10d._get_default_group()
local_tensor = None
local_shard_metadata = None
rank_to_metadata = {}
for shard_metadata in tensor_meta.shards_metadata:
rank, device = _parse_and_validate_remote_device(pg, shard_metadata.placement)
rank_to_metadata[rank] = shard_metadata
if rank == self.rank:
local_tensor = torch.rand(shard_metadata.shard_sizes).cuda(device)
local_shard_metadata = shard_metadata
        # TODO: figure out how the API should behave when some ranks have no shard
# see https://github.com/pytorch/pytorch/issues/73133
assert local_tensor is not None
st = ShardedTensor._init_from_local_tensor(
local_tensor,
sharding_spec,
st_size,
init_rrefs=True,
)
self.assertEqual(tuple(st_size), st.size())
self.assertEqual(1, len(st.local_shards()))
# Verify local shard.
local_shard = st.local_shards()[0]
self.assertEqual(st.local_tensor(), local_tensor)
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.tensor.device)
# Verify local shard metadata.
self.assertEqual(
local_shard_metadata.shard_offsets, local_shard.metadata.shard_offsets
)
self.assertEqual(
local_shard_metadata.shard_sizes, local_shard.metadata.shard_sizes
)
self.assertEqual(local_shard_metadata.placement, local_shard.metadata.placement)
# Verify global metadata.
st_shards_metadata = st.metadata().shards_metadata
self.assertEqual(self.world_size, len(st_shards_metadata))
self.assertEqual(tensor_meta.shards_metadata, st_shards_metadata)
# Validate remote shards.
remote_shards = st.remote_shards()
self.assertEqual(self.world_size - 1, len(remote_shards))
for rpc_rank, shards in remote_shards.items():
self.assertEqual(1, len(shards))
for remote_shard in shards:
self.assertEqual(rpc_rank, remote_shard.owner().id)
                # If the remote shard does not exist, to_here() will throw an exception.
if tensor_meta.shards_metadata[rpc_rank]:
shard = remote_shard.to_here()
self.assertEqual(
rank_to_metadata[rpc_rank].shard_sizes, shard.tensor.size()
)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_from_local_tensor(self):
chunk_specs = _chunk_sharding_specs_list_for_test([0, 1, 1, 0], seed=31)
for spec in chunk_specs:
self._generate_st_from_chunk_local_tensor([20, 10], spec)
self._generate_st_from_chunk_local_tensor([21, 11], spec)
self._generate_st_from_chunk_local_tensor([23, 16], spec)
self._generate_st_from_chunk_local_tensor([44, 16, 8], spec)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_from_local_tensor_errors(self):
enumerable_sharding_spec = EnumerableShardingSpec(
[
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
]
)
st_size = [24, 12]
local_tensor = torch.rand(*st_size).cuda(self.rank)
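        # The two 5x5 shards cover only a 10x10 region of the 24x12 tensor, so init must fail.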
with self.assertRaisesRegex(
ValueError, "do not cover the entire tensor"
):
ShardedTensor._init_from_local_tensor(
local_tensor,
enumerable_sharding_spec,
st_size,
)
chunk_specs = _chunk_sharding_specs_list_for_test([0], seed=31)
with self.assertRaisesRegex(
ValueError, "local_tensor is not a contiguous Tensor."
):
ShardedTensor._init_from_local_tensor(
local_tensor.t(),
chunk_specs[0],
st_size,
)
class TestShardedTensorFromLocalShards(ShardedTensorTestBase):
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_local_shards(self):
shard_offsets = [(self.rank // 2) * 5, (self.rank % 2) * 5]
local_shard_metadata = ShardMetadata(
shard_offsets=shard_offsets,
shard_sizes=[5, 5],
placement=f"rank:{self.rank}/cuda:{self.rank}"
)
local_tensor = torch.randn(5, 5, device=f"cuda:{self.rank}")
local_shard = sharded_tensor.Shard(local_tensor, local_shard_metadata)
local_shard_from_offsets = sharded_tensor.Shard.from_tensor_and_offsets(
local_tensor,
shard_offsets=shard_offsets,
rank=self.rank
)
self.assertEqual(local_shard.metadata, local_shard_from_offsets.metadata)
wrong_local_shard_metadata = ShardMetadata(
shard_offsets=shard_offsets,
shard_sizes=[6, 5],
placement=f"rank:{self.rank}/cuda:{self.rank}"
)
with self.assertRaisesRegex(ValueError, 'Shard tensor size does not match'):
local_shard_from_wrong_meta = sharded_tensor.Shard(
local_tensor,
metadata=wrong_local_shard_metadata,
)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_from_local_shards(self):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
placement=f"rank:{self.rank}/cuda:{self.rank}"
)
local_shards = [sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata)]
st = sharded_tensor.init_from_local_shards(local_shards, [10, 10], init_rrefs=True)
self.assertEqual((10, 10), st.size())
self.assertEqual(1, len(st.local_shards()))
# Verify local shard.
local_shard = st.local_shards()[0]
self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
self.assertEqual((self.rank // 2 * 5, (self.rank % 2) * 5), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
shards_metadata = st.metadata().shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual((rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
self.assertEqual(3, len(remote_shards))
for rpc_rank, shards in remote_shards.items():
self.assertEqual(1, len(shards))
for remote_shard in shards:
self.assertEqual(rpc_rank, remote_shard.owner().id)
shard = remote_shard.to_here()
self.assertEqual((5, 5), shard.tensor.size())
@skip_if_lt_x_gpu(4)
def test_st_base_init_from_local_shards_and_global_metadata(self):
world_size = 4
shards_metadata = []
shards = []
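        # ShardedTensorBase does not require an initialized process group, so this rank
        # constructs the shards and metadata for all four ranks locally.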
for rank in range(world_size):
local_shard_metadata = ShardMetadata(
shard_offsets=[(rank // 2) * 5, (rank % 2) * 5],
shard_sizes=[5, 5],
placement=f"rank:{rank}/cuda:{rank}",
)
shards_metadata.append(local_shard_metadata)
shards.append(
sharded_tensor.Shard(
torch.randn(5, 5, device=f"cuda:{rank}"), local_shard_metadata
)
)
tensor_properties = TensorProperties(
dtype=torch.get_default_dtype(),
layout=torch.strided,
requires_grad=False,
memory_format=torch.contiguous_format,
pin_memory=False,
)
sharded_tensor_metadata = sharded_tensor.ShardedTensorMetadata(
shards_metadata=shards_metadata,
size=torch.Size([10, 10]),
tensor_properties=tensor_properties,
)
st_base = sharded_tensor.ShardedTensorBase._init_from_local_shards_and_global_metadata(
shards, sharded_tensor_metadata=sharded_tensor_metadata
)
self.assertEqual(4, len(st_base.local_shards()))
# Verify local shard of st_base
local_shard = st_base.local_shards()[0]
self.assertEqual(torch.device("cuda:0"), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
self.assertEqual(
(0, 0),
local_shard.metadata.shard_offsets,
)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
self.assertEqual("rank:0/cuda:0", str(local_shard.metadata.placement))
# Verify global metadata.
shards_metadata = st_base.metadata().shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual(
(rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets
)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
self.assertEqual(f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_from_local_shards_and_global_metadata(self):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
placement=f"rank:{self.rank}/cuda:{self.rank}"
)
shards_metadata = []
for r in range(self.world_size):
if r == self.rank:
shards_metadata.append(local_shard_metadata)
else:
shards_metadata.append(ShardMetadata(
shard_offsets=[(r // 2) * 5, (r % 2) * 5],
shard_sizes=[5, 5],
placement=f"rank:{r}/cuda:{r}"
))
local_shards = [sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata)]
tensor_properties = TensorProperties(
dtype=torch.get_default_dtype(),
layout=torch.strided,
requires_grad=False,
memory_format=torch.contiguous_format,
pin_memory=False,
)
sharded_tensor_metadata = sharded_tensor.ShardedTensorMetadata(
shards_metadata=shards_metadata,
size=torch.Size([10, 10]),
tensor_properties=tensor_properties,
)
st = ShardedTensor._init_from_local_shards_and_global_metadata(
local_shards,
sharded_tensor_metadata,
init_rrefs=True,
)
self.assertEqual((10, 10), st.size())
self.assertEqual(1, len(st.local_shards()))
# Verify local shard.
local_shard = st.local_shards()[0]
self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
self.assertEqual((self.rank // 2 * 5, (self.rank % 2) * 5), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
self.assertEqual(f'rank:{self.rank}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
shards_metadata = st.metadata().shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual((rank // 2 * 5, (rank % 2) * 5), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
self.assertEqual(f'rank:{rank}/cuda:{rank}', str(shard_metadata.placement))
# Validate remote shards.
remote_shards = st.remote_shards()
self.assertEqual(3, len(remote_shards))
for rpc_rank, shards in remote_shards.items():
self.assertEqual(1, len(shards))
for remote_shard in shards:
self.assertEqual(rpc_rank, remote_shard.owner().id)
shard = remote_shard.to_here()
self.assertEqual((5, 5), shard.tensor.size())
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_from_local_shards_new_group(self):
new_pg = dist.new_group(ranks=[1, 2, 3])
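        # Rank 0 is excluded from the new group; ranks 1-3 build shards using group-relative
        # ranks in the placement (global rank r maps to group rank r - 1).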
if self.rank != 0:
local_shard_metadata = ShardMetadata(
shard_offsets=[5 * (self.rank - 1), 0],
shard_sizes=[5, 5],
placement=f"rank:{self.rank - 1}/cuda:{self.rank}"
)
local_shards = [sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata)]
st = sharded_tensor.init_from_local_shards(local_shards, [15, 5], process_group=new_pg)
# Verify local shard.
local_shard = st.local_shards()[0]
self.assertEqual(torch.device(f'cuda:{self.rank}'), local_shard.tensor.device)
self.assertEqual((5, 5), local_shard.tensor.size())
# Verify local shard metadata.
self.assertEqual(((self.rank - 1) * 5, 0), local_shard.metadata.shard_offsets)
self.assertEqual((5, 5), local_shard.metadata.shard_sizes)
self.assertEqual(f'rank:{self.rank - 1}/cuda:{self.rank}', str(local_shard.metadata.placement))
# Verify global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(3, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual((rank * 5, 0), shard_metadata.shard_offsets)
self.assertEqual((5, 5), shard_metadata.shard_sizes)
self.assertEqual(f'rank:{rank}/cuda:{rank + 1}', str(shard_metadata.placement))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_from_local_shards_invalid_local_shards(self):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
placement=f"rank:{self.rank}/cuda:{self.rank}"
)
indices = [[0, 1, 1], [2, 0, 2]]
values = [3.2, 4.5, 5.8]
sparse_tensor = torch.sparse_coo_tensor(indices, values, (5, 5), device=f"cuda:{self.rank}")
empty_local_shards = []
with self.assertRaisesRegex(ValueError, 'have no local shards on all ranks'):
st = sharded_tensor.init_from_local_shards(empty_local_shards, [10, 10], init_rrefs=True)
wrong_layout_shards = [
sharded_tensor.Shard(sparse_tensor, local_shard_metadata)
]
with self.assertRaisesRegex(ValueError, 'Only torch.strided layout is currently supported'):
st = sharded_tensor.init_from_local_shards(
wrong_layout_shards, [10, 10], init_rrefs=True)
wrong_memory_format_shards = [
sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}").t(), local_shard_metadata)
]
with self.assertRaisesRegex(ValueError, 'Only torch.contiguous_format memory_format is currently supported'):
st = sharded_tensor.init_from_local_shards(
wrong_memory_format_shards, [10, 10], init_rrefs=True)
with self.assertRaisesRegex(ValueError, 'Shard tensor size does not match'):
wrong_size_shards = [sharded_tensor.Shard(torch.randn(2, 3, device=f"cuda:{self.rank}"), local_shard_metadata)]
with self.assertRaisesRegex(ValueError, "Local shard tensor device does not match"):
wrong_device_shards = [sharded_tensor.Shard(torch.randn(5, 5), local_shard_metadata)]
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_from_local_shards_invalid_property_cross_ranks(self):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
placement=f"rank:{self.rank}/cuda:{self.rank}"
)
tensor_overall_size = [10, 10] if self.rank == 0 else [10, 5]
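        # Ranks disagree on the global size ([10, 10] on rank 0 vs [10, 5] elsewhere),
        # which must be rejected.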
        wrong_size_shards = [
            sharded_tensor.Shard(torch.ones(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata)
        ]
        with self.assertRaisesRegex(ValueError, "ShardedTensor global_size property does not match from different ranks!"):
            st = sharded_tensor.init_from_local_shards(wrong_size_shards, tensor_overall_size, init_rrefs=True)
tensor_dtype = torch.int if self.rank == 0 else torch.float32
wrong_dtype_shards = [
sharded_tensor.Shard(torch.ones(5, 5, device=f"cuda:{self.rank}", dtype=tensor_dtype), local_shard_metadata)
]
with self.assertRaisesRegex(ValueError, "ShardedTensor dtype property does not match from different ranks!"):
st = sharded_tensor.init_from_local_shards(wrong_dtype_shards, [10, 10], init_rrefs=True)
        tensor_requires_grad = self.rank == 0
wrong_requires_grad_shards = [
sharded_tensor.Shard(
torch.randn(5, 5, device=f"cuda:{self.rank}", requires_grad=tensor_requires_grad),
local_shard_metadata
)
]
with self.assertRaisesRegex(ValueError, 'ShardedTensor requires_grad property does not match from different ranks!'):
st = sharded_tensor.init_from_local_shards(
wrong_requires_grad_shards, [10, 10], init_rrefs=True)
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
placement=f"rank:{self.rank}/cpu"
)
@with_comms(init_rpc=False, backend="gloo")
@skip_if_lt_x_gpu(4)
def test_init_from_local_shards_invalid_pin_memory(self):
        # pin_memory is only supported for dense CPU tensors
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
placement=f"rank:{self.rank}/cpu"
)
wrong_pin_memory_local_shards = [
sharded_tensor.Shard(torch.randn(5, 5, pin_memory=True), local_shard_metadata),
sharded_tensor.Shard(torch.randn(5, 5, pin_memory=False), local_shard_metadata)
]
with self.assertRaisesRegex(ValueError, "Local shards' tensor pin_memory property need to be the same"):
st = sharded_tensor.init_from_local_shards(
wrong_pin_memory_local_shards, [10, 10], init_rrefs=True)
        tensor_pin_memory = self.rank == 0
wrong_pin_memory_shards_cross_ranks = [
sharded_tensor.Shard(torch.randn(5, 5, pin_memory=tensor_pin_memory), local_shard_metadata)
]
with self.assertRaisesRegex(ValueError, 'ShardedTensor pin_memory property does not match from different ranks!'):
st = sharded_tensor.init_from_local_shards(
wrong_pin_memory_shards_cross_ranks, [10, 10], init_rrefs=True)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_from_local_shards_invalid_shards_overlap(self):
local_shard_size = [5, 5] if self.rank != 0 else [6, 6]
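        # Rank 0 claims a 6x6 shard while the others claim 5x5, so the shards overlap.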
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=local_shard_size,
placement=f"rank:{self.rank}/cuda:{self.rank}"
)
local_shards = [sharded_tensor.Shard(torch.randn(local_shard_size, device=f"cuda:{self.rank}"), local_shard_metadata)]
with self.assertRaisesRegex(ValueError, "overlap"):
sharded_tensor.init_from_local_shards(local_shards, [10, 10], init_rrefs=True)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_from_local_shards_invalid_shards_gaps(self):
local_shard_size = [5, 5] if self.rank != 0 else [4, 4]
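        # Rank 0 claims only a 4x4 shard, leaving a gap: the total shard volume no longer
        # matches the tensor volume.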
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=local_shard_size,
placement=f"rank:{self.rank}/cuda:{self.rank}"
)
local_shards = [sharded_tensor.Shard(torch.randn(local_shard_size, device=f"cuda:{self.rank}"), local_shard_metadata)]
with self.assertRaisesRegex(ValueError, "does not match tensor volume"):
sharded_tensor.init_from_local_shards(local_shards, [10, 10], init_rrefs=True)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_from_local_shards_and_global_metadata_invalid_shards(self):
local_shard_metadata = ShardMetadata(
shard_offsets=[(self.rank // 2) * 5, (self.rank % 2) * 5],
shard_sizes=[5, 5],
placement=f"rank:{self.rank}/cuda:{self.rank}"
)
shards_metadata = []
for r in range(self.world_size):
if r == self.rank:
shards_metadata.append(local_shard_metadata)
else:
shards_metadata.append(ShardMetadata(
shard_offsets=[(r // 2) * 5, (r % 2) * 5],
shard_sizes=[5, 5],
placement=f"rank:{r}/cuda:{r}"
))
tensor_properties = TensorProperties(
dtype=torch.get_default_dtype(),
layout=torch.strided,
requires_grad=False,
memory_format=torch.contiguous_format,
pin_memory=False,
)
sharded_tensor_metadata = sharded_tensor.ShardedTensorMetadata(
shards_metadata=shards_metadata,
size=torch.Size([10, 10]),
tensor_properties=tensor_properties,
)
empty_local_shards = []
with self.assertRaisesRegex(RuntimeError, 'does not match number of local shards metadata'):
ShardedTensor._init_from_local_shards_and_global_metadata(
empty_local_shards,
sharded_tensor_metadata
)
wrong_num_shards = [
sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata),
sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}"), local_shard_metadata)
]
with self.assertRaisesRegex(RuntimeError, 'does not match number of local shards metadata'):
ShardedTensor._init_from_local_shards_and_global_metadata(
wrong_num_shards,
sharded_tensor_metadata
)
with self.assertRaisesRegex(ValueError, 'Shard tensor size does not match with metadata.shard_lengths'):
wrong_size_shards = [sharded_tensor.Shard(torch.randn(2, 3, device=f"cuda:{self.rank}"), local_shard_metadata)]
with self.assertRaisesRegex(ValueError, "Local shard tensor device does not match with local Shard's placement"):
wrong_device_shards = [sharded_tensor.Shard(torch.randn(5, 5), local_shard_metadata)]
wrong_dtype_shards = [
sharded_tensor.Shard(torch.ones(5, 5, device=f"cuda:{self.rank}", dtype=torch.int), local_shard_metadata)
]
with self.assertRaisesRegex(ValueError, "Local shards' tensor dtype property is incompatible with"):
ShardedTensor._init_from_local_shards_and_global_metadata(
wrong_dtype_shards,
sharded_tensor_metadata
)
indices = [[0, 1, 1], [2, 0, 2]]
values = [3.2, 4.5, 5.8]
sparse_tensor = torch.sparse_coo_tensor(indices, values, (5, 5), device=f"cuda:{self.rank}")
wrong_layout_shards = [
sharded_tensor.Shard(sparse_tensor, local_shard_metadata)
]
with self.assertRaisesRegex(ValueError, "Local shards' tensor layout property is incompatible with"):
ShardedTensor._init_from_local_shards_and_global_metadata(
wrong_layout_shards,
sharded_tensor_metadata
)
wrong_requires_grad_shards = [
sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}", requires_grad=True), local_shard_metadata)
]
with self.assertRaisesRegex(ValueError, "Local shards' tensor requires_grad property is incompatible with"):
ShardedTensor._init_from_local_shards_and_global_metadata(
wrong_requires_grad_shards,
sharded_tensor_metadata
)
wrong_memory_format_shards = [
sharded_tensor.Shard(torch.randn(5, 5, device=f"cuda:{self.rank}").t(), local_shard_metadata)
]
with self.assertRaisesRegex(ValueError, 'Only torch.contiguous_format memory_format is currently supported'):
ShardedTensor._init_from_local_shards_and_global_metadata(
wrong_memory_format_shards,
sharded_tensor_metadata
)
        # pin_memory is only valid for dense CPU tensors
local_shard_metadata.placement = _remote_device(f"rank:{self.rank}/cpu")
wrong_pin_memory_shards = [
sharded_tensor.Shard(torch.randn(5, 5, pin_memory=True), local_shard_metadata)
]
with self.assertRaisesRegex(ValueError, "Local shards' tensor pin_memory property is incompatible with"):
ShardedTensor._init_from_local_shards_and_global_metadata(
wrong_pin_memory_shards,
sharded_tensor_metadata
)
class TestShardedTensorCustomOps(ShardedTensorTestBase):
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_custom_op(self):
@custom_sharded_op_impl(torch.asin)
def my_sharded_asin(types, args, kwargs, process_group):
return torch.asin(args[0].local_shards()[0].tensor)
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.rand(spec, 10, 10)
res = torch.asin(st)
self.assertEqual(res, torch.asin(st.local_shards()[0].tensor))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_custom_op_override(self):
t = torch.rand(10, 10).cuda(self.rank)
from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op
@custom_sharding_spec_op(ChunkShardingSpec, torch.nn.functional.linear)
def my_sharded_linear(types, args, kwargs, process_group):
return t
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
m = torch.nn.Linear(32, 16).cuda(self.rank)
shard_parameter(m, 'weight', spec)
result = m(torch.rand(15, 32).cuda(self.rank))
self.assertEqual(t, result)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_custom_op_errors(self):
with self.assertRaisesRegex(TypeError, 'expects signature'):
@custom_sharded_op_impl(torch.nn.functional.linear)
def my_op1(types, args, kwargs, process_group, random_param):
pass
with self.assertRaisesRegex(TypeError, 'expects signature'):
@custom_sharded_op_impl(torch.nn.functional.linear)
def my_op2(types):
pass
class TestShardMetadata(ShardedTensorTestBase):
@with_comms
@requires_nccl()
def test_shard_metadata_init(self):
pg = dist.distributed_c10d._get_default_group()
md = ShardMetadata([10], [0])
self.assertIsNone(md.placement)
with self.assertRaisesRegex(ValueError, "remote device is None"):
_parse_and_validate_remote_device(pg, md.placement)
# String placement gets converted by ctor
md = ShardMetadata([10], [0], "rank:0/cpu")
self.assertEqual(md.placement, _remote_device("rank:0/cpu"))
rank, device = _parse_and_validate_remote_device(pg, md.placement)
self.assertEqual(0, rank)
self.assertEqual(device, torch.device("cpu"))
@with_comms
@requires_nccl()
def test_create_shard_with_no_placement(self):
md = ShardMetadata([0], [10])
shard = Shard(torch.zeros(10), md)
self.assertIsNone(shard.metadata.placement)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/distributed/_shard/sharded_tensor/test_sharded_tensor.py
|
# Owner(s): ["oncall: distributed"]
import copy
import sys
import torch
import torch.distributed as dist
from torch.distributed._shard.sharded_optim import (
ShardedOptimizer,
)
from torch.distributed._shard.api import (
shard_parameter,
_reshard_output,
_collect_local_shard
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
clone_module_parameter,
generate_chunk_sharding_specs_for_test,
generate_local_weight_sharding_params_for_test,
)
from torch.testing._internal.distributed._shard.test_common import SimpleMegatronLM
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestShardedTensorMegatronLinear(ShardedTensorTestBase):
def assertEdistNorm(self, t1, t2):
"""
        Use a normalized Euclidean distance to validate that two tensors are
        close, since element-wise comparison is not a good measure when the
        majority of elements are similar and only a few are slightly off.
"""
dist = torch.sqrt(((t1 - t2) ** 2).sum() / t1.numel())
self.assertTrue(dist.item() <= 0.5)
def _run_megatron_linear(self, spec, input_size, linear_size, dtype):
def _weight_override(module_dst, module_src):
module_dst.fc1.weight = clone_module_parameter(module_src.fc1, "weight")
module_dst.fc1.bias = clone_module_parameter(module_src.fc1, "bias")
module_dst.fc2.weight = clone_module_parameter(module_src.fc2, "weight")
module_dst.fc2.bias = clone_module_parameter(module_src.fc2, "bias")
def _shard_parameter(module, spec):
shard_parameter(module.fc1, "weight", spec[0])
shard_parameter(module.fc2, "weight", spec[1])
# Use same seed.
torch.manual_seed(0)
local_megatron_lm = SimpleMegatronLM(linear_size, rank=self.rank, dtype=dtype)
sharded_megatron_lm = SimpleMegatronLM(linear_size, dtype=dtype)
_weight_override(sharded_megatron_lm, local_megatron_lm)
        # Shard the parameters: fc1 weight column-wise, then fc2 weight row-wise.
_shard_parameter(sharded_megatron_lm, spec)
# Setup resharding of output.
reshard_spec = copy.deepcopy(spec[1])
reshard_spec.placements.sort(key=lambda placement: placement.rank())
reshard_spec.dim = 0
sharded_megatron_lm = _collect_local_shard(
_reshard_output(sharded_megatron_lm, reshard_spec)
)
torch.manual_seed(self.rank) # inputs different on each rank
inp = torch.rand(*input_size, requires_grad=True, device=self.rank, dtype=dtype)
# Run local computation
local_output = local_megatron_lm(inp)
# Compute loss and run backward pass.
local_output.sum().backward()
# Save and reset input grads.
local_input_grad = inp.grad
self.assertIsNotNone(inp.grad)
inp.grad = None
# Run sharded computation
sharded_output = sharded_megatron_lm(inp)
# Verify local and sharded results
self.assertEqual(local_output, sharded_output, atol=1e-3, rtol=1e-6)
sharded_output.sum().backward()
sharded_input_grad = inp.grad
self.assertIsNotNone(inp.grad)
# Verify sharded and local grads.
self.assertEqual(local_input_grad, sharded_input_grad, atol=1e-3, rtol=1e-6)
(
local_weight_grad_fc1,
local_weight_grad_fc2,
) = local_megatron_lm.get_weight_grads()
local_bias_grad_fc1, local_bias_grad_fc2 = local_megatron_lm.get_bias_grads()
        # Verify that weights in both layers and biases in the sharded linear have non-None grads.
(
sharded_weight_fc1,
sharded_weight_fc2,
) = sharded_megatron_lm.get_weights()
bias_grad_fc1, bias_grad_fc2 = sharded_megatron_lm.get_bias_grads()
self.assertNotEqual(sharded_weight_fc1.grad, None)
self.assertNotEqual(sharded_weight_fc2.grad, None)
self.assertNotEqual(bias_grad_fc1, None)
self.assertNotEqual(bias_grad_fc2, None)
        # All-reduce the local grads, then narrow them to the sharded layout so we can compare.
dist.all_reduce(local_weight_grad_fc1)
dist.all_reduce(local_weight_grad_fc2)
dist.all_reduce(local_bias_grad_fc1)
dist.all_reduce(local_bias_grad_fc2)
local_weight_fc1, local_weight_fc2 = local_megatron_lm.get_weights()
(
start_pos_fc1,
chunk_size_fc1,
) = generate_local_weight_sharding_params_for_test(
local_weight_fc1, 0, TEST_GPU_NUM, spec[0], self.rank
)
local_grad_narrowed_fc1 = local_weight_grad_fc1.narrow(
0, start_pos_fc1, chunk_size_fc1
)
(
start_pos_fc2,
chunk_size_fc2,
) = generate_local_weight_sharding_params_for_test(
local_weight_fc2, 1, TEST_GPU_NUM, spec[1], self.rank
)
local_grad_narrowed_fc2 = local_weight_grad_fc2.narrow(
1, start_pos_fc2, chunk_size_fc2
)
# Test backward gradient calculation.
self.assertEdistNorm(sharded_weight_fc1.grad, local_grad_narrowed_fc1)
self.assertEdistNorm(sharded_weight_fc2.grad, local_grad_narrowed_fc2)
self.assertEdistNorm(bias_grad_fc1, local_bias_grad_fc1)
self.assertEdistNorm(bias_grad_fc2, local_bias_grad_fc2)
# Test optimizer.
bias_fc1, bias_fc2 = sharded_megatron_lm.get_biases()
local_bias_fc1, local_bias_fc2 = local_megatron_lm.get_biases()
self.assertEdistNorm(bias_fc1, local_bias_fc1)
self.assertEdistNorm(bias_fc2, local_bias_fc2)
self.assertEdistNorm(bias_fc1.grad, local_bias_fc1.grad)
self.assertEdistNorm(bias_fc2.grad, local_bias_fc2.grad)
previous_sharded_weight_fc1 = sharded_weight_fc1.clone()
previous_sharded_weight_fc2 = sharded_weight_fc2.clone()
previous_bias_fc1 = bias_fc1.clone()
previous_bias_fc2 = bias_fc2.clone()
optim = torch.optim.SGD(local_megatron_lm.parameters(), lr=0.1)
optim.step()
sharded_optim = ShardedOptimizer(
dict(sharded_megatron_lm.named_parameters()),
torch.optim.SGD,
lr=0.1,
)
sharded_optim.step()
local_weight_fc1_narrowed = local_weight_fc1.narrow(
0, start_pos_fc1, chunk_size_fc1
)
local_weight_fc2_narrowed = local_weight_fc2.narrow(
1, start_pos_fc2, chunk_size_fc2
)
# Test weight value after optimizer.
self.assertEqual(sharded_weight_fc1.size(), local_weight_fc1_narrowed.size())
self.assertEqual(sharded_weight_fc2.size(), local_weight_fc2_narrowed.size())
self.assertNotEqual(previous_sharded_weight_fc1, sharded_weight_fc1)
self.assertNotEqual(previous_sharded_weight_fc2, sharded_weight_fc2)
self.assertEdistNorm(sharded_weight_fc1, local_weight_fc1_narrowed)
self.assertEdistNorm(sharded_weight_fc2, local_weight_fc2_narrowed)
# Test bias value after optimizer.
local_bias_fc1, local_bias_fc2 = local_megatron_lm.get_biases()
self.assertNotEqual(previous_bias_fc1, bias_fc1)
self.assertEdistNorm(bias_fc1, local_bias_fc1)
self.assertNotEqual(previous_bias_fc2, bias_fc2)
self.assertEdistNorm(bias_fc2, local_bias_fc2)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_megatron_two_layer_prototype(self):
colwise_sharding_spec = generate_chunk_sharding_specs_for_test(0)
rowwise_sharding_spec = generate_chunk_sharding_specs_for_test(1)
for spec in zip(colwise_sharding_spec, rowwise_sharding_spec):
self._run_megatron_linear(spec, [22, 17], [[17, 12], [12, 29]], torch.float16)
self._run_megatron_linear(spec, [28, 21], [[21, 11], [11, 29]], torch.float32)
self._run_megatron_linear(spec, [37, 23], [[23, 13], [13, 24]], torch.float64)
self._run_megatron_linear(spec, [24, 15], [[15, 14], [14, 20]], torch.float16)
# Test multiple input dims
self._run_megatron_linear(spec, [10, 22, 17], [[17, 12], [12, 29]], torch.float32)
self._run_megatron_linear(spec, [13, 28, 21], [[21, 11], [11, 29]], torch.float16)
self._run_megatron_linear(spec, [27, 37, 23], [[23, 13], [13, 24]], torch.float32)
self._run_megatron_linear(spec, [100, 24, 15], [[15, 14], [14, 20]], torch.float64)
# Test single input dim
self._run_megatron_linear(spec, [17], [[17, 12], [12, 29]], torch.float16)
self._run_megatron_linear(spec, [21], [[21, 11], [11, 29]], torch.float32)
self._run_megatron_linear(spec, [23], [[23, 13], [13, 24]], torch.float64)
self._run_megatron_linear(spec, [15], [[15, 14], [14, 20]], torch.float16)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/_shard/sharded_tensor/test_megatron_prototype.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
from torch.distributed._shard import sharded_tensor, _shard_tensor
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
generate_chunk_sharding_specs_for_test,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
_chunk_sharding_specs_list_for_test,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestShardedTensorElementWiseOps(ShardedTensorTestBase):
def _run_sharded_elementwise_ops(
self, spec, input_size, op, reset_seed=None, **kwargs
):
torch.manual_seed(self.rank)
st = sharded_tensor.rand(spec, *input_size)
        if reset_seed:
            reset_seed()
new_st = op(st, **kwargs)
local_shard = st.local_tensor()
new_st_local_shard = new_st.local_tensor()
        if reset_seed:
            reset_seed()
self.assertEqual(
op(local_shard, **kwargs),
new_st_local_shard,
)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_gelu(self):
specs = generate_chunk_sharding_specs_for_test(
0
) + generate_chunk_sharding_specs_for_test(1)
for spec in specs:
self._run_sharded_elementwise_ops(spec, [12, 17], torch.nn.functional.gelu)
self._run_sharded_elementwise_ops(spec, [18, 21], torch.nn.functional.gelu)
self._run_sharded_elementwise_ops(spec, [17, 23], torch.nn.functional.gelu)
self._run_sharded_elementwise_ops(spec, [14, 15], torch.nn.functional.gelu)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_relu(self):
specs = generate_chunk_sharding_specs_for_test(
0
) + generate_chunk_sharding_specs_for_test(1)
for spec in specs:
self._run_sharded_elementwise_ops(spec, [12, 17], torch.nn.functional.relu)
self._run_sharded_elementwise_ops(spec, [18, 21], torch.nn.functional.relu)
self._run_sharded_elementwise_ops(spec, [17, 23], torch.nn.functional.relu)
self._run_sharded_elementwise_ops(spec, [14, 15], torch.nn.functional.relu)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_dropout(self):
def _reset_random_seed():
torch.manual_seed(self.rank + 4)
specs = generate_chunk_sharding_specs_for_test(
0
) + generate_chunk_sharding_specs_for_test(1)
for spec in specs:
self._run_sharded_elementwise_ops(
spec,
[12, 17],
torch.nn.functional.dropout,
p=0.4,
reset_seed=_reset_random_seed,
)
self._run_sharded_elementwise_ops(
spec,
[18, 21],
torch.nn.functional.dropout,
p=0.5,
reset_seed=_reset_random_seed,
)
_reset_random_seed()
dropout = torch.nn.Dropout(p=0.8)
self._run_sharded_elementwise_ops(
spec, [17, 23], dropout, reset_seed=_reset_random_seed
)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_tensor_nan_to_num(self):
specs = _chunk_sharding_specs_list_for_test([0, 1], seed=10)
for spec in specs:
tensor = torch.rand(16, 12).cuda(self.rank)
tensor[:, :2] = float('nan')
tensor[:, 4:5] = float('inf')
tensor[:, 10:] = -float('inf')
st = _shard_tensor(tensor, spec)
st_expected = _shard_tensor(torch.nan_to_num(tensor), spec)
st = torch.nan_to_num(st)
self.assertTrue(torch.allclose(st, st_expected))
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/_shard/sharded_tensor/ops/test_elementwise_ops.py
|
# Owner(s): ["oncall: distributed"]
import copy
import itertools
import sys
import torch
from torch.distributed._shard import sharded_tensor, _shard_tensor
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
generate_enumerable_sharding_specs_for_test,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
_chunk_sharding_specs_list_for_test,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestShardedTensorMatrixOps(ShardedTensorTestBase):
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_tensor_contiguous(self):
specs = _chunk_sharding_specs_list_for_test([0], seed=7)
for spec in specs:
st = sharded_tensor.rand(spec, 10, 22, 5, init_rrefs=False)
st = st.transpose(1, 0)
st = st.contiguous()
self.assertTrue(st.is_contiguous())
self.assertTrue(st.local_tensor().is_contiguous())
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_tensor_type_as(self):
specs = _chunk_sharding_specs_list_for_test([0], seed=7)
for spec in specs:
st = sharded_tensor.rand(
spec, 16, 30, 5, init_rrefs=False, dtype=torch.double
)
st_2 = sharded_tensor.rand(
spec, 16, 30, 5, init_rrefs=False, dtype=torch.float
)
st_3 = st.type_as(st_2)
self.assertEqual(torch.float, st_3.dtype)
self.assertEqual(torch.float, st_3.local_tensor().dtype)
st_3 = st.type_as(torch.zeros(10).type(torch.BoolTensor).cuda())
self.assertEqual(torch.bool, st_3.dtype)
self.assertEqual(torch.bool, st_3.local_tensor().dtype)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_tensor_transpose(self):
specs = _chunk_sharding_specs_list_for_test([0, 1, 2], seed=7)
for spec in specs:
tensor = torch.rand(15, 27, 16).cuda(self.rank)
tensor_t = tensor.transpose(0, 1).contiguous()
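            # Transposing dims 0 and 1 also swaps the sharding dim when the
            # tensor is sharded on either of them, so the expected spec is
            # remapped accordingly below.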
spec_n = copy.deepcopy(spec)
if spec_n.dim in (0, 1):
spec_n.dim = 1 - spec_n.dim
st_expected = _shard_tensor(tensor_t, spec_n)
self.assertTrue(
torch.allclose(
torch.transpose(_shard_tensor(tensor, spec), 0, 1), st_expected
)
)
tensor_t = torch.transpose(tensor, 1, 2).contiguous()
spec_n = copy.deepcopy(spec)
if spec_n.dim in (1, 2):
spec_n.dim = 3 - spec_n.dim
st_expected = _shard_tensor(tensor_t, spec_n)
self.assertTrue(
torch.allclose(_shard_tensor(tensor, spec).transpose(1, 2), st_expected)
)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_tensor_transpose_error(self):
enumerable_spec = generate_enumerable_sharding_specs_for_test()[0]
st = sharded_tensor.rand(
enumerable_spec, 10, 10, init_rrefs=False, dtype=torch.double
)
with self.assertRaisesRegex(
RuntimeError,
"not supported",
):
st.transpose(1, 0)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_tensor_softmax(self):
specs = _chunk_sharding_specs_list_for_test([0, 2], seed=17)
for spec in specs:
tensor = torch.rand(15, 27, 16).cuda(self.rank)
tensor_n = torch.nn.functional.softmax(tensor, dim=1, dtype=torch.float32)
st_expected = _shard_tensor(tensor_n, spec)
self.assertTrue(
torch.allclose(
torch.nn.functional.softmax(
_shard_tensor(tensor, spec), dim=1, dtype=torch.float32
),
st_expected,
)
)
def _test_masked_fill_with_sizes(self, mask_size, broadcast_style=False):
specs = _chunk_sharding_specs_list_for_test([0, 1, 2], seed=7)
for spec in specs:
tensor = torch.rand(35, 17, 26).cuda(self.rank)
mask = torch.randint(0, 2, mask_size).type(torch.BoolTensor).cuda(self.rank)
if broadcast_style:
mask = mask.unsqueeze(1)
tensor_m = tensor.masked_fill(mask, 25.0)
st_expected = _shard_tensor(tensor_m, spec)
self.assertTrue(
torch.allclose(
_shard_tensor(tensor, spec).masked_fill(mask, 25.0),
st_expected,
)
)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_tensor_masked_fill(self):
self._test_masked_fill_with_sizes((35, 17, 26))
self._test_masked_fill_with_sizes((17, 26))
self._test_masked_fill_with_sizes((35, 26), broadcast_style=True)
self._test_masked_fill_with_sizes((26,))
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_tensor_masked_fill_error(self):
specs = _chunk_sharding_specs_list_for_test([1, 2], seed=7)
for spec in specs:
st = sharded_tensor.rand(
spec, 35, 17, 26, init_rrefs=False, dtype=torch.double
)
mask = (
torch.randint(0, 2, (2, 35, 17, 26))
.type(torch.BoolTensor)
.cuda(self.rank)
)
with self.assertRaisesRegex(
ValueError,
"mask dim must not greater than the dim of the sharded tensor.",
):
st.masked_fill(mask, 25.0)
mask = torch.randint(0, 2, (16, 26)).type(torch.BoolTensor).cuda(self.rank)
with self.assertRaisesRegex(
ValueError,
"The size of mask 0 must match the size of sharded tensor 1 "
"at non-singleton dimension 0",
):
st.masked_fill(mask, 25.0)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_tensor_view(self):
specs = _chunk_sharding_specs_list_for_test([0, 0, -3], seed=10)
for spec in specs:
tensor = torch.rand(16, 35, 26).cuda(self.rank)
tensor_v = tensor.view(16, 35, 26).view(4, 4, 35, 26)
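            # Viewing (16, 35, 26) as (4, 4, 35, 26) splits dim 0 in two, so a
            # negative sharding dim index now sits one more position from the
            # end; the spec is adjusted accordingly below.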
new_spec = copy.deepcopy(spec)
if new_spec.dim < 0:
new_spec.dim -= 1
st_expected = _shard_tensor(tensor_v, new_spec)
self.assertTrue(
torch.allclose(
_shard_tensor(tensor, spec).view(4, 4, 35, 26),
st_expected,
)
)
st_expected = _shard_tensor(tensor, spec)
self.assertTrue(
torch.allclose(
_shard_tensor(tensor_v, new_spec).view(16, 35, 26),
st_expected,
)
)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_tensor_view_error(self):
for spec in _chunk_sharding_specs_list_for_test([2], seed=7):
st = sharded_tensor.rand(
spec, 35, 17, 26, init_rrefs=False, dtype=torch.double
)
with self.assertRaisesRegex(
NotImplementedError,
"Shape having dim 2 is not supported "
"for sharded tensor sharded on dim 2.",
):
st.view(35 * 17, 26)
with self.assertRaisesRegex(
ValueError,
r"Shape '\[5, 7, 35, 17, 26\]' is invalid for sharded tensor size 15470.",
):
st.view(5, 7, 35, 17, 26)
with self.assertRaisesRegex(
ValueError,
"Only one dimension can be inferred for sharded view op.",
):
st.view(5, 7, -1, -1)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_tensor_layer_norm(self):
specs = _chunk_sharding_specs_list_for_test([1, 2], seed=10)
flags = [True, False]
for spec, flag in itertools.product(specs, flags):
tensor = torch.rand(16, 35, 26).cuda(self.rank)
layer_norm = torch.nn.LayerNorm((35, 26), elementwise_affine=flag).cuda(
self.rank
)
st = layer_norm(_shard_tensor(tensor, spec))
with torch.no_grad():
tensor_normed = layer_norm(tensor)
st_expected = _shard_tensor(tensor_normed, spec)
self.assertEqual(
st.local_tensor(),
st_expected.local_tensor(),
)
self.assertTrue(
torch.allclose(
st,
st_expected,
atol=1e-6,
)
)
st_expected = torch.nn.functional.layer_norm(
_shard_tensor(tensor, spec),
(35, 26),
weight=layer_norm.weight,
bias=layer_norm.bias,
)
self.assertTrue(
torch.allclose(
st,
st_expected,
atol=1e-6,
)
)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_tensor_layer_norm_error(self):
specs = _chunk_sharding_specs_list_for_test([2], seed=10)
for spec in specs:
tensor = torch.rand(16, 35, 26).cuda(self.rank)
with self.assertRaisesRegex(
ValueError,
"normalized_shape dim must not be greater "
"than the dim of the sharded tensor.",
):
layer_norm = torch.nn.LayerNorm((14, 55, 35, 26)).cuda(self.rank)
layer_norm(_shard_tensor(tensor, spec))
with self.assertRaisesRegex(
ValueError,
r"Given normalized_shape=\[35\], expected input with shape "
r"\[\*, 35\], but got input of size \[16, 35, 26\].",
):
layer_norm = torch.nn.LayerNorm((35)).cuda(self.rank)
layer_norm(_shard_tensor(tensor, spec))
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/_shard/sharded_tensor/ops/test_matrix_ops.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.distributed as dist
from torch.distributed._shard import (
shard_parameter,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
clone_module_parameter,
generate_chunk_sharding_specs_for_test,
generate_local_weight_sharding_params_for_test,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestShardedEmbeddingBag(ShardedTensorTestBase):
def _run_sharded_embedding_bag(
self,
spec,
input_size,
num_embeddings,
embedding_dim,
mode,
sharded_dim=None,
include_last_offset=False,
offset_size=None,
max_norm=None,
norm_type=2.0,
padding_idx=None,
):
# Use same seed.
torch.manual_seed(0)
local_embedding_bag = torch.nn.EmbeddingBag(
num_embeddings,
embedding_dim,
mode=mode,
max_norm=max_norm,
norm_type=norm_type,
include_last_offset=include_last_offset,
padding_idx=padding_idx,
).cuda(self.rank)
sharded_embedding_bag = torch.nn.EmbeddingBag(
num_embeddings,
embedding_dim,
mode=mode,
max_norm=max_norm,
norm_type=norm_type,
include_last_offset=include_last_offset,
padding_idx=padding_idx,
)
# Copy the weights from local embedding bag.
sharded_embedding_bag.weight = clone_module_parameter(
local_embedding_bag, "weight"
)
# Shard the parameter.
shard_parameter(sharded_embedding_bag, "weight", spec)
# Run sharded computation
torch.manual_seed(self.rank) # inputs different on each rank
inp = torch.randint(0, num_embeddings, tuple(input_size)).cuda(self.rank)
per_sample_weights = None
if mode == "sum":
per_sample_weights = torch.rand(*input_size).cuda(self.rank)
offsets = None
if len(input_size) == 1:
            # We need to generate a fixed-length offsets tensor on each rank:
            # the current implementation and the dist API do not support
            # offsets with different lengths across ranks. Since
            # input_size[0] >> offset_size, the while loop below will not run
            # for too long.
while offsets is None or (offsets.size(0) != offset_size):
offsets = torch.randint(input_size[0], (offset_size,))
offsets[0] = 0
if include_last_offset:
offsets[-1] = input_size[0]
offsets = (
torch.unique(offsets, sorted=True).contiguous().cuda(self.rank)
)
# If max_norm is set, we need to ensure that the renorm has been applied across
# inputs from all ranks.
if max_norm is not None:
gathered_inputs = [torch.zeros_like(inp) for _ in range(TEST_GPU_NUM)]
dist.all_gather(gathered_inputs, inp)
unique_inp = torch.unique(torch.cat(gathered_inputs))
offsets_dummy = torch.tensor([len(unique_inp) // 2]).cuda(self.rank)
local_embedding_bag(unique_inp, offsets=offsets_dummy)
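            # With max_norm, lookups renorm the touched weight rows in place.
            # The call above runs the local reference bag on the union of
            # inputs gathered from all ranks, which is meant to renorm the same
            # rows the sharded path touches; offsets_dummy appears to exist
            # only to make the 1-D call valid and its value should not matter
            # for that side effect.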
sharded_output = sharded_embedding_bag(
inp,
offsets=offsets,
per_sample_weights=per_sample_weights,
)
# Run local computation
local_output = local_embedding_bag(
inp,
offsets=offsets,
per_sample_weights=per_sample_weights,
)
        # Compare the local weight with the sharded one to ensure the renorm
        # was applied as expected.
if max_norm is not None:
sharded_weight = sharded_embedding_bag.weight.local_shards()[0].tensor
(start_pos, chunk_size) = generate_local_weight_sharding_params_for_test(
local_embedding_bag.weight, sharded_dim, TEST_GPU_NUM, spec, self.rank
)
local_weight_narrowed = local_embedding_bag.weight.narrow(
sharded_dim, start_pos, chunk_size
)
self.assertEqual(local_weight_narrowed, sharded_weight)
# Verify
self.assertEqual(local_output, sharded_output)
# Validate for torch.nn.functional.embedding_bag version.
local_output = torch.nn.functional.embedding_bag(
inp,
local_embedding_bag.weight,
offsets=offsets,
mode=mode,
per_sample_weights=per_sample_weights,
include_last_offset=include_last_offset,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
)
sharded_output = torch.nn.functional.embedding_bag(
inp,
sharded_embedding_bag.weight,
offsets=offsets,
mode=mode,
per_sample_weights=per_sample_weights,
include_last_offset=include_last_offset,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
)
self.assertEqual(local_output, sharded_output)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_embedding_bag_colwise(self):
for spec in generate_chunk_sharding_specs_for_test(1):
self._test_sharded_embedding_bag_with_test_cases(spec, 1)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_embedding_bag_rowwise(self):
for spec in generate_chunk_sharding_specs_for_test(0):
self._test_sharded_embedding_bag_with_test_cases(spec, 0)
def _test_sharded_embedding_bag_with_test_cases(self, spec, sharded_dim):
self._run_sharded_embedding_bag(spec, [5, 5], 17, 14, "sum")
self._run_sharded_embedding_bag(spec, [5, 4], 17, 12, "mean")
self._run_sharded_embedding_bag(spec, [6, 7], 21, 11, "max")
self._run_sharded_embedding_bag(
spec, [5, 5], 17, 14, "sum", max_norm=2.5, sharded_dim=sharded_dim
)
self._run_sharded_embedding_bag(
spec,
[5, 4],
17,
12,
"mean",
max_norm=2.0,
norm_type=1.0,
sharded_dim=sharded_dim,
)
self._run_sharded_embedding_bag(
spec,
[6, 7],
21,
11,
"max",
max_norm=1.5,
norm_type=1.0,
sharded_dim=sharded_dim,
)
self._run_sharded_embedding_bag(spec, [5, 5], 17, 14, "sum", padding_idx=6)
self._run_sharded_embedding_bag(spec, [8, 6], 24, 13, "sum")
self._run_sharded_embedding_bag(spec, [4, 3], 16, 14, "max")
self._run_sharded_embedding_bag(spec, [8], 23, 13, "sum", offset_size=3)
self._run_sharded_embedding_bag(spec, [5], 17, 12, "mean", offset_size=2)
self._run_sharded_embedding_bag(spec, [12], 16, 12, "max", offset_size=4)
self._run_sharded_embedding_bag(
spec, [8], 23, 13, "sum", offset_size=3, include_last_offset=True
)
self._run_sharded_embedding_bag(
spec, [12], 16, 12, "max", offset_size=4, include_last_offset=True
)
self._run_sharded_embedding_bag(
spec,
[12],
17,
12,
"sum",
offset_size=3,
max_norm=1.25,
sharded_dim=sharded_dim,
)
self._run_sharded_embedding_bag(
spec,
[5],
17,
12,
"mean",
offset_size=2,
max_norm=1.25,
sharded_dim=sharded_dim,
)
self._run_sharded_embedding_bag(
spec,
[5],
17,
12,
"max",
offset_size=2,
max_norm=1.15,
sharded_dim=sharded_dim,
)
self._run_sharded_embedding_bag(spec, [4, 3], 16, 14, "sum", padding_idx=12)
self._run_sharded_embedding_bag(spec, [4, 3], 16, 14, "mean", padding_idx=12)
self._run_sharded_embedding_bag(spec, [4, 3], 16, 14, "max", padding_idx=12)
self._run_sharded_embedding_bag(
spec,
[12],
17,
12,
"sum",
offset_size=3,
max_norm=1.25,
padding_idx=10,
sharded_dim=sharded_dim,
)
self._run_sharded_embedding_bag(
spec,
[5],
17,
12,
"mean",
offset_size=2,
max_norm=1.25,
padding_idx=10,
sharded_dim=sharded_dim,
)
self._run_sharded_embedding_bag(
spec,
[5],
17,
12,
"max",
offset_size=2,
max_norm=1.15,
padding_idx=10,
sharded_dim=sharded_dim,
)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/_shard/sharded_tensor/ops/test_embedding_bag.py
|
# Owner(s): ["oncall: distributed"]
import copy
import torch
import torch.distributed._shard.sharded_tensor as sharded_tensor
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.common_utils import (
run_tests,
)
class TestTensorOps(ShardedTensorTestBase):
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_deep_copy(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.rand(spec, (12, 5))
copied_st = copy.deepcopy(st)
self.assertTrue(type(copied_st) is type(st))
self.assertEqual(copied_st.local_tensor(), st.local_tensor())
self.assertFalse(copied_st is st)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_inplace_copy(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.rand(spec, (12, 5))
ones_st = sharded_tensor.ones(spec, (12, 5))
self.assertFalse(torch.equal(ones_st, st))
st.copy_(ones_st)
self.assertTrue(torch.equal(st, ones_st))
        # under no_grad, an in-place copy should work between two tensors
        # with different requires_grad settings
st_with_grad = sharded_tensor.rand(spec, (12, 5), requires_grad=True)
self.assertTrue(st_with_grad.requires_grad)
self.assertFalse(ones_st.requires_grad)
with torch.no_grad():
st_with_grad.copy_(ones_st)
self.assertEqual(st_with_grad.local_tensor(), ones_st.local_tensor())
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_clone(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.rand(spec, (12, 5))
copied_st = st.clone()
self.assertTrue(type(copied_st) is type(st))
self.assertEqual(copied_st.local_tensor(), st.local_tensor())
self.assertFalse(copied_st is st)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_detach(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.rand(spec, (12, 5), requires_grad=True)
local_shards = st.local_shards()
        # since the sharded tensor was created with requires_grad=True,
        # all local shards should require grad before detach
for local_shard in local_shards:
self.assertTrue(local_shard.tensor.requires_grad)
detached_st = st.detach()
self.assertFalse(detached_st.requires_grad)
for local_shard in detached_st.local_shards():
self.assertFalse(local_shard.tensor.requires_grad)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_set_requires_grad(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.rand(spec, (12, 5))
local_shards = st.local_shards()
        # before setting requires_grad, none of the local shards should require grad
for local_shard in local_shards:
self.assertFalse(local_shard.tensor.requires_grad)
st.requires_grad_()
self.assertTrue(st.requires_grad)
for local_shard in local_shards:
self.assertTrue(local_shard.tensor.requires_grad)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/_shard/sharded_tensor/ops/test_tensor_ops.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.distributed as dist
from torch.distributed._shard import (
shard_parameter,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
clone_module_parameter,
generate_chunk_sharding_specs_for_test,
generate_local_weight_sharding_params_for_test,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestShardedEmbedding(ShardedTensorTestBase):
def _run_sharded_embedding(
self,
spec,
input_size,
num_embeddings,
embedding_dim,
sharded_dim=None,
max_norm=None,
norm_type=2.0,
padding_idx=None,
):
# Use same seed.
torch.manual_seed(0)
local_embedding = torch.nn.Embedding(
num_embeddings,
embedding_dim,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
).cuda(self.rank)
sharded_embedding = torch.nn.Embedding(
num_embeddings,
embedding_dim,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
)
# Copy the weights from local embedding
sharded_embedding.weight = clone_module_parameter(
local_embedding, "weight"
)
# Shard the parameter.
shard_parameter(sharded_embedding, "weight", spec)
# Run sharded computation
torch.manual_seed(self.rank) # inputs different on each rank
inp = torch.randint(0, num_embeddings, tuple(input_size)).cuda(self.rank)
sharded_output = sharded_embedding(inp)
# If max_norm is set, we need to ensure that the renorm has been applied across
# inputs from all ranks.
if max_norm is not None:
gathered_inputs = [torch.zeros_like(inp) for _ in range(TEST_GPU_NUM)]
dist.all_gather(gathered_inputs, inp)
unique_inp = torch.unique(torch.cat(gathered_inputs))
local_embedding(unique_inp)
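            # With max_norm, lookups renorm the touched weight rows in place.
            # The call above runs the local reference embedding on the union of
            # inputs gathered from all ranks, which is meant to renorm the same
            # rows the sharded path touches, so the comparisons below line up.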
# Run local computation
local_output = local_embedding(inp)
        # Compare the local weight with the sharded one to ensure the renorm
        # was applied as expected.
if max_norm is not None:
sharded_weight = sharded_embedding.weight.local_shards()[0].tensor
(start_pos, chunk_size) = generate_local_weight_sharding_params_for_test(
local_embedding.weight, sharded_dim, TEST_GPU_NUM, spec, self.rank
)
local_weight_narrowed = local_embedding.weight.narrow(
sharded_dim, start_pos, chunk_size
)
self.assertEqual(local_weight_narrowed, sharded_weight)
# Verify
self.assertEqual(local_output, sharded_output)
# Validate for torch.nn.functional.embedding version.
local_output = torch.nn.functional.embedding(
inp,
local_embedding.weight,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
)
sharded_output = torch.nn.functional.embedding(
inp,
sharded_embedding.weight,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
)
self.assertEqual(local_output, sharded_output)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_embedding_colwise(self):
for spec in generate_chunk_sharding_specs_for_test(1):
self._run_sharded_embedding(spec, [5, 4], 17, 12)
self._run_sharded_embedding(spec, [6, 7, 6], 21, 11)
self._run_sharded_embedding(spec, [8, 6, 5, 4], 23, 13)
self._run_sharded_embedding(spec, [8, 6, 5, 4, 7], 23, 16)
self._run_sharded_embedding(spec, [4], 15, 14)
self._run_sharded_embedding(spec, [34], 15, 14, padding_idx=10)
self._run_sharded_embedding(spec, [8, 6, 5, 4], 23, 13, padding_idx=12)
self._run_sharded_embedding(
spec, [4, 5, 6], 23, 13, max_norm=2.5, sharded_dim=1
)
self._run_sharded_embedding(
spec, [12, 7, 16], 23, 13, max_norm=2.5, sharded_dim=1
)
self._run_sharded_embedding(
spec, [8, 16, 20], 12, 12, max_norm=1.25, norm_type=1.0, sharded_dim=1
)
self._run_sharded_embedding(spec, [30], 15, 14, max_norm=2.0, sharded_dim=1)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_embedding_rowwise(self):
for spec in generate_chunk_sharding_specs_for_test(0):
# Test even split.
self._run_sharded_embedding(spec, [5, 12], 16, 22)
self._run_sharded_embedding(spec, [5, 4], 32, 12)
self._run_sharded_embedding(spec, [6, 7, 6], 64, 11)
self._run_sharded_embedding(
spec, [5, 12], 16, 22, max_norm=2.5, sharded_dim=0
)
self._run_sharded_embedding(spec, [6, 7, 6], 64, 11, padding_idx=30)
self._run_sharded_embedding(
spec, [6, 5, 3], 26, 11, max_norm=2.0, sharded_dim=0
)
# Test uneven split.
self._run_sharded_embedding(spec, [8, 6, 5, 4], 19, 11)
self._run_sharded_embedding(spec, [6, 7, 6], 21, 11)
self._run_sharded_embedding(spec, [4], 21, 11)
self._run_sharded_embedding(spec, [8, 6, 5, 4], 21, 11, padding_idx=10)
self._run_sharded_embedding(
spec, [12, 16, 8], 27, 11, max_norm=2.0, sharded_dim=0
)
self._run_sharded_embedding(spec, [4], 14, 11, max_norm=2.5, sharded_dim=0)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/_shard/sharded_tensor/ops/test_embedding.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.distributed as dist
from torch.distributed._shard import sharded_tensor
from torch.distributed.distributed_c10d import _get_default_group
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
if TEST_WITH_DEV_DBG_ASAN:
print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
sys.exit(0)
class TestShardedTensorBinaryOps(ShardedTensorTestBase):
""" Test base for binary comparison functions such as torch.equal, torch.allclose etc. for ShardedTensor """
seed = 42
def get_random_tensors(self, spec1, spec2, *sizes, pg1=None, pg2=None, seed_offset=0):
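        # Both tensors are drawn from the same class-level seed, so with
        # seed_offset=0 they hold identical values (useful for equality tests)
        # and with a non-zero offset they differ; the seed is bumped afterwards
        # so later calls generate fresh values.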
pg1 = _get_default_group() if pg1 is None else pg1
pg2 = _get_default_group() if pg2 is None else pg2
torch.manual_seed(TestShardedTensorBinaryOps.seed)
st1 = sharded_tensor.rand(spec1, sizes, process_group=pg1)
torch.manual_seed(TestShardedTensorBinaryOps.seed + seed_offset)
st2 = sharded_tensor.rand(spec2, sizes, process_group=pg2)
TestShardedTensorBinaryOps.seed += 1
return st1, st2
def get_gpu_specs(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
alt_spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:1/cuda:1",
"rank:0/cuda:0",
"rank:3/cuda:3",
"rank:2/cuda:2",
],
)
return spec, alt_spec
def _test_common_failures(self, cmp_op):
spec, alt_spec = self.get_gpu_specs()
st1, st2 = self.get_random_tensors(spec, spec, 10, 10)
if self.rank == 0:
torch.nn.init.uniform_(st1.local_shards()[0].tensor)
self.assertFalse(cmp_op(st1, st2))
st1 = sharded_tensor.ones(spec, 10, 10)
st2 = sharded_tensor.ones(spec, 10, 5)
self.assertFalse(cmp_op(st1, st2))
st1, st2 = self.get_random_tensors(spec, alt_spec, 10, 10)
self.assertFalse(cmp_op(st1, st2))
st1 = sharded_tensor.ones(spec, 10, 10)
st2 = sharded_tensor.zeros(spec, 10, 10)
self.assertFalse(cmp_op(st1, st2))
st1 = sharded_tensor.ones(spec, 10, 10)
st2 = sharded_tensor.ones(spec, 10, 10, dtype=torch.double)
self.assertFalse(cmp_op(st1, st2))
st1 = sharded_tensor.ones(spec, 10, 10)
st2 = sharded_tensor.ones(spec, 10, 10, requires_grad=True)
self.assertFalse(cmp_op(st1, st2))
cpu_spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cpu",
"rank:1/cpu",
"rank:2/cpu",
"rank:3/cpu",
],
)
st1 = sharded_tensor.ones(cpu_spec, 10, 10)
st2 = sharded_tensor.ones(cpu_spec, 10, 10, pin_memory=True)
self.assertFalse(cmp_op(st1, st2))
pg = dist.new_group([1, 0, 3, 2])
st1, st2 = self.get_random_tensors(spec, spec, 10, 10, pg2=pg)
with self.assertRaisesRegex(
RuntimeError, "All distributed tensors should use the same ProcessGroup"
):
cmp_op(st1, st2)
pg = dist.new_group([0, 1, 2, 3])
st1, st2 = self.get_random_tensors(spec, spec, 10, 10, pg2=pg)
with self.assertRaisesRegex(
RuntimeError, "All distributed tensors should use the same ProcessGroup"
):
cmp_op(st1, st2)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_torch_equal_tensor_specs(self):
self._test_common_failures(torch.equal)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_torch_equal(self):
""" Test torch.equal(ShardedTensor, ShardedTensor) """
spec, alt_spec = self.get_gpu_specs()
st1, st2 = self.get_random_tensors(spec, spec, 10, 10)
self.assertTrue(torch.equal(st1, st2))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_torch_allclose_tensor_specs(self):
self._test_common_failures(torch.allclose)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_torch_allclose(self):
""" Test torch.allclose(ShardedTensor, ShardedTensor) """
spec, alt_spec = self.get_gpu_specs()
st1, st2 = self.get_random_tensors(spec, spec, 10, 10)
self.assertTrue(torch.allclose(st1, st2))
self.assertTrue(torch.allclose(st1, st2, atol=0))
# compare different arrays
st1, st2 = self.get_random_tensors(spec, spec, 10, 10, seed_offset=1)
self.assertFalse(torch.allclose(st1, st2))
# sharded_tensor.rand produces uniform values in the [0,1] range.
self.assertTrue(torch.allclose(st1, st2, atol=1))
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/distributed/_shard/sharded_tensor/ops/test_binary_cmp.py
|
# Owner(s): ["oncall: distributed"]
import copy
import sys
import torch
import torch.distributed as dist
from torch.distributed._shard.api import (
shard_parameter,
_collect_local_shard,
_reshard_output,
)
from torch.distributed._shard.sharded_optim import (
ShardedOptimizer,
)
from torch.distributed._shard.sharded_tensor import (
empty,
)
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
EnumerableShardingSpec,
ShardMetadata,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
clone_module_parameter,
generate_chunk_sharding_specs_for_test,
generate_local_weight_sharding_params_for_test,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestShardedTensorOpsLinear(ShardedTensorTestBase):
def _run_sharded_linear(
self, spec, input_size, linear_size, sharded_dim, dtype
):
# Use same seed.
torch.manual_seed(0)
local_linear = torch.nn.Linear(*linear_size, dtype=dtype).cuda(self.rank)
sharded_linear = torch.nn.Linear(*linear_size, dtype=dtype)
# Copy the weights and bias from local linear
sharded_linear.weight = clone_module_parameter(local_linear, "weight")
sharded_linear.bias = clone_module_parameter(local_linear, "bias")
# Shard the parameter.
shard_parameter(sharded_linear, "weight", spec)
# Run sharded computation
torch.manual_seed(self.rank) # inputs different on each rank
inp = torch.rand(*input_size, dtype=dtype).cuda(self.rank)
reshard_spec = copy.deepcopy(spec)
reshard_spec.dim = 0
reshard_spec.placements.sort(key=lambda placement: placement.rank())
sharded_linear = _collect_local_shard(
_reshard_output(sharded_linear, reshard_spec)
)
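        # The wrapping above makes the sharded module reshard its ShardedTensor
        # output to reshard_spec (_reshard_output) and then, roughly speaking,
        # return it as a regular local tensor (_collect_local_shard), so it can
        # be compared against the dense local_linear output below.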
sharded_output = sharded_linear(inp)
# Run local computation
local_output = local_linear(inp)
# Verify
self.assertEqual(local_output, sharded_output, atol=1e-3, rtol=1e-3)
# Validate for torch.nn.functional.linear version.
local_output = torch.nn.functional.linear(
inp, local_linear.weight, local_linear.bias
)
sharded_output = torch.nn.functional.linear(
inp, sharded_linear.weight, sharded_linear.bias
)
sharded_output = sharded_output.reshard(reshard_spec).local_tensor()
        # When the local tensor only has one dimension, an extra dimension is
        # added for resharding, so we need to squeeze it out manually here.
if inp.dim() == 1:
sharded_output = sharded_output.squeeze(reshard_spec.dim)
self.assertEqual(local_output, sharded_output, atol=1e-3, rtol=1e-3)
# Compute loss and run backward pass.
local_output.sum().backward()
sharded_output.sum().backward()
local_grad = local_linear.weight.grad
        # Verify that both the weight and the bias in the sharded linear have a non-None grad.
sharded_weight = sharded_linear.weight.local_tensor()
self.assertNotEqual(sharded_linear.bias.grad, None)
self.assertNotEqual(sharded_weight.grad, None)
# Shard the local linear's weight grad so that we can compare.
dist.all_reduce(local_grad)
(start_pos, chunk_size) = generate_local_weight_sharding_params_for_test(
local_linear.weight, sharded_dim, TEST_GPU_NUM, spec, self.rank
)
local_grad_narrowed = local_grad.narrow(sharded_dim, start_pos, chunk_size)
local_bias_grad = local_linear.bias.grad
dist.all_reduce(local_bias_grad)
# Test backward gradient calculation.
self.assertEqual(sharded_linear.bias.grad, local_bias_grad, atol=1e-3, rtol=1e-3)
self.assertEqual(sharded_weight.grad, local_grad_narrowed, atol=1e-3, rtol=1e-3)
# Test optimizer.
previous = local_linear.weight.clone().detach()
optim = torch.optim.SGD(local_linear.parameters(), lr=0.1)
optim.step()
self.assertNotEqual(previous, local_linear.weight)
previous_sharded_weight = sharded_weight.clone()
previous_sharded_bias = sharded_linear.bias.clone()
sharded_optim = ShardedOptimizer(
dict(sharded_linear.named_parameters()),
torch.optim.SGD,
lr=0.1,
)
sharded_optim.step()
sharded_weight = sharded_linear.weight.local_tensor()
local_weight_narrowed = local_linear.weight.narrow(
sharded_dim, start_pos, chunk_size
)
self.assertEqual(sharded_weight.size(), local_weight_narrowed.size())
self.assertNotEqual(previous_sharded_weight, sharded_weight)
self.assertEqual(sharded_weight, local_weight_narrowed, atol=1e-3, rtol=1e-3)
self.assertNotEqual(previous_sharded_bias, sharded_linear.bias)
self.assertEqual(sharded_linear.bias, local_linear.bias, atol=1e-3, rtol=1e-3)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_linear_colwise(self):
for spec in generate_chunk_sharding_specs_for_test(0):
self._run_sharded_linear(spec, [2, 17], [17, 12], 0, torch.float16)
self._run_sharded_linear(spec, [8, 21], [21, 11], 0, torch.float32)
self._run_sharded_linear(spec, [7, 23], [23, 13], 0, torch.float64)
self._run_sharded_linear(spec, [4, 15], [15, 14], 0, torch.float16)
# Test multiple input dims
self._run_sharded_linear(spec, [10, 2, 17], [17, 12], 0, torch.float32)
self._run_sharded_linear(spec, [13, 8, 21], [21, 11], 0, torch.float64)
self._run_sharded_linear(spec, [27, 7, 23], [23, 13], 0, torch.float16)
self._run_sharded_linear(spec, [100, 12, 4, 15], [15, 14], 0, torch.float32)
# Test single input dim
self._run_sharded_linear(spec, [17], [17, 12], 0, torch.float64)
self._run_sharded_linear(spec, [21], [21, 11], 0, torch.float16)
self._run_sharded_linear(spec, [23], [23, 13], 0, torch.float32)
self._run_sharded_linear(spec, [15], [15, 14], 0, torch.float64)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_linear_rowwise(self):
for spec in generate_chunk_sharding_specs_for_test(1):
# Test even split.
self._run_sharded_linear(spec, [8, 16], [16, 11], 1, torch.float16)
# Test uneven split.
self._run_sharded_linear(spec, [5, 19], [19, 11], 1, torch.float32)
self._run_sharded_linear(spec, [10, 21], [21, 11], 1, torch.float64)
# Test multiple input dims
self._run_sharded_linear(spec, [13, 8, 16], [16, 11], 1, torch.float16)
self._run_sharded_linear(spec, [10, 5, 19], [19, 11], 1, torch.float32)
self._run_sharded_linear(spec, [12, 15, 10, 21], [21, 11], 1, torch.float64)
# Test single input dim
self._run_sharded_linear(spec, [16], [16, 11], 1, torch.float16)
self._run_sharded_linear(spec, [19], [19, 11], 1, torch.float32)
self._run_sharded_linear(spec, [21], [21, 11], 1, torch.float64)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_linear_errors(self):
for spec in generate_chunk_sharding_specs_for_test(0):
fc1 = torch.nn.Linear(10, 10).cuda(self.rank)
shard_parameter(fc1, "weight", spec)
shard_parameter(fc1, "bias", spec)
with self.assertRaisesRegex(TypeError, 'bias needs to be torch.Tensor'):
fc1(torch.rand(10, 10).cuda(self.rank))
fc2 = torch.nn.Linear(10, 10).cuda(self.rank)
shard_parameter(fc2, "weight", spec)
with self.assertRaisesRegex(ValueError, 'Input needs to have at least 1 dim'):
fc2(torch.tensor(1).cuda(self.rank))
fc3 = torch.nn.Linear(10, 10).cuda(self.rank)
fc3.weight = torch.nn.Parameter(torch.rand(10, 10, 10).cuda(self.rank))
shard_parameter(fc3, "weight", spec)
with self.assertRaisesRegex(ValueError, 'Weight needs to have exactly 2 dims'):
fc3(torch.rand(10, 10).cuda(self.rank))
fc4 = torch.nn.Linear(10, 10).cuda(self.rank)
fc4.bias = torch.nn.Parameter(torch.rand(10, 10).cuda(self.rank))
shard_parameter(fc4, "weight", spec)
with self.assertRaisesRegex(ValueError, 'Bias needs to have exactly 1 dim'):
fc4(torch.rand(10, 10).cuda(self.rank))
fc5 = torch.nn.Linear(7, 10).cuda(self.rank)
shard_parameter(fc5, "weight", spec)
with self.assertRaisesRegex(ValueError, 'Input dim: 13 does not match appropriate weight dim: 7'):
fc5(torch.rand(20, 10, 13).cuda(self.rank))
fc6 = torch.nn.Linear(10, 10).cuda(self.rank)
del fc6.weight
enumerable_spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[0, 5],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:2/cuda:2",
),
ShardMetadata(
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="rank:3/cuda:3",
)
])
fc6.weight = empty(enumerable_spec, 10, 10)
        # The metadata repr inside the real error message can contain unbalanced
        # parentheses that would break re.compile, so match the args portion with
        # a permissive (?s).* pattern instead.
        error_msg = (
            r"torch function 'linear', with args: (?s).* "
            r"and kwargs: None not supported for ShardedTensor!"
        )
with self.assertRaisesRegex(RuntimeError, error_msg):
fc6(torch.rand(10, 10).cuda(self.rank))
fc7 = torch.nn.Linear(10, 80).cuda(self.rank)
multiple_local_shard_spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:2/cuda:2",
"rank:3/cuda:3",
"rank:3/cuda:3",
],
)
del fc7.weight
fc7.weight = empty(multiple_local_shard_spec, 80, 10)
with self.assertRaisesRegex(ValueError, 'Only one local shard supported!'):
fc7(torch.rand(10, 10).cuda(self.rank))
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/_shard/sharded_tensor/ops/test_linear.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.distributed._shard import _shard_tensor
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestShardedSoftmax(ShardedTensorTestBase):
def _test_sharded_softmax(self, softmax_dim, sharding_dim):
torch.manual_seed(0)
local_tensor = torch.rand(10, 10, device=self.rank)
local_softmax = torch.nn.functional.softmax(local_tensor, softmax_dim)
        spec = ChunkShardingSpec(
            dim=sharding_dim,
            placements=[f'rank:{idx}/cuda:{idx}' for idx in range(self.world_size)],
        )
st = _shard_tensor(local_tensor, spec)
sharded_softmax = torch.nn.functional.softmax(st, softmax_dim)
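        # Each rank's local shard of the sharded softmax should match its chunk
        # of the dense result along the sharding dim, which is what the
        # assertion below checks.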
        self.assertEqual(
            local_softmax.chunk(self.world_size, dim=sharding_dim)[self.rank],
            sharded_softmax.local_tensor(),
        )
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_softmax_basic(self):
self._test_sharded_softmax(0, 1)
self._test_sharded_softmax(-2, 1)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_softmax_on_sharding_dim(self):
self._test_sharded_softmax(1, 1)
self._test_sharded_softmax(-1, 1)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/_shard/sharded_tensor/ops/test_softmax.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
from torch.distributed._shard import sharded_tensor
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
if TEST_WITH_DEV_DBG_ASAN:
print("Skip dev-asan as torch + multiprocessing spawn have known issues", file=sys.stderr)
sys.exit(0)
class TestShardedTensorNNInit(ShardedTensorTestBase):
""" Testing torch.nn.init functions for ShardedTensor """
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_sharded_tensor_with_uniform(self):
""" Test torch.nn.init.uniform_(ShardedTensor, a, b) """
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 8, 2
expected_h = 2
expected_device = torch.device(f"cuda:{self.rank}")
a, b = 10, 20
seed = 1234
dtype = torch.double
st = sharded_tensor.empty(spec, h, w, dtype=dtype)
self.assertEqual(1, len(st.local_shards()))
# Clone local tensor to ensure torch.nn.init starts from the same input
local_tensor_clone = torch.clone(st.local_shards()[0].tensor)
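        # torch.nn.init.* on a ShardedTensor is expected to initialize each
        # local shard in place, so reseeding before both calls should make the
        # sharded init and the init of the cloned local shard draw the same
        # random values, hence the equality check below.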
torch.manual_seed(seed)
torch.nn.init.uniform_(st, a=a, b=b)
torch.manual_seed(seed)
torch.nn.init.uniform_(local_tensor_clone, a=a, b=b)
self.assertEqual(local_tensor_clone, st.local_shards()[0].tensor)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_sharded_tensor_with_normal(self):
""" Test torch.nn.init.normal_(ShardedTensor, mean, std) """
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 8, 2
expected_h = 2
expected_device = torch.device(f"cuda:{self.rank}")
mean, std = 10, 5
seed = 1234
dtype = torch.double
st = sharded_tensor.empty(spec, h, w, dtype=dtype)
self.assertEqual(1, len(st.local_shards()))
# Clone local tensor to ensure torch.nn.init starts from the same input
local_tensor_clone = torch.clone(st.local_shards()[0].tensor)
torch.manual_seed(seed)
torch.nn.init.normal_(st, mean=mean, std=std)
torch.manual_seed(seed)
torch.nn.init.normal_(local_tensor_clone, mean=mean, std=std)
self.assertEqual(local_tensor_clone, st.local_shards()[0].tensor)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_init_sharded_tensor_with_kaiming_uniform(self):
""" Test torch.nn.init.kaiming_uniform_(ShardedTensor, a, mode, nonlinearit) """
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 8, 2
expected_h = 2
expected_device = torch.device(f"cuda:{self.rank}")
a, mode, nonlinearity = 0, 'fan_in', 'leaky_relu'
seed = 1234
dtype = torch.double
st = sharded_tensor.empty(spec, h, w, dtype=dtype)
self.assertEqual(1, len(st.local_shards()))
# Clone local tensor to ensure torch.nn.init starts from the same input
local_tensor_clone = torch.clone(st.local_shards()[0].tensor)
torch.manual_seed(seed)
torch.nn.init.kaiming_uniform_(st, a=a, mode=mode, nonlinearity=nonlinearity)
torch.manual_seed(seed)
torch.nn.init.kaiming_uniform_(local_tensor_clone, a=a, mode=mode, nonlinearity=nonlinearity)
self.assertEqual(local_tensor_clone, st.local_shards()[0].tensor)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/distributed/_shard/sharded_tensor/ops/test_init.py
|
# Owner(s): ["oncall: distributed"]
import torch
from torch.distributed._shard import _shard_tensor
import torch.distributed._shard.sharded_tensor as sharded_tensor
import torch.distributed as dist
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
EnumerableShardingSpec,
ShardMetadata,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
gen_binary_op_func,
generate_chunk_sharding_specs_for_test,
)
class TestMathOps(ShardedTensorTestBase):
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_basic_math_ops(self):
ops = ["torch.add", "torch.sub", "torch.mul", "torch.div", "+", "-", "*", "/"]
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
sharded_lhs = sharded_tensor.rand(spec, (12, 3))
sharded_rhs = sharded_tensor.rand(spec, (12, 3))
current_rank = dist.get_rank()
global_lhs = (
torch.empty((12, 3), device=current_rank) if current_rank == 0 else None
)
global_rhs = (
torch.empty((12, 3), device=current_rank) if current_rank == 0 else None
)
sharded_lhs.gather(dst=0, out=global_lhs)
sharded_rhs.gather(dst=0, out=global_rhs)
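        # gather(dst=0) reassembles the full (12, 3) tensors on rank 0 only
        # (out stays None elsewhere), which is why the comparisons below are
        # guarded by current_rank == 0.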
for op in ops:
binary_op = gen_binary_op_func(op)
binary_op_ = gen_binary_op_func(op, inplace=True)
# test basic math ops between ShardedTensors
sharded_output = binary_op(sharded_lhs, sharded_rhs)
output = (
torch.empty((12, 3), device=current_rank) if current_rank == 0 else None
)
sharded_output.gather(dst=0, out=output)
if current_rank == 0:
global_output = binary_op(global_lhs, global_rhs)
self.assertEqual(output, global_output)
# test basic math ops between ShardedTensor and scalar
scalars = [3, 1.8]
for scalar in scalars:
sharded_output_lhs = binary_op(sharded_lhs, scalar)
output_lhs = (
torch.empty((12, 3), device=current_rank)
if current_rank == 0
else None
)
sharded_output_lhs.gather(dst=0, out=output_lhs)
sharded_output_rhs = binary_op(scalar, sharded_rhs)
output_rhs = (
torch.empty((12, 3), device=current_rank)
if current_rank == 0
else None
)
sharded_output_rhs.gather(dst=0, out=output_rhs)
if current_rank == 0:
global_output_lhs = binary_op(global_lhs, scalar)
global_output_rhs = binary_op(scalar, global_rhs)
self.assertEqual(output_lhs, global_output_lhs)
self.assertEqual(output_rhs, global_output_rhs)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_math_ops_errors(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
sharded_lhs = sharded_tensor.rand(spec, (20, 3))
sharded_rhs = sharded_tensor.rand(spec, (12, 3))
with self.assertRaisesRegex(
RuntimeError, "Implicit broadcasting not supported"
):
torch.add(sharded_lhs, sharded_rhs)
spec = EnumerableShardingSpec(
[
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[0, 5],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:2/cuda:2",
),
ShardMetadata(
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="rank:3/cuda:3",
),
]
)
st = sharded_tensor.rand(spec, 10, 10)
with self.assertRaisesRegex(RuntimeError, "not supported"):
torch.add(st, sharded_rhs)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_bmm(self):
for spec in generate_chunk_sharding_specs_for_test(0):
lhs = torch.rand(15, 4, 5).cuda(self.rank)
rhs = torch.rand(15, 5, 6).cuda(self.rank)
tensor = lhs.bmm(rhs)
st_lhs = _shard_tensor(lhs, spec)
st_rhs = _shard_tensor(rhs, spec)
st_expected = _shard_tensor(tensor, spec)
self.assertTrue(torch.allclose(torch.bmm(st_lhs, st_rhs), st_expected))
self.assertTrue(torch.allclose(st_lhs.bmm(st_rhs), st_expected))
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_bmm_errors(self):
specs = generate_chunk_sharding_specs_for_test(0)
st_lhs = sharded_tensor.rand(specs[0], (15, 5, 6))
st_rhs = sharded_tensor.rand(specs[1], (15, 5, 6))
with self.assertRaisesRegex(
NotImplementedError,
"Both st and st2 need to have same placements for bmm",
):
torch.bmm(st_lhs, st_rhs)
for spec in specs:
st_lhs = sharded_tensor.rand(spec, (20, 3))
st_rhs = sharded_tensor.rand(spec, (20, 3))
with self.assertRaisesRegex(
TypeError,
"both st and st2 need to be a 3D ShardedTensor",
):
torch.bmm(st_lhs, st_rhs)
rhs = torch.rand(15, 5, 6).cuda(self.rank)
with self.assertRaisesRegex(
TypeError,
"st2 needs to be a ShardedTensor for torch.bmm",
):
torch.bmm(st_lhs, rhs)
spec.dim = 1
st_lhs = sharded_tensor.rand(spec, (15, 5, 6))
st_rhs = sharded_tensor.rand(spec, (15, 5, 6))
with self.assertRaisesRegex(
NotImplementedError,
"Only support performing bmm on tensors sharded on dim 0 now",
):
torch.bmm(st_lhs, st_rhs)
|
pytorch-master
|
test/distributed/_shard/sharded_tensor/ops/test_math_ops.py
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
from torch.distributed._shard import sharded_tensor, _shard_tensor
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
generate_chunk_sharding_specs_for_test,
generate_enumerable_sharding_specs_for_test,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestShardedTensorChunkOps(ShardedTensorTestBase):
def _compare_chunk_result(self, chunked_list, chunked_st_list):
self.assertEqual(len(chunked_list), len(chunked_st_list))
for idx, chunked_st in enumerate(chunked_st_list):
tensor = chunked_list[idx]
st = _shard_tensor(tensor.contiguous(), chunked_st.sharding_spec())
            # _shard_tensor generates a sharded tensor whose shard metadata is
            # ordered by rank, so re-sort it by shard offset to match the
            # chunked sharded tensor before comparing.
st._metadata.shards_metadata.sort(
key=lambda x: x.shard_offsets[chunked_st.sharding_spec().dim],
)
self.assertTrue(torch.allclose(chunked_st, st))
def _run_sharded_chunk_test(self, local_tensor_size, shard_spec, chunk_num):
torch.manual_seed(0)
local_tensor = torch.rand(*local_tensor_size).cuda(self.rank)
st_tensor = _shard_tensor(local_tensor.clone().detach(), shard_spec)
local_tensor_chunked = torch.chunk(local_tensor, chunk_num, dim=-1)
chunked_st = torch.chunk(st_tensor, chunk_num, dim=-1)
self._compare_chunk_result(local_tensor_chunked, chunked_st)
chunked_st = st_tensor.chunk(chunk_num, dim=-1)
self._compare_chunk_result(local_tensor_chunked, chunked_st)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_chunk(self):
sharding_dims = [0]
specs = []
for dim in sharding_dims:
specs.extend(generate_chunk_sharding_specs_for_test(dim))
for spec in specs:
self._run_sharded_chunk_test([17, 14], spec, 3)
self._run_sharded_chunk_test([17, 15, 20], spec, 5)
self._run_sharded_chunk_test([17, 16], spec, 2)
# Large matrix case.
self._run_sharded_chunk_test([128, 512], spec, 8)
self._run_sharded_chunk_test([1024, 2048], spec, 4)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_chunk_error(self):
chunk_spec = generate_chunk_sharding_specs_for_test(-1)
with self.assertRaisesRegex(
NotImplementedError, "Chunk by sharding dim is not supported."
):
st = sharded_tensor.rand(chunk_spec[0], [17, 24])
torch.chunk(st, 5, dim=-1)
enumerable_spec = generate_enumerable_sharding_specs_for_test()
with self.assertRaisesRegex(
NotImplementedError, "Only ChunkShardingSpec is supported for chunk."
):
st = sharded_tensor.rand(enumerable_spec[0], [10, 10])
torch.chunk(st, 5, dim=-1)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/_shard/sharded_tensor/ops/test_chunk.py
|
# Owner(s): ["oncall: distributed"]
import sys
import copy
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.distributed._shard.sharded_optim import (
ShardedOptimizer,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.distributed._shard import shard_module
from torch.distributed._shard.sharding_plan import ShardingPlan, ShardingPlanner
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.distributed._shard.sharded_tensor import ShardedTensor
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
generate_chunk_sharding_specs_for_test,
generate_local_weight_sharding_params_for_test,
)
from torch.testing._internal.distributed._shard.test_common import SimpleMegatronLM
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
# Example ShardingPlanner that chunks every parameter in the module
# across all of the devices it is configured with.
class ChunkAllShardingPlanner(ShardingPlanner):
dim = 0
devices = []
def __init__(self, chunk_dim=0, device_count=0):
self.dim = chunk_dim
self.devices = [f"rank:{i}/cuda:{i}" for i in range(device_count)]
def build_plan(self, module: nn.Module) -> ShardingPlan:
named_params = module.named_parameters()
plan = {}
for name, param in named_params:
plan[name] = ChunkShardingSpec(self.dim, placements=self.devices)
return ShardingPlan(plan=plan)
class TestShardingPlan(ShardedTensorTestBase):
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharding_plan_simple_megatron(self):
colwise_sharding_spec = generate_chunk_sharding_specs_for_test(0)
rowwise_sharding_spec = generate_chunk_sharding_specs_for_test(1)
for spec in zip(colwise_sharding_spec, rowwise_sharding_spec):
# test each sharding spec pair and see if we can apply sharding
reshard_spec = copy.deepcopy(spec[1])
reshard_spec.placements.sort(key=lambda placement: placement.rank())
reshard_spec.dim = 0
sharding_plan = ShardingPlan(
plan={
"fc1.weight": spec[0],
"fc2.weight": spec[1]
},
output_plan={
"": reshard_spec
},
return_local_tensor=[""])
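            # output_plan asks shard_module to reshard the output of the named
            # module ("" is the root module) to reshard_spec, and listing it in
            # return_local_tensor makes that output come back as a plain local
            # tensor, so sharded_output below can be compared directly with
            # local_output.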
# Use same seed.
torch.manual_seed(0)
local_megatron_lm = SimpleMegatronLM([[17, 12], [12, 29]]).cuda(self.rank)
megatron_lm = copy.deepcopy(local_megatron_lm)
# shard the module with the provided sharding plan
shard_module(megatron_lm, sharding_plan)
            # check to make sure the module has already been sharded
self.assertTrue(isinstance(megatron_lm.fc1.weight, ShardedTensor))
self.assertTrue(isinstance(megatron_lm.fc2.weight, ShardedTensor))
self.assertEqual(megatron_lm.fc1.weight.sharding_spec(), spec[0])
self.assertEqual(megatron_lm.fc2.weight.sharding_spec(), spec[1])
# make sure we can run sharded computation
input = torch.rand(22, 17).cuda(self.rank)
sharded_output = megatron_lm(input)
local_output = local_megatron_lm(input)
            # verify that the local and sharded outputs match
self.assertEqual(local_output, sharded_output)
# Compute loss and run backward pass.
local_output.sum().backward()
sharded_output.sum().backward()
(
local_weight_grad_fc1,
local_weight_grad_fc2,
) = local_megatron_lm.get_weight_grads()
local_bias_grad_fc1, local_bias_grad_fc2 = local_megatron_lm.get_bias_grads()
            # Verify that the weights and biases of both sharded linear layers have non-None grads.
(
sharded_weight_fc1,
sharded_weight_fc2,
) = megatron_lm.get_weights()
bias_grad_fc1, bias_grad_fc2 = megatron_lm.get_bias_grads()
self.assertNotEqual(sharded_weight_fc1.grad, None)
self.assertNotEqual(sharded_weight_fc2.grad, None)
self.assertNotEqual(bias_grad_fc1, None)
self.assertNotEqual(bias_grad_fc2, None)
# Shard the local linear's weight grad so that we can compare.
dist.all_reduce(local_weight_grad_fc1)
dist.all_reduce(local_weight_grad_fc2)
dist.all_reduce(local_bias_grad_fc1)
dist.all_reduce(local_bias_grad_fc2)
local_weight_fc1, local_weight_fc2 = local_megatron_lm.get_weights()
(
start_pos_fc1,
chunk_size_fc1,
) = generate_local_weight_sharding_params_for_test(
local_weight_fc1, 0, TEST_GPU_NUM, spec[0], self.rank
)
local_grad_narrowed_fc1 = local_weight_grad_fc1.narrow(
0, start_pos_fc1, chunk_size_fc1
)
(
start_pos_fc2,
chunk_size_fc2,
) = generate_local_weight_sharding_params_for_test(
local_weight_fc2, 1, TEST_GPU_NUM, spec[1], self.rank
)
local_grad_narrowed_fc2 = local_weight_grad_fc2.narrow(
1, start_pos_fc2, chunk_size_fc2
)
# Test backward gradient calculation.
self.assertEqual(sharded_weight_fc1.grad, local_grad_narrowed_fc1)
self.assertEqual(sharded_weight_fc2.grad, local_grad_narrowed_fc2)
self.assertEqual(bias_grad_fc1, local_bias_grad_fc1)
self.assertEqual(bias_grad_fc2, local_bias_grad_fc2)
# Test optimizer.
bias_fc1, bias_fc2 = megatron_lm.get_biases()
local_bias_fc1, local_bias_fc2 = local_megatron_lm.get_biases()
self.assertEqual(bias_fc1, local_bias_fc1)
self.assertEqual(bias_fc2, local_bias_fc2)
self.assertEqual(bias_fc1.grad, local_bias_fc1.grad)
self.assertEqual(bias_fc2.grad, local_bias_fc2.grad)
previous_sharded_weight_fc1 = sharded_weight_fc1.clone()
previous_sharded_weight_fc2 = sharded_weight_fc2.clone()
previous_bias_fc1 = bias_fc1.clone()
previous_bias_fc2 = bias_fc2.clone()
optim = torch.optim.SGD(local_megatron_lm.parameters(), lr=0.1)
optim.step()
sharded_optim = ShardedOptimizer(
dict(megatron_lm.named_parameters()),
torch.optim.SGD,
lr=0.1,
)
sharded_optim.step()
local_weight_fc1_narrowed = local_weight_fc1.narrow(
0, start_pos_fc1, chunk_size_fc1
)
local_weight_fc2_narrowed = local_weight_fc2.narrow(
1, start_pos_fc2, chunk_size_fc2
)
# Test weight value after optimizer.
self.assertEqual(sharded_weight_fc1.size(), local_weight_fc1_narrowed.size())
self.assertEqual(sharded_weight_fc2.size(), local_weight_fc2_narrowed.size())
self.assertNotEqual(previous_sharded_weight_fc1, sharded_weight_fc1)
self.assertNotEqual(previous_sharded_weight_fc2, sharded_weight_fc2)
self.assertEqual(sharded_weight_fc1, local_weight_fc1_narrowed)
self.assertEqual(sharded_weight_fc2, local_weight_fc2_narrowed)
# Test bias value after optimizer.
local_bias_fc1, local_bias_fc2 = local_megatron_lm.get_biases()
self.assertNotEqual(previous_bias_fc1, bias_fc1)
self.assertEqual(bias_fc1, local_bias_fc1)
self.assertNotEqual(previous_bias_fc2, bias_fc2)
self.assertEqual(bias_fc2, local_bias_fc2)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_reshard_to_ddp_sharding_plan(self):
colwise_sharding_spec = generate_chunk_sharding_specs_for_test(0)[0]
rowwise_sharding_spec = generate_chunk_sharding_specs_for_test(1)[0]
# test each sharding spec pair and see if we can apply sharding
output_spec = copy.deepcopy(rowwise_sharding_spec)
output_spec.placements.sort(key=lambda placement: placement.rank())
output_spec.dim = 0
# new module with megatron as submodule
class MyModule(nn.Module):
def __init__(self, rank=None):
super().__init__()
self.megatron = SimpleMegatronLM([[17, 12], [12, 29]], rank=rank)
self.relu = nn.ReLU()
def forward(self, input):
return self.relu(self.megatron(input))
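        # plan shards the listed parameters, output_plan reshards the submodule's
        # output according to output_spec, and return_local_tensor converts that
        # output back into a regular local tensor.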
sharding_plan = ShardingPlan(
plan={
"megatron.fc1.weight": colwise_sharding_spec,
"megatron.fc2.weight": rowwise_sharding_spec,
},
output_plan={
"megatron": output_spec
},
return_local_tensor=[
"megatron"
]
)
# Use same seed.
torch.manual_seed(0)
local_module = MyModule().cuda(self.rank)
sharded_module = copy.deepcopy(local_module)
# shard the module with the provided sharding plan
shard_module(sharded_module, sharding_plan)
        # check to make sure the module has already been sharded
self.assertTrue(isinstance(sharded_module.megatron.fc1.weight, ShardedTensor))
self.assertTrue(isinstance(sharded_module.megatron.fc2.weight, ShardedTensor))
self.assertEqual(sharded_module.megatron.fc1.weight.sharding_spec(), colwise_sharding_spec)
self.assertEqual(sharded_module.megatron.fc2.weight.sharding_spec(), rowwise_sharding_spec)
# make sure we can run sharded computation
input = torch.rand(22, 17).cuda(self.rank)
sharded_output = sharded_module(input)
local_output = local_module(input)
        # verify that the local and sharded outputs match
self.assertEqual(local_output, sharded_output)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharding_plan_errors(self):
rowwise_sharding_spec = generate_chunk_sharding_specs_for_test(1)[0]
sharding_plan_wrong_plan = ShardingPlan(
plan={
"fc1.weight": torch.randn(3, 4),
},
output_plan={
"": rowwise_sharding_spec
},
)
megatron_lm = SimpleMegatronLM([[17, 12], [12, 29]]).cuda(self.rank)
with self.assertRaisesRegex(
TypeError, "Only `ShardingSpec` and `Sharder` are supported to shard"
):
# shard the module with the provided sharding plan
shard_module(megatron_lm, sharding_plan_wrong_plan)
sharding_plan_wrong_output_plan = ShardingPlan(
plan={
"fc1.weight": rowwise_sharding_spec,
},
output_plan={
"": torch.randn(3, 4)
},
)
with self.assertRaisesRegex(
TypeError, "Only `ShardingSpec` is supported as output_plan"
):
# shard the module with the provided sharding plan
shard_module(megatron_lm, sharding_plan_wrong_output_plan)
sharding_plan_wrong_module_path = ShardingPlan(
plan={
"fc3.weight": rowwise_sharding_spec,
},
)
with self.assertRaisesRegex(
AttributeError, "has no attribute"
):
# shard the module with the provided sharding plan
shard_module(megatron_lm, sharding_plan_wrong_module_path)
sharding_plan_wrong_param_path = ShardingPlan(
plan={
"fc1.biass": rowwise_sharding_spec,
},
)
with self.assertRaisesRegex(
AttributeError, "has no attribute"
):
# shard the module with the provided sharding plan
shard_module(megatron_lm, sharding_plan_wrong_param_path)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_custom_sharding_planner(self):
megatron_lm = SimpleMegatronLM([[17, 12], [12, 29]], rank=self.rank).cuda(
self.rank
)
planner = ChunkAllShardingPlanner(device_count=TEST_GPU_NUM)
sharding_plan = planner.build_plan(megatron_lm)
shard_module(megatron_lm, sharding_plan)
        # check to make sure the module has already been sharded
self.assertTrue(isinstance(megatron_lm.fc1.weight, ShardedTensor))
self.assertTrue(isinstance(megatron_lm.fc2.weight, ShardedTensor))
self.assertTrue(isinstance(megatron_lm.fc1.bias, ShardedTensor))
self.assertTrue(isinstance(megatron_lm.fc2.bias, ShardedTensor))
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_shard_module_sub_process_group(self):
megatron_lm = SimpleMegatronLM([[17, 12], [12, 29]], rank=self.rank)
colwise_sharding_spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:2",
"rank:1/cuda:3",
],
)
rowwise_sharding_spec = ChunkShardingSpec(
dim=1,
placements=[
"rank:0/cuda:2",
"rank:1/cuda:3",
],
)
sharding_plan = ShardingPlan(
plan={
"fc1.weight": colwise_sharding_spec,
"fc2.weight": rowwise_sharding_spec
}
)
pg = dist.new_group([2, 3])
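        # Only ranks 2 and 3 (the members of the subgroup) shard the module;
        # the placements above target cuda:2 and cuda:3.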
if self.rank >= 2:
shard_module(megatron_lm, sharding_plan, process_group=pg)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/_shard/sharding_plan/test_sharding_plan.py
|
# Owner(s): ["oncall: distributed"]
from typing import List, Union
from dataclasses import dataclass
import copy
import torch
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.distributed._shard import sharded_tensor, _shard_tensor
from torch.distributed._shard.sharding_spec import (
ShardingSpec,
ChunkShardingSpec,
DevicePlacementSpec,
EnumerableShardingSpec,
ShardMetadata,
_infer_sharding_spec_from_shards_metadata,
)
from torch.distributed._shard.sharded_tensor import (
TensorProperties,
ShardedTensor,
ShardedTensorMetadata,
)
from torch.distributed._shard.sharding_spec._internals import (
check_tensor,
get_split_size,
get_chunked_dim_size,
get_chunk_sharding_params,
)
from torch.testing._internal.common_utils import (
run_tests,
sandcastle_skip_if,
)
from torch.testing._internal.distributed._shard.sharded_tensor._test_st_common import (
_chunk_sharding_specs_list_for_test,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
class TestShardingSpec(TestCase):
@sandcastle_skip_if(torch.cuda.device_count() < 2, '2 CUDA GPUs are needed')
def test_device_placement(self):
# valid devices
DevicePlacementSpec("cuda:0")
DevicePlacementSpec(torch.device(0))
DevicePlacementSpec(torch.device("cuda:0"))
DevicePlacementSpec("rank:0/cuda:0")
DevicePlacementSpec("rank:0/cpu")
DevicePlacementSpec("rank:0")
# invalid devices
with self.assertRaisesRegex(ValueError, "Could not parse remote_device"):
DevicePlacementSpec("cuda:foo")
with self.assertRaisesRegex(ValueError, "Could not parse remote_device"):
DevicePlacementSpec("foo:0")
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
DevicePlacementSpec("rank:0/cuda:foo")
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
DevicePlacementSpec("rank:0/cpu2")
@sandcastle_skip_if(torch.cuda.device_count() < 2, '2 CUDA GPUs are needed')
def test_chunked_sharding_spec(self):
# Test valid specs.
ChunkShardingSpec(0, [torch.device(0), torch.device(1)])
ChunkShardingSpec(0, [torch.device("cuda:0"), torch.device("cuda:1")])
ChunkShardingSpec(-1, ["cuda:0", "cuda:1"])
ChunkShardingSpec(0, ["rank:0/cuda:0", "rank:0/cuda:1"])
ChunkShardingSpec(0, ["rank:0", "rank:1"])
ChunkShardingSpec(0, ["rank:0/cpu", "rank:1/cpu"])
# Test unimplemented error
with self.assertRaisesRegex(NotImplementedError, "not support named dimension"):
# Named dimension.
ChunkShardingSpec("N", ["cuda:0", "cuda:1"])
# Test invalid specs
with self.assertRaisesRegex(ValueError, "needs to be an integer"):
ChunkShardingSpec(None, ["cuda:0", "cuda:1"])
with self.assertRaisesRegex(ValueError, "needs to be an integer"):
ChunkShardingSpec({}, ["cuda:0", "cuda:1"])
with self.assertRaisesRegex(ValueError, "Could not parse remote_device"):
ChunkShardingSpec(0, ["random:0", "cuda:1"])
with self.assertRaisesRegex(ValueError, "Could not parse remote_device"):
ChunkShardingSpec(0, ["cuda:foo", "cuda:1"])
with self.assertRaisesRegex(ValueError, "Could not parse remote_device"):
ChunkShardingSpec(0, ["rank:foo", "cuda:1"])
with self.assertRaisesRegex(RuntimeError, "Expected one of"):
ChunkShardingSpec(0, ["rank:0/foo", "cuda:1"])
with self.assertRaisesRegex(RuntimeError, "Expected one of"):
ChunkShardingSpec(0, ["rank:0/random:0", "cuda:1"])
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
ChunkShardingSpec(0, ["rank:0/cuda:foo", "cuda:1"])
@sandcastle_skip_if(torch.cuda.device_count() < 2, '2 CUDA GPUs are needed')
def test_enumerable_sharding_spec(self):
# test valid specs
# test row-wise sharding
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="cuda:0",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="cuda:1",
)
])
check_tensor(spec.shards, torch.rand(10, 5).size())
# test row and column sharding
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[3, 3],
placement="cuda:0",
),
ShardMetadata(
shard_offsets=[0, 3],
shard_sizes=[3, 3],
placement="cuda:1",
),
ShardMetadata(
shard_offsets=[3, 0],
shard_sizes=[3, 3],
placement="cuda:2",
),
ShardMetadata(
shard_offsets=[3, 3],
shard_sizes=[3, 3],
placement="cuda:3",
),
])
check_tensor(spec.shards, torch.rand(6, 6).size())
# test uneven shard sizes.
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[2, 4],
placement="cuda:0",
),
ShardMetadata(
shard_offsets=[0, 4],
shard_sizes=[4, 2],
placement="cuda:1",
),
ShardMetadata(
shard_offsets=[2, 0],
shard_sizes=[4, 4],
placement="cuda:2",
),
ShardMetadata(
shard_offsets=[4, 4],
shard_sizes=[2, 2],
placement="cuda:3",
),
])
check_tensor(spec.shards, torch.rand(6, 6).size())
# test invalid sharding
with self.assertRaisesRegex(ValueError, 'Could not parse remote_device'):
ShardMetadata(shard_offsets=[0], shard_sizes=[1], placement="cuda:foo")
with self.assertRaisesRegex(ValueError, 'same number of elements'):
ShardMetadata(shard_offsets=[0, 0], shard_sizes=[1], placement="cuda:0")
with self.assertRaisesRegex(ValueError, 'shard_offsets should be >=0'):
ShardMetadata(shard_offsets=[-1, 0], shard_sizes=[1, 1], placement="cuda:0")
with self.assertRaisesRegex(ValueError, 'shard_sizes should be >= 0'):
ShardMetadata(shard_offsets=[0, 0], shard_sizes=[-1, 1], placement="cuda:0")
with self.assertRaisesRegex(ValueError, 'Empty shard list provided'):
EnumerableShardingSpec([])
with self.assertRaisesRegex(ValueError, 'Found inconsistent ranks for shards'):
EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[1, 1],
placement="cpu"
),
ShardMetadata(
shard_offsets=[0, 0, 0],
shard_sizes=[1, 1, 1],
placement="cpu"
),
])
with self.assertRaisesRegex(ValueError, 'Shards.*overlap'):
EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[3, 3],
placement="cpu"
),
ShardMetadata(
shard_offsets=[2, 0],
shard_sizes=[3, 3],
placement="cpu"
),
])
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="cuda:0",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="cuda:1",
)
])
with self.assertRaisesRegex(ValueError, 'Rank of tensor is.*but shards rank'):
check_tensor(spec.shards, torch.rand(10, 10, 10).size())
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="cuda:0",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="cuda:1",
)
])
with self.assertRaisesRegex(ValueError, 'exceeds tensor dim'):
check_tensor(spec.shards, torch.rand(10, 3).size())
spec = EnumerableShardingSpec([
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="cuda:0",
),
ShardMetadata(
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="cuda:1",
)
])
with self.assertRaisesRegex(ValueError, 'does not match tensor volume'):
check_tensor(spec.shards, torch.rand(10, 10).size())
def test_get_split_size(self):
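        # get_split_size(dim_size, chunks) returns the per-chunk split size,
        # i.e. ceil(dim_size / chunks).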
self.assertEqual(3, get_split_size(11, 4))
self.assertEqual(3, get_split_size(12, 4))
self.assertEqual(4, get_split_size(13, 4))
self.assertEqual(2, get_split_size(5, 4))
self.assertEqual(11, get_split_size(11, 1))
self.assertEqual(1, get_split_size(11, 11))
def test_get_chunked_dim_size(self):
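        # get_chunked_dim_size(dim_size, split_size, chunk_idx) returns the actual
        # size of the chunk at chunk_idx, which may be smaller than split_size
        # (or even zero) for the trailing chunks.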
self.assertEqual(3, get_chunked_dim_size(11, 3, 0))
self.assertEqual(2, get_chunked_dim_size(11, 3, 3))
self.assertEqual(4, get_chunked_dim_size(13, 4, 0))
self.assertEqual(1, get_chunked_dim_size(13, 4, 3))
self.assertEqual(0, get_chunked_dim_size(5, 2, 3))
def test_get_chunk_sharding_params(self):
ranks = [
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
]
spec = ChunkShardingSpec(
dim=0,
placements=ranks,
)
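        # With 21 elements chunked over 4 placements the split size is
        # ceil(21 / 4) = 6, so the shards are [6, 6, 6, 3]; the returned pair is
        # (offset, length) for the queried rank.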
result = get_chunk_sharding_params(21, 4, spec, 1)
self.assertEqual(6, result[0])
self.assertEqual(6, result[1])
result = get_chunk_sharding_params(21, 4, spec, 3)
self.assertEqual(18, result[0])
self.assertEqual(3, result[1])
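        # Reorder the placements so each rank now owns a different chunk of the
        # same 21 elements.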
ranks[1], ranks[2] = ranks[2], ranks[1]
ranks[0], ranks[3] = ranks[3], ranks[0]
spec.placements = ranks
result = get_chunk_sharding_params(21, 4, spec, 1)
self.assertEqual(12, result[0])
self.assertEqual(6, result[1])
result = get_chunk_sharding_params(21, 4, spec, 3)
self.assertEqual(0, result[0])
self.assertEqual(6, result[1])
def _infer_enum_sharding_spec_case(self):
shards_metadata = [
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="cuda:0",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[10, 5],
placement="cuda:1",
)
]
spec = _infer_sharding_spec_from_shards_metadata(shards_metadata)
self.assertTrue(isinstance(spec, EnumerableShardingSpec))
self.assertEqual(spec.shards, shards_metadata)
shards_metadata = [
ShardMetadata(
shard_offsets=[0],
shard_sizes=[16],
placement="cuda:0",
),
ShardMetadata(
shard_offsets=[16],
shard_sizes=[9],
placement="cuda:1",
)
]
spec = _infer_sharding_spec_from_shards_metadata(shards_metadata)
self.assertTrue(isinstance(spec, EnumerableShardingSpec))
self.assertEqual(spec.shards, shards_metadata)
shards_metadata = [
ShardMetadata(
shard_offsets=[0, 0],
shard_sizes=[5, 5],
placement="rank:0/cuda:0",
),
ShardMetadata(
shard_offsets=[5, 0],
shard_sizes=[5, 5],
placement="rank:1/cuda:1",
),
ShardMetadata(
shard_offsets=[0, 5],
shard_sizes=[5, 5],
placement="rank:2/cuda:2",
),
ShardMetadata(
shard_offsets=[5, 5],
shard_sizes=[5, 5],
placement="rank:3/cuda:3",
),
]
spec = _infer_sharding_spec_from_shards_metadata(shards_metadata)
self.assertTrue(isinstance(spec, EnumerableShardingSpec))
self.assertEqual(spec.shards, shards_metadata)
def _infer_chunk_sharding_spec_case(self, placements, sharding_dim, st_size):
world_size = len(placements)
split_size = get_split_size(st_size[sharding_dim], world_size)
shards_metadata = [None] * world_size
for idx, placement in enumerate(placements):
shard_size = copy.deepcopy(st_size)
offsets = [0] * len(st_size)
offsets[sharding_dim] = split_size * idx
shard_size[sharding_dim] = get_chunked_dim_size(st_size[sharding_dim], split_size, idx)
shards_metadata[placement.rank()] = ShardMetadata(
shard_offsets=offsets,
shard_sizes=shard_size,
placement=placement,
)
spec = _infer_sharding_spec_from_shards_metadata(shards_metadata)
self.assertTrue(isinstance(spec, ChunkShardingSpec))
self.assertEqual(spec.dim, sharding_dim)
self.assertEqual(spec.placements, placements)
def test_infer_sharding_spec_from_shards_metadata(self):
self._infer_enum_sharding_spec_case()
chunk_specs = _chunk_sharding_specs_list_for_test([0, 0, 1, 1], seed=31)
for spec in chunk_specs:
self._infer_chunk_sharding_spec_case(spec.placements, 0, [4, 16])
self._infer_chunk_sharding_spec_case(spec.placements, 0, [5, 15, 16])
self._infer_chunk_sharding_spec_case(spec.placements, 1, [12, 16])
self._infer_chunk_sharding_spec_case(spec.placements, 2, [4, 18, 15])
self._infer_chunk_sharding_spec_case(spec.placements, 3, [7, 12, 16, 37])
self._infer_chunk_sharding_spec_case(spec.placements, 4, [50, 4, 18, 15, 77])
# Custom ShardingSpec: a simple example of grid sharding
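# Each shard is a grid_size x grid_size tile, and the placements are assigned to
# the tiles in row-major order.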
@dataclass
class GridShardingSpec(ShardingSpec):
grid_size: int
placements: List[Union[torch.distributed._remote_device, str]]
def __post_init__(self):
for i, remote_device in enumerate(self.placements):
if not isinstance(remote_device, torch.distributed._remote_device):
self.placements[i] = torch.distributed._remote_device(remote_device)
def build_metadata(self,
tensor_sizes: torch.Size,
tensor_properties: TensorProperties,
) -> ShardedTensorMetadata:
tensor_num_dim = len(tensor_sizes)
assert tensor_num_dim == 2, "only support 2-dim tensor for grid sharding"
shards_metadata = []
def chunk_num(dim_size, grid_size):
assert dim_size % grid_size == 0, "only support dim_size mod grid_size == 0"
return dim_size // grid_size
row_chunks = chunk_num(tensor_sizes[0], self.grid_size)
col_chunks = chunk_num(tensor_sizes[1], self.grid_size)
assert row_chunks * col_chunks == len(self.placements)
for row_idx in range(row_chunks):
for col_idx in range(col_chunks):
shards_metadata.append(
ShardMetadata(
shard_offsets=[row_idx * self.grid_size, col_idx * self.grid_size],
shard_sizes=[self.grid_size, self.grid_size],
                        placement=self.placements[row_idx * col_chunks + col_idx]  # row-major tile index
)
)
return ShardedTensorMetadata(
shards_metadata=shards_metadata,
size=tensor_sizes,
tensor_properties=tensor_properties
)
def shard(self,
tensor: torch.Tensor,
src_rank: int = 0,
process_group=None) -> ShardedTensor:
raise NotImplementedError("GridShardingSpec.shard not implemented yet!")
class TestCustomShardingSpec(ShardedTensorTestBase):
def test_custom_sharding_spec(self):
ranks = [
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
]
grid_spec = GridShardingSpec(
grid_size=4,
placements=ranks
)
tensor_properties = TensorProperties(
dtype=torch.get_default_dtype(),
layout=torch.strided,
requires_grad=False,
memory_format=torch.contiguous_format,
pin_memory=False,
)
meta = grid_spec.build_metadata(torch.Size((8, 8)), tensor_properties)
check_tensor(meta.shards_metadata, torch.Size((8, 8)))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_custom_sharding_spec_tensor_ctor(self):
""" Test sharded_tensor.ones(...) with the custom
grid sharding spec.
"""
ranks = [
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
]
grid_spec = GridShardingSpec(
grid_size=2,
placements=ranks
)
st = sharded_tensor.ones(grid_spec, 4, 4)
# Validate local shard is initialized with torch.ones
local_shards = st.local_shards()
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
self.assertEqual((2, 2), local_shard.size())
self.assertEqual(local_shard, torch.ones(2, 2))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_custom_sharding_spec_shard_tensor(self):
""" Test custom spec can be invoked from the
_shard_tensor callsite.
"""
ranks = [
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
]
grid_spec = GridShardingSpec(
grid_size=2,
placements=ranks
)
with self.assertRaisesRegex(NotImplementedError, 'not implemented'):
_shard_tensor(torch.randn(8, 8), grid_spec)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/distributed/_shard/sharding_spec/test_sharding_spec.py
|
# Owner(s): ["oncall: distributed"]
import torch
import torch.optim as optim
from torch.distributed._shard import (
sharded_tensor,
shard_parameter
)
from copy import deepcopy
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
)
from torch.distributed._shard.sharded_optim import (
ShardedOptimizer,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
run_tests,
)
from torch.testing._internal.distributed._shard.sharded_tensor import (
ShardedTensorTestBase,
with_comms,
)
class MyShardedModel(torch.nn.Module):
def __init__(self, spec=None, group=None):
super(MyShardedModel, self).__init__()
# Use same seed.
torch.manual_seed(0)
self.param = torch.nn.Parameter(torch.rand(5, 10))
if spec is not None:
self.sharded_param = torch.nn.Parameter(sharded_tensor.rand(spec, 20, 10, requires_grad=True, process_group=group))
else:
self.sharded_param = torch.nn.Parameter(torch.rand(5, 10))
def forward(self, input):
if isinstance(self.sharded_param, sharded_tensor.ShardedTensor):
return self.param + self.sharded_param.local_shards()[0].tensor + input
else:
return self.sharded_param + self.param + input
class MyShardedLinear(torch.nn.Module):
def __init__(self, rank=None):
super(MyShardedLinear, self).__init__()
# Use same seed.
torch.manual_seed(0)
self.linear1 = torch.nn.Linear(17, 12)
self.linear2 = torch.nn.Linear(12, 29)
self.gelu = torch.nn.GELU()
if rank:
self.linear1.cuda(rank)
self.linear2.cuda(rank)
def shard_parameter(self):
rowwise_sharding_spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
colwise_sharding_spec = ChunkShardingSpec(
dim=1,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
shard_parameter(self.linear1, "weight", rowwise_sharding_spec)
shard_parameter(self.linear2, "weight", colwise_sharding_spec)
def forward(self, inp):
return self.linear2(self.gelu(self.linear1(inp)))
class TestShardedOptimizer(ShardedTensorTestBase):
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharded_optim(self):
rowwise_spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
local_model = MyShardedModel().cuda()
sharded_model = MyShardedModel(spec=rowwise_spec).cuda()
        # copy the parameters from the local model
sharded_model.sharded_param.local_shards()[0].tensor = \
local_model.sharded_param.detach().clone().requires_grad_()
local_optim = optim.SGD(local_model.parameters(), lr=0.1)
sharded_model_params = dict(sharded_model.named_parameters())
sharded_optim = ShardedOptimizer(sharded_model_params, optim.SGD, lr=0.1)
local_optim.zero_grad()
sharded_optim.zero_grad()
before_update = deepcopy(sharded_optim.named_params)
inp = torch.rand([5, 10]).cuda(self.rank).requires_grad_()
# run forward
local_output = local_model(inp)
sharded_output = sharded_model(inp)
# backward
local_output.sum().backward()
sharded_output.sum().backward()
# optimizer update
local_optim.step()
sharded_optim.step()
# make sure the parameters (including sharded param)
# get updated by the optimizer, and the updated
# local params are the same as the sharded params
for key, val in before_update.items():
new_val = sharded_optim.named_params[key]
if isinstance(val, sharded_tensor.ShardedTensor):
self.assertNotEqual(
val.local_shards()[0].tensor,
new_val.local_shards()[0].tensor
)
self.assertEqual(
new_val.local_shards()[0].tensor,
local_model.sharded_param
)
else:
self.assertNotEqual(val, new_val)
self.assertEqual(new_val, local_model.param)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_named_params_with_sharded_tensor(self):
rowwise_spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
sharded_model = MyShardedModel(spec=rowwise_spec).cuda()
sharded_model_params = dict(sharded_model.named_parameters())
param_keys = list(sharded_model_params.keys())
self.assertEqual(len(param_keys), 2)
self.assertTrue("param" in param_keys)
self.assertTrue("sharded_param" in param_keys)
sharded_linear = MyShardedLinear(rank=self.rank).cuda()
sharded_linear.shard_parameter()
sharded_linear_params = dict(sharded_linear.named_parameters())
param_keys = list(sharded_linear_params.keys())
self.assertEqual(len(param_keys), 4)
self.assertTrue("linear1.bias" in param_keys)
self.assertTrue("linear2.bias" in param_keys)
self.assertTrue("linear1.weight" in param_keys)
self.assertTrue("linear2.weight" in param_keys)
self.assertFalse("bias" in param_keys)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/distributed/_shard/sharded_optim/test_sharded_optim.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_utils import IS_CI, run_tests
from torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture import (
TensorPipeRpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc_utils import (
GENERIC_TESTS,
TENSORPIPE_TESTS,
generate_tests,
)
# On CircleCI these tests are already run on CPU jobs, thus to save resources do
# not run them on GPU jobs, since they wouldn't provide additional test signal.
if not (IS_CI and torch.cuda.is_available()):
globals().update(
generate_tests(
"TensorPipe",
TensorPipeRpcAgentTestFixture,
GENERIC_TESTS + TENSORPIPE_TESTS,
__name__,
)
)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/rpc/test_tensorpipe_agent.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
import copyreg
import os
import contextlib
from torch import multiprocessing
import torch.multiprocessing.reductions as TorchMpReductions
import torch.distributed.rpc as rpc
from torch.distributed.rpc.internal import _InternalRPCPickler
from torch.distributed.rpc.api import _use_rpc_pickler
from torch.testing._internal.common_utils import TestCase, run_tests
@contextlib.contextmanager
def fs_sharing():
prev_strategy = multiprocessing.get_sharing_strategy()
multiprocessing.set_sharing_strategy('file_system')
try:
yield
finally:
multiprocessing.set_sharing_strategy(prev_strategy)
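# An RPC pickler that registers the multiprocessing reducers for tensors and
# storages, so pickled tensors are shared via shared memory instead of being
# copied by value.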
class ShareMemoryRPCPickler(_InternalRPCPickler):
def __init__(self) -> None:
super().__init__()
# pyre-fixme[4]: Attribute must be annotated.
self._dispatch_table = copyreg.dispatch_table.copy()
for t in torch._storage_classes:
self._dispatch_table[t] = TorchMpReductions.reduce_storage
for t in torch._tensor_classes:
self._dispatch_table[t] = TorchMpReductions.reduce_tensor
self._dispatch_table[torch.Tensor] = TorchMpReductions.reduce_tensor
self._dispatch_table[
torch.nn.parameter.Parameter
] = TorchMpReductions.reduce_tensor
def worker_loop(a):
rpc.init_rpc('worker1', rank=1, world_size=2)
rpc.shutdown()
def worker_fn(m):
pass
class TestRPCPickler(TestCase):
def setUp(self):
super().setUp()
def test_case(self):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '29500'
with fs_sharing():
r = multiprocessing.spawn(worker_loop, join=False)
try:
with _use_rpc_pickler(ShareMemoryRPCPickler()):
rpc.init_rpc(
'worker0',
rank=0,
world_size=2)
m = torch.nn.Linear(1, 2)
m.share_memory()
rref = rpc.remote(
'worker1',
worker_fn,
args=(m,))
rref.to_here()
finally:
rpc.shutdown()
r.join()
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/distributed/rpc/test_share_memory.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_utils import IS_CI, run_tests
from torch.testing._internal.distributed.rpc.faulty_rpc_agent_test_fixture import (
FaultyRpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc_utils import (
FAULTY_AGENT_TESTS,
generate_tests,
)
# On CircleCI these tests are already run on CPU jobs, thus to save resources do
# not run them on GPU jobs, since they wouldn't provide additional test signal.
if not (IS_CI and torch.cuda.is_available()):
globals().update(
generate_tests(
"Faulty",
FaultyRpcAgentTestFixture,
FAULTY_AGENT_TESTS,
__name__,
)
)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/rpc/test_faulty_agent.py
|
#!/usr/bin/env python3
# Owner(s): ["oncall: distributed"]
import sys
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture import (
TensorPipeRpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc_utils import (
GENERIC_CUDA_TESTS,
TENSORPIPE_CUDA_TESTS,
generate_tests,
)
globals().update(
generate_tests(
"TensorPipe",
TensorPipeRpcAgentTestFixture,
GENERIC_CUDA_TESTS + TENSORPIPE_CUDA_TESTS,
__name__,
)
)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/distributed/rpc/cuda/test_tensorpipe_agent.py
|
# Owner(s): ["module: autograd"]
import types
import unittest
import warnings
import torch
import torch.autograd.functional as autogradF
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
TestCase, run_tests, subtest, gradcheck, gradgradcheck, parametrize, instantiate_parametrized_tests)
from torch.testing._internal.logging_tensor import LoggingTensor
# Utilities for parametrizing the tensor constructors used in autograd tests
#
# TODO: maybe move somewhere so other tests can also use
#
# NB: Not all factory functions included. A complete(?) list can be found here:
# https://pytorch.org/cppdocs/notes/tensor_creation.html
base_ctors_dict = {
"ones": torch.ones,
"zeros": torch.zeros,
"randn": torch.randn,
"rand": torch.rand,
"tensor": torch.tensor,
}
base_ctors = types.SimpleNamespace(**base_ctors_dict)
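# Wrap a factory function so it returns LoggingTensors; requires_grad is popped
# from the kwargs and applied through the LoggingTensor wrapper rather than the
# inner constructor.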
def wrap_with_logging_tensor(ctor):
def wrapper(*args, **kwargs):
requires_grad = kwargs.pop("requires_grad", False)
return LoggingTensor(ctor(*args, **kwargs), requires_grad=requires_grad)
return wrapper
logging_tensor_ctors_dict = {k: wrap_with_logging_tensor(ctor) for (k, ctor) in base_ctors_dict.items()}
logging_tensor_ctors = types.SimpleNamespace(**logging_tensor_ctors_dict)
base_and_logging_tensor = parametrize("ctors", [subtest(base_ctors, name="base_tensor"),
subtest(logging_tensor_ctors, name="logging_tensor")])
FIXME_base_and_xfail_logging_tensor = parametrize("ctors", [subtest(base_ctors, name="base_tensor"),
subtest(logging_tensor_ctors, name="logging_tensor",
decorators=[unittest.expectedFailure])])
# NB: This is equivalent to having both @parametrize("vectorize", [True, False]) and
# FIXME_base_and_xfail_logging_tensor, except the non-vectorized logging_tensor case is
# actually expected to succeed
FIXME_xfail_vectorized_logging_tensor = (
parametrize("vectorize,ctors", [subtest((True, base_ctors), name="vectorized_base_tensor"),
subtest((False, base_ctors), name="base_tensor"),
subtest((True, logging_tensor_ctors), name="vectorized_logging_tensor",
decorators=[unittest.expectedFailure]),
subtest((False, logging_tensor_ctors), name="logging_tensor")]))
vectorized_logging_tensor = (
parametrize("vectorize,ctors", [subtest((True, base_ctors), name="vectorized_base_tensor"),
subtest((False, base_ctors), name="base_tensor"),
subtest((True, logging_tensor_ctors), name="vectorized_logging_tensor"),
subtest((False, logging_tensor_ctors), name="logging_tensor")]))
class TestAutogradFunctional(TestCase):
def _assert_same_struct(self, res, base):
# base and res should be Tensors or tuple of Tensors with the same size
if isinstance(base, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(base.size(), res.size())
elif isinstance(base, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(base), len(res))
for el_base, el_res in zip(base, res):
self.assertTrue(isinstance(el_base, torch.Tensor))
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertEqual(el_base.size(), el_res.size())
else:
# Wrong base
raise RuntimeError("The base given to `_assert_same_struct` doesn't have"
" the right structure.")
def _assert_interleaved_struct(self, res, base1, base2):
# base1 and base2 can be Tensors or tuples of Tensors.
# If they are tuples, res should be a tuple as well.
# The indexing works as follows for base1, base2 being
# - tuple, tuple: res[i][j][k][l] = (base1[i][k], base2[j][l])
# - tuple, Tensor: res[i][k][l] = (base1[i][k], base2[l])
# - Tensor, tuple: res[i][j][l] = (base1[i], base2[j][l])
# - Tensor, Tensor: res[k][l] = (base1[k], base2[l])
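        # e.g. if base1 has shape (2, 3) and base2 has shape (5,), res is a
        # Tensor of shape (2, 3, 5).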
if isinstance(base1, torch.Tensor) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(res.size(), base1.size() + base2.size())
elif isinstance(base1, tuple) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base1, torch.Tensor))
self.assertEqual(el_res.size(), el_base1.size() + base2.size())
elif isinstance(base1, torch.Tensor) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base2))
for el_res, el_base2 in zip(res, base2):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_res.size(), base1.size() + el_base2.size())
elif isinstance(base1, tuple) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, tuple))
self.assertEqual(len(res), len(base2))
for el_el_res, el_base2 in zip(el_res, base2):
self.assertTrue(isinstance(el_el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_el_res.size(), el_base1.size() + el_base2.size())
else:
# Wrong bases
raise RuntimeError("The bases given to `_assert_interleaved_struct` don't have"
" the right structure.")
@base_and_logging_tensor
def test_vjp_err_check(self, ctors):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = ctors.rand(4)
v = ctors.ones(3)
with self.assertRaisesRegex(TypeError, "The inputs given to vjp must be either a Tensor"):
res = autogradF.vjp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vjp must"):
res = autogradF.vjp(bar, inp, v)
with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the user-provided function returns"):
res = autogradF.vjp(foo, inp)
with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
res = autogradF.vjp(foo, inp, (torch.ones_like(inp), torch.ones_like(inp)))
with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
res = autogradF.vjp(foo, inp, v[:2])
res = autogradF.vjp(foo, inp, v)[1]
self._assert_same_struct(res, inp)
@base_and_logging_tensor
def test_vjp_err_check_strict(self, ctors):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = ctors.rand(4)
v = ctors.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.vjp(foo, inp, v, strict=True)
res = autogradF.vjp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.vjp(bar, inp, v, strict=True)
res = autogradF.vjp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.vjp(foo, inp, v, create_graph=True, strict=True)
res = autogradF.vjp(foo, inp, v, create_graph=True, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1], v)
@base_and_logging_tensor
def test_vjp_no_grad(self, ctors):
def reducer(x):
return x.sum(dim=1)
inputs = ctors.rand(4, 4)
v = ctors.ones(4)
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], ctors.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], ctors.zeros(4, 4))
@base_and_logging_tensor
def test_vjp_output(self, ctors):
def reducer(x):
return x.sum(dim=1)
inputs = ctors.rand(4, 4)
v = ctors.ones(4)
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (ctors.rand(2), ctors.rand(2))
v = ctors.ones(2)
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (ctors.rand(2), ctors.rand(2))
v = (ctors.tensor([1., 0.]), ctors.tensor([1., 0.]))
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
@base_and_logging_tensor
def test_vjp_scalar(self, ctors):
def reducer(x):
return x.sum()
inputs = ctors.rand(4, 4)
v = ctors.ones([])
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vjp(reducer, inputs)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = ctors.rand([])
v = ctors.ones(4)
res = autogradF.vjp(expander, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
@base_and_logging_tensor
def test_vjp_create_graph(self, ctors):
def reducer(x):
return x.sum(dim=1)
inputs = ctors.rand(2, 2, dtype=torch.double)
v = ctors.ones(2, dtype=torch.double)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (ctors.rand(2, dtype=torch.double, requires_grad=True),
ctors.rand(2, dtype=torch.double, requires_grad=True))
v = (ctors.tensor([1., 0.], dtype=torch.double, requires_grad=True),
ctors.tensor([1., 0.], dtype=torch.double, requires_grad=True))
gradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vjp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
@base_and_logging_tensor
def test_jvp_err_check(self, ctors):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = ctors.rand(4)
v = ctors.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to jvp must be either a Tensor"):
res = autogradF.jvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jvp must"):
res = autogradF.jvp(bar, inp, v)
with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the input to the user-provided function"):
res = autogradF.jvp(foo, inp)
with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
res = autogradF.jvp(foo, inp, (v, v))
with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
res = autogradF.jvp(foo, inp, v[:2])
res = autogradF.jvp(foo, inp, v)[1]
self._assert_same_struct(res, foo(inp))
@base_and_logging_tensor
def test_jvp_err_check_strict(self, ctors):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = ctors.rand(4)
v = ctors.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.jvp(foo, inp, v, strict=True)
res = autogradF.jvp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], res[0])
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.jvp(bar, inp, v, strict=True)
res = autogradF.jvp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], res[0])
self.assertEqual(res[1].abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.jvp(foo, inp, v, create_graph=True, strict=True)
res = autogradF.jvp(foo, inp, v, create_graph=True, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1], v)
@base_and_logging_tensor
def test_jvp_no_grad(self, ctors):
def reducer(x):
return x.sum(dim=1)
inputs = ctors.rand(4, 4)
v = ctors.ones(4, 4)
with torch.no_grad():
res = autogradF.jvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], ctors.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.jvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], ctors.zeros(4, 4))
@base_and_logging_tensor
def test_jvp_output(self, ctors):
def reducer(x):
return x.sum(dim=1)
inputs = ctors.rand(4, 4)
v = ctors.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[1], res[0])
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (ctors.rand(2), ctors.rand(2))
v = (ctors.ones(2), ctors.ones(2))
out, jvp_val = autogradF.jvp(adder, inputs, v)
self._assert_same_struct(jvp_val, out)
self.assertIsNone(out.grad_fn)
self.assertIsNone(jvp_val[0].grad_fn)
self.assertIsNone(jvp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (ctors.rand(2), ctors.rand(2))
v = (ctors.tensor([1., 0.]), ctors.tensor([1., 0.]))
out, jvp_val = autogradF.jvp(adder, inputs, v)
self._assert_same_struct(jvp_val, out)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(jvp_val[0].grad_fn)
self.assertIsNone(jvp_val[1].grad_fn)
@base_and_logging_tensor
def test_jvp_scalar(self, ctors):
def reducer(x):
return x.sum()
inputs = ctors.rand(4, 4)
v = ctors.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[0], ctors.zeros([]))
self._assert_same_struct(res[1], res[0])
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = ctors.rand([])
v = ctors.ones([])
res = autogradF.jvp(expander, inputs, v)
self._assert_same_struct(res[0], ctors.zeros(4))
self._assert_same_struct(res[1], res[0])
res = autogradF.jvp(expander, inputs)
self._assert_same_struct(res[0], ctors.zeros(4))
self._assert_same_struct(res[1], res[0])
@base_and_logging_tensor
def test_jvp_create_graph(self, ctors):
def reducer(x):
return x.sum(dim=1)
inputs = ctors.rand(2, 2, dtype=torch.double)
v = ctors.ones(2, 2, dtype=torch.double)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.jvp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], res[0])
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (ctors.rand(2, dtype=torch.double, requires_grad=True),
ctors.rand(2, dtype=torch.double, requires_grad=True))
v = (ctors.tensor([1., 0.], dtype=torch.double, requires_grad=True),
ctors.tensor([1., 0.], dtype=torch.double, requires_grad=True))
gradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.jvp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def _test_construct_standard_basis_for(self, inputs):
numels = tuple(tensor.numel() for tensor in inputs)
results = autogradF._construct_standard_basis_for(inputs, numels)
for result, inp in zip(results, inputs):
self.assertEqual(result.dtype, inp.dtype)
self.assertEqual(result.device, inp.device)
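        # Concatenating the per-input basis tensors along dim 1 should yield an
        # identity matrix whose size equals the total number of elements across
        # all inputs.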
results = torch.cat([result.to(device='cpu', dtype=torch.float)
for result in results], dim=1)
expected = torch.eye(results[0].shape[0], dtype=torch.float)
self.assertEqual(results, expected)
@base_and_logging_tensor
def test_construct_standard_basis_for(self, ctors):
test_cases = [
(ctors.randn(2, 3),),
(ctors.randn(1),),
(ctors.randn([]),),
(ctors.randn(1), ctors.randn([]), ctors.randn([])),
(ctors.randn(2), ctors.randn(3), ctors.randn([])),
(ctors.randn(2), ctors.randn([]), ctors.randn(3)),
(ctors.randn(2, 3), ctors.randn(3), ctors.randn(3, 4, 2)),
(ctors.randn(2, dtype=torch.float64), ctors.randn(3, dtype=torch.float32)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
@base_and_logging_tensor
def test_construct_standard_basis_for_cuda(self, ctors):
test_cases = [
(ctors.randn(2), ctors.randn(3, device='cuda')),
(ctors.randn(3, device='cuda'), ctors.randn(2)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
def _test_vectorize_raises_no_warnings(self, api, ctors):
# vmap is an experimental prototype. When someone calls torch.vmap,
# it raises a python warning. This test checks that
# autogradF.{jacobian, hessian} don't raise that experimental prototype
        # warning; a public-facing API should not surface that warning,
        # no matter how it is called.
def foo(a):
return (a ** 2).sum()
x = ctors.randn(3)
with warnings.catch_warnings(record=True) as wa:
result = api(foo, x, vectorize=True)
self.assertEqual(len(wa), 0)
@base_and_logging_tensor
def test_jacobian_vectorize_raises_no_warnings(self, ctors):
return self._test_vectorize_raises_no_warnings(autogradF.jacobian, ctors)
@base_and_logging_tensor
def test_hessian_vectorize_raises_no_warnings(self, ctors):
return self._test_vectorize_raises_no_warnings(autogradF.hessian, ctors)
@parametrize("vectorize", [True, False])
@base_and_logging_tensor
def test_jacobian_err_check(self, vectorize, ctors):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = ctors.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to jacobian must be either a Tensor"):
res = autogradF.jacobian(foo, (inp, 2), vectorize=vectorize)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jacobian must"):
res = autogradF.jacobian(bar, inp, vectorize=vectorize)
res = autogradF.jacobian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, foo(inp), inp)
def foo(a, b):
return b, 3 * a.narrow(0, 0, 3)
inp = (ctors.rand(4), ctors.rand(5))
res = autogradF.jacobian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, foo(*inp), inp)
@base_and_logging_tensor
def test_jacobian_err_check_strict(self, ctors):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = ctors.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.jacobian(foo, inp, strict=True)
res = autogradF.jacobian(foo, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function is independent of input 0."):
res = autogradF.jacobian(bar, inp, strict=True)
res = autogradF.jacobian(bar, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.jacobian(foo, inp, create_graph=True, strict=True)
res = autogradF.jacobian(foo, inp, create_graph=True, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res, torch.eye(4))
@base_and_logging_tensor
def test_jacobian_err_check_strict_vectorize(self, ctors):
def foo(x):
return x
inp = ctors.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.jacobian(foo, inp, strict=True, vectorize=True)
@base_and_logging_tensor
def test_jacobian_no_grad(self, ctors):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = ctors.rand(4, 4)
with torch.no_grad():
res = autogradF.jacobian(exp_reducer, inputs)
self.assertIsNone(res.grad_fn)
self.assertNotEqual(res, ctors.zeros(4, 4))
with torch.no_grad():
res = autogradF.jacobian(exp_reducer, inputs, create_graph=True)
self.assertIsNotNone(res.grad_fn)
self.assertNotEqual(res, ctors.zeros(4, 4))
@vectorized_logging_tensor
def test_jacobian_output(self, vectorize, ctors):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = ctors.rand(4, 4)
res = autogradF.jacobian(exp_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
self.assertIsNone(res.grad_fn)
def identity(x):
return x.clone()
inputs = ctors.rand(4)
res = autogradF.jacobian(identity, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, identity(inputs), inputs)
self.assertIsNone(res.grad_fn)
self.assertEqual(res, torch.eye(4))
def add_exp_reducer(x, y):
return (x + y.exp()).sum(dim=1)
inputs = (ctors.rand(4, 4), ctors.rand(4, 4))
res = autogradF.jacobian(add_exp_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
@vectorized_logging_tensor
def test_jacobian_scalar(self, vectorize, ctors):
def reducer(x):
return x.sum()
inputs = ctors.rand(4, 4)
res = autogradF.jacobian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = ctors.rand([])
res = autogradF.jacobian(expander, inputs, vectorize=vectorize)
self._assert_same_struct(res, ctors.zeros(4))
@parametrize("vectorize", [True, False])
@base_and_logging_tensor
def test_jacobian_create_graph(self, vectorize, ctors):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = ctors.rand(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.jacobian(exp_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
self.assertIsNotNone(res.grad_fn)
gradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def add_exp_reducer(x, y):
return (x + y).exp().sum(dim=1)
inputs = (ctors.rand(4, 4, dtype=torch.double, requires_grad=True),
ctors.rand(4, 4, dtype=torch.double, requires_grad=True))
res = autogradF.jacobian(add_exp_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def foo(x, y):
x = x.cos()
val, jac = autogradF.jacobian(add_exp_reducer, (x, y), create_graph=True, vectorize=vectorize)
res = val[0].exp().sum() + val[1].exp().sum() + jac[0].exp().sum()
res = res + jac[1].exp().sum() + x.exp().sum() + y.exp().sum()
return res
gradcheck(foo, inputs)
gradgradcheck(foo, inputs)
def _check_jacobian_vectorize_correctness(self, f, inputs, test_forward_ad=True):
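        # The vectorized (vmap-based) jacobian must match the default
        # non-vectorized one; optionally the forward-mode strategy is checked too.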
expected = autogradF.jacobian(f, inputs, vectorize=False)
result_backward_mode = autogradF.jacobian(f, inputs, vectorize=True)
self.assertEqual(result_backward_mode, expected)
if test_forward_ad:
result_forward_mode = autogradF.jacobian(f, inputs, strategy="forward-mode", vectorize=True)
self.assertEqual(result_forward_mode, expected)
@base_and_logging_tensor
def test_jacobian_vectorize_correctness_simple(self, ctors):
def f(x):
return 3 * x ** 2
x = ctors.randn(2, 3, 5)
self._check_jacobian_vectorize_correctness(f, x)
@base_and_logging_tensor
def test_jacobian_vectorize_correctness_multi_input(self, ctors):
def f(x, y):
return (x.cos() * x) @ y.sin()
x = ctors.randn(2, 3)
y = ctors.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
@base_and_logging_tensor
def test_jacobian_vectorize_correctness_multi_input_multi_output(self, ctors):
def f(x, y):
return (x * x) @ y, x @ (x.sum(1) * y), y.sum()
x = ctors.randn(5, 3)
y = ctors.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
@base_and_logging_tensor
def test_jacobian_vectorize_correctness_unrelated_outputs(self, ctors):
def f(x, y):
return x, y, x, y
x = ctors.randn(2)
y = ctors.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
@base_and_logging_tensor
def test_jacobian_vectorize_correctness_zero_dim(self, ctors):
# zero-dim output
def f(x, y):
return x.sum(), y.sum(), x * y
x = ctors.randn(3)
y = ctors.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
# zero-dim input
def g(x):
return torch.stack([x, x, x])
x = ctors.randn([])
self._check_jacobian_vectorize_correctness(g, x)
# Mixed zero-dim input / zero-dim output
def h(x, y):
return y.sum(), x * y
x = ctors.randn([])
y = ctors.randn(1)
self._check_jacobian_vectorize_correctness(h, (x, y))
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
@base_and_logging_tensor
def test_jacobian_vectorize_correctness_different_devices(self, ctors):
def f(x, y):
return x * y, (x * y).cuda()
x = ctors.randn(3)
y = ctors.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
@base_and_logging_tensor
def test_jacobian_vectorize_correctness_different_dtype(self, ctors):
def f(x, y):
return (x * y).float(), (x * y).double()
x = ctors.randn(3)
y = ctors.randn(3)
# The Jacobian computed using forward AD has the dtype of the output
# but the Jacobian computed with reverse AD has dtype of input
self._check_jacobian_vectorize_correctness(f, (x, y), test_forward_ad=False)
def _check_hessian_vectorize_correctness(self, f, inputs):
expected = autogradF.hessian(f, inputs, vectorize=False)
result = autogradF.hessian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
result_forward_mode = autogradF.hessian(f, inputs, outer_jacobian_strategy="forward-mode", vectorize=True)
self.assertEqual(result_forward_mode, expected)
@base_and_logging_tensor
def test_hessian_vectorize_correctness_simple(self, ctors):
def f(x):
return (3 * x ** 2).sum()
x = ctors.randn(2, 3, 5)
self._check_hessian_vectorize_correctness(f, x)
@base_and_logging_tensor
def test_hessian_vectorize_correctness_multi_input(self, ctors):
def f(x, y, z):
return ((x.relu() * x) @ y.sin() @ z).sum()
x = ctors.randn(2, 3)
y = ctors.randn(3, 5)
z = ctors.randn(5, 5)
self._check_hessian_vectorize_correctness(f, (x, y, z))
@base_and_logging_tensor
def test_hessian_vectorize_correctness_unrelated_outputs(self, ctors):
# output unrelated to one input
def f(x, y):
return (x ** 2).sum()
x = ctors.randn(2)
y = ctors.randn(3)
self._check_hessian_vectorize_correctness(f, (x, y))
# output unrelated to all inputs
def f(x, y):
return ctors.ones([])
x = ctors.randn(2)
y = ctors.randn(3)
self._check_hessian_vectorize_correctness(f, (x, y))
@parametrize("vectorize", [True, False])
@base_and_logging_tensor
def test_hessian_err_check(self, vectorize, ctors):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
def bar3(a):
return 3 * a.narrow(0, 0, 3), 3 * a.narrow(0, 0, 3)
inp = ctors.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to hessian must be either a Tensor"):
res = autogradF.hessian(foo, (inp, 2), vectorize=vectorize)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hessian must"):
res = autogradF.hessian(bar, inp, vectorize=vectorize)
err_msg_out = "The Tensor returned by the function given to hessian should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.hessian(bar2, inp, vectorize=vectorize)
with self.assertRaisesRegex(RuntimeError, "The function given to hessian should return a single Tensor"):
res = autogradF.hessian(bar3, inp, vectorize=vectorize)
res = autogradF.hessian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, inp, inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (ctors.rand(4), ctors.rand(5))
res = autogradF.hessian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, inp, inp)
@base_and_logging_tensor
def test_hessian_err_check_strict(self, ctors):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
# A Linear function for which the jacobian is independent of the input
return (3 * a).sum()
inp = ctors.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.hessian(foo, inp, strict=True)
res = autogradF.hessian(foo, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0"):
res = autogradF.hessian(bar, inp, strict=True)
res = autogradF.hessian(bar, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.hessian(bar2, inp, strict=True)
res = autogradF.hessian(bar2, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
@base_and_logging_tensor
def test_hessian_err_check_strict_vectorize(self, ctors):
def foo(x):
return (x ** 3).sum()
inp = ctors.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.hessian(foo, inp, strict=True, vectorize=True)
@base_and_logging_tensor
def test_hessian_no_grad(self, ctors):
def pow_reducer(x):
return x.pow(3).sum()
inputs = ctors.rand(2, 2)
with torch.no_grad():
res = autogradF.hessian(pow_reducer, inputs)
self.assertIsNone(res[0][0].grad_fn)
self.assertIsNone(res[0][1].grad_fn)
self.assertIsNone(res[1][0].grad_fn)
self.assertIsNone(res[1][1].grad_fn)
self.assertNotEqual(res, ctors.zeros(2, 2, 2))
with torch.no_grad():
res = autogradF.hessian(pow_reducer, inputs, create_graph=True)
self.assertIsNotNone(res[0][0].grad_fn)
self.assertIsNotNone(res[0][1].grad_fn)
self.assertIsNotNone(res[1][0].grad_fn)
self.assertIsNotNone(res[1][1].grad_fn)
self.assertNotEqual(res, ctors.zeros(2, 2, 2))
@vectorized_logging_tensor
def test_hessian_output(self, vectorize, ctors):
def pow_reducer(x):
return x.pow(3).sum()
inputs = ctors.rand(2, 2)
res = autogradF.hessian(pow_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNone(res.grad_fn)
def add_pow_reducer(x, y):
return (x + y).pow(3).sum()
inputs = (ctors.rand(2, 2), ctors.rand(2, 2))
res = autogradF.hessian(add_pow_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNone(res[0][0].grad_fn)
self.assertIsNone(res[0][1].grad_fn)
self.assertIsNone(res[1][0].grad_fn)
self.assertIsNone(res[1][1].grad_fn)
@parametrize("vectorize", [True, False])
@base_and_logging_tensor
def test_hessian_scalar(self, vectorize, ctors):
def reducer(x):
return x.sum()
inputs = ctors.rand(4, 4)
res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
inputs = ctors.rand([])
res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def bad_reducer(x):
return x.sum().view(1, 1, 1)
inputs = ctors.rand(4, 4)
res = autogradF.hessian(bad_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
@parametrize("vectorize", [True, False])
@base_and_logging_tensor
def test_hessian_create_graph(self, vectorize, ctors):
def pow_reducer(x):
return x.pow(3).sum()
inputs = ctors.rand(2, 2, dtype=torch.double, requires_grad=True)
res = autogradF.hessian(pow_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNotNone(res.grad_fn)
gradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def add_pow_reducer(x, y):
return (x + y).pow(3).sum()
inputs = (ctors.rand(2, 2, dtype=torch.double, requires_grad=True),
ctors.rand(2, 2, dtype=torch.double, requires_grad=True))
res = autogradF.hessian(add_pow_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNotNone(res[0][0].grad_fn)
self.assertIsNotNone(res[0][1].grad_fn)
self.assertIsNotNone(res[1][0].grad_fn)
self.assertIsNotNone(res[1][1].grad_fn)
def flatten(inp):
return tuple(el_lvl2 for el_lvl1 in inp for el_lvl2 in el_lvl1)
gradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
gradgradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
def foo(x, y):
x = x.cos()
val, hess = autogradF.hessian(add_pow_reducer, (x, y), create_graph=True, vectorize=vectorize)
res = val[0].cos().sum() + val[1].cos().sum() + hess[0].cos().sum()
res = res + hess[1].cos().sum() + x.cos().sum() + y.cos().sum()
return res
gradcheck(foo, inputs)
gradgradcheck(foo, inputs)
@base_and_logging_tensor
def test_vhp_err_check(self, ctors):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
inp = ctors.rand(4)
v = ctors.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to vhp must be either a Tensor"):
res = autogradF.vhp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vhp must"):
res = autogradF.vhp(bar, inp, v)
err_msg_out = "The Tensor returned by the function given to vhp should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.vhp(bar2, inp, v)
with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
res = autogradF.vhp(foo, inp, ctors.rand(5))
with self.assertRaisesRegex(TypeError, "The v given to vhp must be either a Tensor or a tuple of Tensors"):
res = autogradF.vhp(foo, inp, (v, 2))
res = autogradF.vhp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (ctors.rand(4), ctors.rand(5))
v = (ctors.rand(4), ctors.rand(5))
res = autogradF.vhp(foo, inp, v)
self._assert_same_struct(res[1], inp)
@base_and_logging_tensor
def test_vhp_err_check_strict(self, ctors):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
# A Linear function for which the jacobian is independent of the input
return (3 * a).sum()
inp = ctors.rand(4)
v = ctors.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.vhp(foo, inp, v, strict=True)
res = autogradF.vhp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.vhp(bar, inp, v, strict=True)
res = autogradF.vhp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.vhp(bar2, inp, v, strict=True)
res = autogradF.vhp(bar2, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
@base_and_logging_tensor
def test_vhp_no_grad(self, ctors):
def reducer(x):
return x.exp().sum()
inputs = ctors.rand(4, 4)
v = ctors.ones(4, 4)
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], ctors.zeros(4, 4))
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], ctors.zeros(4, 4))
@base_and_logging_tensor
def test_vhp_output(self, ctors):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = ctors.rand(4, 4)
v = ctors.ones(4, 4)
res = autogradF.vhp(foo, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (ctors.rand(3), ctors.rand(4))
v = (ctors.ones(3), ctors.ones(4))
out, vhp_val = autogradF.vhp(bar, inputs, v)
self._assert_same_struct(vhp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(vhp_val[0].grad_fn)
self.assertIsNone(vhp_val[1].grad_fn)
@base_and_logging_tensor
def test_vhp_scalar(self, ctors):
def reducer(x):
return x.sum()
inputs = ctors.rand(4, 4)
v = ctors.ones(4, 4)
res = autogradF.vhp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
inputs = ctors.rand([])
v = ctors.rand([])
res = autogradF.vhp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vhp(reducer, inputs)
self._assert_same_struct(res[1], inputs)
def bad_reducer(x):
return x.sum().view(1, 1, 1)
inputs = ctors.rand(4, 4)
v = ctors.rand(4, 4)
res = autogradF.vhp(bad_reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
@base_and_logging_tensor
def test_vhp_create_graph(self, ctors):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = ctors.rand(4, 4, dtype=torch.double, requires_grad=True)
v = ctors.ones(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.vhp(foo, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (ctors.rand(3, dtype=torch.double, requires_grad=True),
ctors.rand(4, dtype=torch.double, requires_grad=True))
v = (ctors.ones(3, dtype=torch.double, requires_grad=True),
ctors.ones(4, dtype=torch.double, requires_grad=True))
out, vhp_val = autogradF.vhp(bar, inputs, v, create_graph=True)
self._assert_same_struct(vhp_val, inputs)
self.assertIsNotNone(out.grad_fn)
self.assertIsNotNone(vhp_val[0].grad_fn)
self.assertIsNotNone(vhp_val[1].grad_fn)
gradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vhp(bar, (x, y), v, create_graph=True)
return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
@base_and_logging_tensor
def test_hvp_err_check(self, ctors):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
inp = ctors.rand(4)
v = ctors.rand(4)
res = autogradF.hvp(foo, inp, v)
with self.assertRaisesRegex(TypeError, "The inputs given to hvp must be either a Tensor"):
res = autogradF.hvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hvp must"):
res = autogradF.hvp(bar, inp, v)
err_msg_out = "The Tensor returned by the function given to hvp should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.hvp(bar2, inp, v)
with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
res = autogradF.hvp(foo, inp, ctors.rand(5))
with self.assertRaisesRegex(TypeError, "The v given to hvp must be either a Tensor or a tuple of Tensors"):
res = autogradF.hvp(foo, inp, (v, 2))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (ctors.rand(4), ctors.rand(5))
v = (ctors.rand(4), ctors.rand(5))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
@base_and_logging_tensor
def test_hvp_err_check_strict(self, ctors):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
# A Linear function for which the jacobian is independent of the input
return (3 * a).sum()
inp = ctors.rand(4)
v = ctors.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.hvp(foo, inp, v, strict=True)
res = autogradF.hvp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.hvp(bar, inp, v, strict=True)
res = autogradF.hvp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.hvp(bar2, inp, v, strict=True)
res = autogradF.hvp(bar2, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
@base_and_logging_tensor
def test_hvp_no_grad(self, ctors):
def reducer(x):
return x.exp().sum()
inputs = ctors.rand(4, 4)
v = ctors.ones(4, 4)
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], ctors.zeros(4, 4))
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], ctors.zeros(4, 4))
@base_and_logging_tensor
def test_hvp_output(self, ctors):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = ctors.rand(4, 4)
v = ctors.ones(4, 4)
res = autogradF.hvp(foo, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (ctors.rand(3), ctors.rand(4))
v = (ctors.ones(3), ctors.ones(4))
out, hvp_val = autogradF.hvp(bar, inputs, v)
self._assert_same_struct(hvp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(hvp_val[0].grad_fn)
self.assertIsNone(hvp_val[1].grad_fn)
@base_and_logging_tensor
def test_hvp_scalar(self, ctors):
def reducer(x):
return x.exp().sum()
inputs = ctors.rand(4, 4)
v = ctors.ones(4, 4)
res = autogradF.hvp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
inputs = ctors.rand([])
v = ctors.rand([])
res = autogradF.hvp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
res = autogradF.hvp(reducer, inputs)
self._assert_same_struct(res[1], inputs)
def bad_reducer(x):
return x.exp().sum().view(1, 1, 1)
inputs = ctors.rand(4, 4)
v = ctors.rand(4, 4)
res = autogradF.hvp(bad_reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
@base_and_logging_tensor
def test_hvp_create_graph(self, ctors):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = ctors.rand(4, 4, dtype=torch.double, requires_grad=True)
v = ctors.ones(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.hvp(foo, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (ctors.rand(3, dtype=torch.double, requires_grad=True),
ctors.rand(4, dtype=torch.double, requires_grad=True))
v = (ctors.ones(3, dtype=torch.double, requires_grad=True),
ctors.ones(4, dtype=torch.double, requires_grad=True))
out, hvp_val = autogradF.hvp(bar, inputs, v, create_graph=True)
self._assert_same_struct(hvp_val, inputs)
self.assertIsNotNone(out.grad_fn)
self.assertIsNotNone(hvp_val[0].grad_fn)
self.assertIsNotNone(hvp_val[1].grad_fn)
gradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.hvp(bar, (x, y), v, create_graph=True)
return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
@base_and_logging_tensor
def test_jacobian_match_vjp_jvp(self, ctors):
def foo(x):
return x ** 3 + x.sum()
inputs = ctors.rand(4)
v = ctors.rand(4)
jac = autogradF.jacobian(foo, inputs)
jvp = autogradF.jvp(foo, inputs, v)[1]
vjp = autogradF.vjp(foo, inputs, v)[1]
self.assertEqual(jvp, torch.mm(jac, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vjp, torch.mm(v.unsqueeze(0), jac).squeeze(0))
@base_and_logging_tensor
def test_hessian_match_vhp_hvp(self, ctors):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = ctors.rand(4)
v = ctors.rand(4)
hes = autogradF.hessian(foo, inputs)
hvp = autogradF.hvp(foo, inputs, v)[1]
vhp = autogradF.vhp(foo, inputs, v)[1]
self.assertEqual(hvp, torch.mm(hes, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vhp, torch.mm(v.unsqueeze(0), hes).squeeze(0))
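# Hedged sketch (not part of the original test suite): the two "match" tests above assert
# the standard identities jvp == J @ v and vjp == v @ J (and analogously for the Hessian).
# Assuming the module-level imports used throughout this file (torch, autogradF), a minimal
# standalone check of the Jacobian identities looks like this:
def _example_jacobian_identities():
    def foo(x):
        return x ** 3 + x.sum()
    inputs, v = torch.rand(4), torch.rand(4)
    jac = autogradF.jacobian(foo, inputs)
    jvp = autogradF.jvp(foo, inputs, v)[1]
    vjp = autogradF.vjp(foo, inputs, v)[1]
    # both identities should hold up to floating-point tolerance
    return torch.allclose(jvp, jac @ v), torch.allclose(vjp, v @ jac)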
instantiate_parametrized_tests(TestAutogradFunctional)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/autograd/test_functional.py
|
# Owner(s): ["module: autograd"]
import torch
from torch.testing._internal.common_utils import TestCase, run_tests, gradcheck
class TestAutogradComplex(TestCase):
def test_view_func_for_complex_views(self):
# case 1: both parent and child have view_func
x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
y = x.detach().requires_grad_(True)
x0 = x.clone()
x1 = torch.view_as_complex(x0)
x2 = torch.view_as_real(x1)
x2.mul_(2)
x2.sum().backward()
y0 = y.clone()
y0.mul_(2)
y0.sum().backward()
self.assertEqual(x.grad, y.grad)
# case 2: parent has view_func but child does not
x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
y = x.detach().requires_grad_(True)
def fn(a):
b = a.clone()
b1 = torch.view_as_complex(b)
b2 = b1.reshape(b1.numel())
return b2
x0 = fn(x)
x0.mul_(2)
x0.sum().backward()
y0 = fn(y)
y1 = y0.mul(2)
y1.sum().backward()
self.assertEqual(x.grad, y.grad)
# case 3: parent does not have a view_func but child does
x = torch.randn(10, dtype=torch.cdouble, requires_grad=True)
y = x.detach().requires_grad_(True)
def fn(a, dim0_size=5):
b = a.clone()
b1 = b.reshape(dim0_size, 2)
b2 = torch.view_as_real(b1)
return b2
x0 = fn(x)
x0.mul_(2)
x0.sum().backward()
y0 = fn(y)
y1 = y0.mul(2)
y1.sum().backward()
self.assertEqual(x.grad, y.grad)
def test_view_with_multi_output(self):
x = torch.randn(2, 2, 2, dtype=torch.double)
x1 = torch.view_as_complex(x)
# Taking an invalid view should always be allowed as long as it is not
# modified inplace
res = x1.unbind(0)
with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"):
res[0] += torch.rand(2, requires_grad=True)
x.requires_grad_(True)
x1 = torch.view_as_complex(x)
# Taking an invalid view should always be allowed as long as it is not
# modified inplace
res = x1.unbind(0)
with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"):
res[0] += torch.rand(2, requires_grad=True)
def as_identity(self):
# view_as_real and view_as_complex behavior should be like an identity
def func(z):
z_ = torch.view_as_complex(z)
z_select = torch.select(z_, z_.dim() - 1, 0)
z_select_real = torch.view_as_real(z_select)
return z_select_real.sum()
z = torch.randn(10, 2, 2, dtype=torch.double, requires_grad=True)
gradcheck(func, [z])
func(z).backward()
z1 = z.clone().detach().requires_grad_(True)
torch.select(z1, z1.dim() - 2, 0).sum().backward()
self.assertEqual(z.grad, z1.grad)
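# Hedged sketch (not part of the original file): view_as_complex followed by view_as_real
# round-trips the same real storage, so gradients flow through as if through an identity.
def _example_complex_view_roundtrip():
    x = torch.randn(4, 2, dtype=torch.double, requires_grad=True)
    y = torch.view_as_real(torch.view_as_complex(x))  # identity on the underlying values
    y.sum().backward()
    # the gradient of a plain sum through an identity view is all ones
    return torch.equal(x.grad, torch.ones_like(x))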
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/autograd/test_complex.py
|
# Owner(s): ["oncall: fx"]
import torch
from torch.testing._internal.common_utils import (
TestCase, run_tests)
from torch.fx.experimental.proxy_tensor import make_fx
from torch.fx.passes.dialect.common.cse_pass import CSEPass, get_CSE_banned_ops
from torch.fx import symbolic_trace
import random
banned_ops = get_CSE_banned_ops()
P_default = CSEPass(banned_ops=banned_ops)
def check(self, f, t, delta, check_val=True, graph_input=False, P=None):
"""
check if the CSE modified graph of ``f``
1) has delta less nodes, and
2) do not reduce the number of nodes further on a second pass, and
3) modified returned is true only if the number of nodes decreases.
Args:
f: function to be checked
t: tensor to be passed to f
delta: an integer >= -1.
If delta = -1, it only checks if the new graph has less or equal number of nodes
check_val: if True, check if the output of f is correct
graph_input: True is f is type GraphModule
P: the pass to use. If None, use P_default
"""
if graph_input:
fx_g = f
else:
fx_g = make_fx(f)(t)
if P is None:
P = P_default
res = P(fx_g)
new_g = res.graph_module
new_graph = new_g.graph
modified = res.modified
    # the number of nodes should decrease or stay the same
old_num_nodes = len(fx_g.graph.nodes)
new_num_nodes = len(new_graph.nodes)
    assert (new_num_nodes < old_num_nodes) == modified, "modified should be True iff the number of nodes decreased"
if delta == -1:
self.assertTrue(old_num_nodes >= new_num_nodes, (
f"number of nodes increased {old_num_nodes}, {new_num_nodes}"))
else:
self.assertTrue(old_num_nodes == new_num_nodes + delta, (
f"number of nodes not the same {old_num_nodes - delta}, {new_num_nodes}\n {fx_g.graph} \n {new_graph}"))
# a second pass should not reduce more nodes
res = P(new_g)
pass_2_graph = res.graph_module.graph
pass_2_num_nodes = len(pass_2_graph.nodes)
self.assertTrue(pass_2_num_nodes == new_num_nodes, (
f"second pass graph has less node {pass_2_num_nodes}, {new_num_nodes}\n {new_graph} \n {pass_2_graph}"))
# check correctness
if check_val:
true_result = fx_g(t)
our_result = new_g(t)
if true_result is None: # both return None
self.assertTrue(our_result is None, f"true result is None, CSE result is {our_result}")
else: # results returned are the same
self.assertTrue(torch.all(true_result == our_result), (
f"results are different {true_result}, {our_result}")) # check results are the same
class TestCSEPass(TestCase):
def test_nochange(self):
def f(x):
a = x + 1
b = x + a
a = x
d = x + a
return b + d
t = torch.randn(2, 2)
check(self, f, t, 0)
def test_empty(self):
def f(x):
pass
t = torch.randn(2, 2)
check(self, f, t, 0)
def test_immutable_list_type(self):
def f(x):
a = x.sum(dim=1)
b = x.sum(dim=1)
c = x.sum()
d = x.sum()
return a + b + c + d
t = torch.randn(2, 2)
check(self, f, t, 2)
def test_immutable_list_multiple_entries(self):
def f(x):
a = x.sum(dim=[0, 1])
b = x.sum(dim=[0, 1])
c = x.sum(dim=1)
d = x.sum(dim=1)
return a + b + c + d
t = torch.randn(2, 2)
check(self, f, t, 2)
def test_simple(self):
def f(x):
a = x.cos()
b = x.cos()
c = a + a
d = b + b
return c + d
t = torch.randn(2, 2)
check(self, f, t, 2)
def test_simple_2(self):
def f(x):
a = x.cos().sin()
b = x.cos().sin()
c = a + a
d = b + b
return c + d
t = torch.randn(1)
check(self, f, t, 3)
def test_two_args_default(self):
def f(x):
a = x.sum(dim=1)
b = x.sum(dim=1, keepdim=False)
c = x.sum(dim=1, keepdim=False)
d = x.sum(dim=1)
return a + b + c + d
t = torch.randn(2, 2)
check(self, f, t, 3)
def test_two_args(self):
def f(x):
a = x.sum(dim=1)
b = x.sum(dim=1, keepdim=True)
c = x.sum(dim=1, keepdim=True)
d = x.sum(dim=1)
return a + b + c + d
t = torch.randn(2, 2)
check(self, f, t, 2)
def test_simple_multiple_same_ops(self):
def f(x):
a = x.sum()
b = x.sum()
c = x.sum()
d = x.sum()
return a + b + c + d
t = torch.randn(2, 2)
check(self, f, t, 3)
def test_nested_immutable_list_type(self):
def f(x):
a = torch.cat((x, x))
b = torch.cat((x, x))
return a + b
t = torch.randn(2, 2)
check(self, f, t, 1)
def test_kwarg(self):
def f(x):
a = torch.ones_like(x)
b = torch.ones_like(x)
return a + b
t = torch.randn(2, 2)
check(self, f, t, 1)
"""
Generate function with random ops and check if the result is the same
"""
def test_random(self):
def f(x):
vals = [x]
ops = [torch.clone, torch.cos, torch.tanh, torch.nn.functional.gelu]
for _ in range(100):
new_val = random.choice(ops)(random.choice(vals))
vals.append(new_val)
return vals[-1]
fx_g = symbolic_trace(f)
fx_g.graph.eliminate_dead_code()
fx_g.recompile()
t = torch.randn(2, 2)
for _ in range(30):
check(self, fx_g, t, -1, graph_input=True)
"""
Test that banned list ban ops as expected.
"""
def test_banned_list(self):
def f(x):
a = x + 1
b = x + 1
return a + b
t = torch.randn(2, 2)
        P_ban_add = CSEPass(banned_ops=[torch.ops.aten.add])
check(self, f, t, 0, P=P_ban_add) # check that add is banned
check(self, f, t, 1) # check that add is not banned by default
def test_rand_like(self):
def f(x):
a = torch.rand_like(x)
b = torch.rand_like(x)
return a + b
t = torch.randn(2, 2)
check(self, f, t, 0, check_val=False)
def test_rand_n(self):
def f(x):
a = torch.randn(4)
b = torch.randn(4)
return a + b
t = torch.randn(2, 2)
check(self, f, t, 0, check_val=False)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/fx/test_cse_pass.py
|
# Owner(s): ["oncall: fx"]
import torch
from torch.testing._internal.common_utils import (
TestCase, parametrize, instantiate_parametrized_tests, run_tests)
from torch.fx.experimental.proxy_tensor import make_fx
from torch.fx.passes.dialect.common.cse_pass import CSEPass
from torch.fx.graph_module import GraphModule
import itertools
def FactoryFunctionCall(x, device):
y = torch.full(x.shape, 3, device=device)
z = torch.add(y, x)
return z
def TorchTensorCall(x):
y = torch.tensor(3)
return x + y
def TakeList(x):
z = torch.cat([x, x])
return z
def ReturnList(x):
a = torch.arange(10).reshape(5, 2)
z = torch.split(a, [1, 4])
return z
def Mutation(x):
y = x + 2
y.add_(1)
return x + y
def MutationInput(x):
x.add_(1)
y = x + 2
return x + y
def MutationFactory(x, device):
y = torch.full(x.shape, 3, device=device)
y.add_(1)
return x + y
def MutationTorchTensorCall(x):
y = torch.tensor(3)
y.add_(1)
return x + y
def MutationMetadata(x):
x.resize_(2)
return x
Passes = [CSEPass]
Test_Cases = [TakeList,
ReturnList,
Mutation,
MutationInput,
MutationMetadata,
MutationTorchTensorCall]
Factory_Test_Cases = [FactoryFunctionCall, MutationFactory]
Devices = ["cpu"]
if torch.cuda.is_available():
Devices.append("cuda")
@instantiate_parametrized_tests
class TestCommonPass(TestCase):
@parametrize("common_pass,f,device", itertools.product(Passes, Test_Cases, Devices))
def test_correctness(self, common_pass, f, device):
inp = torch.randn(10, device=device)
traced_m = make_fx(f)(inp)
P = common_pass()
res = P(traced_m)
modified_m = res.graph_module
assert isinstance(modified_m, GraphModule)
inp_copy = inp.clone()
expected = f(inp)
result = modified_m(inp_copy)
self.assertEqual(result, expected)
@parametrize("common_pass,f,device", itertools.product(Passes, Factory_Test_Cases, Devices))
def test_correctness_factory(self, common_pass, f, device):
inp = torch.randn(10, device=device)
traced_m = make_fx(f)(inp, device)
P = common_pass()
res = P(traced_m)
modified_m = res.graph_module
assert isinstance(modified_m, GraphModule)
inp_copy = inp.clone()
expected = f(inp, device)
result = modified_m(inp_copy, device)
self.assertEqual(result, expected)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/fx/test_common_passes.py
|
# Owner(s): ["module: fx"]
import operator
import unittest
from torch.fx import GraphModule, symbolic_trace
from torch.fx.experimental.meta_tracer import symbolic_trace as meta_symbolic_trace
from torch.fx.experimental.migrate_gradual_types.constraint import BinConstraintT, DVar, TVar, T
from torch.fx.experimental.migrate_gradual_types.constraint_generator import ConstraintGenerator
from torch.fx.experimental.migrate_gradual_types.constraint_transformation import transform_constraint
from torch.fx.experimental.migrate_gradual_types.operation import op_precision, op_matching, op_consistency
from torch.fx.experimental.migrate_gradual_types.transform_to_z3 import transform_all_constraints,\
evaluate_conditional_with_constraints
from torch.fx.experimental.migrate_gradual_types.z3_types import tensor_type, D, z3_dyn
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.tensor_type import Dyn, TensorType
import torch
try:
import z3 # type: ignore[import]
HAS_Z3 = True
except ImportError:
HAS_Z3 = False
try:
from torchvision import models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class TorchDynamoUseCases(unittest.TestCase):
def test_reshape(self):
"""
In this example, we prove that some nodes must
always have a fixed shape regardless of the input
"""
class BasicBlock(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x: Dyn):
y = x.view(100)
tmp = y.size()[0]
return tmp
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
dim = z3.Int(4)
self.assertEqual(s.model()[dim], 100)
# print(s.model()[dim])
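# Hedged sketch (not part of the original file): most tests in this module follow the same
# recipe -- trace a module, translate its graph into z3 constraints, and query the solver.
# Assuming z3 is available (HAS_Z3), a stripped-down version of that recipe is:
def _example_constraint_check(module):
    traced = symbolic_trace(module)
    constraints = transform_all_constraints(traced, counter=0)
    solver = z3.Solver()
    solver.add(constraints)
    return solver.check()  # z3.sat if the shape annotations are consistent, z3.unsat otherwise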
class HFOperations(unittest.TestCase):
def test_eq_dim(self):
"""
test dimensions and equalities
"""
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([32, 4, 4])):
eq = x.dim() == 3
return eq
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
        # The node we are considering is the eq node
for n in graph.nodes:
if n.target == operator.eq:
node = n
positive, negative = evaluate_conditional_with_constraints(ast_rewriter.root, graph, node)
self.assertEqual(positive, z3.sat)
self.assertEqual(negative, z3.unsat)
def test_conditional_ne_1(self):
"""
This test case is for the HFmodels interface.
A function takes a node and a graph and considers
the conditional the node represents and its negation
and solves each formula with the remaining sets of constraints
Returns:
"""
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([32, 4, 4]), y: TensorType([32, 4, 4])):
size_5 = x.size()
getitem_7 = size_5[0]
getitem_8 = size_5[1]
getitem_9 = size_5[2]
ne_1 = y != (getitem_7, getitem_8, getitem_9)
return ne_1
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
        # The node we are considering is the ne node
for n in graph.nodes:
if n.target == operator.ne:
node = n
        # since x and y have the same shape, y != x.size() cannot hold, so we should get unsat
        # for the positive condition and sat for the negative condition
positive, negative = evaluate_conditional_with_constraints(ast_rewriter.root, graph, node)
self.assertEqual(positive, z3.unsat)
self.assertEqual(negative, z3.sat)
def test_bmm(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([Dyn, 2, 3]), y: TensorType([1, 3, 2])):
bmm = torch.bmm(x, y)
return bmm
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
b = BasicBlock().forward(torch.rand(1, 2, 3), torch.rand(1, 3, 2))
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
output = z3.Const(3, tensor_type)
self.assertEqual(s.check(), z3.sat)
self.assertEqual(s.model()[output].arg(0).arg(1), b.shape[0])
self.assertEqual(s.model()[output].arg(1).arg(1), b.shape[1])
self.assertEqual(s.model()[output].arg(2).arg(1), b.shape[2])
def test_bmm2(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: Dyn, y: TensorType([1, 3, 2])):
bmm = torch.bmm(x, y)
return bmm
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
b = BasicBlock().forward(torch.rand(1, 2, 3), torch.rand(1, 3, 2))
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
output = z3.Const(3, tensor_type)
self.assertEqual(s.check(), z3.sat)
self.assertEqual(s.model()[output].arg(0).arg(1), b.shape[0])
self.assertEqual(s.model()[output].arg(1).arg(0), 0)
self.assertEqual(s.model()[output].arg(2).arg(1), b.shape[2])
def test_bmm3(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([2, 3, 3]), y: TensorType([1, 3, 2])):
bmm = torch.bmm(x, y)
return bmm
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.unsat)
def test_transpose(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([1, 2, 3, 4])):
transpose = x.transpose(0, 1)
return transpose
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
b = BasicBlock().forward(torch.rand(1, 2, 3, 4))
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
output = z3.Const(2, tensor_type)
self.assertEqual(s.check(), z3.sat)
self.assertEqual(s.model()[output].arg(0).arg(1), b.shape[0])
self.assertEqual(s.model()[output].arg(1).arg(1), b.shape[1])
self.assertEqual(s.model()[output].arg(2).arg(1), b.shape[2])
self.assertEqual(s.model()[output].arg(3).arg(1), b.shape[3])
# change the annotation to Dyn
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
n.type = Dyn
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
def test_index_select(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([2050, 1024]), y: Dyn):
index_select = x.index_select(0, y)
return index_select
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
# print(symbolic_traced)
b = BasicBlock().forward(torch.rand(2050, 1024), torch.ones(8).int())
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
index_select = z3.Const(3, tensor_type)
        # the second dimension of the result should not be affected since
        # the selection is along dimension 0
self.assertEqual(s.model()[index_select].arg(1).arg(1), b.shape[1])
replacement_vector = z3.Const(2, tensor_type)
# we set the vector to Dyn
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
index_select = z3.Const(3, tensor_type)
s.add(replacement_vector == z3_dyn)
self.assertEqual(s.check(), z3.sat)
# this implies that the index at 0 should be dyn
self.assertEqual(s.model()[index_select].arg(0).arg(0), 0)
def test_get_attr(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([1, 2, 3])):
getattr = x.device
to = x.to(getattr)
return to
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
b = BasicBlock().forward(torch.rand(1, 2, 3))
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
attr_res = z3.Const(3, tensor_type)
assert s.model()[attr_res].arg(0).arg(1) == b.shape[0]
assert s.model()[attr_res].arg(1).arg(1) == b.shape[1]
assert s.model()[attr_res].arg(2).arg(1) == b.shape[2]
def test_expand(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([1, 4])):
size = x.size()
getitem = size[-1]
expand = x.expand(getitem, 4)
return expand
b = BasicBlock().forward(torch.rand(1, 4))
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
expand_res = z3.Const(4, tensor_type)
assert s.model()[expand_res].arg(0).arg(1) == b.shape[0]
assert s.model()[expand_res].arg(1).arg(1) == b.shape[1]
# change the annotation on the input to Dyn.
# the last dimension should still be 4
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
n.type = Dyn
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
assert s.model()[expand_res].arg(1).arg(1) == b.shape[1]
def test_getitem_tensor(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([4, 4])):
getitem = x[(None, None, slice(None, None, None), slice(None, None, None))]
return getitem
B = BasicBlock()
b = B.forward(torch.rand(4, 4))
symbolic_traced: torch.fx.GraphModule = symbolic_trace(B)
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
get_item_res = z3.Const(2, tensor_type)
assert s.model()[get_item_res].arg(0).arg(1) == b.shape[0]
assert s.model()[get_item_res].arg(1).arg(1) == b.shape[1]
assert s.model()[get_item_res].arg(2).arg(1) == b.shape[2]
assert s.model()[get_item_res].arg(3).arg(1) == b.shape[3]
# change the annotation on the input to make sure it propagates
# to the output
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
n.type = TensorType([Dyn, 4])
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
# dyn check
assert s.model()[get_item_res].arg(2).arg(0) == 0
def test_getitem_tensor2(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([4, 4])):
getitem = x[(None, None)]
return getitem
B = BasicBlock()
b = B.forward(torch.rand(4, 4))
symbolic_traced: torch.fx.GraphModule = symbolic_trace(B)
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
get_item_res = z3.Const(2, tensor_type)
assert s.model()[get_item_res].arg(0).arg(1) == b.shape[0]
assert s.model()[get_item_res].arg(1).arg(1) == b.shape[1]
assert s.model()[get_item_res].arg(2).arg(1) == b.shape[2]
assert s.model()[get_item_res].arg(3).arg(1) == b.shape[3]
def test_getitem_tensor_3(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([4, 4])):
getitem = x[(None, slice(None, None, None), None, slice(None, None, None))]
return getitem
B = BasicBlock()
b = B.forward(torch.rand(4, 4))
symbolic_traced: torch.fx.GraphModule = symbolic_trace(B)
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
get_item_res = z3.Const(2, tensor_type)
assert s.model()[get_item_res].arg(0).arg(1) == b.shape[0]
assert s.model()[get_item_res].arg(1).arg(1) == b.shape[1]
assert s.model()[get_item_res].arg(2).arg(1) == b.shape[2]
assert s.model()[get_item_res].arg(3).arg(1) == b.shape[3]
def test_layer_norm(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
self.l = torch.nn.LayerNorm((1024,))
def forward(self, x: Dyn):
return self.l(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
        # constrain the output to a rank-1 tensor of size 1024, which should
        # force the input to migrate to the same shape
b = BasicBlock().forward(torch.rand(1024))
input = z3.Const(1, tensor_type)
output = z3.Const(2, tensor_type)
s.add(output == tensor_type.tensor1(D(1, 1024)))
s.check()
self.assertEqual(s.model()[input], s.model()[output])
# input shape = output shape
self.assertEqual(b.shape[0], s.model()[input].arg(0).arg(1))
# change annotation to the wrong shape
for n in graph.nodes:
if n.op == 'placeholder':
n.type = TensorType([10, 10])
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.unsat)
# fix the annotation
for n in graph.nodes:
if n.op == 'placeholder':
n.type = TensorType([10, 1024])
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
s.check()
b = BasicBlock().forward(torch.rand(10, 1024)).shape
self.assertEqual(s.model()[output].arg(0).arg(1), b[0])
self.assertEqual(s.model()[output].arg(1).arg(1), b[1])
def test_layer_norm_functional(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: Dyn):
return torch.nn.functional.layer_norm(x, (1024,))
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
        # constrain the output to a rank-1 tensor of size 1024, which should
        # force the input to migrate to the same shape
b = BasicBlock().forward(torch.rand(1024))
input = z3.Const(1, tensor_type)
output = z3.Const(2, tensor_type)
s.add(output == tensor_type.tensor1(D(1, 1024)))
s.check()
self.assertEqual(s.model()[input], s.model()[output])
# input shape = output shape
self.assertEqual(b.shape[0], s.model()[input].arg(0).arg(1))
def test_ne_int_long_type_as(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([Dyn, Dyn]), y: TensorType([Dyn, Dyn])):
ne_int = torch.ne(x, y).int()
type_as = ne_int.type_as(y)
long = type_as.long()
return long
symbolic_traced: torch.fx.GraphModule = symbolic_trace(BasicBlock())
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
# migrate one of the parameters to a fully static shape so we can compare
input = z3.Const(1, tensor_type)
input_2 = z3.Const(2, tensor_type)
s1, s2 = z3.Ints('s1 s2')
output_long = z3.Const(8, tensor_type)
s.add(input == tensor_type.tensor2(D(1, 2), D(1, 4)))
s.add(input_2 == tensor_type.tensor2(D(1, s1), D(1, s2)))
        self.assertEqual(s.check(), z3.sat)
actual_shape = BasicBlock().forward(torch.rand(2, 4), torch.rand(2, 4)).shape
self.assertEqual(s.model()[output_long].arg(0).arg(1), actual_shape[0])
self.assertEqual(s.model()[output_long].arg(1).arg(1), actual_shape[1])
def test_ne(self):
s1, s2 = z3.Ints('s1 s2')
s11, s22 = z3.Ints('s11 s22')
d1, d2 = D(s11, s1), D(0, s2)
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: Dyn, y: Dyn):
return torch.ne(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
# change the annotations
for n in graph.nodes:
if n.name == 'x':
n.type = TensorType([1, 2])
if n.name == 'y':
n.type = TensorType([2, Dyn])
# resulting type should be TensorType([2, 2])
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
# force the second dimension to be Dyn
# output should still be TensorType([2, 2])
input = z3.Const(2, tensor_type)
s.add(input == tensor_type.tensor2(d1, d2))
self.assertEqual(s.check(), z3.sat)
B = BasicBlock().forward(torch.rand(1, 2), torch.rand(2, 1))
output = z3.Const(3, tensor_type)
self.assertEqual(s.model()[output].arg(0).arg(1), B.shape[0])
self.assertEqual(s.model()[output].arg(1).arg(1), B.shape[0])
def test_cumsum(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([Dyn, 4, 3])):
t = torch.cumsum(x, 3)
return t
symbolic_traced: torch.fx.GraphModule = meta_symbolic_trace(BasicBlock(), meta_args={})
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
# should be unsat since the index is not valid for this annotation
self.assertEqual(s.check(), z3.unsat)
# modify the annotation to Dyn which should give sat
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
n.type = Dyn
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
        # modify the annotation to the right tensor size
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
n.type = TensorType([1, 2, 3, 4])
# verify that the input is equal to the output
B = BasicBlock().forward(torch.rand(1, 2, 3, 4))
res_shape = B.shape
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
# confirm the output matches the expected tensor
result = z3.Const(2, tensor_type)
self.assertEqual(s.model()[result].arg(0).arg(1), res_shape[0])
self.assertEqual(s.model()[result].arg(1).arg(1), res_shape[1])
self.assertEqual(s.model()[result].arg(2).arg(1), res_shape[2])
self.assertEqual(s.model()[result].arg(3).arg(1), res_shape[3])
# confirm the output is not dyn
self.assertNotEqual(s.model()[result].arg(0).arg(0).as_long(), 0)
self.assertNotEqual(s.model()[result].arg(1).arg(0).as_long(), 0)
self.assertNotEqual(s.model()[result].arg(2).arg(0).as_long(), 0)
self.assertNotEqual(s.model()[result].arg(3).arg(0).as_long(), 0)
def test_cumsum_kwargs(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([Dyn, 4, 3])):
t = torch.cumsum(x, dim=3)
return t
symbolic_traced: torch.fx.GraphModule = meta_symbolic_trace(BasicBlock(), meta_args={})
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
# should be unsat since the index is not valid for this annotation
self.assertEqual(s.check(), z3.unsat)
# modify the annotation to Dyn which should give sat
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
n.type = Dyn
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
def test_arange(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([2, 4])):
size = x.size()
getitem = size[-1]
arange = torch.arange(getitem)
return arange
B = BasicBlock().forward(torch.rand(2, 4))
symbolic_traced: torch.fx.GraphModule = meta_symbolic_trace(BasicBlock(), meta_args={})
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
arange_result = z3.Const(5, tensor_type)
self.assertNotEqual(s.model()[arange_result].arg(0).arg(0).as_long(), 0)
self.assertEqual(s.model()[arange_result].arg(0).arg(1).as_long(), B.size()[0])
        # change the annotation to Dyn. This will migrate to an arbitrary type
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
n.type = Dyn
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
n.type = TensorType([Dyn, Dyn, Dyn, Dyn])
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
def test_scalar_add(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([2, 4])):
size = x.size()
getitem = size[-1]
arange = torch.arange(getitem)
add = arange + 1
return add
symbolic_traced: torch.fx.GraphModule = meta_symbolic_trace(BasicBlock(), meta_args={})
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
arange_result = z3.Const(5, tensor_type)
add_result = z3.Const(6, tensor_type)
self.assertEqual(s.model()[arange_result], s.model()[add_result])
def test_regular_add_2(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([2, 4])):
to = x.to()
size = to.size()
getitem = size[-1]
add = getitem + 1
return add
b = BasicBlock().forward(torch.rand(2, 4))
symbolic_traced: torch.fx.GraphModule = meta_symbolic_trace(BasicBlock(), meta_args={})
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
res = z3.Int(5)
self.assertEqual(s.model()[res], b)
def test_regular_add_3(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([2, 4])):
to = x.to()
size = to.size()
getitem = size[-1]
add = 1 + getitem
return add
b = BasicBlock().forward(torch.rand(2, 4))
symbolic_traced: torch.fx.GraphModule = meta_symbolic_trace(BasicBlock(), meta_args={})
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
res = z3.Int(5)
self.assertEqual(s.model()[res], b)
def test_embedding(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
self.embedding = torch.nn.Embedding(256008, 1024, padding_idx=1)
def forward(self, x: TensorType([2, 4])):
return self.embedding(x)
B = BasicBlock().forward(torch.ones([2, 4], dtype=torch.long)).size()
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
embedding_result = z3.Const(2, tensor_type)
assert s.model()[embedding_result].arg(0).arg(1) == B[0]
assert s.model()[embedding_result].arg(1).arg(1) == B[1]
assert s.model()[embedding_result].arg(2).arg(1) == B[2]
# change the type. This should still be satisfiable
for n in traced.graph.nodes:
if n.op == 'placeholder':
n.type = TensorType([Dyn, Dyn])
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
assert s.model()[embedding_result].arg(0).arg(0) == 0
assert s.model()[embedding_result].arg(1).arg(0) == 0
assert s.model()[embedding_result].arg(2).arg(1) == B[2]
        # change the type to Dyn. Here, we will get an arbitrary migration
for n in traced.graph.nodes:
if n.op == 'placeholder':
n.type = Dyn
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
def test_embedding_2(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([2, 4]), y: TensorType([Dyn, 1024])):
return torch.nn.functional.embedding(x, y)
B = BasicBlock().forward(torch.ones([2, 4], dtype=torch.long), torch.rand(256008, 1024)).size()
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
embedding_result = z3.Const(5, tensor_type)
assert s.model()[embedding_result].arg(0).arg(1) == B[0]
assert s.model()[embedding_result].arg(1).arg(1) == B[1]
assert s.model()[embedding_result].arg(2).arg(1) == B[2]
def test_size_two_args(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([Dyn, 2, Dyn])):
size = x.size(-1)
return size
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
d1, d2 = z3.Int(39), z3.Int(2)
d4, d5 = z3.Int('input_d1'), z3.Int('input_d2')
# migrate the third dimension
s.add(d1 != 0)
self.assertEqual(s.check(), z3.sat)
input = z3.Const(1, tensor_type)
s.add(input == tensor_type.tensor3(D(3, 39), D(1, 2), D(d4, d5)))
# check if the item we got is the right one
self.assertEqual(s.check(), z3.sat)
self.assertEqual(s.model()[d5], s.model()[d2])
self.assertEqual(s.model()[d1], s.model()[d4])
def test_size_getitem(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: Dyn):
size = x.size()
getitem = size[-1]
return getitem
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
# force the input to be of size 4
s1, s2, s3, s4 = z3.Ints('x1 x2 x3 x4')
s11, s22, s33, s44 = z3.Ints('x11 x22 x33 x44')
d1, d2, d3, d4 = D(s11, s1), D(s22, s2), D(s33, s3), D(s44, s4),
input = z3.Const(1, tensor_type)
s.add(input == tensor_type.tensor4(d1, d2, d3, d4))
# check if the model is still SAT
        self.assertEqual(s.check(), z3.sat)
s1, s2 = z3.Int(23), z3.Int(3)
# check that the item is correct
        self.assertEqual(s.model()[s1], s.model()[s2])
# invalid index but should still be SAT because input will be Dyn
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: Dyn):
size = x.size()
getitem = size[-10]
return getitem
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
s.add(input != z3_dyn)
self.assertEqual(s.check(), z3.unsat)
def test_view_mul(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
self.embed_tokens = torch.nn.Embedding(256008, 1024, padding_idx=1)
def forward(self, x: TensorType([2, 4])):
size = x.size()
getitem = size[-1]
view = x.view(-1, getitem)
embed_tokens = self.embed_tokens(view)
mul = embed_tokens * 32.0
return mul
# print(B)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
# print(traced)
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
# print(s.model())
embedding_result = z3.Const(6, tensor_type)
# note that the view output will be: tensor3(dim(0, 0), dim(1, 4), dim(1, 1024))
# this is due to the reshape constraints. This can be lifted
# but would require revising the type rules accordingly so we leave it for now
assert (s.model()[embedding_result].arg(1).arg(1)) == 4
assert (s.model()[embedding_result].arg(2).arg(1)) == 1024
mul_result = z3.Const(13, tensor_type)
assert s.model()[mul_result] == s.model()[embedding_result]
def test_gt(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([Dyn, 4])):
size = x.size()
getitem_1 = size[-1]
gt = getitem_1 > 1
return gt
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
        self.assertEqual(s.check(), z3.sat)
res = z3.Bool(4)
self.assertEqual(s.model()[res], True)
def test_view(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([2, 4])):
view = x.view(-1, 8)
return view
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
def test_lt_tensor(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([2, 4]), y: Dyn):
lt = x > y
return lt
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
def test_conditional(self):
"""
This test case is for the HFmodels interface.
A function takes a node and a graph and considers
the conditional the node represents and its negation
and solves each formula with the remaining sets of constraints
Returns:
"""
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
self.embed_tokens = torch.nn.Embedding(256008, 1024, padding_idx=1)
def forward(self, x: TensorType([Dyn, 4])):
size = x.size()
getitem = size[-1]
view = x.view(-1, getitem)
embed_tokens = self.embed_tokens(view)
mul = embed_tokens * 32.0
getitem_1 = size[-1]
gt = getitem_1 > 1
return gt
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
# The node we are considering is the gt node
for n in graph.nodes:
if n.target == operator.gt:
node = n
positive, negative = evaluate_conditional_with_constraints(ast_rewriter.root, graph, node)
self.assertEqual(positive, z3.sat)
self.assertEqual(negative, z3.unsat)
# change the annotation to Dyn
for n in graph.nodes:
if n.op == 'placeholder':
n.type = Dyn
# here, both should be SAT since the input is Dyn
positive, negative = evaluate_conditional_with_constraints(ast_rewriter.root, graph, node)
self.assertEqual(positive, z3.sat)
self.assertEqual(negative, z3.sat)
# change the annotation to TensorType[Dyn, Dyn]
for n in graph.nodes:
if n.op == 'placeholder':
n.type = TensorType([Dyn, Dyn])
# here, both should be SAT as well
positive, negative = evaluate_conditional_with_constraints(ast_rewriter.root, graph, node)
self.assertEqual(positive, z3.sat)
self.assertEqual(negative, z3.sat)
def test_conditional_2(self):
"""
This test case is for the HFmodels interface.
A function takes a node and a graph and considers
the conditional the node represents and its negation
and solves each formula with the remaining sets of constraints
Returns the opposite result of the above testcase
"""
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
self.embed_tokens = torch.nn.Embedding(256008, 1024, padding_idx=1)
def forward(self, x: TensorType([Dyn, 4])):
size = x.size()
getitem = size[-1]
view = x.view(-1, getitem)
embed_tokens = self.embed_tokens(view)
mul = embed_tokens * 32.0
getitem_1 = size[-1]
lt = getitem_1 < 1
return lt
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
# The node we are considering is the lt node
for n in graph.nodes:
if n.target == operator.lt:
node = n
positive, negative = evaluate_conditional_with_constraints(ast_rewriter.root, graph, node)
self.assertEqual(positive, z3.unsat)
self.assertEqual(negative, z3.sat)
class ComposeOperationsGradualTypes(unittest.TestCase):
def test_masked_fill(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([2, 4])):
size = x.size()
getitem = size[-1]
arange = torch.arange(getitem)
view = x.view(-1, getitem)
lt = arange > view
masked_fill = x.masked_fill_(lt, 0)
return masked_fill
B = BasicBlock().forward(torch.rand(2, 4))
# print(B.shape)
symbolic_traced: torch.fx.GraphModule = meta_symbolic_trace(BasicBlock(), meta_args={})
# print(symbolic_traced)
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
masked_fill_res = z3.Const(10, tensor_type)
self.assertEqual(s.model()[masked_fill_res].arg(0).arg(1).as_long(), B.size()[0])
self.assertEqual(s.model()[masked_fill_res].arg(1).arg(1).as_long(), B.size()[1])
# change the annotation to Dyn. This will migrate to an arbitrary type
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
n.type = Dyn
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
n.type = TensorType([Dyn, Dyn, Dyn, Dyn])
transformed = transform_all_constraints(symbolic_traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEqual(s.check(), z3.sat)
def test_add_reshape_1(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: Dyn, y: Dyn):
return torch.add(torch.reshape(x, (1, 2)), torch.reshape(y, (2, 2)))
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
def test_add_reshape_2(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: Dyn, y: Dyn):
return torch.add(torch.reshape(x, (-1, 2)), torch.reshape(y, (2, 2, 2)))
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
def test_conv_reshape_add_0(self):
class BasicBlock(torch.nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding, groups, dilation):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels=in_planes, out_channels=out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, groups=groups, bias=False, dilation=dilation)
def forward(self, x: Dyn, y: Dyn):
return torch.add(self.conv1(torch.reshape(x, (1, 2, 10, 20))), y)
B = BasicBlock(2, 2, 2, 3, 2, 2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
new_transformed_c = transform_all_constraints(traced)
solver = z3.Solver()
solver.add(new_transformed_c)
self.assertEquals(solver.check(), z3.sat)
def test_conv_reshape_add_0_2(self):
class BasicBlock(torch.nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding, groups, dilation):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels=in_planes, out_channels=out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, groups=groups, bias=False, dilation=dilation)
def forward(self, x: Dyn, y: TensorType([4, 1])):
return torch.add(self.conv1(torch.reshape(x, (1, 2, 10, 20))), y)
B = BasicBlock(2, 2, 2, 3, 2, 2, 2)
# annotated y shape: [4, 1]; conv output (and the concrete y used below): [1, 2, 4, 8]
res = B.forward(torch.rand(20, 20), torch.rand(1, 2, 4, 8)).size()
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
new_transformed_c = transform_all_constraints(traced)
solver = z3.Solver()
solver.add(new_transformed_c)
self.assertEquals(solver.check(), z3.sat)
conv_result = z3.Const(4, tensor_type)
add_result = z3.Const(9, tensor_type)
input_2 = z3.Const(2, tensor_type)
s1, s2, s3, s4 = z3.Ints('x1 x2 x3 x4')
s11, s22, s33, s44 = z3.Ints('x11 x22 x33 x44')
d1, d2, d3, d4 = D(s11, s1), D(s22, s2), D(s33, s3), D(s44, s4),
solver.add(conv_result == tensor_type.tensor4(d1, d2, d3, d4))
solver.check()
assert solver.model()[s1].as_long() == res[0]
assert solver.model()[s2].as_long() == res[1]
assert solver.model()[s3].as_long() == res[2]
assert solver.model()[s4].as_long() == res[3]
solver.add(input_2 == tensor_type.tensor2(D(1, 4), D(1, 1)))
self.assertEquals(solver.check(), z3.sat)
solver.add(add_result == tensor_type.tensor4(d1, d2, d3, d4))
self.assertEquals(solver.check(), z3.sat)
# first dimension could be anything because we have broadcasting
assert solver.model()[s1] == res[0]
assert solver.model()[s2] == res[1]
assert solver.model()[s3] == res[2]
assert solver.model()[s4] == res[3]
def test_conv_reshape_add_0_3(self):
class BasicBlock(torch.nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding, groups, dilation):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels=in_planes, out_channels=out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, groups=groups, bias=False, dilation=dilation)
def forward(self, x: Dyn, y: TensorType([11, 1])):
return torch.add(self.conv1(torch.reshape(x, (1, 2, 10, 20))), y)
B = BasicBlock(2, 2, 2, 3, 2, 2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
new_transformed_c = transform_all_constraints(traced)
solver = z3.Solver()
solver.add(new_transformed_c)
self.assertEquals(solver.check(), z3.unsat)
def test_conv_reshape_add_1(self):
class BasicBlock(torch.nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding, groups, dilation):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels=in_planes, out_channels=out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, groups=groups, bias=False, dilation=dilation)
def forward(self, x: Dyn, y: TensorType([1, 2, 10, 20])):
return torch.add(self.conv1(torch.reshape(x, (1, 2, 10, 20))), y)
B = BasicBlock(2, 2, 2, 3, 2, 2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
new_transformed_c = transform_all_constraints(traced)
solver = z3.Solver()
solver.add(new_transformed_c)
self.assertEquals(solver.check(), z3.unsat)
class GradualTypes(unittest.TestCase):
def test_conv_reshape_unsat(self):
class BasicBlock(torch.nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding, groups, dilation):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels=in_planes, out_channels=out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, groups=groups, bias=False, dilation=dilation)
def forward(self, x: Dyn):
return self.conv1(torch.reshape(x, (1, 2, 10)))
B = BasicBlock(2, 2, 2, 3, 2, 2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
new_transformed_c = transform_all_constraints(traced)
solver = z3.Solver()
solver.add(new_transformed_c)
self.assertEquals(solver.check(), z3.unsat)
def test_conv_reshape0(self):
class BasicBlock(torch.nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding, groups, dilation):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels=in_planes, out_channels=out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, groups=groups, bias=False, dilation=dilation)
def forward(self, x: Dyn):
return self.conv1(torch.reshape(x, (1, 2, 10, 20)))
B = BasicBlock(2, 2, 2, 3, 2, 2, 2)
res = B.forward(torch.rand(20, 20)).size()
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
new_transformed_c = transform_all_constraints(traced)
solver = z3.Solver()
solver.add(new_transformed_c)
self.assertEquals(solver.check(), z3.sat)
conv_result = z3.Const(3, tensor_type)
s1, s2, s3, s4 = z3.Ints('x1 x2 x3 x4')
s11, s22, s33, s44 = z3.Ints('x11 x22 x33 x44')
d1, d2, d3, d4 = D(s11, s1), D(s22, s2), D(s33, s3), D(s44, s4),
solver.add(conv_result == tensor_type.tensor4(d1, d2, d3, d4))
solver.check()
# print(solver.model())
# print(type(solver.model()[s1]))
assert solver.model()[s1].as_long() == res[0]
assert solver.model()[s2].as_long() == res[1]
assert solver.model()[s3].as_long() == res[2]
assert solver.model()[s4].as_long() == res[3]
s1, s2, s3, s4 = z3.Ints('y1 y2 y3 y4')
s11, s22, s33, s44 = z3.Ints('y11 y22 y33 y44')
d1, d2, d3, d4 = D(s11, s1), D(s22, s2), D(s33, s3), D(s44, s4),
input = z3.Const(1, tensor_type)
solver.add(input == tensor_type.tensor4(d1, d2, d3, d4))
# assert solver.check() == sat
# solver.add(s11 == 1)
# solver.add(s22 == 1)
# solver.add(s33 == 1)
# solver.add(s44 == 1)
#
# print(solver.check())
# print(solver.model())
def test_conv_reshape1(self):
class BasicBlock(torch.nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding, groups, dilation):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels=in_planes, out_channels=out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, groups=groups, bias=False, dilation=dilation)
def forward(self, x: TensorType([20, 20])):
return self.conv1(torch.reshape(x, (1, -1, 10, 20)))
B = BasicBlock(2, 2, 2, 3, 2, 2, 2)
res = B.forward(torch.rand(20, 20)).size()
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
new_transformed_c = transform_all_constraints(traced)
solver = z3.Solver()
solver.add(new_transformed_c)
self.assertEquals(solver.check(), z3.sat)
conv_result = z3.Const(3, tensor_type)
s1, s2, s3, s4 = z3.Ints('x1 x2 x3 x4')
s11, s22, s33, s44 = z3.Ints('x11 x22 x33 x44')
d1, d2, d3, d4 = D(s11, s1), D(s22, s2), D(s33, s3), D(s44, s4),
solver.add(conv_result == tensor_type.tensor4(d1, d2, d3, d4))
solver.check()
# print(solver.model())
assert solver.model()[s1].as_long() == res[0]
assert solver.model()[s2].as_long() == res[1]
assert solver.model()[s3].as_long() == res[2]
assert solver.model()[s4].as_long() == res[3]
class TestSingleOperation(unittest.TestCase):
def test_conv_dyn(self):
s1, s2, s3, s4 = z3.Ints('s1 s2 s3 s4')
e1, e2, e3, e4 = z3.Ints('e1 e2 e3 e4')
s11, s22, s33, s44 = z3.Ints('s11 s22 s33 s44')
e11, e22, e33, e44 = z3.Ints('e11 e22 e33 e44')
d1, d2, d3, d4 = D(s11, s1), D(s22, s2), D(s33, s3), D(s44, s4),
b1, b2, b3, b4 = D(e11, e1), D(e22, e2), D(e33, e3), D(e44, e4)
class BasicBlock(torch.nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding, groups, dilation):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels=in_planes, out_channels=out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, groups=groups, bias=False, dilation=dilation)
def forward(self, x: Dyn):
return self.conv1(x)
BasicBlock(2, 2, 2, 2, 2, 2, 2).forward(torch.rand(4, 2, 3, 4))
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock(2, 2, 2, 2, 2, 2, 2))
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced)
solver3 = z3.Solver()
solver3.add(transformed)
assert solver3.check() == z3.sat
x = z3.Const(1, tensor_type)
y = z3.Const(2, tensor_type)
solver3.add(x == tensor_type.tensor4(d1, d2, d3, d4),
y == tensor_type.tensor4(b1, b2, b3, b4))
assert solver3.check() == z3.sat
assert solver3.model()[s1].as_long() == solver3.model()[e1].as_long()
assert solver3.model()[s11].as_long() == solver3.model()[e11].as_long()
solver3.add(s2 != 2)
assert solver3.check() == z3.sat
assert solver3.model()[s22].as_long() == 0
solver3.add(s22 != 0)
self.assertEquals(solver3.check(), z3.unsat)
solver2 = z3.Solver()
solver2.add(transformed)
assert solver2.check() == z3.sat
solver2.add(x == tensor_type.tensor3(d1, d2, d3))
self.assertEquals(solver2.check(), z3.unsat)
def test_add(self):
s1, s2, s3, s4 = z3.Ints('s1 s2 s3 s4')
s11, s22, s33, s44 = z3.Ints('s11 s22 s33 s44')
d1, d2, d3, d4 = D(s11, s1), D(s22, s2), D(s33, s3), D(s44, s4),
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: Dyn, y: Dyn):
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
# make the tensor be of size 1
x = z3.Const(1, tensor_type)
s.add(x == tensor_type.tensor1(D(1, s11)))
self.assertEquals(s.check(), z3.sat)
y = z3.Const(2, tensor_type)
s.add(y == tensor_type.tensor1(D(1, s22)))
self.assertEquals(s.check(), z3.sat)
s.add(s11 == 1) # tensor[1]
s.add(s22 == 2) # tensor[2]
self.assertEquals(s.check(), z3.sat)
class BasicBlock2(torch.nn.Module):
def __init__(self):
super(BasicBlock2, self).__init__()
def forward(self, x: TensorType((Dyn,)), y: Dyn):
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock2())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
# make the tensor be of size 1
x = z3.Const(1, tensor_type)
s.add(x == tensor_type.tensor1(D(1, s11)))
self.assertEquals(s.check(), z3.sat)
y = z3.Const(2, tensor_type)
s.add(y == tensor_type.tensor1(D(1, s22)))
self.assertEquals(s.check(), z3.sat)
s.add(s11 == 4) # tensor[4]
s.add(s22 == 5) # tensor[5]
self.assertEquals(s.check(), z3.unsat)
class BasicBlock3(torch.nn.Module):
def __init__(self):
super(BasicBlock3, self).__init__()
def forward(self, x: TensorType((Dyn,)), y: Dyn):
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock3())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced)
s = z3.Solver()
s.add(transformed)
x = z3.Const(1, tensor_type)
s.add(x == tensor_type.tensor2(d1, d2))
self.assertEquals(s.check(), z3.unsat)
def test_add_padding(self):
s1, s2, s3, s4 = z3.Ints('s1 s2 s3 s4')
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType((Dyn,)), y: TensorType((Dyn, Dyn))):
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
x = z3.Const(1, tensor_type)
s.add(x == tensor_type.tensor1(D(1, s1)))
self.assertEquals(s.check(), z3.sat)
# print(s.model())
def test_add_padding_2(self):
s1, s2, s3, s4 = z3.Ints('s1 s2 s3 s4')
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([Dyn, Dyn]), y: TensorType([Dyn])):
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
# print(s.model())
x = z3.Const(1, tensor_type)
s.add(x == tensor_type.tensor2(D(1, s1), D(1, s2)))
self.assertEquals(s.check(), z3.sat)
y = z3.Const(2, tensor_type)
s.add(y == tensor_type.tensor1(D(0, s3)))
self.assertEquals(s.check(), z3.sat)
add_result = z3.Const(3, tensor_type)
broadcast_res1, broadcast_res2 = z3.Const(4, tensor_type), z3.Const(5, tensor_type)
# print(s.model())
assert s.model()[broadcast_res1].decl() == tensor_type.tensor2
assert s.model()[broadcast_res2].decl() == tensor_type.tensor2
assert s.model()[add_result].decl() == tensor_type.tensor2
assert s.model()[y].decl() == tensor_type.tensor1
# print(s.model())
# prevent broadcasting for that dimension
s.add(s2 > 1)
assert s.check()
# The second dimension of the result is a number, not Dyn.
# However, if the first input dimension had been 1, we would
# have had Dyn in the result, as seen in the next test case.
assert s.model()[add_result].arg(1).arg(0).as_long() != 0
def test_add_padding_3(self):
s1, s2, s3, s4 = z3.Ints('s1 s2 s3 s4')
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([Dyn, 1]), y: TensorType([Dyn])):
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
# print(transformed)
self.assertEquals(s.check(), z3.sat)
x = z3.Const(1, tensor_type)
y = z3.Const(2, tensor_type)
s.add(s2 != 0)
s.add(x == tensor_type.tensor2(D(0, s1), D(s2, 1)))
s.add(y == tensor_type.tensor1(D(0, s3)))
self.assertEquals(s.check(), z3.sat)
# print(s.model())
add_result = z3.Const(3, tensor_type)
assert s.model()[add_result].arg(0).arg(0).as_long() == 0
assert s.model()[add_result].arg(1).arg(0).as_long() == 0
def test_add_padding_4(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([2, 1]), y: TensorType([3])):
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
add_result = z3.Const(3, tensor_type)
assert s.model()[add_result] == tensor_type.tensor2(D(1, 2), D(1, 3))
def test_add_padding_5(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([2, 2]), y: TensorType([3])):
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.unsat)
def test_add_size_3(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([Dyn, Dyn, Dyn]), y: TensorType([Dyn, Dyn, Dyn])):
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
x = z3.Const(1, tensor_type)
y = z3.Const(2, tensor_type)
s1, s2, s3, s4, s5 = z3.Ints('s1 s2 s3 s4 s5')
s.add(x == tensor_type.tensor3(D(1, s1), D(1, 1), D(1, s2)))
s.add(y == tensor_type.tensor3(D(1, s3), D(1, s4), D(1, s5)))
self.assertEquals(s.check(), z3.sat)
s.add(s2 == 5)
self.assertEquals(s.check(), z3.sat)
s.add(s5 == 6)
self.assertEquals(s.check(), z3.unsat)
def test_add_padding_6(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([Dyn]), y: TensorType([Dyn, Dyn, Dyn])):
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
x = z3.Const(1, tensor_type)
y = z3.Const(2, tensor_type)
s1, s2, s3, s4, s5 = z3.Ints('s1 s2 s3 s4 s5')
s.add(x == tensor_type.tensor1(D(1, s1)))
s.add(y == tensor_type.tensor3(D(1, s2), D(1, s3), D(1, s4)))
self.assertEquals(s.check(), z3.sat)
s.add(s1 == 4)
s.add(s4 == 5)
self.assertEquals(s.check(), z3.unsat)
def test_add_padding_7(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([Dyn]), y: TensorType([Dyn, Dyn, Dyn, Dyn])):
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
x = z3.Const(1, tensor_type)
s1, s2, s3, s4, s5 = z3.Ints('s1 s2 s3 s4 s5')
s.add(x == tensor_type.tensor2(D(s1, s2), D(s2, s3)))
self.assertEquals(s.check(), z3.unsat)
def test_add_padding_8(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([Dyn]), y: TensorType([Dyn, Dyn, Dyn, Dyn])):
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
x = z3.Const(1, tensor_type)
y = z3.Const(2, tensor_type)
s1, s2, s3, s4, s5 = z3.Ints('s1 s2 s3 s4 s5')
s.add(x == tensor_type.tensor1(D(s1, 1)))
s.add(s1 >= 0)
self.assertEquals(s.check(), z3.sat)
s.add(y == tensor_type.tensor4(D(0, s2), D(0, s3), D(0, s4), D(0, s5)))
self.assertEquals(s.check(), z3.sat)
def test_add_padding_9(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: Dyn, y: TensorType([Dyn, Dyn, Dyn, Dyn])):
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced, counter=0)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
x = z3.Const(1, tensor_type)
y = z3.Const(2, tensor_type)
s1, s2, s3, s4, s5, s6, s7 = z3.Ints('s1 s2 s3 s4 s5 s6 s7')
s.add(x == tensor_type.tensor1(D(s1, s7)))
s.add(s1 == 1)
self.assertEquals(s.check(), z3.sat)
s.add(y == tensor_type.tensor4(D(0, s2), D(0, s3), D(0, s4), D(s6, s5)))
self.assertEquals(s.check(), z3.sat)
s.add(s6 == 1)
self.assertEquals(s.check(), z3.sat)
s.add(s5 != 1, s7 != 1)
assert s.check()
assert s.model()[s5].as_long() == s.model()[s7].as_long()
def test_conv_static(self):
s1, s2, s3, s4 = z3.Ints('s1 s2 s3 s4')
e1, e2, e3, e4 = z3.Ints('e1 e2 e3 e4')
s11, s22, s33, s44 = z3.Ints('s11 s22 s33 s44')
e11, e22, e33, e44 = z3.Ints('e11 e22 e33 e44')
d1, d2, d3, d4 = D(s11, s1), D(s22, s2), D(s33, s3), D(s44, s4),
b1, b2, b3, b4 = D(e11, e1), D(e22, e2), D(e33, e3), D(e44, e4)
class BasicBlock(torch.nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding, groups, dilation):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels=in_planes, out_channels=out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation)
def forward(self, x: TensorType((1, 2, 10, 20))):
return self.conv1(x)
ast_rewriter = RewritingTracer()
B = BasicBlock(2, 2, 2, 3, 2, 2, 2)
res = B.forward(torch.rand(1, 2, 10, 20)).size()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
new_transformed_c = transform_all_constraints(traced)
solver = z3.Solver()
solver.add(new_transformed_c)
self.assertEquals(solver.check(), z3.sat)
x = z3.Const(1, tensor_type)
y = z3.Const(2, tensor_type)
solver.add(x == tensor_type.tensor4(d1, d2, d3, d4))
solver.add(y == tensor_type.tensor4(b1, b2, b3, b4))
self.assertEquals(solver.check(), z3.sat)
# print(solver.model())
assert solver.model()[e3].as_long() == res[2]
assert solver.model()[e4].as_long() == res[3]
B2 = BasicBlock(2, 4, 5, 2, 9, 2, 2)
res2 = B2.forward(torch.rand(1, 2, 10, 20)).size()
graph2 = ast_rewriter.trace(B2)
traced2 = GraphModule(ast_rewriter.root, graph2, "gm")
new_transformed_c = transform_all_constraints(traced2)
solver = z3.Solver()
solver.add(new_transformed_c)
solver.add(x == tensor_type.tensor4(d1, d2, d3, d4))
solver.add(y == tensor_type.tensor4(b1, b2, b3, b4))
self.assertEquals(solver.check(), z3.sat)
assert solver.model()[e3].as_long() == res2[2]
assert solver.model()[e4].as_long() == res2[3]
def test_reshape_dyn(self):
s11, s22, s33, s44 = z3.Ints('s11 s22 s33 s44')
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: Dyn):
return torch.reshape(x, (2, -1))
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
x = z3.Const(1, tensor_type)
s.add(x == tensor_type.tensor1(D(1, s11)))
self.assertEquals(s.check(), z3.sat)
s.add(z3.Or([s11 == 2, s11 == 4, s11 == 9]))
self.assertEquals(s.check(), z3.sat)
s.add(s11 == 9)
self.assertEquals(s.check(), z3.unsat)
def test_reshape_annotated(self):
s1, s2, s3, s4 = z3.Ints('s1 s2 s3 s4')
s11, s22, s33, s44 = z3.Ints('s11 s22 s33 s44')
d1, d2, d3, d4 = D(s11, s1), D(s22, s2), D(s33, s3), D(s44, s4),
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([Dyn])):
return torch.reshape(x, (2, -1))
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
x = z3.Const(1, tensor_type)
s.add(x == tensor_type.tensor2(d1, d2))
self.assertEquals(s.check(), z3.unsat)
def test_reshape_static_target(self):
s11, s22, s33, s44 = z3.Ints('s11 s22 s33 s44')
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: TensorType([Dyn])):
return torch.reshape(x, (2, 3))
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced)
# print(transformed)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
x = z3.Const(1, tensor_type)
s.add(x == tensor_type.tensor1(D(1, s11)))
s.check()
assert s.model()[s11].as_long() == 6
s.add(s11 != 6)
self.assertEquals(s.check(), z3.unsat)
def test_reshape_static_target2(self):
s11, s22, s33, s44 = z3.Ints('s11 s22 s33 s44')
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: Dyn):
return torch.reshape(x, (2, 3, 1, 1))
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
transformed = transform_all_constraints(traced)
s = z3.Solver()
s.add(transformed)
self.assertEquals(s.check(), z3.sat)
x = z3.Const(1, tensor_type)
s.add(x == tensor_type.tensor1(D(1, s11)))
s.check()
assert s.model()[s11].as_long() == 6
s.add(s11 != 6)
self.assertEquals(s.check(), z3.unsat)
def test_conv2D_maxpool2d_flatten(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 6, 5)
self.pool = torch.nn.MaxPool2d(2, 2)
self.conv2 = torch.nn.Conv2d(6, 16, 5)
self.fc1 = torch.nn.Linear(5, 120)
self.pool2 = torch.nn.AdaptiveAvgPool2d((6, 7))
def forward(self, x : TensorType((4, 3, 32, 32))):
out = self.conv1(x)
out = self.pool(out)
out = self.conv2(out)
out = self.pool(out)
out = self.fc1(out)
out = self.pool2(out)
out = torch.flatten(out, 1)
return out
B = BasicBlock()
res = B.forward(torch.rand(4, 3, 32, 32)).shape
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
constraints = transform_all_constraints(traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
solver.check()
input = z3.Const(1, tensor_type)
solver.add(input == tensor_type.tensor4(D(1, 4), D(1, 3), D(1, 32), D(1, 32)))
solver.check()
output = z3.Const(48, tensor_type)
assert solver.model()[output].arg(0).arg(1) == res[0]
assert solver.model()[output].arg(1).arg(1) == res[1]
def test_conv2D_maxpool2d_flatten_unsat(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 6, 5)
self.pool = torch.nn.MaxPool2d(2, 2)
self.conv2 = torch.nn.Conv2d(6, 16, 5)
self.fc1 = torch.nn.Linear(5, 120)
self.pool2 = torch.nn.AdaptiveAvgPool2d((6, 7))
def forward(self, x : TensorType((4, 3, 32, 32))):
out = self.conv1(x)
out = self.pool(out)
out = self.conv2(out)
out = self.pool(out)
out = self.fc1(out)
out = self.pool2(out)
out = torch.flatten(out, 1)
return out
B = BasicBlock()
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
constraints = transform_all_constraints(traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
solver.check()
input = z3.Const(1, tensor_type)
solver.add(input == tensor_type.tensor4(D(1, 4), D(1, 3), D(1, 32), D(1, 45)))
self.assertEquals(solver.check(), z3.unsat)
def test_conv2D_maxpool2d_flatten_dyn(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 6, 5)
self.pool = torch.nn.MaxPool2d(2, 2)
self.conv2 = torch.nn.Conv2d(6, 16, 5)
self.fc1 = torch.nn.Linear(5, 120)
self.pool2 = torch.nn.AdaptiveAvgPool2d((6, 7))
def forward(self, x : TensorType((Dyn, 3, 32, 32))):
out = self.conv1(x)
out = self.pool(out)
out = self.conv2(out)
out = self.pool(out)
out = self.fc1(out)
out = self.pool2(out)
out = torch.flatten(out, 1)
return out
B = BasicBlock()
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
constraints = transform_all_constraints(traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
self.assertEquals(solver.check(), z3.sat)
def test_type_check_flatten(self):
s1, s2, s3, s4 = z3.Ints('s1 s2 s3 s4')
class M(torch.nn.Module):
def forward(self, x: TensorType([2, 3, 4, 5])):
return torch.flatten(x, start_dim=1, end_dim=3)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
constraints = transform_all_constraints(symbolic_traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
self.assertEquals(solver.check(), z3.sat)
flatten = z3.Const(2, tensor_type)
res = M().forward(torch.rand(2, 3, 4, 5)).size()
assert solver.model()[flatten].arg(0).arg(1) == res[0]
assert solver.model()[flatten].arg(1).arg(1) == res[1]
class M(torch.nn.Module):
def forward(self, x: TensorType([2, 3, Dyn, 5])):
return torch.flatten(x, start_dim=1, end_dim=3)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
constraints = transform_all_constraints(symbolic_traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
self.assertEquals(solver.check(), z3.sat)
x = z3.Const(1, tensor_type)
y = z3.Const(2, tensor_type)
solver.add(x == tensor_type.tensor4(D(1, 2), D(1, 3), D(0, s1), D(1, 5)))
self.assertEquals(solver.check(), z3.sat)
assert solver.model()[y].arg(1).arg(0) == 0
class M(torch.nn.Module):
def forward(self, x: TensorType([2, 3, Dyn])):
return torch.flatten(x, 10, 0)
module = M()
# print(module.forward(torch.rand(2,3,5)).shape)
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
constraints = transform_all_constraints(symbolic_traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
self.assertEquals(solver.check(), z3.unsat)
class ConstraintGeneration(unittest.TestCase):
def test_add_reshape(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
def forward(self, x: Dyn, y: Dyn):
return torch.add(torch.reshape(x, (1, 2)), torch.reshape(y, (2, 2)))
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(BasicBlock())
traced = GraphModule(ast_rewriter.root, graph, "gm")
generator = ConstraintGenerator(traced)
new_constraints, counter = generator.generate_constraints(0)
assert len(new_constraints.conjucts) == 11
def test_conv_reshape_add(self):
class BasicBlock(torch.nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding, groups, dilation):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels=in_planes, out_channels=out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, groups=groups, bias=False, dilation=dilation)
def forward(self, x: Dyn, y: Dyn):
return torch.add(self.conv1(torch.reshape(x, (1, 2, 10, 20))), y)
B = BasicBlock(2, 2, 2, 3, 2, 2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
generator = ConstraintGenerator(traced)
new_constraints, counter = generator.generate_constraints(0)
assert len(new_constraints.conjucts) == 16
class TestInternalConstraints(unittest.TestCase):
def test_precision(self):
c1 = BinConstraintT(Dyn, TVar('x'), op_precision)
transformed, _ = transform_constraint(c1, 0)
assert transformed == T()
c2 = BinConstraintT(TensorType([1, Dyn, 3]), TVar('x'), op_precision)
transformed, counter = transform_constraint(c2, 0)
assert len(transformed.conjucts) == 7
def test_matching(self):
c1 = BinConstraintT(TVar('x'),
TensorType([DVar('a'), DVar('b'), DVar('c'), DVar('d')]), op_matching)
transformed, _ = transform_constraint(c1, 0)
assert len(transformed.disjuncts) == 2
def test_consistency(self):
c1 = BinConstraintT(TVar('x'),
TensorType([DVar('a'), DVar('b')]), op_consistency)
transformed, count = transform_constraint(c1, 0)
assert len(transformed.disjuncts) == 5
transformed, count = transform_constraint(transformed, count)
assert len(transformed.disjuncts) == 5
# def test_apply_broadcasting(self):
# c1 = ApplyBroadcasting(TVar(1), TVar(2), TVar(3), TVar(4))
# transformed, count = transform_apply_broadcasting(c1, 5)
# assert len(transformed.conjucts) == 41
@skipIfNoTorchVision
class TestResNet(unittest.TestCase):
def test_resnet50_unsat(self):
traced = symbolic_trace(models.resnet50())
for n in traced.graph.nodes:
n.type = Dyn
constraints = transform_all_constraints(traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
input = z3.Const(1, tensor_type)
# input with 3 dimensions
solver.add(input == tensor_type.tensor3(D(1, 1), D(1, 3), D(1, 224)))
self.assertEquals(solver.check(), z3.unsat)
def test_resnet50(self):
traced = symbolic_trace(models.resnet50())
for n in traced.graph.nodes:
n.type = Dyn
sample_input = torch.randn(1, 3, 224, 224)
res = models.resnet50().forward(sample_input).size()
constraints = transform_all_constraints(traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
self.assertEquals(solver.check(), z3.sat)
linear = z3.Const(650, tensor_type)
input = z3.Const(1, tensor_type)
solver.add(input == tensor_type.tensor4(D(1, 1), D(1, 3), D(1, 224), D(1, 224)))
self.assertEquals(solver.check(), z3.sat)
assert solver.model()[linear] == tensor_type.tensor2(D(1, res[0]), D(1, res[1]))
def test_resnet502(self):
traced = symbolic_trace(models.resnet50())
for n in traced.graph.nodes:
n.type = Dyn
constraints = transform_all_constraints(traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
linear = z3.Const(650, tensor_type)
input = z3.Const(1, tensor_type)
batch = z3.Int('b')
solver.add(input == tensor_type.tensor4(D(1, batch), D(1, 3), D(1, 224), D(1, 224)))
solver.add(batch > 4)
solver.check()
assert solver.model()[batch] == solver.model()[linear].arg(0).arg(1)
def test_resnet503(self):
traced = symbolic_trace(models.resnet50())
for n in traced.graph.nodes:
n.type = Dyn
constraints = transform_all_constraints(traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
linear = z3.Const(650, tensor_type)
input = z3.Const(1, tensor_type)
batch, d1, d2 = z3.Ints('b d1 d2')
solver.add(input == tensor_type.tensor4(D(1, batch), D(1, 3), D(1, 224), D(1, 224)))
solver.add(linear == tensor_type.tensor2(D(1, d1), D(1, d2)))
self.assertEquals(solver.check(), z3.sat)
solver.add(batch != d1)
self.assertEquals(solver.check(), z3.unsat)
@skipIfNoTorchVision
class TestAlexNet(unittest.TestCase):
def test_alexnet1(self):
alexnet = models.alexnet()
symbolic_traced : torch.fx.GraphModule = symbolic_trace(alexnet)
for n in symbolic_traced.graph.nodes:
n.type = Dyn
# print(symbolic_traced)
res = alexnet.forward(torch.rand(10, 3, 227, 227)).size()
constraints = transform_all_constraints(symbolic_traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
self.assertEquals(solver.check(), z3.sat)
input = z3.Const(1, tensor_type)
conv = z3.Const(2, tensor_type)
solver.add(input == tensor_type.tensor4(D(1, 10), D(1, 3), D(1, 227), D(1, 227)))
self.assertEquals(solver.check(), z3.sat)
assert solver.model()[conv] == tensor_type.tensor4(D(1, 10), D(1, 64), D(1, 56), D(1, 56))
relu = z3.Const(7, tensor_type)
assert solver.model()[relu] == tensor_type.tensor4(D(1, 10), D(1, 64), D(1, 56), D(1, 56))
maxpool = z3.Const(8, tensor_type)
assert solver.model()[maxpool] == tensor_type.tensor4(D(1, 10), D(1, 64), D(1, 27), D(1, 27))
maxpool2 = z3.Const(42, tensor_type)
assert solver.model()[maxpool2] == tensor_type.tensor4(D(1, 10), D(1, 256), D(1, 6), D(1, 6))
flatten = z3.Const(52, tensor_type)
assert solver.model()[flatten] == tensor_type.tensor2(D(1, 10), D(1, 9216))
linear = z3.Const(64, tensor_type)
assert solver.model()[linear] == tensor_type.tensor2(D(1, 10), D(1, 4096))
linear2 = z3.Const(109, tensor_type)
assert solver.model()[linear2] == tensor_type.tensor2(D(1, res[0]), D(1, res[1]))
def test_alexnet2(self):
alexnet = models.alexnet()
symbolic_traced : torch.fx.GraphModule = symbolic_trace(alexnet)
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
n.type = TensorType([Dyn, 4, 227, 227])
constraints = transform_all_constraints(symbolic_traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
self.assertEquals(solver.check(), z3.unsat)
def test_alexnet3(self):
alexnet = models.alexnet()
symbolic_traced : torch.fx.GraphModule = symbolic_trace(alexnet)
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
n.type = TensorType([Dyn, Dyn, 227, 227])
constraints = transform_all_constraints(symbolic_traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
self.assertEquals(solver.check(), z3.sat)
def test_alexnet4(self):
alexnet = models.alexnet()
symbolic_traced : torch.fx.GraphModule = symbolic_trace(alexnet)
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
n.type = TensorType([Dyn, Dyn, 227])
constraints = transform_all_constraints(symbolic_traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
self.assertEquals(solver.check(), z3.unsat)
if __name__ == '__main__':
unittest.main()
|
pytorch-master
|
test/fx/test_z3_gradual_types.py
|
# Owner(s): ["module: fx"]
import os
import sys
import torch
from torch.fx import symbolic_trace, subgraph_rewriter
from torch.fx.annotate import annotate
# Make the helper files in test/ importable
from torch.fx.experimental.rewriter import RewritingTracer
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_fx.py TESTNAME\n\n"
"instead.")
class TestSubgraphRewriter(JitTestCase):
def test_subgraph_rewriter_preserves_logic(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x) + torch.relu(x)
return torch.add(val, val)
def pattern(x):
return torch.neg(x) + torch.relu(x)
def comparison(x):
val = torch.neg(x) + torch.relu(x)
return torch.add(val, val)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.rand(1, 3)
# Replace `pattern` with the same pattern (shouldn't change
# the underlying logic)
subgraph_rewriter.replace_pattern(traced, pattern, pattern)
traced.graph.lint()
ref_output = comparison_fn(x)
test_output = traced.forward(x)
self.assertEqual(ref_output, test_output)
def test_subgraph_rewriter_with_oneliner_pattern(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x)
return torch.add(val, val)
def pattern(x):
return torch.neg(x)
def replacement(x):
return torch.relu(x)
def comparison(x):
val = torch.relu(x)
return torch.add(val, val)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.rand(1, 3)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_output = comparison_fn(x)
test_output = traced.forward(x)
self.assertEqual(ref_output, test_output)
def test_subgraph_rewriter_single_pattern_match(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x) + torch.relu(x)
return torch.add(val, val)
def pattern(x):
return torch.neg(x) + torch.relu(x)
def replacement(x):
return torch.relu(x)
def comparison(x):
val = torch.relu(x)
return torch.add(val, val)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.rand(1, 3)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_output = comparison_fn(x)
test_output = traced.forward(x)
self.assertEqual(ref_output, test_output)
def test_subgraph_rewriter_multiple_pattern_match(self):
class M(torch.nn.Module):
def forward(self, x, w1, w2):
m1 = torch.cat([w1, w2]).sum()
m2 = torch.cat([w1, w2]).sum()
return x + torch.max(m1) + torch.max(m2)
def pattern(w1, w2):
return torch.cat([w1, w2]).sum()
def replacement(w1, w2):
return torch.stack([w1, w2])
def comparison(x, w1, w2):
m1 = torch.stack([w1, w2])
m2 = torch.stack([w1, w2])
return x + torch.max(m1) + torch.max(m2)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.rand(1, 3)
w1 = torch.rand(1, 3)
w2 = torch.rand(1, 3)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x, w1, w2)
test_outs = traced.forward(x, w1, w2)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_graph_argument_order(self):
class M(torch.nn.Module):
def forward(self, x, y):
return torch.mm(x, y)
def pattern(x, y):
return torch.mm(x, y)
def comparison(x, y):
return torch.mm(x, y)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
y = torch.randn(4, 5)
subgraph_rewriter.replace_pattern(traced, pattern, pattern)
traced.graph.lint()
ref_outs = comparison_fn(x, y)
test_outs = traced.forward(x, y)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_correct_output_replacement(self):
class M(torch.nn.Module):
def forward(self, x, y):
val = torch.neg(y) + torch.relu(x)
return torch.add(val, val)
def pattern(x):
return torch.relu(x)
def replacement(x):
return torch.neg(x)
def comparison(x, y):
val = torch.neg(y) + torch.neg(x)
return torch.add(val, val)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(4, 4)
y = torch.randn(4, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x, y)
test_outs = traced.forward(x, y)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_traced_as_callable(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x) + torch.relu(x)
return torch.add(val, val)
class Pattern(torch.nn.Module):
def forward(self, x):
return torch.neg(x) + torch.relu(x)
class Replacement(torch.nn.Module):
def forward(self, x):
return torch.sigmoid(x)
def comparison(x):
val = torch.sigmoid(x)
return torch.add(val, val)
traced = symbolic_trace(M())
traced_pattern = symbolic_trace(Pattern())
traced_replacement = symbolic_trace(Replacement())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, traced_pattern, traced_replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_pattern_is_entire_graph(self):
class M(torch.nn.Module):
def forward(self, x):
a = torch.neg(x)
return torch.add(a, a)
def pattern(x):
a = torch.neg(x)
return torch.add(a, a)
def replacement(x):
a = torch.sigmoid(x)
return torch.cat([a, a])
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(replacement)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_pattern_output_pattern_node_can_have_users_that_are_not_matched(self):
class M(torch.nn.Module):
def forward(self, x):
y = torch.relu(x)
return torch.neg(y) - y
def pattern(x):
return torch.relu(x)
def replacement(x):
return torch.sigmoid(x)
def comparison(x):
y = torch.sigmoid(x)
return torch.neg(y) - y
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_internal_pattern_nodes_cannot_have_users_that_are_not_matched(self):
class M(torch.nn.Module):
def forward(self, x, w1, w2, b1, b2):
m0 = torch.cat([w1, w2])
m1 = torch.cat([w1, w2])
m2 = torch.cat([x, b2])
t0 = torch.addmm(b1, m1, m2.t())
t1 = torch.sum(w1, 1)
t2 = torch.addmm(b1, m1, m2.t())
return torch.sum(t1), torch.sum(t2)
def pattern(x, w1, w2, b1, b2):
m1 = torch.cat([w1, w2])
m2 = torch.cat([x, b2])
return torch.addmm(b1, m1, m2.t())
def replacement(x, w1, w2, b1, b2):
return torch.cat([x, w1, w2])
traced = symbolic_trace(M())
# Result should be [] since no matches can be found
res = subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
self.assertEqual(res, [])
def test_subgraph_rewriter_placeholder_matching(self):
"""
This tests that a placeholder Node can be matched to a Node with
a different number of input Nodes. In the example below, the
original traced Module looks like this:
opcode target args kwargs
------------- ---------------------------------------------------------- ------------------------ --------
placeholder x () {}
call_function <built-in function add> (x, 3) {}
call_method dequantize (add,) {}
call_function <built-in method sigmoid of type object at 0x7f7c1f440fe0> (dequantize,) {}
call_method to (sigmoid, torch.float16) {}
output output (to,) {}
while the pattern we want to match looks like this:
opcode target args kwargs
------------- ---------------------------------------------------------- ------------------------ --------
placeholder x () {}
call_method dequantize (x,) {}
call_function <built-in method sigmoid of type object at 0x7f7c1f440fe0> (dequantize,) {}
call_method to (sigmoid, torch.float16) {}
output output (to,) {}
Here, we want to be able to match the original graph's
`call_function.add` Node with the pattern graph's
`placeholder.x` Node.
Credit to Jerry Zhang (GitHub: jerryzh168) for this test case
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.dtype = torch.float16
def forward(self, x):
x += 3
x = x.dequantize()
x = torch.sigmoid(x)
dtype = self.dtype
x = x.to(dtype)
return x
def pattern(x):
x = x.dequantize()
x = torch.sigmoid(x)
x = x.to(torch.float16)
return x
def replacement(x):
return x
def comparison(x):
return x + 3
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_replaces_referenced_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.sigmoid = torch.nn.Sigmoid()
self.submod = torch.nn.ReLU()
def forward(self, x):
x = x + 1
return self.submod(self.sigmoid(x))
class Pattern(torch.nn.Module):
def __init__(self):
super().__init__()
self.sigmoid = torch.nn.Sigmoid()
self.submod = torch.nn.ReLU()
def forward(self, x):
return self.submod(self.sigmoid(x))
class Replacement(torch.nn.Module):
def __init__(self):
super().__init__()
self.tanh = torch.nn.Tanh()
self.submod = torch.nn.ReLU()
def forward(self, x):
return self.submod(self.tanh(x))
class Comparison(torch.nn.Module):
def __init__(self):
super().__init__()
self.tanh = torch.nn.Tanh()
self.submod = torch.nn.ReLU()
def forward(self, x):
x = x + 1
return self.submod(self.tanh(x))
traced = symbolic_trace(M())
comparison = Comparison()
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, Pattern(), Replacement())
traced.graph.lint()
ref_outs = comparison(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
traced.get_submodule("tanh")
with self.assertRaisesRegex(AttributeError, "has no attribute"):
traced.get_submodule("sigmoid")
submod = traced.get_submodule("submod")
self.assertEqual(type(submod), torch.nn.ReLU)
def test_subgraph_rewriter_annotations_int(self):
class M1(torch.nn.Module):
def forward(self, x):
y: int = x
return torch.add(x, y)
class M2(torch.nn.Module):
def forward(self, x):
y = annotate(x, int)
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M1())
module = M2()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
for n, m in zip(symbolic_traced.graph.nodes, graph.nodes):
if n.op == 'placeholder':
assert n.type == int
assert m.type == int
def test_subgraph_writer_replace_consecutive_submodules(self):
def f(x):
x = torch.sigmoid(x)
x = torch.sigmoid(x)
return torch.sigmoid(x)
def pattern(x):
return torch.sigmoid(x)
def replacement(x):
return torch.exp(x)
def comparison(x):
x = torch.exp(x)
x = torch.exp(x)
return torch.exp(x)
traced = symbolic_trace(f)
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
|
pytorch-master
|
test/fx/test_subgraph_rewriter.py
|
# Owner(s): ["module: fx"]
from __future__ import annotations # type: ignore[attr-defined]
import torch
import typing
from torch.fx import symbolic_trace
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
# No forward references
class M1(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
# Forward references
class M2(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
# Non-torch annotation with no internal forward references
class M3(torch.nn.Module):
def forward(self, x: typing.List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
# Non-torch annotation with internal forward references
class M4(torch.nn.Module):
def forward(self, x: typing.List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x[0])
x = torch.rand(2, 3)
ref = torch.add(x, x)
traced1 = symbolic_trace(M1())
res1 = traced1(x, A())
assert torch.all(torch.eq(ref, res1))
traced2 = symbolic_trace(M2())
res2 = traced2(x, A())
assert torch.all(torch.eq(ref, res2))
traced3 = symbolic_trace(M3())
res3 = traced3([x], A())
assert torch.all(torch.eq(ref, res3))
traced4 = symbolic_trace(M4())
res4 = traced4([x], A())
assert torch.all(torch.eq(ref, res4))
|
pytorch-master
|
test/fx/test_future.py
|
r'''
**This file is EXPERIMENTAL and is mostly used for testing purposes! Do not
rely on it for anything!**
'''
from torch.fx import Graph, GraphModule
from torch.fx.graph import map_arg
from torch.fx.proxy import Proxy
import sys
import torch
from torch.nn.utils import fuse_conv_bn_weights
import operator
# A pattern can be a module type, a builtin function, or a string to match a node's target.
def _minmax_scale_zeropoint(min_val, max_val, qmin=-127, qmax=128, eps=torch.finfo(torch.float32).eps):
min_val = min(0.0, min_val)
max_val = max(0.0, max_val)
if max_val == min_val:
return 1.0, 0
else:
scale = (max_val - min_val) / float(qmax - qmin)
scale = max(scale, eps)
zero_point = qmin - round(min_val / scale)
zero_point = max(qmin, zero_point)
zero_point = min(qmax, zero_point)
zero_point = int(zero_point)
return scale, zero_point
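# Worked example (illustrative, added for exposition): with the qmin=0,
# qmax=255 range that MinMaxObserver.scale_zeropoint passes below, observed
# values spanning [0.0, 2.55] give scale = (2.55 - 0.0) / 255 ≈ 0.01 and
# zero_point = 0 - round(0.0 / 0.01) == 0, which already lies in [qmin, qmax].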
class MinMaxObserver:
def __init__(self, quantizer, node):
self.min, self.max = float('inf'), float('-inf')
self.all_tensors = True
def observe(self, node, env):
v = env[node.name]
if not isinstance(v, torch.Tensor):
self.all_tensors = False
return
self.max = max(self.max, float(v.max()))
self.min = min(self.min, float(v.min()))
def scale_zeropoint(self):
return _minmax_scale_zeropoint(self.min, self.max, qmin=0, qmax=255)
class NoObserver:
def __init__(self, quantizer, node):
pass
def observe(self, node, env):
pass
DEFAULT_QUANTIZATION_PATTERNS = {}
def register_pattern(pattern):
def insert(fn):
DEFAULT_QUANTIZATION_PATTERNS[pattern] = fn
return fn
return insert
@register_pattern(operator.add)
class Add(MinMaxObserver):
def quantize(self, quantizer, node, load_arg):
if not self.all_tensors:
return NotImplemented
scale, zeropoint = self.scale_zeropoint()
return quantizer.quantized_graph.create_node(
'call_function', torch.ops.quantized.add, load_arg(node.args), {'scale': scale, 'zero_point': zeropoint})
class Relu(NoObserver):
def quantize(self, quantizer, node, load_arg):
return torch.relu(load_arg(node.args[0])) # torch.relu works directly on quantized tensors?
# these ops have quantized equivalents that do not need any extra information
@register_pattern(torch.nn.ReLU)
@register_pattern(torch.nn.AvgPool2d)
@register_pattern(torch.nn.MaxPool2d)
@register_pattern(torch.nn.AdaptiveAvgPool2d)
class CopyNode(NoObserver):
def quantize(self, quantizer, node, load_arg):
return quantizer.quantized_graph.node_copy(node, load_arg)
class IdentityModule(torch.nn.Module):
def forward(self, x):
return x
# handle conv, maybe followed by bn, maybe followed by relu
@register_pattern(torch.nn.modules.conv.Conv2d)
@register_pattern((torch.nn.ReLU, torch.nn.modules.conv.Conv2d))
@register_pattern((torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.conv.Conv2d))
@register_pattern((torch.nn.ReLU, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.conv.Conv2d)))
class ConvNormRelu(MinMaxObserver):
def __init__(self, quantizer, node):
super().__init__(quantizer, node)
self.relu_node, self.bn_node = None, None
if isinstance(quantizer.modules[node.target], torch.nn.ReLU):
self.relu_node = node
node = node.args[0]
if isinstance(quantizer.modules[node.target], torch.nn.BatchNorm2d):
self.bn_node = node
self.bn = quantizer.modules[self.bn_node.target]
node = node.args[0]
assert isinstance(quantizer.modules[node.target], torch.nn.modules.Conv2d)
self.conv_node = node
self.conv = quantizer.modules[self.conv_node.target]
def quantize(self, quantizer, node, load_arg):
mod = self.conv
weight, bias = mod.weight, mod.bias
if self.bn_node is not None:
weight, bias = fuse_conv_bn_weights(
weight, bias, self.bn.running_mean, self.bn.running_var,
self.bn.eps, self.bn.weight, self.bn.bias)
min_val, max_val = float(weight.min()), float(weight.max())
act_scale, act_zp = self.scale_zeropoint()
weight_scale, weight_zp = _minmax_scale_zeropoint(min_val, max_val)
qweight = torch.quantize_per_tensor(weight, weight_scale, weight_zp, torch.qint8)
ctor = torch.nn.intrinsic.quantized.ConvReLU2d if self.relu_node is not None else torch.nn.quantized.Conv2d
qconv = ctor(mod.in_channels, mod.out_channels, mod.kernel_size,
mod.stride, mod.padding, mod.dilation, mod.groups,
mod.bias is not None, mod.padding_mode)
qconv.set_weight_bias(qweight, bias)
qconv.scale = float(act_scale)
qconv.zero_point = int(act_zp)
parent_name, name = _parent_name(self.conv_node.target)
setattr(quantizer.modules[parent_name], name, qconv)
if self.bn_node is not None:
parent_bn, bn_name = _parent_name(self.bn_node.target)
# we can't just delete this because the submodule's forward (which is no longer
# used) may still try to call it, so replace it with something that does nothing.
setattr(quantizer.modules[parent_bn], bn_name, IdentityModule())
return quantizer.quantized_graph.create_node('call_module', self.conv_node.target, (load_arg(self.conv_node.args[0]),), {})
# turn foo.bar -> ['foo', 'bar']
def _parent_name(target):
r = target.rsplit('.', 1)
if len(r) == 1:
return '', r[0]
else:
return r[0], r[1]
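# e.g. _parent_name('layer1.conv') -> ('layer1', 'conv')
#      _parent_name('conv')        -> ('', 'conv')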
class DefaultQuant(MinMaxObserver):
def quantize(self, input):
assert self.all_tensors
scale, zeropoint = self.scale_zeropoint()
return torch.quantize_per_tensor(Proxy(input), scale, zeropoint, torch.quint8).node
def matches(modules, node, pattern, max_uses=sys.maxsize):
if isinstance(pattern, tuple):
self_match, *arg_matches = pattern
else:
self_match = pattern
arg_matches = None
if len(node.users) > max_uses:
return False
if isinstance(self_match, type) and issubclass(self_match, torch.nn.Module):
if node.op != 'call_module':
return False
if not isinstance(modules[node.target], self_match):
return False
elif callable(self_match):
if node.op != 'call_function' or node.target is not self_match:
return False
elif node.target != self_match:
return False
if not arg_matches:
return True
if len(arg_matches) != len(node.args):
return False
return all(matches(modules, node, arg_match, max_uses=1) for node, arg_match in zip(node.args, arg_matches))
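# For example, the nested pattern
#   (torch.nn.ReLU, (torch.nn.BatchNorm2d, torch.nn.Conv2d))
# matches a call_module node whose module is a ReLU and whose single argument is
# a call_module node for a BatchNorm2d that in turn takes a Conv2d node as input.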
class Quantizer:
def __init__(self, mod, patterns=DEFAULT_QUANTIZATION_PATTERNS, quant_ctor=DefaultQuant):
self.root = mod
self.graph = mod.graph
self.quant_ctor = quant_ctor
# cached information for observe
self.state_dict = self.root.state_dict()
self.modules = dict(self.root.named_modules())
# match the patterns that will get quantized
self.matches = self._find_matches(patterns)
# find _inputs_ to matched nodes that are not quantized; these
# have to be quantized, which requires measuring stats, so
# initialize a quant_ctor object for each of them
self.quants = self._find_quants(quant_ctor)
def observe(self, args):
# most of this function is just an interpreter for the graph
# it would be possible to put this in some abstraction, but
# it is pretty nice to just be able to see exactly what is happening here
# and hack on it.
# maybe we should just provide an example interpreter that people copy/paste
# then edit.
args_iter = iter(args)
env = {}
def load_arg(a):
return map_arg(a, lambda node: env[node.name])
output_node : Optional[Node] = None
for node in self.graph.nodes:
if node.op == 'placeholder':
result = next(args_iter)
elif node.op == 'get_attr':
result = self.state_dict[node.target]
elif node.op == 'call_function':
result = node.target(*load_arg(node.args), **load_arg(node.kwargs))
elif node.op == 'call_method':
self_obj, *args = load_arg(node.args)
kwargs = load_arg(node.kwargs)
result = getattr(self_obj, node.target)(*args, **kwargs)
elif node.op == 'call_module':
result = self.modules[node.target](*load_arg(node.args), **load_arg(node.kwargs))
elif node.op == 'output':
return load_arg(node.args[0])
env[node.name] = result
root_node, obj = self.matches.get(node.name, (None, None))
if root_node is node:
obj.observe(node, env)
if node.name in self.quants:
self.quants[node.name].observe(node, env)
raise RuntimeError('Graph had no output node!')
def quantize(self):
self.quantized_graph = Graph()
env = {}
quant_env = {}
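# env maps node names to float (dequantized) nodes in the new graph, while
# quant_env maps node names to their quantized counterparts; load_arg below
# converts lazily between the two as each consumer asks for an argument.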
def load_arg(n, quantized):
if not quantized:
if n.name not in env and n.name in quant_env:
env[n.name] = Proxy(quant_env[n.name]).dequantize().node
return env[n.name]
else:
if n.name not in quant_env and n.name in env:
quant_env[n.name] = self.quants[n.name].quantize(env[n.name])
return quant_env[n.name]
def copy_recursive(node):
def load_or_emit(n):
if n.name in env or n.name in quant_env:
return load_arg(n, quantized=False)
else:
return copy_recursive(n)
r = env[node.name] = self.quantized_graph.node_copy(node, lambda n: load_arg(n, quantized=False))
return r
for node in self.graph.nodes:
root_node, obj = self.matches.get(node.name, (None, None))
if root_node is None:
# not quantized just copy it
env[node.name] = self.quantized_graph.node_copy(node, lambda n: load_arg(n, quantized=False))
elif root_node is node:
r = obj.quantize(self, node, lambda a: map_arg(a, lambda n: load_arg(n, quantized=True)))
if r is NotImplemented:
# the quantizer chose not to quantize this node; take the entire match and just copy it over
env[node.name] = copy_recursive(node)
else:
quant_env[node.name] = r
return GraphModule(self.root, self.quantized_graph)
def _find_matches(self, patterns):
modules = dict(self.root.named_modules())
match_map = {} # node name -> (root_node, match_value?)
def apply_match(pattern, node, match):
if isinstance(pattern, tuple):
s, *args = pattern
apply_match(s, node, match)
for subpattern, arg in zip(args, node.args):
apply_match(subpattern, arg, match)
else:
match_map[node.name] = match
for node in reversed(self.graph.nodes):
if node.name not in match_map:
for pattern, value in patterns.items():
if matches(modules, node, pattern):
apply_match(pattern, node, (node, value(self, node)))
return match_map
def _find_quants(self, quant_ctor):
quants = {}
def visit_arg(n):
# note: we have to measure quantization information
# even for nodes where we might not use it because it is already
# quantized. This is because each match has the option to
# say NotImplemented (if for instance, it is an __add__ and the data type is not appropriate)
if n.name not in quants:
quants[n.name] = quant_ctor(self, n)
for node in self.graph.nodes:
if node.name in self.matches:
map_arg(node.args, visit_arg)
map_arg(node.kwargs, visit_arg)
return quants
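# Hypothetical end-to-end usage sketch of the Quantizer above (the model and
# input names are assumptions, not part of this file):
#   gm = torch.fx.symbolic_trace(my_float_model)
#   quantizer = Quantizer(gm)
#   quantizer.observe((example_input,))   # run once to collect min/max stats
#   quantized_gm = quantizer.quantize()   # emit the quantized GraphModule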
|
pytorch-master
|
test/fx/quantization.py
|
# Owner(s): ["module: fx"]
import unittest
import torch
from torch.fx import symbolic_trace
from torch.fx.experimental.unify_refinements import infer_symbolic_types
from torch.fx.experimental.refinement_types import Equality
from torch.fx.tensor_type import TensorType, Dyn, is_consistent, is_more_precise
from torch.fx.annotate import annotate
from torch.fx.experimental.graph_gradual_typechecker import GraphTypeChecker, broadcast_types, Refine
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx import GraphModule
from torch.fx.passes.shape_prop import ShapeProp
from torch.testing._internal.common_utils import TestCase
try:
import sympy
HAS_SYMPY = True
except ImportError:
HAS_SYMPY = False
skipIfNoSympy = unittest.skipIf(not HAS_SYMPY, "no sympy")
try:
from torchvision.models import resnet50
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return torch.nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
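# For reference, the type-checker tests below rely on the standard Conv2d
# output-size rule,
#   H_out = floor((H_in + 2*padding - dilation*(kernel_size - 1) - 1) / stride + 1)
# (and likewise for W), which is why the symbolic tests later expect
# sympy.floor expressions in the inferred types.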
class AnnotationsTest(TestCase):
def test_annotations(self):
"""
Test type annotations in the forward function.
The annotation should appear on the corresponding node n
in the resulting graph.
"""
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2, 3, Dyn)), y: Dyn):
return torch.add(x, y)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
expected_ph_types = [TensorType((1, 2, 3, Dyn)), Dyn]
expected_iter = iter(expected_ph_types)
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
assert n.type == next(expected_iter)
def test_annotate(self):
class M(torch.nn.Module):
def forward(self, x):
y = annotate(x, TensorType((1, 2, 3, Dyn)))
return torch.add(x, y)
module = M()
symbolic_traced : torch.fx.GraphModule = symbolic_trace(module)
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
assert n.type == TensorType((1, 2, 3, Dyn))
def test_consistency(self):
"""
Test the consistency relation.
"""
self.assertTrue(is_consistent(TensorType((1, 2, 3)), TensorType((1, Dyn, 3))))
self.assertTrue(is_consistent(int, Dyn))
self.assertTrue(is_consistent(int, int))
self.assertFalse(is_consistent(TensorType((1, 2, 3)), TensorType((1, 2, 3, 5))))
self.assertFalse(is_consistent(TensorType((1, 2, 3)), int))
def test_precision(self):
"""
Test the precision relation.
"""
self.assertTrue(is_more_precise(TensorType((1, 2, 3)), TensorType((1, Dyn, 3))))
self.assertTrue(is_more_precise(int, Dyn))
self.assertTrue(is_more_precise(int, int))
self.assertFalse(is_more_precise(TensorType((1, 2, 3)), TensorType((1, 2, 3, 5))))
self.assertFalse(is_more_precise(TensorType((1, 2, 3)), int))
def test_broadcasting1(self):
t1 = TensorType((1, 2, 3, 4))
t2 = TensorType((1, 2, 1, 4))
t3 = TensorType(())
t4 = TensorType((4, 1))
t5 = TensorType((4, 4, 4))
# todo switch all code to use list instead of tuple
t6 = TensorType([1])
assert broadcast_types(t1, t2) == (TensorType((1, 2, 3, 4)), TensorType((1, 2, 3, 4)))
assert broadcast_types(t3, t4) == (t4, t4)
assert broadcast_types(t5, t6) == (t5, t5)
def test_broadcasting2(self):
t1 = TensorType((2, 3, 4))
t2 = TensorType((1, 2, 1, 4))
assert broadcast_types(t1, t2) == (TensorType((1, 2, 3, 4)), TensorType((1, 2, 3, 4)))
def test_broadcasting3(self):
t1 = TensorType((1, 2, 3, Dyn))
t2 = TensorType((2, 3, 4))
assert broadcast_types(t1, t2) == (TensorType((1, 2, 3, Dyn)), TensorType((1, 2, 3, 4)))
class TypeCheckerTest(TestCase):
def test_type_check_add_with_broadcast(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2, 3, Dyn)), y: TensorType((2, 3, 4))):
return torch.add(x, y)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
tc.type_check()
expected_ph_types = [TensorType((1, 2, 3, Dyn)),
TensorType((2, 3, 4)),
TensorType((1, 2, 3, Dyn)),
TensorType((1, 2, 3, Dyn))]
expected_iter = iter(expected_ph_types)
for n in symbolic_traced.graph.nodes:
if n.op == 'call_function':
assert n.meta['broadcast']
assert n.type == next(expected_iter)
def test_type_check_add_with_scalar(self):
class M(torch.nn.Module):
def forward(self, x: int, y: TensorType((2, 3, 4))):
return torch.add(x, y)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
tc.type_check()
expected_ph_types = [int,
TensorType((2, 3, 4)),
TensorType((2, 3, 4)),
TensorType((2, 3, 4))]
expected_iter = iter(expected_ph_types)
for n in symbolic_traced.graph.nodes:
assert n.type == next(expected_iter)
def test_type_check_add_false(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2, 3, Dyn)), y: TensorType((1, 2, 3))):
return torch.add(x, y)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
with self.assertRaises(TypeError):
tc.type_check()
def test_type_check_add_true(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2, Dyn)), y: TensorType((1, 2, 3))):
return torch.add(x, y)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
self.assertTrue(tc.type_check())
expected_ph_types = [TensorType((1, 2, Dyn)), TensorType((1, 2, 3))]
expected_iter = iter(expected_ph_types)
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
assert n.type == next(expected_iter)
if n.op == 'output':
assert n.type == TensorType((1, 2, Dyn))
def test_type_check_reshape_true(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 6))):
return torch.reshape(x, [1, 2, 3])
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
self.assertTrue(tc.type_check())
for n in symbolic_traced.graph.nodes:
if n.op == 'placeholder':
assert n.type == TensorType((1, 6))
if n.op == 'call_function':
assert n.type == TensorType((1, 2, 3))
if n.op == 'output':
assert n.type == TensorType((1, 2, 3))
def test_type_check_reshape_false(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 5))):
return torch.reshape(x, [1, 2, 3])
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
with self.assertRaises(TypeError):
tc.type_check()
def test_type_check_reshape_dyn_false(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 5))):
return torch.reshape(x, [1, 2, -1])
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
with self.assertRaises(TypeError):
tc.type_check()
def test_type_check_reshape_dyn_true(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 15))):
return torch.reshape(x, [1, 5, -1])
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
self.assertTrue(tc.type_check())
def test_type_check_reshape_dyn_true_param_false(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((Dyn, 5))):
return torch.reshape(x, [1, 2, -1])
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
with self.assertRaises(TypeError):
tc.type_check()
def test_type_check_transpose_true(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2, 3, 5))):
return torch.transpose(x, 0, 1)
module = M()
symbolic_traced : torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
self.assertTrue(tc.type_check())
for n in symbolic_traced.graph.nodes:
if n.op == 'call_function':
assert n.type == TensorType([2, 1, 3, 5])
if n.op == 'output':
assert n.type == TensorType([2, 1, 3, 5])
if n.op == 'placeholder':
assert n.type == TensorType((1, 2, 3, 5))
def test_type_check_transpose_False(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2, 3, 5))):
return torch.transpose(x, 0, 10)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
with self.assertRaises(TypeError):
tc.type_check()
def test_type_check_batch_norm_2D(self):
class BasicBlock(torch.nn.Module):
def __init__(self, inplanes, planes):
super(BasicBlock, self).__init__()
norm_layer = torch.nn.BatchNorm2d
self.bn1 = norm_layer(planes)
def forward(self, x: TensorType((2, 2, 5, 4))):
identity = x
out: TensorType((2, 2, Dyn, 4)) = self.bn1(x)
out += identity
return out
B = BasicBlock(2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in graph.nodes:
if n.op == 'placeholder':
assert n.type == TensorType((2, 2, 5, 4))
if n.op == 'output':
assert n.type == TensorType((2, 2, 5, 4))
if n.op == 'call_module':
assert n.type == TensorType((2, 2, 5, 4))
if n.op == 'call_function':
assert n.type == TensorType((2, 2, 5, 4))
def test_type_check_batch_norm_2D_false(self):
class BasicBlock(torch.nn.Module):
def __init__(self, inplanes, planes):
super(BasicBlock, self).__init__()
norm_layer = torch.nn.BatchNorm2d
self.bn1 = norm_layer(planes)
def forward(self, x: TensorType((2, 2, 5))):
identity = x
out: TensorType((2, 2, Dyn, 4)) = self.bn1(x)
out += identity
return out
B = BasicBlock(2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
with self.assertRaises(TypeError):
tc.type_check()
def test_type_check_batch_norm_2D_broadcast(self):
class BasicBlock(torch.nn.Module):
def __init__(self, inplanes, planes):
super(BasicBlock, self).__init__()
norm_layer = torch.nn.BatchNorm2d
self.bn1 = norm_layer(planes)
def forward(self, x: Dyn):
identity = x
out: TensorType((2, 2, Dyn, 4)) = self.bn1(x)
out += identity
return out
B = BasicBlock(2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in graph.nodes:
if n.op == 'placeholder':
assert n.type == TensorType((Dyn, Dyn, Dyn, Dyn))
if n.op == 'call_function':
assert n.type == TensorType((Dyn, Dyn, Dyn, Dyn))
if n.op == 'output':
assert n.type == TensorType((Dyn, Dyn, Dyn, Dyn))
if n.op == 'call_module':
assert n.type == TensorType((2, 2, Dyn, 4))
B = BasicBlock(1, 1)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
with self.assertRaises(TypeError):
tc.type_check()
def test_type_check_conv2D(self):
class BasicBlock(torch.nn.Module):
def __init__(self, inplanes, planes, stride=1):
super(BasicBlock, self).__init__()
norm_layer = torch.nn.BatchNorm2d
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
def forward(self, x: Dyn):
identity = x
out: TensorType((2, 2, Dyn, 4)) = self.conv1(x)
out += identity
return out
B = BasicBlock(2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in graph.nodes:
if n.op == 'placeholder':
assert n.type == TensorType((Dyn, Dyn, Dyn, Dyn))
if n.op == 'call_function':
assert n.type == TensorType((Dyn, Dyn, Dyn, Dyn))
if n.op == 'output':
assert n.type == TensorType((Dyn, Dyn, Dyn, Dyn))
if n.op == 'call_module':
assert n.type == TensorType((2, 2, Dyn, 4))
def test_type_check_conv2D_2(self):
class BasicBlock(torch.nn.Module):
def __init__(self, inplanes, planes, stride=1):
super(BasicBlock, self).__init__()
norm_layer = torch.nn.BatchNorm2d
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
def forward(self, x: TensorType((5, 2, 3, 4))):
identity = x
out = self.conv1(x)
out += identity
return out
B = BasicBlock(2, 2)
b = B.forward(torch.rand(5, 2, 3, 4))
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
t = TensorType((5, 2, 3, 4))
for n in graph.nodes:
if n.op == 'placeholder':
assert n.type == t
if n.op == 'call_function':
assert n.type == t
if n.op == 'output':
assert torch.Size(n.type.__args__) == b.shape
if n.op == 'call_module':
assert n.type == t
B = BasicBlock(1, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
with self.assertRaises(TypeError):
tc.type_check()
def test_type_check_conv2D_2_fully_static(self):
annotation_list = [(1, 2, 3, 5), (2, 5, 6, 9), (10, 15, 13, 14),
(10, Dyn, 13, 14), (Dyn, Dyn, Dyn, 3)]
input_list = [(1, 2, 3, 5), (2, 5, 6, 9), (10, 15, 13, 14),
(10, 15, 13, 14), (1, 2, 2, 3)]
intermediate_types = [(1, Dyn, Dyn, 7), (2, Dyn, 4, 6), (10, 15, Dyn, 5),
(10, 15, 7, 7), (1, Dyn, Dyn, Dyn)]
in_planes_list = [2, 5, 15, 15, 2]
stride_list = [1, 2, 3, 2, 2]
out_planes_list = [2, 5, 15, 15, 2]
groups_list = [1, 5, 5, 5, 2]
dilation_list = [1, 2, 3, 3, 3]
padding_list = [1, 2, 3, 3, 3]
kernel_size_list = [1, 2, 3, 3, 3]
output_types = [(1, 2, Dyn, 7), (2, 5, 4, 6), (10, 15, Dyn, 5), (10, 15, 7, 7), (1, 2, Dyn, Dyn)]
for i in range(5):
annotation = annotation_list[i]
input = input_list[i]
in_planes = in_planes_list[i]
stride = stride_list[i]
out_planes = out_planes_list[i]
groups = groups_list[i]
dilation = dilation_list[i]
padding = padding_list[i]
kernel_size = kernel_size_list[i]
intermediate_type = intermediate_types[i]
class BasicBlock(torch.nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding, groups, dilation):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels=in_planes, out_channels=out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, groups=groups, bias=False, dilation=dilation)
def forward(self, x):
out = self.conv1(x)
return out
B = BasicBlock(in_planes, out_planes, kernel_size, stride, padding, groups, dilation)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
# annotate our argument
for n in graph.nodes:
if n.op == 'placeholder':
n.type = TensorType(annotation)
b = B.forward(torch.rand(input))
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in graph.nodes:
if n.op == 'output':
assert is_consistent(n.type, TensorType(b.size()))
# test with intermediate annotations
class BasicBlock(torch.nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding, groups, dilation):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels=in_planes, out_channels=out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, groups=groups, bias=False, dilation=dilation)
def forward(self, x):
out = self.conv1(x)
return out
B = BasicBlock(in_planes, out_planes, kernel_size, stride, padding, groups, dilation)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
# populate our intermediate nodes
for n in traced.graph.nodes:
if n.op == 'call_module':
n.type = TensorType(intermediate_type)
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in traced.graph.nodes:
if n.op == 'output':
assert n.type == TensorType(output_types[i])
assert is_consistent(n.type, TensorType(b.size()))
def test_typecheck_basicblock(self):
class BasicBlock(torch.nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1):
super(BasicBlock, self).__init__()
norm_layer = torch.nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = torch.nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x: TensorType((2, 2, 4, 5))):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
B = BasicBlock(2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in traced.graph.nodes:
if n.target == 'output':
assert isinstance(n.type, TensorType)
assert torch.Size(n.type.__args__) == B.forward(torch.rand(2, 2, 4, 5)).size()
def test_type_check_conv2D_maxpool2d_flatten(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 6, 5)
self.pool = torch.nn.MaxPool2d(2, 2)
self.conv2 = torch.nn.Conv2d(6, 16, 5)
self.fc1 = torch.nn.Linear(5, 120)
self.pool2 = torch.nn.AdaptiveAvgPool2d((6, 7))
def forward(self, x : TensorType((4, 3, 32, 32))):
out = self.conv1(x)
out = self.pool(out)
out = self.conv2(out)
out = self.pool(out)
out = self.fc1(out)
out = self.pool2(out)
out = torch.flatten(out, 1)
return out
B = BasicBlock()
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
expected_ph_types = [TensorType((4, 3, 32, 32)), TensorType((4, 6, 28, 28)),
TensorType((4, 6, 14, 14)), TensorType((4, 16, 10, 10)),
TensorType((4, 16, 5, 5)), TensorType((4, 16, 5, 120)),
TensorType((4, 16, 6, 7)), TensorType((4, 672)), TensorType((4, 672))]
expected_iter = iter(expected_ph_types)
traced.graph.eliminate_dead_code()
for n in traced.graph.nodes:
assert n.type == next(expected_iter)
def test_type_check_flatten(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2, 3, 5, Dyn))):
return torch.flatten(x, 1, 2)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
tc.type_check()
for n in symbolic_traced.graph.nodes:
if n.op == 'output':
assert n.type == TensorType((1, 6, 5, Dyn))
def test_type_check_flatten_2(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, Dyn, 3, 5, Dyn))):
return torch.flatten(x, 1, 2)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
tc.type_check()
for n in symbolic_traced.graph.nodes:
if n.op == 'output':
assert n.type == TensorType((1, Dyn, 5, Dyn))
def test_type_check_flatten3(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((2, 3, 4, 5))):
return torch.flatten(x, start_dim=1, end_dim=3)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
tc.type_check()
for n in symbolic_traced.graph.nodes:
if n.op == 'output':
assert n.type == TensorType((2, 60))
r = Refine(symbolic_traced)
r.refine()
c = r.constraints
assert c == [Equality(2, 2)]
def test_type_typecheck_maxpool2d_3dinput(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
self.pool = torch.nn.MaxPool2d(5, 8)
def forward(self, x : TensorType((64, 8, 8))):
out = self.pool(x)
return out
B = BasicBlock()
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in traced.graph.nodes:
if n.target == 'output':
assert n.type == TensorType((64, 1, 1))
def test_type_maxpool2d_fully_static(self):
annotation_list = [(Dyn, Dyn, 3, 5), (2, 5, 6, 9), (10, 15, 13, 14),
(10, Dyn, 13, 14), (Dyn, Dyn, Dyn, 10)]
input_list = [(1, 2, 3, 5), (2, 5, 6, 9), (10, 15, 13, 14),
(10, 15, 13, 14), (2, 2, 10, 10)]
intermediate_types = [(1, 2, Dyn, Dyn), (2, Dyn, 2, 4), (10, 15, Dyn, 2),
(10, 15, 2, 3), (2, Dyn, Dyn, Dyn)]
stride_list = [1, 2, 3, 2, 1]
dilation_list = [1, 2, 3, 3, 2]
padding_list = [1, 2, 3, 3, 1]
kernel_size_list = [2, 4, 6, 6, 3]
output_types = [(1, 2, 4, 6), (2, 5, 2, 4), (10, 15, 2, 2), (10, 15, 2, 3), (2, Dyn, Dyn, 8)]
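# For readability: these expected shapes follow the standard MaxPool2d
# output-size rule,
#   H_out = floor((H_in + 2*padding - dilation*(kernel_size - 1) - 1) / stride + 1)
# combined with the Dyn dimensions in the annotations above and below.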
for i in range(5):
annotation = annotation_list[i]
input = input_list[i]
stride = stride_list[i]
dilation = dilation_list[i]
padding = padding_list[i]
kernel_size = kernel_size_list[i]
intermediate_type = intermediate_types[i]
class BasicBlock(torch.nn.Module):
def __init__(self, kernel_size, stride, padding, dilation):
super(BasicBlock, self).__init__()
self.pool = torch.nn.MaxPool2d(kernel_size, stride=stride,
padding=padding, dilation=dilation,
return_indices=False, ceil_mode=False)
def forward(self, x):
out = self.pool(x)
return out
B = BasicBlock(kernel_size, stride, padding, dilation)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
# annotate our argument
for n in graph.nodes:
if n.op == 'placeholder':
n.type = TensorType(annotation)
b = B.forward(torch.rand(input))
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in graph.nodes:
if n.op == 'output':
assert is_consistent(n.type, TensorType(b.size()))
# test with intermediate annotations
class BasicBlock(torch.nn.Module):
def __init__(self, kernel_size, stride, padding, dilation):
super(BasicBlock, self).__init__()
self.pool = torch.nn.MaxPool2d(kernel_size, stride=stride,
padding=padding, dilation=dilation,
return_indices=False, ceil_mode=False)
def forward(self, x):
out = self.pool(x)
return out
B = BasicBlock(kernel_size, stride, padding, dilation)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
# annotate our argument
for n in graph.nodes:
if n.op == 'placeholder':
n.type = TensorType(annotation)
# populate our intermediate nodes
for n in traced.graph.nodes:
if n.op == 'call_module':
n.type = TensorType(intermediate_type)
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in traced.graph.nodes:
if n.op == 'output':
assert n.type == TensorType(output_types[i])
assert is_consistent(n.type, TensorType(b.size()))
def test_flatten_fully_static(self):
annotation_list = [Dyn, TensorType((2, 5, 6, 9)), TensorType((10, 15, 13, 14)),
TensorType((10, Dyn, 13, 14)), TensorType((Dyn, Dyn, Dyn, 10))]
input_list = [(1, 2, 3, 5), (2, 5, 6, 9), (10, 15, 13, 14),
(10, 15, 13, 14), (2, 2, 10, 10)]
intermediate_list = [Dyn, (2, 5, 6, 9), (10, 15, 13, 14),
(10, 15, 13, 14), (2, 2, 10, 10)]
start_dim = [1, 2, 1, 2, 0]
end_dim = [1, 3, 3, 3, -2]
for i in range(5):
annotation = annotation_list[i]
input = input_list[i]
# intermediate_type = intermediate_list[i]
class BasicBlock(torch.nn.Module):
def __init__(self, start, end):
super(BasicBlock, self).__init__()
self.start = start
self.end = end
def forward(self, x):
out = torch.flatten(x, self.start, self.end)
return out
B = BasicBlock(start_dim[i], end_dim[i])
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
# annotate our argument
for n in graph.nodes:
if n.op == 'placeholder':
n.type = annotation
b = B.forward(torch.rand(input))
tc = GraphTypeChecker({}, traced)
tc.type_check()
for n in graph.nodes:
if n.op == 'output':
assert is_consistent(n.type, TensorType(b.size()))
@skipIfNoSympy
@skipIfNoTorchVision
def test_resnet50(self):
gm_run = symbolic_trace(resnet50())
sample_input = torch.randn(1, 3, 224, 224)
# run our nodes
ShapeProp(gm_run).propagate(sample_input)
gm_static = symbolic_trace(resnet50())
for n in gm_static.graph.nodes:
n.type = None
g = GraphTypeChecker({}, gm_static)
g.type_check()
gm_static.graph.eliminate_dead_code()
gm_run.graph.eliminate_dead_code()
# here we are checking for consistency with fully dynamic nodes
for n1, n2 in zip(gm_static.graph.nodes, gm_run.graph.nodes):
assert is_consistent(n1.type, TensorType(n2.meta['tensor_meta'].shape))
# here we give the same input as at runtime
gm_static_with_types = symbolic_trace(resnet50())
# we initialize our placeholder
for n in gm_static_with_types.graph.nodes:
if n.op == 'placeholder':
n.type = TensorType((1, 3, 224, 224))
g = GraphTypeChecker({}, gm_static_with_types)
g.type_check()
for n1, n2 in zip(gm_static_with_types.graph.nodes, gm_run.graph.nodes):
assert n1.type == TensorType(n2.meta['tensor_meta'].shape)
# apply shape inference to graph and check
# that the batch size is equal across all layers
infer_symbolic_types(gm_static)
batch_sizes = set()
gm_static.graph.eliminate_dead_code()
for n in gm_static.graph.nodes:
assert isinstance(n.type, TensorType)
batch_sizes.add(n.type.__args__[0])
assert (len(batch_sizes) == 1)
@skipIfNoSympy
def test_type_check_batch_norm_symbolic(self):
class BasicBlock(torch.nn.Module):
def __init__(self, inplanes, planes):
super(BasicBlock, self).__init__()
norm_layer = torch.nn.BatchNorm2d
self.bn1 = norm_layer(planes)
def forward(self, x: Dyn):
identity = x
out: TensorType((2, 2, Dyn, 4)) = self.bn1(x)
out += identity
return out
B = BasicBlock(2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
infer_symbolic_types(traced)
my_types = iter([TensorType[(2, 2, sympy.symbols('~7'), 4)],
TensorType[(2, 2, sympy.symbols('~7'), 4)],
TensorType[(2, 2, sympy.symbols('~7'), 4)],
TensorType[(2, 2, sympy.symbols('~7'), 4)]])
for n in graph.nodes:
assert n.type == next(my_types)
@skipIfNoSympy
def test_symbolic_add_with_broadcast(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2, 3, Dyn)), y: TensorType((2, 3, 4))):
return torch.add(x, y)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
tc.type_check()
infer_symbolic_types(symbolic_traced)
r = Refine(symbolic_traced)
r.refine()
assert r.constraints == [Equality(1, 1), Equality(2, 2), Equality(3, 3)]
# note that there is no equality constraint between dyn and 4 because
# dyn could be 4 or 1
infer_symbolic_types(symbolic_traced)
expected_ph_types = [TensorType((1, 2, 3, sympy.symbols('~0'))),
TensorType((2, 3, 4)),
TensorType((1, 2, 3, sympy.symbols('~1'))),
TensorType((1, 2, 3, sympy.symbols('~1')))]
expected_iter = iter(expected_ph_types)
for n in symbolic_traced.graph.nodes:
assert n.type == next(expected_iter)
@skipIfNoSympy
def test_symbolic_add_with_broadcast_2(self):
class M(torch.nn.Module):
def forward(self, x: TensorType((1, 2)), y: TensorType((Dyn, 2))):
return torch.add(x, y)
module = M()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
tc = GraphTypeChecker({}, symbolic_traced)
tc.type_check()
infer_symbolic_types(symbolic_traced)
r = Refine(symbolic_traced)
r.refine()
expected_ph_types = [TensorType((1, 2)),
TensorType((sympy.symbols('~1'), 2)),
TensorType((sympy.symbols('~1'), 2)),
TensorType((sympy.symbols('~1'), 2))]
expected_iter = iter(expected_ph_types)
for n in symbolic_traced.graph.nodes:
assert n.type == next(expected_iter)
@skipIfNoSympy
def test_type_check_conv2D_types(self):
class BasicBlock(torch.nn.Module):
def __init__(self, inplanes, planes, stride=1):
super(BasicBlock, self).__init__()
norm_layer = torch.nn.BatchNorm2d
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
def forward(self, x: Dyn):
identity = x
out: TensorType((2, 2, Dyn, 4)) = self.conv1(x)
out += identity
return out
B = BasicBlock(2, 2)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(B)
traced = GraphModule(ast_rewriter.root, graph, "gm")
tc = GraphTypeChecker({}, traced)
tc.type_check()
infer_symbolic_types(traced)
for n in traced.graph.nodes:
if n.op == 'call_module':
assert isinstance(n.type.__args__[2], sympy.floor)
assert isinstance(n.type.__args__[3], sympy.floor)
@skipIfNoSympy
def test_type_check_symbolic_inference_conv2D_maxpool2d_flatten(self):
class BasicBlock(torch.nn.Module):
def __init__(self):
super(BasicBlock, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 6, 5)
self.pool = torch.nn.MaxPool2d(2, 2)
self.conv2 = torch.nn.Conv2d(6, 16, 5)
self.fc1 = torch.nn.Linear(5, 120)
self.pool2 = torch.nn.AdaptiveAvgPool2d((6, 7))
def forward(self, x : TensorType((4, 3, Dyn, Dyn))):
out = self.conv1(x)
out = self.pool(out)
out = self.conv2(out)
out = self.pool(out)
out = self.fc1(out)
out = self.pool2(out)
out = torch.flatten(out, 1)
return out
B = BasicBlock()
ast_rewriter = RewritingTracer()
traced = symbolic_trace(B)
tc = GraphTypeChecker({}, traced)
tc.type_check()
infer_symbolic_types(traced)
for n in traced.graph.nodes:
if n.target == 'conv1':
assert n.type == TensorType((4, 6, sympy.floor((sympy.symbols('~0') - 4)),
sympy.floor((sympy.symbols('~1') - 4))))
elif n.target == 'conv2':
assert n.type == TensorType((4, 16, sympy.floor((sympy.symbols('~4') - 4)),
sympy.floor((sympy.symbols('~5') - 4))))
if __name__ == '__main__':
unittest.main()
|
pytorch-master
|
test/fx/test_gradual_type.py
|
from typing import NamedTuple
import torch
class MyNamedTup(NamedTuple):
i : torch.Tensor
f : torch.Tensor
|
pytorch-master
|
test/fx/named_tup.py
|
# Owner(s): ["module: fx"]
import unittest
import torch
import torch.fx
from torch.testing._internal.common_utils import TestCase
class MyModuleBase(torch.nn.Module):
def forward(self, x):
matrx = self.get_mul_matrix()
if self.no_relu():
return torch.mm(x, matrx)
else:
return torch.relu(torch.mm(x, matrx))
def get_mul_matrix(self):
return self.param
def no_relu(self):
raise Exception("not implemented")
class MyModuleParamShape(MyModuleBase):
def __init__(self, in_channels):
super().__init__()
self.param = torch.nn.Parameter(torch.randn(in_channels, 3))
def no_relu(self):
return self.param.shape[0] < 10
class MyModuleParamSize(MyModuleBase):
def __init__(self, in_channels):
super().__init__()
self.param = torch.nn.Parameter(torch.randn(in_channels, 3))
def no_relu(self):
return self.param.size()[0] < 10
class MyModuleParamDim(MyModuleBase):
def __init__(self, param):
super().__init__()
self.param = param
def get_mul_matrix(self):
return self.param[0] if (self.param.dim() == 3) else self.param
def no_relu(self):
return self.param.dim() == 3
class MyModuleParamNDim(MyModuleBase):
def __init__(self, param):
super().__init__()
self.param = param
def get_mul_matrix(self):
return self.param[0] if (self.param.ndim == 3) else self.param
def no_relu(self):
return self.param.ndim == 3
class MyModuleParamNumEl(MyModuleBase):
def __init__(self, in_channels):
super().__init__()
self.param = torch.nn.Parameter(torch.randn(in_channels, 3))
def no_relu(self):
return self.param.numel() < 10 * 3
class MyModuleParamNElement(MyModuleBase):
def __init__(self, in_channels):
super().__init__()
self.param = torch.nn.Parameter(torch.randn(in_channels, 3))
def no_relu(self):
return self.param.nelement() < 10 * 3
class TestConstParamShapeInControlFlow(TestCase):
def verify_mm_relu_mods(self, mm_only_mod, relu_mod):
"""
Verify one module only does a mm op while the other
performs both mm and relu ops in cascade
"""
x = torch.randn(10, 5)
torch.testing.assert_allclose(mm_only_mod(x), torch.mm(x, mm_only_mod.get_mul_matrix()))
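# param_shapes_constant=True makes the tracer evaluate attribute queries on
# parameters (shape / size() / dim() / ndim / numel() / nelement()) to concrete
# Python values at trace time, so the data-dependent branch in
# MyModuleBase.forward is resolved to a single code path in the traced graph.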
tracer = torch.fx.Tracer(param_shapes_constant=True)
traced_graph = tracer.trace(mm_only_mod)
# verify the graph module calculates the same result
graph_mod_mm = torch.fx.GraphModule(mm_only_mod, traced_graph)
torch.testing.assert_allclose(graph_mod_mm(x), torch.mm(x, mm_only_mod.get_mul_matrix()))
# Make a new module with different parameter shape to go down the different
# code path
x = torch.randn(10, 15)
torch.testing.assert_allclose(relu_mod(x), torch.relu(torch.mm(x, relu_mod.get_mul_matrix())))
tracer2 = torch.fx.Tracer(param_shapes_constant=True)
traced_graph2 = tracer2.trace(relu_mod)
# verify the graph module calculates the same result
graph_mod_relu = torch.fx.GraphModule(relu_mod, traced_graph2)
torch.testing.assert_allclose(graph_mod_relu(x), torch.relu(torch.mm(x, relu_mod.get_mul_matrix())))
graph1_node_targets = [n.target for n in traced_graph.nodes]
graph2_node_targets = [n.target for n in traced_graph2.nodes]
# the second graph has an extra relu function call node
assert torch.mm in graph1_node_targets and torch.mm in graph2_node_targets
assert torch.relu not in graph1_node_targets and torch.relu in graph2_node_targets
def test_param_shape_const(self):
mymod = MyModuleParamShape(in_channels=5)
mymod2 = MyModuleParamShape(in_channels=15)
self.verify_mm_relu_mods(mymod, mymod2)
def test_param_size_const(self):
mymod = MyModuleParamSize(in_channels=5)
mymod2 = MyModuleParamSize(in_channels=15)
self.verify_mm_relu_mods(mymod, mymod2)
def test_param_dim_const(self):
mymod = MyModuleParamDim(torch.nn.Parameter(torch.randn(2, 5, 3)))
mymod2 = MyModuleParamDim(torch.nn.Parameter(torch.randn(15, 3)))
self.verify_mm_relu_mods(mymod, mymod2)
def test_param_ndim_const(self):
mymod = MyModuleParamNDim(torch.nn.Parameter(torch.randn(2, 5, 3)))
mymod2 = MyModuleParamNDim(torch.nn.Parameter(torch.randn(15, 3)))
self.verify_mm_relu_mods(mymod, mymod2)
def test_param_numel_const(self):
mymod = MyModuleParamNumEl(in_channels=5)
mymod2 = MyModuleParamNumEl(in_channels=15)
self.verify_mm_relu_mods(mymod, mymod2)
def test_param_nelement_const(self):
mymod = MyModuleParamNElement(in_channels=5)
mymod2 = MyModuleParamNElement(in_channels=15)
self.verify_mm_relu_mods(mymod, mymod2)
if __name__ == '__main__':
unittest.main()
|
pytorch-master
|
test/fx/test_fx_param_shape_control_flow.py
|
# Owner(s): ["module: fx"]
from typing import Optional, Set, Type
import torch
import torch.fx
from torch.testing._internal.common_utils import TestCase
class TestDCE(TestCase):
def _has_nodes_without_users(self, m: torch.fx.GraphModule):
for node in m.graph.nodes:
if node.is_impure():
continue
if len(node.users) == 0:
return True
return False
def _get_num_placeholders(self, m: torch.fx.GraphModule) -> int:
count = 0
for node in m.graph.nodes:
if node.op == "placeholder":
count += 1
return count
def _run_dce_and_test(
self,
m: torch.nn.Module,
expect_dce_changes: bool,
modules_to_be_leafs: Optional[Set[Type]] = None,
):
class TestTracer(torch.fx.Tracer):
def is_leaf_module(self, m, qualname):
if modules_to_be_leafs and type(m) in modules_to_be_leafs:
return True
return super().is_leaf_module(m, qualname)
traced: torch.fx.GraphModule = torch.fx.GraphModule(m, TestTracer().trace(m))
print(str(traced.graph))
# Verify there are nodes without users (if expected).
has_nodes_without_users = self._has_nodes_without_users(traced)
if expect_dce_changes:
self.assertTrue(has_nodes_without_users)
else:
self.assertFalse(has_nodes_without_users)
# Get the original number of placeholders to verify it doesn't change
# during DCE.
orig_num_phs = self._get_num_placeholders(traced)
changed = traced.graph.eliminate_dead_code()
self.assertTrue(changed if expect_dce_changes else not changed)
# Verify there are no nodes without users after DCE is run.
self.assertFalse(self._has_nodes_without_users(traced))
new_num_phs = self._get_num_placeholders(traced)
self.assertEqual(orig_num_phs, new_num_phs)
traced.recompile()
# Make sure we run and get the same results before/after DCE.
inputs = [torch.tensor([1.5])] * new_num_phs
self.assertTrue(torch.equal(m(*inputs), traced(*inputs)))
def test_simple(self):
"""
Tests that a single node in the graph is DCE'd correctly.
"""
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr_1 = torch.nn.Parameter(torch.tensor([-0.9]))
def forward(self, x):
a = x + 1
return x + self.attr_1
self._run_dce_and_test(TestModule(), expect_dce_changes=True)
def test_dead_chain(self):
"""
Tests that a chain of two nodes in the graph are DCE'd correctly.
"""
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr_1 = torch.nn.Parameter(torch.tensor([-0.9]))
def forward(self, x):
a = x + 1
b = a * 7
return x + self.attr_1
self._run_dce_and_test(TestModule(), expect_dce_changes=True)
def test_dead_getattr(self):
"""
Tests that a get_attr in the graph is DCE'd correctly.
"""
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr_1 = torch.nn.Parameter(torch.tensor([-0.9]))
def forward(self, x):
a = x + 1
b = a * self.attr_1
return x + 11
self._run_dce_and_test(TestModule(), expect_dce_changes=True)
def test_dead_placeholder(self):
"""
Tests that a placeholder in the graph is not DCE'd, as that would change
the function signature.
"""
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + 7
self._run_dce_and_test(TestModule(), expect_dce_changes=False)
def test_dead_placeholder_with_user(self):
"""
Tests that a placeholder in the graph is not DCE'd, as that would change
the function signature. Also verifies that a dead node that uses the
placeholder is DCE'd.
"""
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
a = y + 2
return x + 7
self._run_dce_and_test(TestModule(), expect_dce_changes=True)
def test_keep_module_with_side_effects(self):
"""
Test that DCE doesn't remove a module if it's specified as having side effects.
"""
class ReLUImpure(torch.nn.ReLU):
_is_impure = True
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = ReLUImpure()
def forward(self, a: torch.Tensor) -> torch.Tensor:
r = self.relu(a)
return a * 2
self._run_dce_and_test(
TestModule(), expect_dce_changes=False, modules_to_be_leafs={ReLUImpure}
)
def test_keep_torch_assert(self):
"""
Test that DCE doesn't remove torch._assert since it has side effects.
"""
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, a: torch.Tensor) -> torch.Tensor:
torch._assert(torch.equal(a, a), "a must equal a")
return a * 2
# Note: Don't need to specify torch._assert as having side effects
# because it's known to.
self._run_dce_and_test(TestModule(), expect_dce_changes=False)
|
pytorch-master
|
test/fx/test_dce_pass.py
|
# Owner(s): ["module: fx"]
import operator
import torch
import torch.fx
from torch.fx.experimental import const_fold
from torch.fx.passes.shape_prop import _extract_tensor_metadata, ShapeProp
from torch.testing._internal.common_utils import TestCase
class TestConstFold(TestCase):
def _get_attr(self, node):
mod = node.graph.owning_module
target = str(node.target)
target_atoms = target.split(".")
curr_obj = mod
for i, atom in enumerate(target_atoms):
if not hasattr(curr_obj, atom):
raise RuntimeError(
f"Node referenced nonexistent target '{'.'.join(target_atoms[:i])}'; "
f" original whole target: '{target}'"
)
curr_obj = getattr(curr_obj, atom)
return curr_obj
def _verify_const_fold_mod(self, mod_folded: const_fold.FoldedGraphModule):
self.assertTrue(mod_folded.const_subgraph_module is not None)
# Check that we don't have the const or non-const fold graphs in the gm, and
# that we do have the const folded get_attr.
found_folded_attrs = False
for n in mod_folded.graph.nodes:
if n.op == "get_attr" and n.target.startswith("_FX_CONST_FOLDED_ATTRS"):
found_folded_attrs = True
elif n.op == "call_module":
self.assertTrue(n.target not in {"submod_0", "submod_1"})
self.assertTrue(found_folded_attrs)
def test_const_fold_basic_one_attr_no_name_collision(self):
r"""
Perform constant folding conversion, from original mod to split constant folding
module with two split subgraphs, where there's a single attr to fold and
a single output attr result to replace.
attr1 attr1
| | | |
x add add
\ / |
sub y output (becomes attr add_1)
\ / ==> -------+------- (const/base subgraph split)
mul attr2 x / (input from previous subgraph
\ / \ / is attr)
add sub y
| \ /
output mul attr2
\ /
add
|
output
"""
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr_1 = torch.nn.Parameter(torch.tensor([[-0.9]]))
self.attr_2 = torch.nn.Parameter(torch.tensor([[17.1]]))
def forward(self, x, y):
a = self.attr_1 + self.attr_1
x = x - a
return x * y + self.attr_2
mod = ConstFoldTestModule()
mod_folded: const_fold.FoldedGraphModule = const_fold.split_const_subgraphs(mod)
self._verify_const_fold_mod(mod_folded)
# Now run both folded and non-folded to check results equal.
in_x, in_y = torch.tensor([[-0.45]]), torch.tensor([0.9])
base_result = mod(in_x, in_y)
fold_result = mod_folded(in_x, in_y)
self.assertTrue(torch.equal(fold_result, base_result))
def test_const_fold_basic_one_attr_name_collision(self):
r"""
Perform constant folding conversion, from original mod to split constant folding
module with two split subgraphs, where there's a single attr to fold and
a single output attr result to replace. Name the attrs such that they will
collide by name with folded attrs.
add_1 add_1
| | | |
x add add
\ / |
sub y output (becomes attr add_1)
\ / ==> -------+------- (const/base subgraph split)
mul add_2 x / (input from previous subgraph
\ / \ / is attr)
add sub y
| \ /
output mul add_2
\ /
add
|
output
"""
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
# Note: Named as such to result in name collision.
self.add_1__CF = torch.nn.Parameter(torch.tensor([[1.0]]))
self.add_2__CF = torch.nn.Parameter(torch.tensor([[17.1]]))
def forward(self, x, y):
a = self.add_1__CF + self.add_1__CF
x = x - a
return x * y + self.add_2__CF
mod = ConstFoldTestModule()
mod_folded: const_fold.FoldedGraphModule = const_fold.split_const_subgraphs(mod)
self._verify_const_fold_mod(mod_folded)
# Now run both folded and non-folded to check results equal.
in_x, in_y = torch.tensor([[5.0]]), torch.tensor([4.0])
base_result = mod(in_x, in_y)
fold_result = mod_folded(in_x, in_y)
self.assertTrue(torch.equal(fold_result, base_result))
def test_const_fold_basic_placeholder_reordered(self):
"""
Test code path where placeholder comes after normal op node in FX
"""
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x * 2 + y
mod = ConstFoldTestModule()
mod = torch.fx.symbolic_trace(mod)
yy = None
for n in mod.graph.nodes:
if n.op == "placeholder" and n.target == "y":
yy = n
elif yy is not None and n.op == "call_function":
yy.prepend(n)
break
mod_folded: const_fold.FoldedGraphModule = const_fold.split_const_subgraphs(mod)
self.assertTrue(mod_folded.const_subgraph_module is None)
# Now run both folded and non-folded to check results equal.
in_x = torch.tensor([[-0.45]])
in_y = torch.tensor([[0.45]])
base_result = mod(in_x, in_y)
fold_result = mod_folded(in_x, in_y)
self.assertTrue(torch.equal(fold_result, base_result))
def test_const_fold_noop(self):
r"""
Check that a graph with no constant folding is handled correctly.
x attr1
\ /
sub
|
output
"""
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr1 = torch.nn.Parameter(torch.tensor([[-0.9]]))
def forward(self, x):
return x - self.attr1
mod = ConstFoldTestModule()
mod_folded: const_fold.FoldedGraphModule = const_fold.split_const_subgraphs(mod)
# Check that the folded graph module is None, since there was no folding to do.
self.assertTrue(mod_folded.const_subgraph_module is None)
# Now run both folded and non-folded to check results equal.
in_x = torch.tensor([[-0.45]])
base_result = mod(in_x)
fold_result = mod_folded(in_x)
self.assertTrue(torch.equal(fold_result, base_result))
def test_const_fold_basic_two_attr_three_input(self):
r"""
Perform constant folding conversion, from original mod to split constant
folding module with two split subgraphs, where there are two attrs to
fold into a single output, and there are three placeholder inputs.
attr1 attr2 attr1 attr2
\ / \ /
x add add
\ / |
sub y output (becomes attr add_1)
\ / ==> -------+------- (const/base subgraph split)
mul z x / (input from previous subgraph
\ / \ / is attr)
div sub y
| \ /
output mul z
\ /
div
|
output
"""
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr1 = torch.nn.Parameter(torch.tensor([[-0.9]]))
self.attr2 = torch.nn.Parameter(torch.tensor([[1.32]]))
def forward(self, x, y, z):
a = self.attr1 + self.attr2
sub = x - a
mul = sub * y
return mul / z
mod = ConstFoldTestModule()
mod_folded: const_fold.FoldedGraphModule = const_fold.split_const_subgraphs(mod)
self._verify_const_fold_mod(mod_folded)
# Now run both folded and non-folded to check results equal.
in_x, in_y, in_z = (
torch.tensor([[-0.45]]),
torch.tensor([0.9]),
torch.tensor([1.1]),
)
base_result = mod(in_x, in_y, in_z)
fold_result = mod_folded(in_x, in_y, in_z)
self.assertTrue(torch.equal(fold_result, base_result))
def test_const_fold_basic_two_attr(self):
r"""
Perform constant folding conversion, from original mod to split constant
folding module with two split subgraphs, where there are two attrs to
fold into a single output.
attr1 attr2 attr1 attr2
\ / \ /
x add add (becomes attr add_1)
\ / ==> -------+------- (const/base subgraph split)
sub x | (input from previous subgraph is attr)
| \ /
output sub
|
output
"""
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr1 = torch.nn.Parameter(torch.randn(2, 3))
self.attr2 = torch.nn.Parameter(torch.randn(2, 3))
def forward(self, x):
y = self.attr1 + self.attr2
return x + y
mod = ConstFoldTestModule()
mod_folded: const_fold.FoldedGraphModule = const_fold.split_const_subgraphs(mod)
self._verify_const_fold_mod(mod_folded)
# Now run both folded and non-folded to check results equal.
in_x = torch.randn(2, 3)
fold_result = mod_folded(in_x)
base_result = mod(in_x)
self.assertTrue(torch.equal(fold_result, base_result))
def test_const_fold_multi_const_folded_attrs(self):
r"""
Perform constant folding conversion, from original mod to split constant
folding module with two split subgraphs, where there are two attrs to
fold into two new attrs.
attr1 attr2 attr1 attr2
/ \ | / \ |
permute | sum permute | sum
\ / / \ / |
x add y / add |
\ / \ / | |
sub add output output (become attrs add_1 and mul_1)
\ / ==> --------+-------+------ (const/base subgraph split)
\ / x | y | (inputs from previous subgraph
add \ / \ / are attrs)
| sub add
linear \ /
| add
sigmoid |
| linear
output |
sigmoid
|
output
"""
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr1 = torch.nn.Parameter(torch.randn(4, 4))
self.attr2 = torch.nn.Parameter(torch.randn(4, 4))
self.lin = torch.nn.Linear(4, 4)
def forward(self, x, y):
a = self.attr1 + self.attr1.permute(1, 0)
x = x - a
amax = torch.sum(self.attr2, dim=1)
y = y + amax
return torch.sigmoid(self.lin(x + y))
mod = ConstFoldTestModule()
mod_folded: const_fold.FoldedGraphModule = const_fold.split_const_subgraphs(mod)
self._verify_const_fold_mod(mod_folded)
# Now run both folded and non-folded to check results equal.
in_x, in_y = torch.randn(4, 4), torch.randn(4)
fold_result = mod_folded(in_x, in_y)
base_result = mod(in_x, in_y)
self.assertTrue(torch.equal(fold_result, base_result))
def test_const_fold_submod_hierarchy(self):
r"""
Perform constant folding conversion, from original mod to split constant folding
module where one of the folded attrs comes from a submod deeper in the hierarchy
of the base module.
"""
class TracedThroughModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.internal_attr = torch.nn.Parameter(torch.randn(2, 3))
def forward(self):
return self.internal_attr
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.my_mod = TracedThroughModule()
self.attr = torch.nn.Parameter(torch.randn(2, 3))
def forward(self, x):
return self.attr + self.my_mod() + x
mod = ConstFoldTestModule()
mod_folded: const_fold.FoldedGraphModule = const_fold.split_const_subgraphs(mod)
self._verify_const_fold_mod(mod_folded)
# Now run both folded and non-folded to check results equal.
in_x = torch.randn(2, 3)
fold_result = mod_folded(in_x)
base_result = mod(in_x)
self.assertTrue(torch.equal(fold_result, base_result))
def test_retain_node_meta(self):
r"""
Perform constant folding conversion, and validate that node meta is retained.
"""
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.nn.Parameter(torch.randn(2, 3))
def forward(self, x):
a = self.attr + self.attr
return x - a
mod = ConstFoldTestModule()
gm = torch.fx.symbolic_trace(mod)
# Add a count for each node to check after we const fold.
for idx, node in enumerate(gm.graph.nodes):
if node.op != "output":
node.meta["meta_idx"] = idx
# Pre-folding:
# idx 0: placeholder
# idx 1: get_attr (will no longer be used, hence removed)
# idx 2: add (will be folded into a get_attr)
# idx 3: sub
gm_folded: const_fold.FoldedGraphModule = const_fold.split_const_subgraphs(gm)
self._verify_const_fold_mod(gm_folded)
# Post-folding:
# idx 0: placeholder
# idx 2: get_attr (replaced original add; original get_attr was removed)
# idx 3: sub
# Check the expected indices are still here.
for node in gm_folded.graph.nodes:
if node.op == "placeholder":
self.assertEqual(node.meta["meta_idx"], 0)
elif node.op == "get_attr":
self.assertEqual(node.meta["meta_idx"], 2)
elif node.op == "call_function" and node.target == operator.sub:
self.assertEqual(node.meta["meta_idx"], 3)
else:
self.assertEqual(node.op, "output")
# Now run both folded and non-folded to check results equal.
in_x = torch.randn(2, 3)
fold_result = gm_folded(in_x)
base_result = mod(in_x)
self.assertTrue(torch.equal(fold_result, base_result))
def test_const_fold_has_inlined_call_module_node(self):
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.nn.Parameter(torch.randn(2, 3))
self.mod = torch.nn.Identity()
self.mod.relu = torch.nn.ReLU()
def forward(self, x):
a = self.attr + self.attr
return self.mod.relu(x - a)
mod = ConstFoldTestModule()
gm_folded = const_fold.split_const_subgraphs(mod)
# Now run both folded and non-folded to check results equal.
in_x = torch.randn(2, 3)
fold_result = gm_folded(in_x)
base_result = mod(in_x)
self.assertTrue(torch.equal(fold_result, base_result))
def test_const_fold_module_attr(self):
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.const = torch.nn.Parameter(torch.randn(2, 3))
self.mod = torch.nn.Identity()
self.mod.attr = torch.nn.Parameter(torch.randn(2, 3))
def forward(self, x):
a = self.const + self.mod.attr
x = x + a
return x + self.mod.attr
mod = ConstFoldTestModule()
gm_folded = const_fold.split_const_subgraphs(mod)
# Now run both folded and non-folded to check results equal.
in_x = torch.randn(2, 3)
fold_result = gm_folded(in_x)
base_result = mod(in_x)
self.assertTrue(torch.equal(fold_result, base_result))
def test_const_fold_unused_placeholder(self):
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.const = torch.nn.Parameter(torch.randn(2, 3))
def forward(self, x, y, z):
a = self.const + self.const
return y + a
mod = ConstFoldTestModule()
gm_folded = const_fold.split_const_subgraphs(mod)
# Now run both folded and non-folded to check results equal.
in_x = torch.randn(2, 3)
fold_result = gm_folded(in_x, in_x, in_x)
base_result = mod(in_x, in_x, in_x)
self.assertTrue(torch.equal(fold_result, base_result))
def test_dict_output(self):
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.const = torch.nn.Parameter(torch.randn(2, 3))
def forward(self, x):
a = self.const + self.const
return {"result": x + a}
mod = ConstFoldTestModule()
gm_folded = const_fold.split_const_subgraphs(mod)
# Now run both folded and non-folded to check results equal.
in_x = torch.randn(2, 3)
fold_result = gm_folded(in_x)
base_result = mod(in_x)
self.assertTrue(torch.equal(fold_result["result"], base_result["result"]))
def test_two_outputs(self):
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.const = torch.nn.Parameter(torch.randn(2, 3))
def forward(self, x):
a = self.const + self.const
return x, x + a
mod = ConstFoldTestModule()
gm_folded = const_fold.split_const_subgraphs(mod)
# Now run both folded and non-folded to check results equal.
in_x = torch.randn(2, 3)
fold_result = gm_folded(in_x)
base_result = mod(in_x)
self.assertTrue(torch.equal(fold_result[0], base_result[0]))
self.assertTrue(torch.equal(fold_result[1], base_result[1]))
def test_three_outputs(self):
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.const = torch.nn.Parameter(torch.randn(2, 3))
def forward(self, x):
a = self.const + self.const
return x, x + a, x + a
mod = ConstFoldTestModule()
gm_folded = const_fold.split_const_subgraphs(mod)
# Now run both folded and non-folded to check results equal.
in_x = torch.randn(2, 3)
fold_result = gm_folded(in_x)
base_result = mod(in_x)
self.assertTrue(torch.equal(fold_result[0], base_result[0]))
self.assertTrue(torch.equal(fold_result[1], base_result[1]))
self.assertTrue(torch.equal(fold_result[2], base_result[2]))
def test_check_inline_non_const(self):
r"""
Perform constant folding conversion and check that the non-const module is inlined
correctly.
"""
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.nn.Parameter(torch.randn(2, 3))
def forward(self, x):
a = self.attr + self.attr
return (x - a * x) / 2
mod = ConstFoldTestModule()
gm = torch.fx.symbolic_trace(mod)
gm_folded: const_fold.FoldedGraphModule = const_fold.split_const_subgraphs(gm)
self._verify_const_fold_mod(gm_folded)
# Check there are no call modules, because they've been inlined or extracted for
# const folding.
for node in gm_folded.graph.nodes:
self.assertNotEqual(node.op, "call_module")
# Now run both folded and non-folded to check results equal.
in_x = torch.randn(2, 3)
fold_result = gm_folded(in_x)
base_result = mod(in_x)
self.assertTrue(torch.equal(fold_result, base_result))
def test_check_inline_non_const_mult_return(self):
r"""
Perform constant folding conversion and check that the non-const module is inlined
correctly.
"""
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.nn.Parameter(torch.randn(2, 3))
def forward(self, x):
a = self.attr + self.attr
return x - a, x / 2
mod = ConstFoldTestModule()
gm = torch.fx.symbolic_trace(mod)
gm_folded: const_fold.FoldedGraphModule = const_fold.split_const_subgraphs(gm)
self._verify_const_fold_mod(gm_folded)
# Check there are no call modules, because they've been inlined or extracted for
# const folding.
for node in gm_folded.graph.nodes:
self.assertNotEqual(node.op, "call_module")
# Now run both folded and non-folded to check results equal.
in_x = torch.randn(2, 3)
fold_result = gm_folded(in_x)
base_result = mod(in_x)
self.assertTrue(torch.equal(fold_result[0], base_result[0]))
self.assertTrue(torch.equal(fold_result[1], base_result[1]))
def test_check_skip_folding_quant_dequant_pattern(self):
r"""
        Set up a skip_folding_quant_dequant function to skip the quant/dequant pattern.
This example shows how to use skip_folding_node_fn.
"""
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.randn(4, 4))
self.bias = torch.nn.Parameter(torch.randn(4))
self.relu = torch.nn.ReLU()
def forward(self, x):
quant_weight = torch.quantize_per_tensor(
self.weight, 0.5, 3, torch.quint8
)
dequant_weight = torch.dequantize(quant_weight)
output = torch.nn.functional.linear(x, dequant_weight, self.bias)
return self.relu(output)
mod = ConstFoldTestModule()
in_x = torch.randn(2, 4)
gm = torch.fx.symbolic_trace(mod)
def skip_folding_quant_dequant(node: torch.fx.Node):
if node.target != torch.quantize_per_tensor:
return False
            # If quantize_per_tensor -> dequantize, then skip folding.
for user in node.users:
if user.target == torch.dequantize:
return True
return False
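        # skip_folding_node_fn lets the caller veto folding of individual nodes:
        # returning True (as above for quantize -> dequantize) keeps that node out
        # of the const subgraph.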
gm_folded: const_fold.FoldedGraphModule = const_fold.split_const_subgraphs(
gm, skip_folding_node_fn=skip_folding_quant_dequant
)
        # Check that const_subgraph_module is None, since there was no folding to do.
self.assertTrue(gm_folded.const_subgraph_module is None)
# Now run both folded and non-folded to check results equal.
fold_result = gm_folded(in_x)
base_result = mod(in_x)
self.assertTrue(torch.equal(fold_result, base_result))
def test_fold_module(self):
r"""
Perform constant folding with a call_module node.
"""
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin_input = torch.nn.Parameter(torch.randn(4, 4))
self.lin = torch.nn.Linear(4, 4)
def forward(self, x):
return self.lin(self.lin_input) + x
mod = ConstFoldTestModule()
mod_folded: const_fold.FoldedGraphModule = const_fold.split_const_subgraphs(mod)
self._verify_const_fold_mod(mod_folded)
# Now run both folded and non-folded to check results equal.
inp = torch.randn(4, 4)
self.assertTrue(torch.equal(mod_folded(inp), mod(inp)))
def test_const_fold_tensor_meta(self):
self._test_const_fold_tensor_meta(True)
self._test_const_fold_tensor_meta(False)
def _test_const_fold_tensor_meta(self, requires_grad):
"""
Verify tensor_meta is handled correctly.
"""
class ConstFoldTestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr_1 = torch.nn.Parameter(torch.tensor([[-0.9]]), requires_grad)
self.attr_2 = torch.nn.Parameter(torch.tensor([[17.1]]), requires_grad)
def forward(self, x, y):
a = self.attr_1 + self.attr_1
x = x - a
return x * y + self.attr_2
mod = ConstFoldTestModule()
gm = torch.fx.symbolic_trace(mod)
in_x, in_y = torch.tensor([[-0.45]]), torch.tensor([0.9])
ShapeProp(gm).propagate(in_x, in_y)
mod_folded: const_fold.FoldedGraphModule = const_fold.split_const_subgraphs(
gm, device_for_folded_attrs="cpu"
)
self._verify_const_fold_mod(mod_folded)
mod_folded.run_folding()
for n in mod_folded.graph.nodes:
if n.op == "get_attr":
attr = self._get_attr(n)
                self.assertEqual(_extract_tensor_metadata(attr), n.meta["tensor_meta"])
# Now run both folded and non-folded to check results equal.
base_result = mod(in_x, in_y)
fold_result = mod_folded(in_x, in_y)
self.assertTrue(torch.equal(fold_result, base_result))
|
pytorch-master
|
test/fx/test_fx_const_fold.py
|
# Owner(s): ["module: fx"]
import torch
import torch.fx as fx
from torch.testing._internal.common_utils import TestCase
from torch.fx.passes.infra.pass_base import PassResult
from torch.fx.passes.infra.pass_manager import (
PassManager,
this_before_that_pass_constraint,
_topological_sort_passes,
)
def replace_add_with_mul_pass(gm):
modified = False
for node in gm.graph.nodes:
if node.op == "call_function" and node.target == torch.add:
node.target = torch.mul
modified = True
return PassResult(gm, modified)
def replace_mul_with_div_pass(gm):
modified = False
for node in gm.graph.nodes:
if node.op == "call_function" and node.target == torch.mul:
node.target = torch.div
modified = True
return PassResult(gm, modified)
class AddModule(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
y = torch.add(x, x)
z = torch.add(y, x)
return z
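# Illustrative sketch (not used by the tests below): any callable that accepts a
# GraphModule and returns a PassResult can act as a pass, mirroring the two
# replacement passes defined above.
def noop_pass(gm):
    # Leaves the graph untouched and reports that nothing was modified.
    return PassResult(gm, False)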
class TestPassManager(TestCase):
def test_pass_manager(self):
"""
Tests that the pass manager runs the passes correctly.
"""
m = AddModule()
traced_m = torch.fx.symbolic_trace(m)
pm = PassManager(passes=[replace_add_with_mul_pass, replace_mul_with_div_pass], steps=5)
pm.validate_constraints()
self.assertEqual(len(pm.passes), 2)
res = pm(traced_m)
modified_m = res.graph_module
assert isinstance(modified_m, fx.GraphModule)
# Check that all call_function nodes are divs
for node in modified_m.graph.nodes:
if node.op == "call_function":
self.assertEqual(node.target, torch.div)
def test_this_before_that_pass_constraint(self):
"""
Tests the construction of constraints
"""
passes = [lambda x: 2 * x for _ in range(10)]
pm = PassManager(passes)
# add unfulfillable constraint
pm.add_constraint(this_before_that_pass_constraint(passes[-1], passes[0]))
with self.assertRaises(RuntimeError):
pm.validate_constraints()
def test_pass_manager_checks(self):
"""
        Tests that users can add check functions correctly
"""
m = AddModule()
traced_m = fx.symbolic_trace(m)
pm = PassManager(passes=[replace_add_with_mul_pass, replace_mul_with_div_pass])
def check_div_target(graph_module):
for node in graph_module.graph.nodes:
if node.op == "call_function" and node.target != torch.div:
raise ValueError("Target should be div!")
pm.add_checks(check_div_target)
with self.assertRaises(ValueError):
pm(traced_m)
def test_pass_manager_bad_checks(self):
"""
Checks that we error if we pass in a check function with the wrong parameters
"""
def check_bad_args(graph_module, i):
pass
pm = PassManager()
self.assertRaises(TypeError, pm.add_checks, check_bad_args)
def test_topological_sort(self):
"""
        Tests that passes are correctly ordered based on constraints.
"""
def pass0(x):
return x
def pass1(x):
return x + 1
def pass2(x):
return x + 2
def pass3(x):
return x + 3
def pass4(x):
return x + 4
def pass5(x):
return x + 5
# Not passing any constraints should keep the original order
passes = [pass0, pass1, pass2, pass3, pass4, pass5]
sorted = _topological_sort_passes(passes, [])
self.assertEqual(sorted, passes)
# Graph that we are constructing:
# 5 ----> 0 <---- 4
# | |
# +-> 2 -> 3 -> 1 <-+
# Which has a possible topological order of: [4, 5, 0, 2, 3, 1]
passes = [pass0, pass1, pass2, pass3, pass4, pass5]
constraints = [
this_before_that_pass_constraint(pass5, pass0),
this_before_that_pass_constraint(pass5, pass2),
this_before_that_pass_constraint(pass4, pass0),
this_before_that_pass_constraint(pass4, pass1),
this_before_that_pass_constraint(pass2, pass3),
this_before_that_pass_constraint(pass3, pass1),
]
sorted = _topological_sort_passes(passes, constraints)
self.assertEqual(sorted, [pass4, pass5, pass0, pass2, pass3, pass1])
        # A circular dependency among the constraints should raise a RuntimeError
passes = [pass0, pass1, pass2]
constraints = [
this_before_that_pass_constraint(passes[0], passes[1]),
this_before_that_pass_constraint(passes[1], passes[2]),
this_before_that_pass_constraint(passes[2], passes[0]),
]
with self.assertRaises(RuntimeError) as e:
_topological_sort_passes(passes, constraints)
expected_error_msg = f"Circular dependency detected within the following passes: {passes}"
self.assertEqual(e.exception.args[0], expected_error_msg)
|
pytorch-master
|
test/fx/test_pass_infra.py
|
import argparse
import torch
def dump(filename):
schemas = torch._C._jit_get_all_schemas()
schemas += torch._C._jit_get_custom_class_schemas()
with open(filename, 'w') as f:
for s in schemas:
f.write(str(s))
f.write('\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument(
'-f',
'--filename',
help='filename to dump the schemas',
type=str,
default='schemas.txt')
args = parser.parse_args()
dump(args.filename)
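# Example invocation (illustrative), matching the usage described in
# check_forward_backward_compatibility.py:
#   python test/forward_backward_compatibility/dump_all_function_schemas.py --filename nightly_schemas.txt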
|
pytorch-master
|
test/forward_backward_compatibility/dump_all_function_schemas.py
|
import argparse
import datetime
import re
import sys
import warnings
from collections import defaultdict
import torch
from torch._C import parse_schema
# How to run this test locally:
# 1. Have two virtual environments (e.g. conda envs), one without PyTorch installed (venv_nightly)
# and one with your local changes (venv_yours).
# In venv_nightly:
# 2. First ensure that PyTorch is uninstalled, but all prerequisites are installed
# 3. Install torch nightly build with
# `pip install --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html`
# 4. Generate original schemas with
# `python test/forward_backward_compatibility/dump_all_function_schemas.py --filename nightly_schemas.txt`
# Now in venv_yours:
# 5. Run this test with
# `python test/forward_backward_compatibility/check_forward_backward_compatibility.py --existing-schemas nightly_schemas.txt`
# The date specifies until when the allowlist exclusion should apply.
#
# - If we NEVER give BC guarantee for an operator, you can put the
# date arbitrarily far in the future.
# - Otherwise, pick a date that is far enough in the future that you
# believe you can land your diff before then.
#
# Allowlist entries can be removed after the date listed on them passes.
#
# Allowlist item format:
# [
# 0: function name regex
# 1: date until which the allowlist entry is valid
# 2: (optional) function argument regex
# ]
#
# NB: function name DOES NOT include overload name!
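# Hypothetical example of the three-element form (shown for illustration only, not a real entry):
#   ("aten::some_op", datetime.date(2022, 12, 31), "Tensor self"),
# would exempt only those aten::some_op schemas whose argument string also matches "Tensor self".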
ALLOW_LIST = [
("c10_experimental", datetime.date(2222, 1, 1)),
# Internal
("static", datetime.date(9999, 1, 1)),
("prim::ModuleDictIndex", datetime.date(9999, 1, 1)),
("prim::MKLDNNRelu6", datetime.date(9999, 1, 1)),
("prim::MKLDNNRelu6_", datetime.date(9999, 1, 1)),
("prim::Concat", datetime.date(9999, 1, 1)),
("aten::_NestedTensor_GeneralizedBMM", datetime.date(9999, 1, 1)),
# Internal, profiler-specific ops
("profiler::_call_end_callbacks_on_jit_fut*", datetime.date(9999, 1, 1)),
("profiler::_record_function_enter", datetime.date(9999, 1, 1)),
("aten::_sparse_addmm", datetime.date(2022, 6, 30)),
("aten::kl_div_backward", datetime.date(2022, 9, 1)),
("aten::_cholesky_helper", datetime.date(9999, 1, 1)),
("aten::_lstsq_helper", datetime.date(9999, 1, 1)),
("aten::_syevd_helper", datetime.date(9999, 1, 1)),
("aten::_linalg_solve_out_helper_", datetime.date(9999, 1, 1)),
("aten::select_backward", datetime.date(9999, 1, 1)),
("aten::slice_backward", datetime.date(9999, 1, 1)),
("aten::diagonal_backward", datetime.date(9999, 1, 1)),
("aten::rowwise_prune", datetime.date(9999, 1, 1)),
("aten::adaptive_avg_pool3d_backward", datetime.date(9999, 1, 1)),
("aten::_embedding_bag_dense_backward", datetime.date(9999, 1, 1)),
("aten::randperm", datetime.date(9999, 1, 1)),
("aten::linalg_solve", datetime.date(2022, 8, 31)),
("aten::linalg_solve.out", datetime.date(2022, 8, 31)),
("aten::binary_cross_entropy_with_logits_backward", datetime.date(2022, 9, 21)),
("aten::_linalg_qr_helper", datetime.date(2022, 8, 1)),
("aten::linalg_lu_solve", datetime.date(2022, 8, 1)),
("aten::linalg_lu_solve.out", datetime.date(2022, 8, 1)),
("aten::linalg_det", datetime.date(2022, 8, 1)),
("aten::linalg_det.out", datetime.date(2022, 8, 1)),
("aten::_det_lu_based_helper", datetime.date(2022, 8, 1)),
("aten::slogdet", datetime.date(2022, 8, 1)),
("aten::slogdet.out", datetime.date(2022, 8, 1)),
("aten::linalg_slogdet", datetime.date(2022, 8, 1)),
("aten::linalg_slogdet.out", datetime.date(2022, 8, 1)),
("aten::_linalg_solve", datetime.date(2022, 10, 1)),
("aten::_linalg_solve.solution", datetime.date(2022, 10, 1)),
("aten::solve", datetime.date(9999, 1, 1)),
("aten::solve.solution", datetime.date(9999, 1, 1)),
("aten::_solve_helper", datetime.date(9999, 1, 1)),
("aten::_convolution_nogroup", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_backward", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_backward_bias", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_backward_input", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_backward_weight", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_transpose_backward", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_transpose_backward_input", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_transpose_backward_weight", datetime.date(9999, 1, 1)),
("aten::miopen_depthwise_convolution_backward", datetime.date(9999, 1, 1)),
("aten::miopen_depthwise_convolution_backward_input", datetime.date(9999, 1, 1)),
("aten::miopen_depthwise_convolution_backward_weight", datetime.date(9999, 1, 1)),
("aten::_nested_tensor", datetime.date(9999, 1, 1)),
("prepacked::unpack_prepacked_sizes_conv2d", datetime.date(9999, 1, 1)),
("prepacked::unpack_prepacked_sizes_linear", datetime.date(9999, 1, 1)),
("aten::linalg_solve", datetime.date(2022, 8, 31)),
("aten::linalg_solve.out", datetime.date(2022, 8, 31)),
("aten::quantile", datetime.date(2022, 9, 30)),
("aten::nanquantile", datetime.date(2022, 9, 30)),
("aten::native_multi_head_self_attention", datetime.date(9999, 1, 1)),
("aten::_native_multi_head_self_attention", datetime.date(9999, 1, 1)),
("aten::grid_sampler_3d_backward", datetime.date(9999, 1, 1)),
("aten::_transform_bias_rescale_qkv", datetime.date(9999, 1, 1)),
("aten::_s_where", datetime.date(2022, 9, 30)),
("prim::infer_squeeze_size.dim", datetime.date(9999, 1, 1)),
("prim::infer_squeeze_size", datetime.date(9999, 1, 1)),
("aten::_weight_norm_cuda_interface", datetime.date(9999, 1, 1)),
("aten::_weight_norm_cuda_interface_backward", datetime.date(9999, 1, 1)),
("aten::segment_reduce", datetime.date(2022, 6, 30)),
("aten::_segment_reduce_backward", datetime.date(2022, 6, 30)),
("aten::empty.SymInt", datetime.date(9999, 1, 1)),
("c10d::broadcast", datetime.date(2022, 6, 25)),
("aten::.*functional", datetime.date(2022, 8, 1)),
("aten::_foreach.*", datetime.date(2022, 8, 1)),
("aten::unflatten", datetime.date(2022, 8, 10)),
("aten::nanmean", datetime.date(2022, 8, 30)),
("aten::nanmean.out", datetime.date(2022, 8, 30)),
("aten::nansum", datetime.date(2022, 8, 30)),
("aten::nansum.out", datetime.date(2022, 8, 30)),
# TODO: FIXME: prims shouldn't be checked
("prims::.*", datetime.date(9999, 1, 1)),
]
ALLOW_LIST_COMPILED = [
(
re.compile(item[0]),
item[1],
re.compile(item[2]) if len(item) > 2 else None,
) for item in ALLOW_LIST if item[1] >= datetime.date.today()
]
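# allow_listed() returns True when a schema string matches an active entry's name regex
# (and, if present, its argument regex), i.e. the schema is exempt from the BC/FC checks below.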
def allow_listed(schema):
for item in ALLOW_LIST_COMPILED:
if item[0].search(str(schema)):
if len(item) > 2 and item[2] is not None:
# if arguments regex is present, use it
return bool(item[2].search(str(schema)))
return True
return False
# The nightly build will fail to parse newly added syntax in schema declarations.
# Add new schemas that the nightly cannot yet parse here.
dont_parse_list = [
("_TorchScriptTesting.*", datetime.date(2099, 9, 17)),
("test_backend", datetime.date(2099, 9, 17)),
("dist_c10d", datetime.date(2099, 9, 17)),
("__backends__.nnc", datetime.date(2099, 9, 17)),
]
def has_valid_upgraders(schema, version_map):
# we want to parse through the map to find if
# the schema has valid upgraders. Since the
    # version map has an entry for each overload
# we need to do some ugly parsing.
# the name of the operator
schema_name = schema.name
if schema_name not in version_map:
return False
entries = version_map[schema_name]
possible_overloads = []
possible_schemas = []
for key, upgrader_schema_entries in entries.items():
possible_overloads.append(key)
possible_schemas.extend(upgrader_schema_entries)
# let's make sure this existing schema is part of possible
# schemas
for old_schema in possible_schemas:
if old_schema == schema:
return True
return False
def dont_parse(schema_line):
for item in dont_parse_list:
if item[1] < datetime.date.today():
continue
regexp = re.compile(item[0])
if regexp.search(schema_line):
return True
return False
def load_schemas_to_dict():
new_schemas = torch._C._jit_get_all_schemas()
new_schemas += torch._C._jit_get_custom_class_schemas()
new_schema_dict = defaultdict(list)
for s in new_schemas:
new_schema_dict[s.name].append(s)
return new_schema_dict
def process_version_map(version_map):
# version map maps full schema name to
# list of upgraders. Since we only have
    # the base name of the schema (i.e., no overload)
# we want to first process the map to make
# the key lookup easier. After this it will be:
# Dict[schema_name, Dict[overload, List[schema]]]
output = defaultdict(dict)
for (key, entries) in version_map.items():
operator_name = key.split(".")[0]
schema_entries = [parse_schema(entry.old_schema) for entry in entries]
output[operator_name][key] = schema_entries
return output
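# Rough shape of the transformation above (keys are illustrative, not real operators):
#   input:  {"aten::foo.out": [upgrader_entry, ...]}            # keyed by full name incl. overload
#   output: {"aten::foo": {"aten::foo.out": [schema, ...]}}     # keyed by base operator name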
def check_bc(existing_schemas):
new_schema_dict = load_schemas_to_dict()
version_map = process_version_map(torch._C._get_operator_version_map())
is_bc = True
broken_ops = []
for existing_schema in existing_schemas:
if allow_listed(existing_schema):
print("schema: ", str(existing_schema), " found on allowlist, skipping")
continue
if has_valid_upgraders(existing_schema, version_map):
print("schema: ", str(existing_schema), " has valid upgrader, skipping")
continue
print("processing existing schema: ", str(existing_schema))
matching_new_schemas = new_schema_dict.get(existing_schema.name, [])
found = False
for matching_new_schema in matching_new_schemas:
if matching_new_schema.is_backward_compatible_with(existing_schema):
found = True
break
if not found:
print(
"Can NOT find backward compatible schemas after changes "
"for schema {} from the following candidates:\n[\n{}\n]".format(
str(existing_schema),
"\n\t".join(str(s) for s in matching_new_schemas),
)
)
# TODO Print out more details about why candidates don't match.
broken_ops.append(str(existing_schema))
is_bc = False
if is_bc:
print("Found backward compatible schemas for all existing schemas")
else:
print(
"The PR is introducing backward incompatible changes to the "
"operator library. Please contact PyTorch team to confirm "
"whether this change is wanted or not. \n\nBroken ops: "
"[\n\t{}\n]".format("\n\t".join(broken_ops))
)
return is_bc
def check_fc(existing_schemas):
new_schema_dict = load_schemas_to_dict()
is_fc = True
broken_ops = []
for existing_schema in existing_schemas:
if allow_listed(existing_schema):
print("schema: ", str(existing_schema), " found on allowlist, skipping")
continue
print("processing existing schema: ", str(existing_schema))
matching_new_schemas = new_schema_dict.get(existing_schema.name, [])
found = False
possible_failure_reasons = []
for matching_new_schema in matching_new_schemas:
is_compatible, reason = matching_new_schema.check_forward_compatible_with(existing_schema)
if is_compatible:
found = True
break
if reason != "":
possible_failure_reasons.append(reason)
if not found:
print(
"Can NOT find forward compatible schemas after changes "
"for schema {} from the following candidates:\n[\n{}\n]".format(
str(existing_schema),
"\n\t".join(str(s) for s in matching_new_schemas),
)
)
print(
"Refer to following reasons for failure "
"to find FC schema:\n[\n{}\n]".format(
"\n\t".join(str(r) for r in possible_failure_reasons)
)
)
broken_ops.append(str(existing_schema))
is_fc = False
if is_fc:
print("Found forward compatible schemas for all existing schemas")
else:
warnings.warn(
"The PR is introducing a potentially forward incompatible changes to the "
"operator library. Please contact PyTorch team to confirm "
"whether this change is wanted or not. \n\nBroken ops: "
"[\n\t{}\n]".format("\n\t".join(broken_ops))
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Process some integers.")
parser.add_argument(
"--existing-schemas",
help="filename to load existing schemas",
type=str,
default="schemas.txt",
)
args = parser.parse_args()
existing_schema_dict = dict()
slist = []
with open(args.existing_schemas, "r") as f:
while True:
line = f.readline()
if not line:
break
if dont_parse(line.strip()):
print("Not parsing schema line: ", line.strip())
continue
s = parse_schema(line.strip())
slist.append(s)
    # TODO: in case there are FC-breaking changes,
    # we just warn for now until there is a policy.
check_fc(slist)
if not check_bc(slist):
sys.exit(1)
|
pytorch-master
|
test/forward_backward_compatibility/check_forward_backward_compatibility.py
|
# Owner(s): ["module: intel"]
from torch.testing._internal.common_utils import TestCase, run_tests, IS_LINUX
import shutil
import subprocess
import tempfile
import unittest
@unittest.skipIf(not IS_LINUX, "Only works on linux")
class TestTorchrun(TestCase):
def setUp(self):
self._test_dir = tempfile.mkdtemp(prefix=self.__class__.__name__)
def tearDown(self):
shutil.rmtree(self._test_dir)
def test_cpu_info(self):
lscpu_info = """# The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node
0,0,0,0
1,1,0,0
2,2,0,0
3,3,0,0
4,4,1,1
5,5,1,1
6,6,1,1
7,7,1,1
8,0,0,0
9,1,0,0
10,2,0,0
11,3,0,0
12,4,1,1
13,5,1,1
14,6,1,1
15,7,1,1
"""
from torch.backends.xeon.run_cpu import _CPUinfo
cpuinfo = _CPUinfo(lscpu_info)
assert cpuinfo._physical_core_nums() == 8
assert cpuinfo._logical_core_nums() == 16
assert cpuinfo.get_node_physical_cores(0) == [0, 1, 2, 3]
assert cpuinfo.get_node_physical_cores(1) == [4, 5, 6, 7]
assert cpuinfo.get_node_logical_cores(0) == [0, 1, 2, 3, 8, 9, 10, 11]
assert cpuinfo.get_node_logical_cores(1) == [4, 5, 6, 7, 12, 13, 14, 15]
assert cpuinfo.get_all_physical_cores() == [0, 1, 2, 3, 4, 5, 6, 7]
assert cpuinfo.get_all_logical_cores() == [0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15]
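        # numa_aware_check returns the NUMA node ids spanned by the given cores,
        # so a core list crossing both sockets yields [0, 1].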
assert cpuinfo.numa_aware_check([0, 1, 2, 3]) == [0]
assert cpuinfo.numa_aware_check([4, 5, 6, 7]) == [1]
assert cpuinfo.numa_aware_check([2, 3, 4, 5]) == [0, 1]
def test_multi_threads(self):
num = 0
with subprocess.Popen(f"python -m torch.backends.xeon.run_cpu --ninstances 4 --use_default_allocator \
--disable_iomp --disable_numactl --log_path {self._test_dir} --no_python pwd",
shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as p:
for line in p.stdout.readlines():
segs = str(line, "utf-8").strip().split("-")
if segs[-1].strip() == "pwd":
num += 1
assert num == 4, "Failed to launch multiple instances for inference"
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/backends/xeon/test_launch.py
|
# Owner(s): ["module: unknown"]
import collections
import json
import os
import re
import textwrap
import timeit
from typing import Any, List, Tuple
import unittest
import torch
import torch.utils.benchmark as benchmark_utils
from torch.testing._internal.common_utils import TestCase, run_tests, IS_SANDCASTLE, IS_WINDOWS, slowTest
import expecttest
import numpy as np
CALLGRIND_ARTIFACTS: str = os.path.join(
os.path.split(os.path.abspath(__file__))[0],
"callgrind_artifacts.json"
)
def generate_callgrind_artifacts() -> None:
"""Regenerate `callgrind_artifacts.json`
Unlike the expect tests, regenerating callgrind counts will produce a
large diff since build directories and conda/pip directories are included
in the instruction string. It is also not 100% deterministic (due to jitter
from Python) and takes over a minute to run. As a result, running this
function is manual.
"""
print("Regenerating callgrind artifact.")
stats_no_data = benchmark_utils.Timer(
"y = torch.ones(())"
).collect_callgrind(number=1000)
stats_with_data = benchmark_utils.Timer(
"y = torch.ones((1,))"
).collect_callgrind(number=1000)
user = os.getenv("USER")
def to_entry(fn_counts):
return [f"{c} {fn.replace(f'/{user}/', '/test_user/')}" for c, fn in fn_counts]
artifacts = {
"baseline_inclusive": to_entry(stats_no_data.baseline_inclusive_stats),
"baseline_exclusive": to_entry(stats_no_data.baseline_exclusive_stats),
"ones_no_data_inclusive": to_entry(stats_no_data.stmt_inclusive_stats),
"ones_no_data_exclusive": to_entry(stats_no_data.stmt_exclusive_stats),
"ones_with_data_inclusive": to_entry(stats_with_data.stmt_inclusive_stats),
"ones_with_data_exclusive": to_entry(stats_with_data.stmt_exclusive_stats),
}
with open(CALLGRIND_ARTIFACTS, "wt") as f:
json.dump(artifacts, f, indent=4)
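# Illustrative manual invocation (assumes the working directory is this test's directory):
#   python -c "import test_benchmark_utils; test_benchmark_utils.generate_callgrind_artifacts()"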
def load_callgrind_artifacts() -> Tuple[benchmark_utils.CallgrindStats, benchmark_utils.CallgrindStats]:
"""Hermetic artifact to unit test Callgrind wrapper.
In addition to collecting counts, this wrapper provides some facilities for
manipulating and displaying the collected counts. The results of several
measurements are stored in callgrind_artifacts.json.
While FunctionCounts and CallgrindStats are pickleable, the artifacts for
testing are stored in raw string form for easier inspection and to avoid
baking any implementation details into the artifact itself.
"""
with open(CALLGRIND_ARTIFACTS, "rt") as f:
artifacts = json.load(f)
pattern = re.compile(r"^\s*([0-9]+)\s(.+)$")
def to_function_counts(
count_strings: List[str],
inclusive: bool
) -> benchmark_utils.FunctionCounts:
data: List[benchmark_utils.FunctionCount] = []
for cs in count_strings:
# Storing entries as f"{c} {fn}" rather than [c, fn] adds some work
# reviving the artifact, but it makes the json much easier to read.
match = pattern.search(cs)
assert match is not None
c, fn = match.groups()
data.append(benchmark_utils.FunctionCount(count=int(c), function=fn))
return benchmark_utils.FunctionCounts(
tuple(sorted(data, reverse=True)),
inclusive=inclusive)
baseline_inclusive = to_function_counts(artifacts["baseline_inclusive"], True)
baseline_exclusive = to_function_counts(artifacts["baseline_exclusive"], False)
stats_no_data = benchmark_utils.CallgrindStats(
benchmark_utils.TaskSpec("y = torch.ones(())", "pass"),
number_per_run=1000,
built_with_debug_symbols=True,
baseline_inclusive_stats=baseline_inclusive,
baseline_exclusive_stats=baseline_exclusive,
stmt_inclusive_stats=to_function_counts(artifacts["ones_no_data_inclusive"], True),
stmt_exclusive_stats=to_function_counts(artifacts["ones_no_data_exclusive"], False),
stmt_callgrind_out=None,
)
stats_with_data = benchmark_utils.CallgrindStats(
benchmark_utils.TaskSpec("y = torch.ones((1,))", "pass"),
number_per_run=1000,
built_with_debug_symbols=True,
baseline_inclusive_stats=baseline_inclusive,
baseline_exclusive_stats=baseline_exclusive,
stmt_inclusive_stats=to_function_counts(artifacts["ones_with_data_inclusive"], True),
stmt_exclusive_stats=to_function_counts(artifacts["ones_with_data_exclusive"], False),
stmt_callgrind_out=None,
)
return stats_no_data, stats_with_data
class MyModule(torch.nn.Module):
def forward(self, x):
return x + 1
class TestBenchmarkUtils(TestCase):
def regularizeAndAssertExpectedInline(
self, x: Any,
expect: str,
indent: int = 12
) -> None:
x_str: str = re.sub(
"object at 0x[0-9a-fA-F]+>",
"object at 0xXXXXXXXXXXXX>",
x if isinstance(x, str) else repr(x)
)
if "\n" in x_str:
# Indent makes the reference align at the call site.
x_str = textwrap.indent(x_str, " " * indent)
self.assertExpectedInline(x_str, expect, skip=1)
def test_timer(self):
timer = benchmark_utils.Timer(
stmt="torch.ones(())",
)
sample = timer.timeit(5).median
self.assertIsInstance(sample, float)
median = timer.blocked_autorange(min_run_time=0.01).median
self.assertIsInstance(median, float)
# We set a very high threshold to avoid flakiness in CI.
# The internal algorithm is tested in `test_adaptive_timer`
median = timer.adaptive_autorange(threshold=0.5).median
# Test that multi-line statements work properly.
median = benchmark_utils.Timer(
stmt="""
with torch.no_grad():
y = x + 1""",
setup="""
x = torch.ones((1,), requires_grad=True)
for _ in range(5):
x = x + 1.0""",
).timeit(5).median
self.assertIsInstance(sample, float)
@slowTest
@unittest.skipIf(IS_SANDCASTLE, "C++ timing is OSS only.")
@unittest.skipIf(True, "Failing on clang, see 74398")
def test_timer_tiny_fast_snippet(self):
timer = benchmark_utils.Timer(
'auto x = 1;(void)x;',
timer=timeit.default_timer,
language=benchmark_utils.Language.CPP,
)
median = timer.blocked_autorange().median
self.assertIsInstance(median, float)
@slowTest
@unittest.skipIf(IS_SANDCASTLE, "C++ timing is OSS only.")
@unittest.skipIf(True, "Failing on clang, see 74398")
def test_cpp_timer(self):
timer = benchmark_utils.Timer(
"""
#ifndef TIMER_GLOBAL_CHECK
static_assert(false);
#endif
torch::Tensor y = x + 1;
""",
setup="torch::Tensor x = torch::empty({1});",
global_setup="#define TIMER_GLOBAL_CHECK",
timer=timeit.default_timer,
language=benchmark_utils.Language.CPP,
)
t = timer.timeit(10)
self.assertIsInstance(t.median, float)
class _MockTimer:
_seed = 0
_timer_noise_level = 0.05
_timer_cost = 100e-9 # 100 ns
_function_noise_level = 0.05
_function_costs = (
("pass", 8e-9),
("cheap_fn()", 4e-6),
("expensive_fn()", 20e-6),
("with torch.no_grad():\n y = x + 1", 10e-6),
)
def __init__(self, stmt, setup, timer, globals):
self._random_state = np.random.RandomState(seed=self._seed)
self._mean_cost = {k: v for k, v in self._function_costs}[stmt]
def sample(self, mean, noise_level):
return max(self._random_state.normal(mean, mean * noise_level), 5e-9)
def timeit(self, number):
return sum([
# First timer invocation
self.sample(self._timer_cost, self._timer_noise_level),
# Stmt body
self.sample(self._mean_cost * number, self._function_noise_level),
# Second timer invocation
self.sample(self._timer_cost, self._timer_noise_level),
])
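    # The _MockTimer above draws synthetic timings from a normal distribution around
    # fixed per-statement costs (with a fixed seed), so the autorange heuristics can be
    # exercised deterministically without real measurement noise.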
def test_adaptive_timer(self):
class MockTimer(benchmark_utils.Timer):
_timer_cls = self._MockTimer
class _MockCudaTimer(self._MockTimer):
# torch.cuda.synchronize is much more expensive than
# just timeit.default_timer
_timer_cost = 10e-6
_function_costs = (
self._MockTimer._function_costs[0],
self._MockTimer._function_costs[1],
# GPU should be faster once there is enough work.
("expensive_fn()", 5e-6),
)
class MockCudaTimer(benchmark_utils.Timer):
_timer_cls = _MockCudaTimer
m = MockTimer("pass").blocked_autorange(min_run_time=10)
self.regularizeAndAssertExpectedInline(
m,
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
pass
Median: 7.98 ns
IQR: 0.52 ns (7.74 to 8.26)
125 measurements, 10000000 runs per measurement, 1 thread"""
)
self.regularizeAndAssertExpectedInline(
MockTimer("pass").adaptive_autorange(),
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
pass
Median: 7.86 ns
IQR: 0.71 ns (7.63 to 8.34)
6 measurements, 1000000 runs per measurement, 1 thread"""
)
# Check against strings so we can reuse expect infra.
self.regularizeAndAssertExpectedInline(m.mean, """8.0013658357956e-09""")
self.regularizeAndAssertExpectedInline(m.median, """7.983151323215967e-09""")
self.regularizeAndAssertExpectedInline(len(m.times), """125""")
self.regularizeAndAssertExpectedInline(m.number_per_run, """10000000""")
self.regularizeAndAssertExpectedInline(
MockTimer("cheap_fn()").blocked_autorange(min_run_time=10),
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
cheap_fn()
Median: 3.98 us
IQR: 0.27 us (3.85 to 4.12)
252 measurements, 10000 runs per measurement, 1 thread"""
)
self.regularizeAndAssertExpectedInline(
MockTimer("cheap_fn()").adaptive_autorange(),
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
cheap_fn()
Median: 4.16 us
IQR: 0.22 us (4.04 to 4.26)
4 measurements, 1000 runs per measurement, 1 thread"""
)
self.regularizeAndAssertExpectedInline(
MockTimer("expensive_fn()").blocked_autorange(min_run_time=10),
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
expensive_fn()
Median: 19.97 us
IQR: 1.35 us (19.31 to 20.65)
501 measurements, 1000 runs per measurement, 1 thread"""
)
self.regularizeAndAssertExpectedInline(
MockTimer("expensive_fn()").adaptive_autorange(),
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
expensive_fn()
Median: 20.79 us
IQR: 1.09 us (20.20 to 21.29)
4 measurements, 1000 runs per measurement, 1 thread"""
)
self.regularizeAndAssertExpectedInline(
MockCudaTimer("pass").blocked_autorange(min_run_time=10),
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
pass
Median: 7.92 ns
IQR: 0.43 ns (7.75 to 8.17)
13 measurements, 100000000 runs per measurement, 1 thread"""
)
self.regularizeAndAssertExpectedInline(
MockCudaTimer("pass").adaptive_autorange(),
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
pass
Median: 7.75 ns
IQR: 0.57 ns (7.56 to 8.13)
4 measurements, 10000000 runs per measurement, 1 thread"""
)
self.regularizeAndAssertExpectedInline(
MockCudaTimer("cheap_fn()").blocked_autorange(min_run_time=10),
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
cheap_fn()
Median: 4.04 us
IQR: 0.30 us (3.90 to 4.19)
25 measurements, 100000 runs per measurement, 1 thread"""
)
self.regularizeAndAssertExpectedInline(
MockCudaTimer("cheap_fn()").adaptive_autorange(),
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
cheap_fn()
Median: 4.09 us
IQR: 0.38 us (3.90 to 4.28)
4 measurements, 100000 runs per measurement, 1 thread"""
)
self.regularizeAndAssertExpectedInline(
MockCudaTimer("expensive_fn()").blocked_autorange(min_run_time=10),
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
expensive_fn()
Median: 4.98 us
IQR: 0.31 us (4.83 to 5.13)
20 measurements, 100000 runs per measurement, 1 thread"""
)
self.regularizeAndAssertExpectedInline(
MockCudaTimer("expensive_fn()").adaptive_autorange(),
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
expensive_fn()
Median: 5.01 us
IQR: 0.28 us (4.87 to 5.15)
4 measurements, 10000 runs per measurement, 1 thread"""
)
# Make sure __repr__ is reasonable for
# multi-line / label / sub_label / description, but we don't need to
# check numerics.
multi_line_stmt = """
with torch.no_grad():
y = x + 1
"""
self.regularizeAndAssertExpectedInline(
MockTimer(multi_line_stmt).blocked_autorange(),
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
stmt:
with torch.no_grad():
y = x + 1
Median: 10.06 us
IQR: 0.54 us (9.73 to 10.27)
20 measurements, 1000 runs per measurement, 1 thread"""
)
self.regularizeAndAssertExpectedInline(
MockTimer(multi_line_stmt, sub_label="scalar_add").blocked_autorange(),
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
stmt: (scalar_add)
with torch.no_grad():
y = x + 1
Median: 10.06 us
IQR: 0.54 us (9.73 to 10.27)
20 measurements, 1000 runs per measurement, 1 thread"""
)
self.regularizeAndAssertExpectedInline(
MockTimer(
multi_line_stmt,
label="x + 1 (no grad)",
sub_label="scalar_add",
).blocked_autorange(),
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
x + 1 (no grad): scalar_add
Median: 10.06 us
IQR: 0.54 us (9.73 to 10.27)
20 measurements, 1000 runs per measurement, 1 thread"""
)
self.regularizeAndAssertExpectedInline(
MockTimer(
multi_line_stmt,
setup="setup_fn()",
sub_label="scalar_add",
).blocked_autorange(),
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
stmt: (scalar_add)
with torch.no_grad():
y = x + 1
setup: setup_fn()
Median: 10.06 us
IQR: 0.54 us (9.73 to 10.27)
20 measurements, 1000 runs per measurement, 1 thread"""
)
self.regularizeAndAssertExpectedInline(
MockTimer(
multi_line_stmt,
setup="""
x = torch.ones((1,), requires_grad=True)
for _ in range(5):
x = x + 1.0""",
sub_label="scalar_add",
description="Multi-threaded scalar math!",
num_threads=16,
).blocked_autorange(),
"""\
<torch.utils.benchmark.utils.common.Measurement object at 0xXXXXXXXXXXXX>
stmt: (scalar_add)
with torch.no_grad():
y = x + 1
Multi-threaded scalar math!
setup:
x = torch.ones((1,), requires_grad=True)
for _ in range(5):
x = x + 1.0
Median: 10.06 us
IQR: 0.54 us (9.73 to 10.27)
20 measurements, 1000 runs per measurement, 16 threads"""
)
@slowTest
@unittest.skipIf(IS_WINDOWS, "Valgrind is not supported on Windows.")
@unittest.skipIf(IS_SANDCASTLE, "Valgrind is OSS only.")
def test_collect_callgrind(self):
with self.assertRaisesRegex(
ValueError,
r"`collect_callgrind` requires that globals be wrapped "
r"in `CopyIfCallgrind` so that serialization is explicit."
):
benchmark_utils.Timer(
"pass",
globals={"x": 1}
).collect_callgrind(collect_baseline=False)
with self.assertRaisesRegex(
# Subprocess raises AttributeError (from pickle),
# _ValgrindWrapper re-raises as generic OSError.
OSError, "AttributeError: Can't get attribute 'MyModule'"
):
benchmark_utils.Timer(
"model(1)",
globals={"model": benchmark_utils.CopyIfCallgrind(MyModule())}
).collect_callgrind(collect_baseline=False)
@torch.jit.script
def add_one(x):
return x + 1
timer = benchmark_utils.Timer(
"y = add_one(x) + k",
setup="x = torch.ones((1,))",
globals={
"add_one": benchmark_utils.CopyIfCallgrind(add_one),
"k": benchmark_utils.CopyIfCallgrind(5),
"model": benchmark_utils.CopyIfCallgrind(
MyModule(),
setup=f"""\
import sys
sys.path.append({repr(os.path.split(os.path.abspath(__file__))[0])})
from test_benchmark_utils import MyModule
"""
)
}
)
stats = timer.collect_callgrind(number=1000)
counts = stats.counts(denoise=False)
self.assertIsInstance(counts, int)
self.assertGreater(counts, 0)
# There is some jitter with the allocator, so we use a simpler task to
# test reproducibility.
timer = benchmark_utils.Timer(
"x += 1",
setup="x = torch.ones((1,))",
)
stats = timer.collect_callgrind(number=1000, repeats=20)
assert isinstance(stats, tuple)
# Check that the repeats are at least somewhat repeatable. (within 10 instructions per iter)
counts = collections.Counter([s.counts(denoise=True) // 10_000 * 10_000 for s in stats])
self.assertGreater(max(counts.values()), 1, f"Every instruction count total was unique: {counts}")
from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import wrapper_singleton
self.assertIsNone(
wrapper_singleton()._bindings_module,
"JIT'd bindings are only for back testing."
)
@slowTest
@unittest.skipIf(IS_WINDOWS, "Valgrind is not supported on Windows.")
@unittest.skipIf(IS_SANDCASTLE, "Valgrind is OSS only.")
@unittest.skipIf(True, "Failing on clang, see 74398")
def test_collect_cpp_callgrind(self):
timer = benchmark_utils.Timer(
"x += 1;",
setup="torch::Tensor x = torch::ones({1});",
timer=timeit.default_timer,
language="c++",
)
stats = [
timer.collect_callgrind()
for _ in range(3)
]
counts = [s.counts() for s in stats]
self.assertGreater(
min(counts), 0, "No stats were collected")
self.assertEqual(
min(counts), max(counts), "C++ Callgrind should be deterministic")
for s in stats:
self.assertEqual(
s.counts(denoise=True), s.counts(denoise=False),
"De-noising should not apply to C++.")
stats = timer.collect_callgrind(number=1000, repeats=20)
assert isinstance(stats, tuple)
# NB: Unlike the example above, there is no expectation that all
# repeats will be identical.
counts = collections.Counter([s.counts(denoise=True) // 10_000 * 10_000 for s in stats])
self.assertGreater(max(counts.values()), 1, repr(counts))
def test_manipulate_callgrind_stats(self):
stats_no_data, stats_with_data = load_callgrind_artifacts()
# Mock `torch.set_printoptions(linewidth=160)`
wide_linewidth = benchmark_utils.FunctionCounts(
stats_no_data.stats(inclusive=False)._data, False, _linewidth=160)
for l in repr(wide_linewidth).splitlines(keepends=False):
self.assertLessEqual(len(l), 160)
self.assertEqual(
# `delta` is just a convenience method.
stats_with_data.delta(stats_no_data)._data,
(stats_with_data.stats() - stats_no_data.stats())._data
)
deltas = stats_with_data.as_standardized().delta(stats_no_data.as_standardized())
def custom_transforms(fn: str):
fn = re.sub(re.escape("/usr/include/c++/8/bits/"), "", fn)
fn = re.sub(r"build/../", "", fn)
fn = re.sub(".+" + re.escape("libsupc++"), "libsupc++", fn)
return fn
self.regularizeAndAssertExpectedInline(
stats_no_data,
"""\
<torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.CallgrindStats object at 0xXXXXXXXXXXXX>
y = torch.ones(())
All Noisy symbols removed
Instructions: 8869966 8728096
Baseline: 6682 5766
1000 runs per measurement, 1 thread""",
)
self.regularizeAndAssertExpectedInline(
stats_no_data.counts(),
"""8869966""",
)
self.regularizeAndAssertExpectedInline(
stats_no_data.counts(denoise=True),
"""8728096""",
)
self.regularizeAndAssertExpectedInline(
stats_no_data.stats(),
"""\
<torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.FunctionCounts object at 0xXXXXXXXXXXXX>
408000 ???:__tls_get_addr [/usr/lib64/ld-2.28.so]
388193 ???:_int_free [/usr/lib64/libc-2.28.so]
274000 build/../torch/csrc/utils/python ... rch/torch/lib/libtorch_python.so]
264000 build/../aten/src/ATen/record_fu ... ytorch/torch/lib/libtorch_cpu.so]
192000 build/../c10/core/Device.h:c10:: ... epos/pytorch/torch/lib/libc10.so]
169855 ???:_int_malloc [/usr/lib64/libc-2.28.so]
154000 build/../c10/core/TensorOptions. ... ytorch/torch/lib/libtorch_cpu.so]
148561 /tmp/build/80754af9/python_15996 ... da3/envs/throwaway/bin/python3.6]
135000 ???:malloc [/usr/lib64/libc-2.28.so]
...
2000 /usr/include/c++/8/ext/new_allocator.h:torch::PythonArgs::intlist(int)
2000 /usr/include/c++/8/bits/stl_vect ... *, _object*, _object*, _object**)
2000 /usr/include/c++/8/bits/stl_vect ... rningHandler::~PyWarningHandler()
2000 /usr/include/c++/8/bits/stl_vect ... ject*, _object*, _object**, bool)
2000 /usr/include/c++/8/bits/stl_algobase.h:torch::PythonArgs::intlist(int)
2000 /usr/include/c++/8/bits/shared_p ... ad_accumulator(at::Tensor const&)
2000 /usr/include/c++/8/bits/move.h:c ... te<c10::AutogradMetaInterface> >)
2000 /usr/include/c++/8/bits/atomic_b ... DispatchKey&&, caffe2::TypeMeta&)
2000 /usr/include/c++/8/array:at::Ten ... , at::Tensor&, c10::Scalar) const
Total: 8869966""",
)
self.regularizeAndAssertExpectedInline(
stats_no_data.stats(inclusive=True),
"""\
<torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.FunctionCounts object at 0xXXXXXXXXXXXX>
8959166 ???:0x0000000000001050 [/usr/lib64/ld-2.28.so]
8959166 ???:(below main) [/usr/lib64/libc-2.28.so]
8959166 /tmp/build/80754af9/python_15996 ... a3/envs/throwaway/bin/python3.6]
8959166 /tmp/build/80754af9/python_15996 ... a3/envs/throwaway/bin/python3.6]
8959166 /tmp/build/80754af9/python_15996 ... a3/envs/throwaway/bin/python3.6]
8959166 /tmp/build/80754af9/python_15996 ... a3/envs/throwaway/bin/python3.6]
8959166 /tmp/build/80754af9/python_15996 ... a3/envs/throwaway/bin/python3.6]
8959166 /tmp/build/80754af9/python_15996 ... a3/envs/throwaway/bin/python3.6]
8959166 /tmp/build/80754af9/python_15996 ... a3/envs/throwaway/bin/python3.6]
...
92821 /tmp/build/80754af9/python_15996 ... a3/envs/throwaway/bin/python3.6]
91000 build/../torch/csrc/tensor/pytho ... ch/torch/lib/libtorch_python.so]
91000 /data/users/test_user/repos/pyto ... nsors::get_default_scalar_type()
90090 ???:pthread_mutex_lock [/usr/lib64/libpthread-2.28.so]
90000 build/../c10/core/TensorImpl.h:c ... ch/torch/lib/libtorch_python.so]
90000 build/../aten/src/ATen/record_fu ... torch/torch/lib/libtorch_cpu.so]
90000 /data/users/test_user/repos/pyto ... uard(c10::optional<c10::Device>)
90000 /data/users/test_user/repos/pyto ... ersionCounter::~VersionCounter()
88000 /data/users/test_user/repos/pyto ... ratorKernel*, at::Tensor const&)""",
)
self.regularizeAndAssertExpectedInline(
wide_linewidth,
"""\
<torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.FunctionCounts object at 0xXXXXXXXXXXXX>
408000 ???:__tls_get_addr [/usr/lib64/ld-2.28.so]
388193 ???:_int_free [/usr/lib64/libc-2.28.so]
274000 build/../torch/csrc/utils/python_arg_parser.cpp:torch::FunctionSignature ... bool) [/data/users/test_user/repos/pytorch/torch/lib/libtorch_python.so]
264000 build/../aten/src/ATen/record_function.cpp:at::RecordFunction::RecordFun ... ordScope) [/data/users/test_user/repos/pytorch/torch/lib/libtorch_cpu.so]
192000 build/../c10/core/Device.h:c10::Device::validate() [/data/users/test_user/repos/pytorch/torch/lib/libc10.so]
169855 ???:_int_malloc [/usr/lib64/libc-2.28.so]
154000 build/../c10/core/TensorOptions.h:c10::TensorOptions::merge_in(c10::Tens ... ns) const [/data/users/test_user/repos/pytorch/torch/lib/libtorch_cpu.so]
148561 /tmp/build/80754af9/python_1599604603603/work/Python/ceval.c:_PyEval_EvalFrameDefault [/home/test_user/miniconda3/envs/throwaway/bin/python3.6]
135000 ???:malloc [/usr/lib64/libc-2.28.so]
...
2000 /usr/include/c++/8/ext/new_allocator.h:torch::PythonArgs::intlist(int)
2000 /usr/include/c++/8/bits/stl_vector.h:torch::PythonArgParser::raw_parse(_object*, _object*, _object*, _object**)
2000 /usr/include/c++/8/bits/stl_vector.h:torch::PyWarningHandler::~PyWarningHandler()
2000 /usr/include/c++/8/bits/stl_vector.h:torch::FunctionSignature::parse(_object*, _object*, _object*, _object**, bool)
2000 /usr/include/c++/8/bits/stl_algobase.h:torch::PythonArgs::intlist(int)
2000 /usr/include/c++/8/bits/shared_ptr_base.h:torch::autograd::impl::try_get_grad_accumulator(at::Tensor const&)
2000 /usr/include/c++/8/bits/move.h:c10::TensorImpl::set_autograd_meta(std::u ... AutogradMetaInterface, std::default_delete<c10::AutogradMetaInterface> >)
2000 /usr/include/c++/8/bits/atomic_base.h:at::Tensor at::detail::make_tensor ... t_null_type<c10::StorageImpl> >&&, c10::DispatchKey&&, caffe2::TypeMeta&)
2000 /usr/include/c++/8/array:at::Tensor& c10::Dispatcher::callWithDispatchKe ... , c10::Scalar)> const&, c10::DispatchKey, at::Tensor&, c10::Scalar) const
Total: 8869966""" # noqa: B950
)
self.regularizeAndAssertExpectedInline(
stats_no_data.as_standardized().stats(),
"""\
<torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.FunctionCounts object at 0xXXXXXXXXXXXX>
408000 ???:__tls_get_addr
388193 ???:_int_free
274000 build/../torch/csrc/utils/python ... ject*, _object*, _object**, bool)
264000 build/../aten/src/ATen/record_fu ... ::RecordFunction(at::RecordScope)
192000 build/../c10/core/Device.h:c10::Device::validate()
169855 ???:_int_malloc
154000 build/../c10/core/TensorOptions. ... erge_in(c10::TensorOptions) const
148561 Python/ceval.c:_PyEval_EvalFrameDefault
135000 ???:malloc
...
2000 /usr/include/c++/8/ext/new_allocator.h:torch::PythonArgs::intlist(int)
2000 /usr/include/c++/8/bits/stl_vect ... *, _object*, _object*, _object**)
2000 /usr/include/c++/8/bits/stl_vect ... rningHandler::~PyWarningHandler()
2000 /usr/include/c++/8/bits/stl_vect ... ject*, _object*, _object**, bool)
2000 /usr/include/c++/8/bits/stl_algobase.h:torch::PythonArgs::intlist(int)
2000 /usr/include/c++/8/bits/shared_p ... ad_accumulator(at::Tensor const&)
2000 /usr/include/c++/8/bits/move.h:c ... te<c10::AutogradMetaInterface> >)
2000 /usr/include/c++/8/bits/atomic_b ... DispatchKey&&, caffe2::TypeMeta&)
2000 /usr/include/c++/8/array:at::Ten ... , at::Tensor&, c10::Scalar) const
Total: 8869966""",
)
self.regularizeAndAssertExpectedInline(
deltas,
"""\
<torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.FunctionCounts object at 0xXXXXXXXXXXXX>
85000 Objects/dictobject.c:lookdict_unicode
59089 ???:_int_free
43000 ???:malloc
25000 build/../torch/csrc/utils/python ... :torch::PythonArgs::intlist(int)
24000 ???:__tls_get_addr
23000 ???:free
21067 Objects/dictobject.c:lookdict_unicode_nodummy
20000 build/../torch/csrc/utils/python ... :torch::PythonArgs::intlist(int)
18000 Objects/longobject.c:PyLong_AsLongLongAndOverflow
...
2000 /home/nwani/m3/conda-bld/compile ... del_op.cc:operator delete(void*)
1000 /usr/include/c++/8/bits/stl_vector.h:torch::PythonArgs::intlist(int)
193 ???:_int_malloc
75 ???:_int_memalign
-1000 build/../c10/util/SmallVector.h: ... _contiguous(c10::ArrayRef<long>)
-1000 build/../c10/util/SmallVector.h: ... nsor_restride(c10::MemoryFormat)
-1000 /usr/include/c++/8/bits/stl_vect ... es(_object*, _object*, _object*)
-8000 Python/ceval.c:_PyEval_EvalFrameDefault
-16000 Objects/tupleobject.c:PyTuple_New
Total: 432917""",
)
self.regularizeAndAssertExpectedInline(len(deltas), """35""")
self.regularizeAndAssertExpectedInline(
deltas.transform(custom_transforms),
"""\
<torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.FunctionCounts object at 0xXXXXXXXXXXXX>
85000 Objects/dictobject.c:lookdict_unicode
59089 ???:_int_free
43000 ???:malloc
25000 torch/csrc/utils/python_numbers.h:torch::PythonArgs::intlist(int)
24000 ???:__tls_get_addr
23000 ???:free
21067 Objects/dictobject.c:lookdict_unicode_nodummy
20000 torch/csrc/utils/python_arg_parser.h:torch::PythonArgs::intlist(int)
18000 Objects/longobject.c:PyLong_AsLongLongAndOverflow
...
2000 c10/util/SmallVector.h:c10::TensorImpl::compute_contiguous() const
1000 stl_vector.h:torch::PythonArgs::intlist(int)
193 ???:_int_malloc
75 ???:_int_memalign
-1000 stl_vector.h:torch::autograd::TH ... es(_object*, _object*, _object*)
-1000 c10/util/SmallVector.h:c10::Tens ... _contiguous(c10::ArrayRef<long>)
-1000 c10/util/SmallVector.h:c10::Tens ... nsor_restride(c10::MemoryFormat)
-8000 Python/ceval.c:_PyEval_EvalFrameDefault
-16000 Objects/tupleobject.c:PyTuple_New
Total: 432917""",
)
self.regularizeAndAssertExpectedInline(
deltas.filter(lambda fn: fn.startswith("???")),
"""\
<torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.FunctionCounts object at 0xXXXXXXXXXXXX>
59089 ???:_int_free
43000 ???:malloc
24000 ???:__tls_get_addr
23000 ???:free
193 ???:_int_malloc
75 ???:_int_memalign
Total: 149357""",
)
self.regularizeAndAssertExpectedInline(
deltas[:5],
"""\
<torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.FunctionCounts object at 0xXXXXXXXXXXXX>
85000 Objects/dictobject.c:lookdict_unicode
59089 ???:_int_free
43000 ???:malloc
25000 build/../torch/csrc/utils/python_ ... h:torch::PythonArgs::intlist(int)
24000 ???:__tls_get_addr
Total: 236089""",
)
def test_compare(self):
# Simulate several approaches.
costs = (
# overhead_optimized_fn()
(1e-6, 1e-9),
# compute_optimized_fn()
(3e-6, 5e-10),
# special_case_fn() [square inputs only]
(1e-6, 4e-10),
)
sizes = (
(16, 16),
(16, 128),
(128, 128),
(4096, 1024),
(2048, 2048),
)
# overhead_optimized_fn()
class _MockTimer_0(self._MockTimer):
_function_costs = tuple(
(f"fn({i}, {j})", costs[0][0] + costs[0][1] * i * j)
for i, j in sizes
)
class MockTimer_0(benchmark_utils.Timer):
_timer_cls = _MockTimer_0
# compute_optimized_fn()
class _MockTimer_1(self._MockTimer):
_function_costs = tuple(
(f"fn({i}, {j})", costs[1][0] + costs[1][1] * i * j)
for i, j in sizes
)
class MockTimer_1(benchmark_utils.Timer):
_timer_cls = _MockTimer_1
# special_case_fn()
class _MockTimer_2(self._MockTimer):
_function_costs = tuple(
(f"fn({i}, {j})", costs[2][0] + costs[2][1] * i * j)
for i, j in sizes if i == j
)
class MockTimer_2(benchmark_utils.Timer):
_timer_cls = _MockTimer_2
results = []
for i, j in sizes:
results.append(
MockTimer_0(
f"fn({i}, {j})",
label="fn",
description=f"({i}, {j})",
sub_label="overhead_optimized",
).blocked_autorange(min_run_time=10)
)
results.append(
MockTimer_1(
f"fn({i}, {j})",
label="fn",
description=f"({i}, {j})",
sub_label="compute_optimized",
).blocked_autorange(min_run_time=10)
)
if i == j:
results.append(
MockTimer_2(
f"fn({i}, {j})",
label="fn",
description=f"({i}, {j})",
sub_label="special_case (square)",
).blocked_autorange(min_run_time=10)
)
def rstrip_lines(s: str) -> str:
# VSCode will rstrip the `expected` string literal whether you like
# it or not. So we have to rstrip the compare table as well.
return "\n".join([i.rstrip() for i in s.splitlines(keepends=False)])
compare = benchmark_utils.Compare(results)
self.regularizeAndAssertExpectedInline(
rstrip_lines(str(compare).strip()),
"""\
[------------------------------------------------- fn ------------------------------------------------]
| (16, 16) | (16, 128) | (128, 128) | (4096, 1024) | (2048, 2048)
1 threads: --------------------------------------------------------------------------------------------
overhead_optimized | 1.3 | 3.0 | 17.4 | 4174.4 | 4174.4
compute_optimized | 3.1 | 4.0 | 11.2 | 2099.3 | 2099.3
special_case (square) | 1.1 | | 7.5 | | 1674.7
Times are in microseconds (us)."""
)
compare.trim_significant_figures()
self.regularizeAndAssertExpectedInline(
rstrip_lines(str(compare).strip()),
"""\
[------------------------------------------------- fn ------------------------------------------------]
| (16, 16) | (16, 128) | (128, 128) | (4096, 1024) | (2048, 2048)
1 threads: --------------------------------------------------------------------------------------------
overhead_optimized | 1 | 3.0 | 17 | 4200 | 4200
compute_optimized | 3 | 4.0 | 11 | 2100 | 2100
special_case (square) | 1 | | 8 | | 1700
Times are in microseconds (us)."""
)
compare.colorize()
columnwise_colored_actual = rstrip_lines(str(compare).strip())
columnwise_colored_expected = textwrap.dedent(
"""\
[------------------------------------------------- fn ------------------------------------------------]
| (16, 16) | (16, 128) | (128, 128) | (4096, 1024) | (2048, 2048)
1 threads: --------------------------------------------------------------------------------------------
overhead_optimized | 1 | \x1b[92m\x1b[1m 3.0 \x1b[0m\x1b[0m | \x1b[2m\x1b[91m 17 \x1b[0m\x1b[0m | 4200 | \x1b[2m\x1b[91m 4200 \x1b[0m\x1b[0m
compute_optimized | \x1b[2m\x1b[91m 3 \x1b[0m\x1b[0m | 4.0 | 11 | \x1b[92m\x1b[1m 2100 \x1b[0m\x1b[0m | 2100
special_case (square) | \x1b[92m\x1b[1m 1 \x1b[0m\x1b[0m | | \x1b[92m\x1b[1m 8 \x1b[0m\x1b[0m | | \x1b[92m\x1b[1m 1700 \x1b[0m\x1b[0m
Times are in microseconds (us).""" # noqa: B950
)
compare.colorize(rowwise=True)
rowwise_colored_actual = rstrip_lines(str(compare).strip())
rowwise_colored_expected = textwrap.dedent(
"""\
[------------------------------------------------- fn ------------------------------------------------]
| (16, 16) | (16, 128) | (128, 128) | (4096, 1024) | (2048, 2048)
1 threads: --------------------------------------------------------------------------------------------
overhead_optimized | \x1b[92m\x1b[1m 1 \x1b[0m\x1b[0m | \x1b[2m\x1b[91m 3.0 \x1b[0m\x1b[0m | \x1b[31m\x1b[1m 17 \x1b[0m\x1b[0m | \x1b[31m\x1b[1m 4200 \x1b[0m\x1b[0m | \x1b[31m\x1b[1m 4200 \x1b[0m\x1b[0m
compute_optimized | \x1b[92m\x1b[1m 3 \x1b[0m\x1b[0m | 4.0 | \x1b[2m\x1b[91m 11 \x1b[0m\x1b[0m | \x1b[31m\x1b[1m 2100 \x1b[0m\x1b[0m | \x1b[31m\x1b[1m 2100 \x1b[0m\x1b[0m
special_case (square) | \x1b[92m\x1b[1m 1 \x1b[0m\x1b[0m | | \x1b[31m\x1b[1m 8 \x1b[0m\x1b[0m | | \x1b[31m\x1b[1m 1700 \x1b[0m\x1b[0m
Times are in microseconds (us).""" # noqa: B950
)
def print_new_expected(s: str) -> None:
print(f'{"":>12}"""\\', end="")
for l in s.splitlines(keepends=False):
print("\n" + textwrap.indent(repr(l)[1:-1], " " * 12), end="")
print('"""\n')
if expecttest.ACCEPT:
# expecttest does not currently support non-printable characters,
# so these two entries have to be updated manually.
if columnwise_colored_actual != columnwise_colored_expected:
print("New columnwise coloring:\n")
print_new_expected(columnwise_colored_actual)
if rowwise_colored_actual != rowwise_colored_expected:
print("New rowwise coloring:\n")
print_new_expected(rowwise_colored_actual)
self.assertEqual(columnwise_colored_actual, columnwise_colored_expected)
self.assertEqual(rowwise_colored_actual, rowwise_colored_expected)
@unittest.skipIf(IS_WINDOWS and os.getenv("VC_YEAR") == "2019", "Random seed only accepts int32")
def test_fuzzer(self):
fuzzer = benchmark_utils.Fuzzer(
parameters=[
benchmark_utils.FuzzedParameter(
"n", minval=1, maxval=16, distribution="loguniform")],
tensors=[benchmark_utils.FuzzedTensor("x", size=("n",))],
seed=0,
)
expected_results = [
(0.7821, 0.0536, 0.9888, 0.1949, 0.5242, 0.1987, 0.5094),
(0.7166, 0.5961, 0.8303, 0.005),
]
for i, (tensors, _, _) in enumerate(fuzzer.take(2)):
x = tensors["x"]
self.assertEqual(
x, torch.tensor(expected_results[i]), rtol=1e-3, atol=1e-3)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/benchmark_utils/test_benchmark_utils.py
|
# Owner(s): ["module: unknown"]
import os
import tempfile
import torch
from backend import Model, to_custom_backend, get_custom_backend_library_path
from torch.testing._internal.common_utils import TestCase, run_tests
class TestCustomBackend(TestCase):
def setUp(self):
# Load the library containing the custom backend.
self.library_path = get_custom_backend_library_path()
torch.ops.load_library(self.library_path)
# Create an instance of the test Module and lower it for
# the custom backend.
self.model = to_custom_backend(torch.jit.script(Model()))
def test_execute(self):
"""
Test execution using the custom backend.
"""
a = torch.randn(4)
b = torch.randn(4)
# The custom backend is hardcoded to compute f(a, b) = (a + b, a - b).
expected = (a + b, a - b)
out = self.model(a, b)
self.assertTrue(expected[0].allclose(out[0]))
self.assertTrue(expected[1].allclose(out[1]))
def test_save_load(self):
"""
Test that a lowered module can be executed correctly
after saving and loading.
"""
# Test execution before saving and loading to make sure
# the lowered module works in the first place.
self.test_execute()
# Save and load.
f = tempfile.NamedTemporaryFile(delete=False)
try:
f.close()
torch.jit.save(self.model, f.name)
loaded = torch.jit.load(f.name)
finally:
os.unlink(f.name)
self.model = loaded
# Test execution again.
self.test_execute()
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/custom_backend/test_custom_backend.py
|
import argparse
import os.path
import sys
import torch
def get_custom_backend_library_path():
"""
Get the path to the library containing the custom backend.
    Returns:
The path to the custom backend object, customized by platform.
"""
if sys.platform.startswith("win32"):
library_filename = "custom_backend.dll"
elif sys.platform.startswith("darwin"):
library_filename = "libcustom_backend.dylib"
else:
library_filename = "libcustom_backend.so"
path = os.path.abspath("build/{}".format(library_filename))
assert os.path.exists(path), path
return path
def to_custom_backend(module):
"""
    This is a helper that wraps torch._C._jit_to_backend and compiles
only the forward method with an empty compile spec.
Args:
module: input ScriptModule.
Returns:
        The module, lowered so that it can run on the custom backend.
"""
lowered_module = torch._C._jit_to_backend("custom_backend", module, {"forward": {"": ""}})
return lowered_module
class Model(torch.nn.Module):
"""
Simple model used for testing that to_backend API supports saving, loading,
and executing in C++.
"""
def __init__(self):
super(Model, self).__init__()
def forward(self, a, b):
return (a + b, a - b)
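# Illustrative sketch (added for clarity; not part of the original file, and the
# helper name below is hypothetical). It mirrors what test_execute and main() do,
# and assumes the custom backend shared library has already been loaded via
# torch.ops.load_library(get_custom_backend_library_path()).
def _example_lower_and_run():
    lowered = to_custom_backend(torch.jit.script(Model()))
    a, b = torch.randn(4), torch.randn(4)
    # The custom backend computes f(a, b) = (a + b, a - b).
    return lowered(a, b)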
def main():
parser = argparse.ArgumentParser(
description="Lower a Module to a custom backend"
)
parser.add_argument("--export-module-to", required=True)
options = parser.parse_args()
# Load the library containing the custom backend.
library_path = get_custom_backend_library_path()
torch.ops.load_library(library_path)
assert library_path in torch.ops.loaded_libraries
# Lower an instance of Model to the custom backend and export it
# to the specified location.
lowered_module = to_custom_backend(torch.jit.script(Model()))
torch.jit.save(lowered_module, options.export_module_to)
if __name__ == "__main__":
main()
|
pytorch-master
|
test/custom_backend/backend.py
|
pytorch-master
|
test/quantization/__init__.py
|
|
# Owner(s): ["oncall: quantization"]
import torch
import math
from typing import Tuple
from torch.ao.quantization import (
FakeQuantize,
MovingAverageMinMaxObserver,
default_observer,
default_fixed_qparams_range_0to1_fake_quant,
)
from torch.ao.quantization._learnable_fake_quantize import _LearnableFakeQuantize
from torch.testing._internal.common_quantized import (
_fake_quantize_per_channel_affine_reference,
_fake_quantize_per_channel_affine_grad_reference,
to_tensor,
)
import torch.nn as nn
# Standard library
import io
import itertools
import unittest
import numpy as np
# Testing utils
from hypothesis import given, settings
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import TestCase
# Reference method for fake quantize
# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64
def _fake_quantize_per_tensor_affine_reference(X, scale, zero_point, quant_min, quant_max):
dtype = X.dtype
res = ((torch.clamp(torch.round(X.to(torch.float32) * (1.0 / scale) + zero_point), quant_min, quant_max) - zero_point) * scale)
return res.to(dtype)
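# Illustrative sketch (added; the helper name is hypothetical and it is never
# called by the tests): a hand-checked example of the reference above with
# scale=0.1, zero_point=0, quant_min=0, quant_max=255.
def _example_fake_quant_reference():
    x = torch.tensor([0.04, 0.26, 30.0])
    y = _fake_quantize_per_tensor_affine_reference(x, 0.1, 0, 0, 255)
    # y == tensor([0.0000, 0.3000, 25.5000]); 30.0 saturates at quant_max * scale.
    return y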
# Reference method for the gradient of the fake quantize operator
# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64
def _fake_quantize_per_tensor_affine_grad_reference(dY, X, scale, zero_point, quant_min, quant_max):
dtype = X.dtype
Xq = torch.round(X.to(torch.float32) * (1.0 / scale) + zero_point)
mask = (Xq >= quant_min) * (Xq <= quant_max)
res = torch.zeros_like(dY)
res[mask] = dY[mask]
return res.to(dtype)
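# Illustrative sketch (added; helper name is hypothetical): the straight-through
# estimator above only passes gradients where the quantized value lands inside
# [quant_min, quant_max].
def _example_fake_quant_grad_reference():
    x = torch.tensor([0.05, 50.0])   # the second element saturates at quant_max
    dy = torch.ones_like(x)
    dx = _fake_quantize_per_tensor_affine_grad_reference(dy, x, 0.1, 0, 0, 255)
    # dx == tensor([1., 0.]): the gradient is blocked for the saturated element.
    return dx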
# Reference method for the gradients of the fake quantize operator
def _fake_quantize_learnable_per_tensor_affine_grad_reference(dY, X, scale, zero_point, quant_min, quant_max, device):
r"""This method references the following literatures for back propagation on scale and zero point.
- https://arxiv.org/pdf/1902.08153.pdf
- https://arxiv.org/pdf/1903.08066.pdf
"""
zero_point_rounded = int((zero_point + 0.5).clamp(quant_min, quant_max).item())
Xq = torch.round(X * (1.0 / scale) + zero_point_rounded)
indicate_small_scale = (Xq < quant_min).float().to(device)
indicate_big_scale = (Xq > quant_max).float().to(device)
indicate_middle_scale = torch.ones(indicate_small_scale.shape).to(device) - \
indicate_small_scale - indicate_big_scale
indicate_saturate_zp = ((Xq < quant_min).float() + (Xq > quant_max).float()).to(device)
indicate_unsaturate_zp = torch.ones(indicate_saturate_zp.shape).to(device) - indicate_saturate_zp
Xq = Xq.clamp(quant_min, quant_max)
Xfq = (Xq - zero_point_rounded) * scale
grad_small_scale = quant_min - zero_point_rounded
grad_big_scale = quant_max - zero_point_rounded
grad_middle_scale = ((Xfq - X) / scale).to(device)
grad_saturate_zp = -scale.to(device)
grad_unsaturate_zp = 0
grad_scale = indicate_small_scale * grad_small_scale + \
indicate_big_scale * grad_big_scale + \
indicate_middle_scale * grad_middle_scale
grad_zp = indicate_saturate_zp * grad_saturate_zp + \
indicate_unsaturate_zp * grad_unsaturate_zp
grad_X = _fake_quantize_per_tensor_affine_grad_reference(
dY, X, scale, zero_point, quant_min, quant_max).to(device)
grad_scale = (grad_scale * dY).sum().unsqueeze(dim=0)
grad_zp = (grad_zp * dY).sum().unsqueeze(dim=0)
return grad_X, grad_scale, grad_zp
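# Illustrative sketch (added; helper name is hypothetical): the learnable
# reference above returns three gradients, one per input element plus
# single-element tensors for scale and zero point.
def _example_learnable_grad_shapes():
    x = torch.randn(4)
    dy = torch.ones_like(x)
    scale = torch.tensor([0.1])
    zero_point = torch.tensor([0.0])
    grad_x, grad_scale, grad_zp = _fake_quantize_learnable_per_tensor_affine_grad_reference(
        dy, x, scale, zero_point, 0, 255, device="cpu")
    # grad_x.shape == torch.Size([4]); grad_scale.shape == grad_zp.shape == torch.Size([1])
    return grad_x, grad_scale, grad_zp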
# Reference method for quantization.
def _quantize_per_tensor(x, scale, zero_point, quant_min, quant_max):
return ((x / scale) + zero_point).round().clamp(quant_min, quant_max)
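# Illustrative sketch (added; helper name is hypothetical): fake quantization is
# quantize-then-dequantize, so the quantize helper above and the fake-quant
# reference near the top of this file agree once the grid value is rescaled.
def _example_quantize_then_dequantize():
    x = torch.tensor([0.26])
    q = _quantize_per_tensor(x, 0.1, 0, 0, 255)          # tensor([3.])
    dequantized = (q - 0) * 0.1                          # tensor([0.3000])
    fake_quantized = _fake_quantize_per_tensor_affine_reference(x, 0.1, 0, 0, 255)
    # dequantized == fake_quantized (both approximately 0.3)
    return dequantized, fake_quantized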
# Reference method for the per channel gradients of the learnable fake quantize operator
def _fake_quantize_learnable_per_channel_affine_grad_reference(
dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max, device):
r"""This method references the following literatures for back propagation on scale and zero point.
- https://arxiv.org/pdf/1902.08153.pdf
- https://arxiv.org/pdf/1903.08066.pdf
"""
per_channel_zero_point = ((per_channel_zero_point.detach() + 0.5).clamp(quant_min, quant_max)).type(torch.int32)
grad_X = _fake_quantize_per_channel_affine_grad_reference(
dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max).to(device)
per_channel_scale = per_channel_scale.detach().type(torch.float)
grad_scale = torch.zeros([per_channel_scale.size(0)]).to(device)
grad_zero_point = torch.zeros([per_channel_zero_point.size(0)]).to(device)
X_flattened = torch.unbind(X, dim=axis)
dY_flattened = torch.unbind(dY, dim=axis)
for i, X_i in enumerate(torch.unbind(X, dim=axis), 0):
scale_i = per_channel_scale[i]
zero_point_i = per_channel_zero_point[i]
X_i = X_flattened[i]
dY_i = dY_flattened[i]
Xq_i = ((X_i / scale_i) + zero_point_i).round()
Xfq_i = (Xq_i - zero_point_i) * scale_i
indicate_small_scale_i = (Xq_i < quant_min).float().to(device)
indicate_big_scale_i = (Xq_i > quant_max).float().to(device)
indicate_middle_scale_i = torch.ones(indicate_small_scale_i.shape).to(device) - \
indicate_small_scale_i - indicate_big_scale_i
indicate_saturate_zp_i = ((Xq_i < quant_min).float() +
(Xq_i > quant_max).float()).to(device)
indicate_unsaturate_zp_i = torch.ones(indicate_saturate_zp_i.shape).to(device) - \
indicate_saturate_zp_i
Xq_i = Xq_i.clamp(quant_min, quant_max)
Xfq_i = (Xq_i - zero_point_i) * scale_i
grad_small_scale_i = quant_min - zero_point_i
grad_big_scale_i = quant_max - zero_point_i
grad_middle_scale_i = ((Xfq_i - X_i) / scale_i).to(device)
grad_saturate_zp_i = -scale_i.to(device)
grad_unsaturate_zp_i = 0
grad_scale_i = indicate_small_scale_i * grad_small_scale_i + \
indicate_middle_scale_i * grad_middle_scale_i + \
indicate_big_scale_i * grad_big_scale_i
grad_zp_i = indicate_saturate_zp_i * grad_saturate_zp_i + \
indicate_unsaturate_zp_i * grad_unsaturate_zp_i
grad_scale_i = (grad_scale_i * dY_i).sum().unsqueeze(dim=0)
grad_zp_i = (grad_zp_i * dY_i).sum().unsqueeze(dim=0)
grad_scale[i] = grad_scale_i
grad_zero_point[i] = grad_zp_i
return grad_X, grad_scale, grad_zero_point
def _get_tensor_min_max(
X: torch.Tensor,
running_min: float = float("inf"),
running_max: float = float("-inf"),
averaging_const: float = 0.01) -> Tuple[float, float]:
min_val = X.min().to(dtype=torch.float32).item()
max_val = X.max().to(dtype=torch.float32).item()
if not math.isinf(running_min):
min_val = running_min + averaging_const * (min_val - running_min)
if not math.isinf(running_max):
max_val = running_max + averaging_const * (max_val - running_max)
return min_val, max_val
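# Illustrative sketch (added; helper name is hypothetical): one exponential
# moving-average update of the running min/max, with averaging_const=0.5 so the
# arithmetic is easy to follow.
def _example_running_min_max():
    mn, mx = _get_tensor_min_max(torch.tensor([-2.0, 3.0]))
    # First call: running stats are +/-inf, so mn == -2.0 and mx == 3.0.
    mn, mx = _get_tensor_min_max(
        torch.tensor([-4.0, 5.0]), running_min=mn, running_max=mx, averaging_const=0.5)
    # Second call moves halfway toward the new extrema: mn == -3.0, mx == 4.0.
    return mn, mx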
def _get_per_row_min_max(
x: torch.Tensor,
min_vals: torch.Tensor,
max_vals: torch.Tensor,
axis: int = 0,
averaging_const: float = 0.01) -> Tuple[torch.Tensor, torch.Tensor]:
x_dim = x.size()
new_axis_list = [i for i in range(len(x_dim))] # noqa: C416
new_axis_list[axis] = 0
new_axis_list[0] = axis
y = x.permute(*new_axis_list)
y = torch.flatten(y, start_dim=1)
# min_vals, max_vals = torch.aminmax(y, dim=1)
if math.isinf(min_vals[0]) or math.isinf(max_vals[0]):
min_vals, max_vals = torch.aminmax(y, dim=1)
else:
min_vals_cur, max_vals_cur = torch.aminmax(y, dim=1)
min_vals = min_vals + averaging_const * (min_vals_cur - min_vals)
max_vals = max_vals + averaging_const * (max_vals_cur - max_vals)
return min_vals, max_vals
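# Illustrative sketch (added; helper name is hypothetical): per-row statistics
# with the helper above; the first call seeds the stats because the running
# values are still infinite.
def _example_per_row_min_max():
    x = torch.tensor([[1.0, -2.0], [3.0, 0.5]])
    inf = torch.full((2,), float("inf"))
    mins, maxs = _get_per_row_min_max(x, inf, -inf)
    # mins == tensor([-2.0, 0.5]), maxs == tensor([1.0, 3.0])
    return mins, maxs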
def _get_scale_zp(
min_val: float,
max_val: float,
dtype: torch.dtype,
reduce_range: bool = False,
preserve_sparsity: bool = False) -> Tuple[float, int]:
"""
Calculate the quantization parameters (scale, zero_point)
based on the min and max element of the tensor
"""
if dtype == torch.qint8:
if reduce_range:
qmin, qmax = -64, 63
else:
qmin, qmax = -128, 127
else:
if reduce_range:
qmin, qmax = 0, 127
else:
qmin, qmax = 0, 255
if min_val < 0 and max_val > 0 and preserve_sparsity:
symmetric_qmin = int(-((qmax - qmin) / 2 + 1))
symmetric_qmax = int((qmax - qmin) / 2)
max_scale = max(
abs(min_val / symmetric_qmin), abs(max_val / symmetric_qmax)
)
min_val = max_scale * symmetric_qmin
max_val = max_scale * symmetric_qmax
min_val = min(min_val, 0.0)
max_val = max(max_val, 0.0)
scale = (max_val - min_val) / (qmax - qmin)
if scale == 0.0 or math.isinf(1.0 / scale):
scale = 0.1
zero_point = 0
zero_point_from_min = qmin - min_val / float(scale)
zero_point_from_max = qmax - max_val / float(scale)
zero_point_from_min_error = abs(qmin) - abs(min_val / float(scale))
zero_point_from_max_error = abs(qmax) - abs(max_val / float(scale))
if zero_point_from_min_error < zero_point_from_max_error:
initial_zero_point = zero_point_from_min
else:
initial_zero_point = zero_point_from_max
if min_val < 0 and max_val > 0 and preserve_sparsity:
initial_zero_point = (qmin + qmax) / 2 + 1
nudged_zero_point = 0
if initial_zero_point < qmin:
nudged_zero_point = qmin
elif initial_zero_point > qmax:
nudged_zero_point = qmax
else:
nudged_zero_point = int(round(initial_zero_point))
return (scale, int(nudged_zero_point))
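# Illustrative sketch (added; helper name is hypothetical): quantization
# parameters for a quint8 tensor whose observed range is [-1.0, 1.0].
def _example_scale_zp():
    scale, zero_point = _get_scale_zp(-1.0, 1.0, torch.quint8)
    # scale == 2.0 / 255 (about 0.00784) and zero_point == 128, i.e. roughly
    # the middle of the [0, 255] quantized range.
    return scale, zero_point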
NP_RANDOM_SEED = 19
tolerance = 1e-6
class TestFakeQuantizeOps(TestCase):
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_forward_per_tensor(self, device, X):
r"""Tests the forward path of the FakeQuantizePerTensorAffine op.
"""
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
Y = _fake_quantize_per_tensor_affine_reference(X.cpu(), scale, zero_point, quant_min, quant_max)
Y_prime = torch.fake_quantize_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max)
np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
@unittest.skip("temporarily disable the test")
def test_backward_per_tensor(self, device, X):
r"""Tests the backward method.
"""
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
X.requires_grad_()
Y = _fake_quantize_per_tensor_affine_reference(X.cpu(), scale, zero_point, quant_min, quant_max)
Y_prime = torch.fake_quantize_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max)
dout = torch.rand_like(X, dtype=torch.float).to(device)
dX = _fake_quantize_per_tensor_affine_grad_reference(
dout, X, scale, zero_point, quant_min, quant_max)
Y_prime.backward(dout)
np.testing.assert_allclose(dX.cpu(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
def test_forward_backward_per_tensor_with_amp(self):
net = nn.Sequential(nn.Conv2d(1, 1, 3))
net.qconfig = torch.ao.quantization.get_default_qat_qconfig('fbgemm')
net_prep = torch.ao.quantization.prepare_qat(net)
with torch.cuda.amp.autocast():
x = torch.randn(4, 1, 5, 5)
out = net_prep(x).sum()
out.backward()
self.assertTrue(net_prep[0].weight.grad is not None)
def test_forward_per_tensor_half_precision_numerics(self):
scale = .1
zero = 0
maxi = 255
mini = 0
for i in range(20):
X1 = torch.randn(5, 5).to(torch.float16)
Y1 = torch.fake_quantize_per_tensor_affine(X1, scale, zero, mini, maxi)
Y1r = _fake_quantize_per_tensor_affine_reference(X1, scale, zero, mini, maxi)
self.assertEqual(Y1, Y1r, rtol=tolerance, atol=tolerance)
# to force overflow
X2 = torch.tensor(2**15 + .01).to(torch.float16)
Y2 = torch.fake_quantize_per_tensor_affine(X2, scale, zero, mini, maxi)
Y2r = _fake_quantize_per_tensor_affine_reference(X2, scale, zero, mini, maxi)
self.assertEqual(Y2, Y2r, rtol=tolerance, atol=tolerance)
scale = 10
# to force underflow
X3 = torch.tensor(2**-24).to(torch.float16)
Y3 = torch.fake_quantize_per_tensor_affine(X3, scale, zero, mini, maxi)
Y3r = _fake_quantize_per_tensor_affine_reference(X3, scale, zero, mini, maxi)
self.assertEqual(Y3, Y3r, rtol=tolerance, atol=tolerance)
def _test_forward_per_tensor_cachemask_impl(self, device):
float_types = (torch.float32, torch.float16, torch.float64)
torch_types = (torch.qint8, torch.quint8)
Xs = (torch.randn(4, 8, device=device), torch.randn(4, 16, device=device)[:, ::2])
tensor_qparam = (True, False)
for float_type, torch_type, X, tensor_qparams in itertools.product(float_types, torch_types, Xs, tensor_qparam):
# pick the scale + zp so that some values get clipped
X = X.to(float_type)
obs = torch.ao.quantization.MinMaxObserver(torch_type)
obs.to(device)
obs(X * 0.75)
scale, zero_point = obs.calculate_qparams()
quant_min, quant_max = obs.quant_min, obs.quant_max
if not tensor_qparam:
scale, zero_point = float(scale), int(zero_point)
Y_test = torch.fake_quantize_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max)
Y_ref = _fake_quantize_per_tensor_affine_reference(
X, scale, zero_point, quant_min, quant_max).to(device)
self.assertEqual(Y_test, Y_ref, rtol=tolerance, atol=tolerance)
self.assertTrue(Y_test.dtype == float_type)
def test_forward_per_tensor_cachemask_cpu(self):
device = torch.device('cpu')
self._test_forward_per_tensor_cachemask_impl(device)
    @unittest.skipIf(not TEST_CUDA, "GPU is not available.")
def test_forward_per_tensor_cachemask_cuda(self):
device = torch.device('cuda')
self._test_forward_per_tensor_cachemask_impl(device)
def _test_backward_per_tensor_cachemask_impl(self, device):
float_types = (torch.float32, torch.float16, torch.float64)
torch_types = (torch.qint8, torch.quint8)
tensor_qparam = (True, False)
for float_type, torch_type, tensor_qparam in itertools.product(float_types, torch_types, tensor_qparam):
X = torch.randn(4, 8).to(device).to(float_type)
X.requires_grad_()
# pick the scale + zp so that some values get clipped
obs = torch.ao.quantization.MinMaxObserver(torch_type)
obs.to(device)
obs(X * 0.75)
scale, zero_point = obs.calculate_qparams()
if not tensor_qparam:
scale, zero_point = float(scale), int(zero_point)
quant_min, quant_max = obs.quant_min, obs.quant_max
# forward pass
Y_test = torch.fake_quantize_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max)
Y_ref = _fake_quantize_per_tensor_affine_reference(
X, scale, zero_point, quant_min, quant_max).to(device)
self.assertEqual(Y_test, Y_ref, rtol=tolerance, atol=tolerance)
# backward pass
dout = torch.rand_like(X, dtype=torch.float).to(device)
dX = _fake_quantize_per_tensor_affine_grad_reference(
dout, X, scale, zero_point, quant_min, quant_max)
Y_test.backward(dout)
self.assertEqual(dX, X.grad)
self.assertTrue(X.grad.dtype == float_type)
def test_backward_per_tensor_cachemask_cpu(self):
device = torch.device('cpu')
self._test_backward_per_tensor_cachemask_impl(device)
    @unittest.skipIf(not TEST_CUDA, "GPU is not available.")
def test_backward_per_tensor_cachemask_cuda(self):
device = torch.device('cuda')
self._test_backward_per_tensor_cachemask_impl(device)
def _test_learnable_forward_per_tensor(self, X, device, scale_base, zero_point_base):
X_base = torch.tensor(X).to(device)
for n_bits in (4, 8):
quant_min, quant_max = 0, 2 ** n_bits - 1
X = X_base.clone().float()
scale_base = scale_base.to(device).float()
zero_point_base = zero_point_base.to(dtype=torch.int32, device=device)
scale = scale_base.clone()
zero_point = zero_point_base.clamp(quant_min, quant_max)
Y = _fake_quantize_per_tensor_affine_reference(
X, scale, zero_point, quant_min, quant_max).to(device)
for grad_factor in [0.1, 1.0, 10.0]:
Y_prime = torch._fake_quantize_learnable_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max, grad_factor).to(device)
self.assertTrue(
torch.allclose(Y, Y_prime, rtol=tolerance, atol=tolerance),
"Expected kernel forward function to have results match the reference forward function")
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(dtypes=torch.quint8)))
@unittest.skip(
"this is broken without changes to any relevant code, "
"we need to remove hypothesis testing in CI")
def test_learnable_forward_per_tensor_cpu(self, X):
X, (_, _, _) = X
scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(1,))
self._test_learnable_forward_per_tensor(
X, 'cpu', scale_base, zero_point_base)
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(dtypes=torch.quint8)))
    @unittest.skipIf(not TEST_CUDA, "GPU is not available.")
def test_learnable_forward_per_tensor_cuda(self, X):
X, (_, _, _) = X
scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(1,))
self._test_learnable_forward_per_tensor(
X, 'cuda', scale_base, zero_point_base)
def _test_learnable_backward_per_tensor(self, X, device, scale_base, zero_point_base):
r"""Tests the backward method with additional backprop support for scale and zero point.
"""
X_base = torch.tensor(X).to(device)
for n_bits in (4, 8):
quant_min, quant_max = 0, 2 ** n_bits - 1
X = X_base.clone().float().to(device)
X.requires_grad_()
scale_base = scale_base.to(device)
zero_point_base = zero_point_base.to(device)
scale = scale_base.clone()
scale.requires_grad_()
zero_point = zero_point_base.clone().clamp(quant_min, quant_max)
zero_point.requires_grad_()
for grad_factor in [0.1, 1.0, 10.0]:
Y_prime = torch._fake_quantize_learnable_per_tensor_affine(
X, scale, zero_point, quant_min, quant_max, grad_factor).to(device)
dout = torch.rand_like(X, dtype=torch.float).to(device)
dX, dScale, dZeroPoint = _fake_quantize_learnable_per_tensor_affine_grad_reference(
dout, X, scale, zero_point, quant_min, quant_max, device)
Y_prime.backward(dout)
expected_dX = dX.to(device).detach()
actual_dX = X.grad.to(device).detach()
expected_dScale = dScale.to(device).detach()
actual_dScale = scale.grad.to(device).detach()
expected_dZeroPoint = dZeroPoint.to(device).detach()
actual_dZeroPoint = zero_point.grad.to(device).detach()
self.assertTrue(
torch.allclose(
expected_dX, actual_dX, rtol=tolerance, atol=tolerance),
"Expected dX to match X.grad")
self.assertTrue(
torch.allclose(
expected_dScale * grad_factor, actual_dScale, rtol=tolerance, atol=tolerance),
"Expected dScale to match scale.grad")
self.assertTrue(
torch.allclose(
expected_dZeroPoint * grad_factor, actual_dZeroPoint, rtol=tolerance, atol=tolerance),
"Expected dZeroPoint to match zero_point.grad")
X.grad.data.zero_()
scale.grad.data.zero_()
zero_point.grad.data.zero_()
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_learnable_backward_per_tensor_cpu(self, X):
torch.random.manual_seed(NP_RANDOM_SEED)
X, (_, _, _) = X
scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(1,))
self._test_learnable_backward_per_tensor(
X, 'cpu', scale_base, zero_point_base)
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5,),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(dtypes=torch.quint8)))
    @unittest.skipIf(not TEST_CUDA, "GPU is not available.")
def test_learnable_backward_per_tensor_cuda(self, X):
torch.random.manual_seed(NP_RANDOM_SEED)
X, (_, _, _) = X
scale_base = torch.normal(mean=0, std=1, size=(1,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(1,))
self._test_learnable_backward_per_tensor(
X, 'cuda', scale_base, zero_point_base)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=[torch.quint8])),
)
def test_fq_module_per_tensor(self, device, X):
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
X.requires_grad_()
fq_module = torch.ao.quantization.default_fake_quant().to(device)
Y_prime = fq_module(X)
assert fq_module.scale is not None
assert fq_module.zero_point is not None
Y = _fake_quantize_per_tensor_affine_reference(X, fq_module.scale, fq_module.zero_point, quant_min, quant_max)
np.testing.assert_allclose(Y.cpu().detach().numpy(), Y_prime.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
# Test backward
dout = torch.rand_like(X, dtype=torch.float, device=device)
Y_prime.backward(dout)
dX = _fake_quantize_per_tensor_affine_grad_reference(dout, X, fq_module.scale, fq_module.zero_point, quant_min, quant_max)
np.testing.assert_allclose(dX.cpu().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_fixed_qparams_fq_module(self, device, X):
X, (scale, zero_point, torch_type) = X
X = to_tensor(X, device)
fq_module = default_fixed_qparams_range_0to1_fake_quant()
fq_module.to(device)
fixed_scale = fq_module.scale.clone()
fixed_zero_point = fq_module.zero_point.clone()
        # run the fq module and make sure the quantization parameters do not change
torch.ao.quantization.enable_observer(fq_module)
fq_module(X)
self.assertEqual(fixed_scale, fq_module.scale)
self.assertEqual(fixed_zero_point, fq_module.zero_point)
def test_fq_serializable_per_tensor(self):
observer = default_observer
quant_min = 0
quant_max = 127
for FakeQuantizeClass in [FakeQuantize, _LearnableFakeQuantize]:
fq_module = FakeQuantizeClass(observer, quant_min, quant_max)
X = torch.tensor([-5, -3.5, -2, 0, 3, 5, 7], dtype=torch.float32)
y_ref = fq_module(X)
state_dict = fq_module.state_dict()
self.assertEqual(state_dict['scale'], 0.094488)
self.assertEqual(state_dict['zero_point'], 53)
b = io.BytesIO()
torch.save(state_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
loaded_fq_module = FakeQuantizeClass(observer, quant_min, quant_max)
loaded_fq_module.load_state_dict(loaded_dict)
for key in state_dict:
self.assertEqual(state_dict[key], loaded_fq_module.state_dict()[key])
self.assertEqual(loaded_fq_module.calculate_qparams(), fq_module.calculate_qparams())
def test_fake_quant_control(self):
for fq_module in [torch.ao.quantization.default_fake_quant(),
_LearnableFakeQuantize.with_args(observer=MovingAverageMinMaxObserver, quant_min=0,
quant_max=255,
dtype=torch.quint8, qscheme=torch.per_tensor_affine,
reduce_range=True)()]:
torch.manual_seed(42)
X = torch.rand(20, 10, dtype=torch.float32)
# Output of fake quant is not identical to input
Y = fq_module(X)
self.assertNotEqual(Y, X)
if type(fq_module) == _LearnableFakeQuantize:
fq_module.toggle_fake_quant(False)
else:
torch.ao.quantization.disable_fake_quant(fq_module)
X = torch.rand(20, 10, dtype=torch.float32)
Y = fq_module(X)
            # Fake quant is disabled, output is identical to input
self.assertEqual(Y, X)
# Explicit copy at this point in time, because FakeQuant keeps internal
# state in mutable buffers.
scale = fq_module.scale.clone().detach()
zero_point = fq_module.zero_point.clone().detach()
if type(fq_module) == _LearnableFakeQuantize:
fq_module.toggle_observer_update(False)
fq_module.toggle_fake_quant(True)
else:
torch.ao.quantization.disable_observer(fq_module)
torch.ao.quantization.enable_fake_quant(fq_module)
X = 10.0 * torch.rand(20, 10, dtype=torch.float32) - 5.0
Y = fq_module(X)
self.assertNotEqual(Y, X)
# Observer is disabled, scale and zero-point do not change
self.assertEqual(fq_module.scale, scale)
self.assertEqual(fq_module.zero_point, zero_point)
if type(fq_module) == _LearnableFakeQuantize:
fq_module.toggle_observer_update(True)
else:
torch.ao.quantization.enable_observer(fq_module)
Y = fq_module(X)
self.assertNotEqual(Y, X)
# Observer is enabled, scale and zero-point are different
self.assertNotEqual(fq_module.scale, scale)
self.assertNotEqual(fq_module.zero_point, zero_point)
def test_fake_quant_preserves_qparam_shapes_for_activations(self):
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = nn.Linear(4, 4)
def forward(self, x):
x = self.linear(x)
return x
m = Model()
m.qconfig = torch.ao.quantization.get_default_qat_qconfig('fbgemm')
torch.ao.quantization.prepare_qat(m, inplace=True)
scale_shape_before = m.linear.activation_post_process.scale.shape
zero_point_shape_before = m.linear.activation_post_process.zero_point.shape
x = torch.rand(4, 4, 4, 4)
m(x)
scale_shape_after = m.linear.activation_post_process.scale.shape
zero_point_shape_after = m.linear.activation_post_process.zero_point.shape
self.assertEqual(
scale_shape_before, scale_shape_after,
msg="FakeQuant scale shape must stay consistent")
self.assertEqual(
zero_point_shape_before, zero_point_shape_after,
msg="FakeQuant zero_point shape must stay consistent")
    def test_fake_quant_scriptable(self):
observer = default_observer
quant_min = 0
quant_max = 255
for FakeQuantizeClass in [FakeQuantize, _LearnableFakeQuantize]:
fq_module = FakeQuantizeClass(observer, quant_min, quant_max)
scripted_module = torch.jit.script(fq_module)
X = torch.tensor([-5, -3.5, -2, 0, 3, 5, 7], dtype=torch.float32)
fq_module(X)
scripted_module(X)
self.assertEqual(fq_module.calculate_qparams(), scripted_module.calculate_qparams())
buf = io.BytesIO()
torch.jit.save(scripted_module, buf)
buf.seek(0)
loaded_module = torch.jit.load(buf)
self.assertEqual(fq_module.calculate_qparams(), loaded_module.calculate_qparams())
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_forward_per_channel(self, device, X):
r"""Tests the forward path of the FakeQuantizePerTensorAffine op.
"""
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, axis, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
scale = to_tensor(scale, device)
zero_point = torch.tensor(zero_point).to(dtype=torch.int32, device=device)
Y = _fake_quantize_per_channel_affine_reference(X.cpu(), scale.cpu(), zero_point.cpu(), axis, quant_min, quant_max)
Y_prime = torch.fake_quantize_per_channel_affine(
X, scale, zero_point, axis, quant_min, quant_max)
np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
def _test_forward_per_channel_cachemask_impl(self, device):
torch_types = (torch.qint8, torch.quint8)
float_types = (torch.float32, torch.float16, torch.float64)
zero_point_types = (torch.int, torch.float32, torch.float16)
for torch_type, float_type, zero_point_type in itertools.product(torch_types, float_types, zero_point_types):
X = torch.randn(1, 2, 4, 4, dtype=float_type).to(device)
# pick the scale + zp so that some values get clipped
axis = 1
obs = torch.ao.quantization.PerChannelMinMaxObserver(axis, torch_type).to(device)
obs(X * 0.75)
scale, zero_point = obs.calculate_qparams()
# TODO(future PR): fix the wrong dtype in obs.calculate_qparams and remove the cast
zero_point = zero_point.to(zero_point_type)
quant_min, quant_max = obs.quant_min, obs.quant_max
Y = _fake_quantize_per_channel_affine_reference(
X.cpu(), scale.cpu(), zero_point.cpu(), axis, quant_min, quant_max)
Y_prime = torch.fake_quantize_per_channel_affine(
X, scale, zero_point, axis, quant_min, quant_max)
np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
self.assertTrue(Y.dtype == float_type)
def test_forward_per_channel_cachemask_cpu(self):
self._test_forward_per_channel_cachemask_impl('cpu')
    @unittest.skipIf(not TEST_CUDA, "GPU is not available.")
def test_forward_per_channel_cachemask_cuda(self):
self._test_forward_per_channel_cachemask_impl('cuda')
def test_forward_per_channel_half_precision_numerics(self):
scale = torch.randn(5).abs()
zero = torch.randn(5).to(dtype=torch.int)
axis = 1
mini = 0
maxi = 255
for i in range(20):
X1 = torch.randn(4, 5).to(torch.float16)
Y1 = torch.fake_quantize_per_channel_affine(X1, scale, zero, axis, mini, maxi)
Y1r = _fake_quantize_per_channel_affine_reference(X1, scale, zero, axis, mini, maxi)
self.assertEqual(Y1, Y1r, rtol=tolerance, atol=tolerance)
# to force overflow
X2 = torch.randn(4, 5).to(torch.float16)
X2[0, 0] = 2**15 + .01
Y2 = torch.fake_quantize_per_channel_affine(X2, scale, zero, axis, mini, maxi)
Y2r = _fake_quantize_per_channel_affine_reference(X2, scale, zero, axis, mini, maxi)
self.assertEqual(Y2, Y2r, rtol=tolerance, atol=tolerance)
scale = torch.zeros(5) + 10
# to force underflow
X3 = torch.randn(4, 5).to(torch.float16)
X3[0, 0] = 2**-24
Y3 = torch.fake_quantize_per_channel_affine(X3, scale, zero, axis, mini, maxi)
Y3r = _fake_quantize_per_channel_affine_reference(X3, scale, zero, axis, mini, maxi)
self.assertEqual(Y3, Y3r, rtol=tolerance, atol=tolerance)
@given(X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_fake_quant_per_channel_qparam_range(self, X):
X, (scale, zero_point, axis, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
for device in ['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']:
X = to_tensor(X, device)
scale = to_tensor(scale, device)
# Ensure that zero_point < quant_min.
zero_point = torch.full(zero_point.shape, -1 - quant_min).to(dtype=torch.int32, device=device)
# For non-float zero_point, fakequant requires zero_point between quant_min and quant_max.
with self.assertRaisesRegex(RuntimeError, "`zero_point` must be between `quant_min` and `quant_max`."):
Y = torch.fake_quantize_per_channel_affine(X, scale, zero_point, axis, quant_min, quant_max)
            # For float zero_point, fake quant allows zero_point to be outside quant_min and quant_max.
for zero_point_dtype in [torch.float32, torch.float16]:
zero_point = zero_point.to(dtype=zero_point_dtype)
Y = torch.fake_quantize_per_channel_affine(X, scale, zero_point, axis, quant_min, quant_max)
Y_ref = _fake_quantize_per_channel_affine_reference(X.cpu(), scale.cpu(), zero_point.cpu(),
axis, quant_min, quant_max)
np.testing.assert_allclose(Y.cpu().numpy(), Y_ref.cpu().numpy(), rtol=tolerance, atol=tolerance)
def _test_learnable_forward_per_channel(self, X_base, device, scale_base, zero_point_base, axis):
r"""Tests the forward path of the learnable FakeQuantizePerTensorAffine op.
"""
for n_bits in (4, 8):
quant_min, quant_max = 0, 2 ** (n_bits) - 1
scale_base = scale_base.to(device)
zero_point_base = zero_point_base.to(device)
X_curr = X_base.clone()
scale_curr = scale_base.clone()
zero_point_curr = zero_point_base.clone()
Y = _fake_quantize_per_channel_affine_reference(
X_curr, scale_curr, zero_point_curr.round().clamp(quant_min, quant_max), axis, quant_min, quant_max).to(device)
for grad_factor in [0.1, 1.0, 10.0]:
Y_prime = torch._fake_quantize_learnable_per_channel_affine(
X_curr, scale_curr, zero_point_curr, axis, quant_min, quant_max, grad_factor).to(device)
self.assertTrue(
torch.allclose(Y, Y_prime, rtol=tolerance, atol=tolerance),
"Expected kernel forward function to have results match the reference forward function")
@given(X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
def test_learnable_forward_per_channel_cpu(self, X):
torch.random.manual_seed(NP_RANDOM_SEED)
X, (_, _, axis, _) = X
X_base = torch.tensor(X).to('cpu')
channel_size = X_base.size(axis)
scale_base = torch.normal(mean=0, std=1, size=(channel_size,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(channel_size,))
self._test_learnable_forward_per_channel(
X_base, 'cpu', scale_base, zero_point_base, axis)
@given(X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
    @unittest.skipIf(not TEST_CUDA, "GPU is not available.")
def test_learnable_forward_per_channel_cuda(self, X):
torch.random.manual_seed(NP_RANDOM_SEED)
X, (_, _, axis, _) = X
X_base = torch.tensor(X).to('cuda')
channel_size = X_base.size(axis)
scale_base = torch.normal(mean=0, std=1, size=(channel_size,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(channel_size,))
self._test_learnable_forward_per_channel(
X_base, 'cuda', scale_base, zero_point_base, axis)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
@unittest.skip(
"this is broken without changes to any relevant code, "
"we need to remove hypothesis testing in CI")
def test_backward_per_channel(self, device, X):
r"""Tests the backward method.
"""
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, axis, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
zero_point_types = (torch.int, torch.float, torch.float16)
for zero_point_type in zero_point_types:
X = to_tensor(X, device)
scale = to_tensor(scale, device)
zero_point = to_tensor(zero_point, device).to(dtype=zero_point_type)
X.requires_grad_()
Y_prime = torch.fake_quantize_per_channel_affine(
X, scale, zero_point, axis, quant_min, quant_max)
dout = torch.rand_like(X, dtype=torch.float).to(device)
dX = _fake_quantize_per_channel_affine_grad_reference(
dout, X, scale, zero_point, axis, quant_min, quant_max)
Y_prime.backward(dout)
np.testing.assert_allclose(dX.cpu().detach().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
def _test_backward_per_channel_cachemask_impl(self, device):
torch_types = (torch.qint8, torch.quint8)
float_types = (torch.float32, torch.float16, torch.float64)
zero_point_types = (torch.int, torch.float32, torch.float16)
for torch_type, float_type, zero_point_type in itertools.product(torch_types, float_types, zero_point_types):
X = torch.randn(1, 2, 4, 4, dtype=float_type).to(device)
# pick the scale + zp so that some values get clipped
axis = 1
obs = torch.ao.quantization.PerChannelMinMaxObserver(axis, torch_type).to(device)
obs(X * 0.75)
scale, zero_point = obs.calculate_qparams()
# TODO(future PR): fix the wrong dtype in obs.calculate_qparams and remove the cast
zero_point = zero_point.to(zero_point_type)
quant_min, quant_max = obs.quant_min, obs.quant_max
X.requires_grad_()
Y_prime = torch.fake_quantize_per_channel_affine(
X, scale, zero_point, axis, quant_min, quant_max)
dout = torch.rand_like(X, dtype=float_type).to(device)
dX = _fake_quantize_per_channel_affine_grad_reference(
dout, X, scale, zero_point, axis, quant_min, quant_max)
Y_prime.backward(dout)
np.testing.assert_allclose(
dX.cpu().detach().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
            assert X.grad.dtype == float_type
def test_backward_per_channel_cachemask_cpu(self):
self._test_backward_per_channel_cachemask_impl('cpu')
    @unittest.skipIf(not TEST_CUDA, "GPU is not available.")
def test_backward_per_channel_cachemask_cuda(self):
self._test_backward_per_channel_cachemask_impl('cuda')
def _test_learnable_backward_per_channel(self, X_base, device, scale_base, zero_point_base, axis):
r"""Tests the backward path of the learnable FakeQuantizePerTensorAffine op.
"""
for n_bits in (4, 8):
quant_min, quant_max = 0, 2 ** n_bits - 1
scale_base = scale_base.to(device)
zero_point_base = zero_point_base.to(device=device)
X_curr = X_base.clone()
X_curr.requires_grad_()
scale_curr = scale_base.clone()
scale_curr.requires_grad_()
zero_point_curr = zero_point_base.clone()
zero_point_curr.requires_grad_()
for grad_factor in [0.1, 1.0, 10.0]:
Y_prime = torch._fake_quantize_learnable_per_channel_affine(
X_curr, scale_curr, zero_point_curr, axis, quant_min, quant_max, grad_factor).to(device)
dout = torch.rand(X_curr.shape, dtype=torch.float).to(device)
dX, dScale, dZeroPoint = _fake_quantize_learnable_per_channel_affine_grad_reference(
dout, X_curr, scale_curr, zero_point_curr, axis, quant_min, quant_max, device)
Y_prime.backward(dout)
dX_expected = dX.to(device).detach()
dX_actual = X_curr.to(device).grad.detach()
dScale_expected = dScale.to(device).detach()
dScale_actual = scale_curr.to(device).grad.detach()
dZeroPoint_expected = dZeroPoint.to(device).detach()
dZeroPoint_actual = zero_point_curr.to(device).grad.detach()
tolerance = 1e-4
self.assertTrue(
torch.allclose(dX_expected, dX_actual, rtol=tolerance, atol=tolerance),
"Expected dX={} to match X.grad={}, X={}, s={}, z={}, dout={}, n_bits={}".format(
dX_expected, dX_actual, X_curr, scale_curr, zero_point_curr, dout, n_bits))
self.assertTrue(
torch.allclose(dScale_expected * grad_factor, dScale_actual, rtol=tolerance, atol=tolerance),
"Expected dScale={} to match scale.grad={}, X={}, s={}, z={}, dout={}, n_bits={}".format(
dScale_expected * grad_factor, dScale_actual,
X_curr, scale_curr, zero_point_curr, dout, n_bits))
self.assertTrue(
torch.allclose(dZeroPoint_expected * grad_factor, dZeroPoint_actual, rtol=tolerance, atol=tolerance),
"Expected dZeroPoint={} to match zero_point.grad={}, X={}, s={}, z={}, dout={}, n_bits={}".format(
dZeroPoint_expected * grad_factor, dZeroPoint_actual,
X_curr, scale_curr, zero_point_curr, dout, n_bits))
X_curr.grad.data.zero_()
scale_curr.grad.data.zero_()
zero_point_curr.grad.data.zero_()
@given(X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
@unittest.skip(
"this is broken without changes to any relevant code, "
"we need to remove hypothesis testing in CI")
def test_learnable_backward_per_channel_cpu(self, X):
torch.random.manual_seed(NP_RANDOM_SEED)
X, (_, _, axis, _) = X
X_base = torch.tensor(X).to('cpu')
channel_size = X_base.size(axis)
scale_base = torch.normal(mean=0, std=1, size=(channel_size,)).clamp(1e-4, 100)
zero_point_base = torch.normal(mean=0, std=128, size=(channel_size,))
self._test_learnable_backward_per_channel(
X_base, 'cpu', scale_base, zero_point_base, axis)
@given(X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),
qparams=hu.qparams(dtypes=torch.quint8)))
    @unittest.skipIf(not TEST_CUDA, "GPU is not available.")
def test_learnable_backward_per_channel_cuda(self, X):
torch.random.manual_seed(NP_RANDOM_SEED)
X, (scale, zero_point, axis, torch_type) = X
X_base = torch.tensor(X).to('cuda')
scale_base = to_tensor(scale, 'cuda')
zero_point_base = to_tensor(zero_point, 'cuda')
self._test_learnable_backward_per_channel(
X_base, 'cuda', scale_base, zero_point_base, axis)
def test_numerical_consistency_per_tensor(self):
self._test_numerical_consistency('per_tensor')
def test_numerical_consistency_per_channel(self):
self._test_numerical_consistency('per_channel')
def _test_numerical_consistency(self, test_type):
r"""Comparing numerical consistency between quantize/dequantize op and the fake quantize op across devices and dtypes
"""
torch.random.manual_seed(NP_RANDOM_SEED)
torch_types = [torch.qint8, torch.quint8]
float_types = [torch.float, torch.float16, torch.float64]
if test_type == "per_channel":
zero_types = [torch.int, torch.float, torch.float16]
else:
zero_types = [torch.int]
devices = [torch.device('cpu'), torch.device('cuda')] if torch.cuda.is_available() else [torch.device('cpu')]
axis = 1
for i in range(20):
for torch_type, float_type, device, zero_type in itertools.product(torch_types, float_types, devices, zero_types):
X = torch.randn(3, 3, device=device).to(float_type)
scales = (10 * torch.randn(3, device=device)).abs()
scale = scales.mean().to(float).item()
zeros = (10 * torch.randn(3, device=device)).abs().to(dtype=zero_type)
zero = zeros.max().view(1).item()
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
test_was_run = False
if test_type == "per_tensor":
test_was_run = True
Y = torch.dequantize(torch.quantize_per_tensor(X.to('cpu').to(torch.float),
scale, zero, torch_type)).to(device).to(float_type)
Y_prime = torch.fake_quantize_per_tensor_affine(X, scale, zero, quant_min, quant_max)
self.assertEqual(
Y, Y_prime, "Difference found between dequant+quant_per_tensor and fake_quantize_per_tensor")
if test_type == "per_channel":
test_was_run = True
Y = torch.dequantize(torch.quantize_per_channel(X.to('cpu').to(torch.float), scales.to(
'cpu'), zeros.to('cpu'), axis, torch_type)).to(device).to(float_type)
Y_prime = torch.fake_quantize_per_channel_affine(X, scales, zeros, axis, quant_min, quant_max)
self.assertEqual(
Y, Y_prime, "Difference found between dequant+quant_per_channel and fake_quantize_per_channel")
self.assertTrue(test_was_run)
class TestFusedObsFakeQuant(TestCase):
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
symmetric_quant=st.booleans())
@settings(deadline=None)
def test_fused_obs_fake_quant_moving_avg(self, device, symmetric_quant) -> None:
"""
Tests the case where we call the fused_obs_fake_quant op multiple times
and update the running_min and max of the activation tensors.
"""
in_running_min_ref = out_running_min_ref = float("inf")
in_running_min_op = torch.tensor(float("inf"), device=device)
in_running_max_ref = out_running_max_ref = float("-inf")
in_running_max_op = torch.tensor(float("-inf"), device=device)
avg_const = 0.01
scale = torch.tensor([1.0], device=device)
zero_point = torch.tensor([0], dtype=torch.int, device=device)
observer_on = fake_quant_on = 0
pt_op = torch.fused_moving_avg_obs_fake_quant
# enable observer after 2 iterations and fake_quant after 4 iterations
for i in range(10):
if i > 2:
observer_on = 1
if i > 4:
fake_quant_on = 1
x = torch.randn(5, 5, device=device)
out = pt_op(
x,
torch.tensor(observer_on, device=device),
torch.tensor(fake_quant_on, device=device),
in_running_min_op,
in_running_max_op,
scale,
zero_point,
avg_const,
0,
255,
0,
False,
symmetric_quant,
)
if observer_on:
(
in_running_min_ref,
in_running_max_ref,
) = _get_tensor_min_max(
x,
running_min=in_running_min_ref,
running_max=in_running_max_ref,
averaging_const=0.01,
)
if fake_quant_on:
x_scale, x_zero_point = _get_scale_zp(
in_running_min_ref,
in_running_max_ref,
torch.quint8,
preserve_sparsity=symmetric_quant,
)
x_in = _fake_quantize_per_tensor_affine_reference(
x, x_scale, x_zero_point, 0, 255
)
self.assertEqual(scale, x_scale)
self.assertEqual(zero_point, x_zero_point)
else:
x_in = x
self.assertEqual(in_running_min_ref, in_running_min_op)
self.assertEqual(in_running_max_ref, in_running_max_op)
torch.testing.assert_allclose(out, x_in)
# Test empty input works
x = torch.empty(0, 5, device=device)
out = pt_op(
x,
torch.tensor(1, device=device),
torch.tensor(1, device=device),
in_running_min_op,
in_running_max_op,
scale,
zero_point,
avg_const,
0,
255,
0,
False,
symmetric_quant,
)
output_shape = (0, 5)
self.assertEqual(out.shape, output_shape)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
symmetric_quant=st.booleans())
@settings(deadline=None)
def test_fused_obs_fake_quant_moving_avg_per_channel(self, device, symmetric_quant) -> None:
"""
Tests the case where we call the fused_obs_fake_quant op multiple times
and update the running_min and max of the activation tensors.
"""
m = 5
sizes = [[5, 5], [5, 4, 3]]
for size in sizes:
in_running_min_ref = torch.empty(m, device=device).fill_(float("inf"))
in_running_min_op = torch.empty(m, device=device).fill_(float("inf"))
in_running_max_ref = torch.empty(m, device=device).fill_(float("-inf"))
in_running_max_op = torch.empty(m, device=device).fill_(float("-inf"))
avg_const = 0.01
scale = torch.empty(m, device=device).fill_(0.1)
zero_point = torch.empty(m, dtype=torch.int, device=device).fill_(0)
observer_on = fake_quant_on = 0
pt_op = torch.fused_moving_avg_obs_fake_quant
# enable observer after 2 iterations and fake_quant after 4 iterations
for i in range(10):
if i > 2:
observer_on = 1
if i > 4:
fake_quant_on = 1
x = torch.randn(size, device=device)
out = pt_op(
x,
torch.tensor(observer_on, device=device),
torch.tensor(fake_quant_on, device=device),
in_running_min_op,
in_running_max_op,
scale,
zero_point,
avg_const,
0,
255,
0,
True, # per_channel_enabled
symmetric_quant,
)
if observer_on:
(
in_running_min_ref,
in_running_max_ref,
) = _get_per_row_min_max(x, in_running_min_ref, in_running_max_ref)
if fake_quant_on:
x_scale = torch.empty(m, device=device)
x_zero_point = torch.empty(m, dtype=torch.int, device=device)
for i in range(x_scale.numel()):
x_scale[i], x_zero_point[i] = _get_scale_zp(
in_running_min_ref[i].item(),
in_running_max_ref[i].item(),
torch.quint8,
preserve_sparsity=symmetric_quant,
)
x_in = _fake_quantize_per_channel_affine_reference(
x, x_scale, x_zero_point, 0, 0, 255
)
self.assertEqual(scale, x_scale)
self.assertEqual(zero_point, x_zero_point)
else:
x_in = x
self.assertEqual(in_running_min_ref, in_running_min_op)
self.assertEqual(in_running_max_ref, in_running_max_op)
torch.testing.assert_allclose(out, x_in)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),)
@settings(deadline=None)
def test_fused_obs_fake_quant_backward_op(self, device) -> None:
n = m = k = 10
input_shape = (m, n)
output_shape = (m, n)
x = torch.randn(input_shape, device=device, requires_grad=True)
avg_const = 0.01
scale = torch.tensor([1.0], device=device)
zero_point = torch.tensor([0], dtype=torch.int, device=device)
x_min, x_max = _get_tensor_min_max(x)
x_scale, x_zero_point = _get_scale_zp(
x_min, x_max, torch.quint8
)
x_scale = torch.tensor(x_scale, device=device)
x_zero_point = torch.tensor(x_zero_point, dtype=torch.int, device=device)
x_fake_quant = torch.fake_quantize_per_tensor_affine(
x, x_scale, x_zero_point, 0, 255
)
pt_op = torch.fused_moving_avg_obs_fake_quant
out = pt_op(
x,
torch.tensor(1, device=device),
torch.tensor(1, device=device),
torch.tensor(x_min, device=device),
torch.tensor(x_max, device=device),
scale,
zero_point,
avg_const,
0,
255,
0,
False,
)
# verify the output matches
torch.testing.assert_allclose(out, x_fake_quant)
# verify the gradient matches expectation of fake_quant op
dout = torch.rand_like(x, dtype=torch.float).to(device)
out.backward(dout)
dX = _fake_quantize_per_tensor_affine_grad_reference(
dout, x, x_scale, x_zero_point, 0, 255)
self.assertEqual(dX, x.grad)
self.assertTrue(x.grad.dtype == torch.float32)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),)
@settings(deadline=None)
def test_fused_backward_op_fake_quant_off(self, device) -> None:
n = m = 4
input_shape = (m, n)
output_shape = (m, n)
x = torch.randn(input_shape, device=device, requires_grad=True)
avg_const = 0.01
scale = torch.tensor([1.0], device=device)
zero_point = torch.tensor([0], dtype=torch.int, device=device)
x_min, x_max = _get_tensor_min_max(x)
x_scale, x_zero_point = _get_scale_zp(
x_min, x_max, torch.quint8
)
pt_op = torch.fused_moving_avg_obs_fake_quant
out = pt_op(
x,
torch.tensor(0, device=device),
torch.tensor(0, device=device),
torch.tensor(x_min, device=device),
torch.tensor(x_max, device=device),
scale,
zero_point,
avg_const,
0,
255,
0,
False,
)
# verify the output matches
torch.testing.assert_allclose(out, x)
# verify the gradient matches expectation of fake_quant op
dout = torch.rand_like(x, dtype=torch.float).to(device)
out.backward(dout)
dX = _fake_quantize_per_tensor_affine_grad_reference(
dout, x, x_scale, x_zero_point, 0, 255)
self.assertEqual(dX, x.grad)
self.assertTrue(x.grad.dtype == torch.float32)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_quantization.py TESTNAME\n\n"
"instead.")
|
pytorch-master
|
test/quantization/core/test_workflow_ops.py
|
# Owner(s): ["oncall: quantization"]
import torch
import torch.nn.intrinsic as nni
import torch.nn.qat as nnqat
import torch.nn.quantized._reference as nnqr
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.ao.quantization.backend_config import (
BackendConfig,
BackendPatternConfig,
DTypeConfig,
ObservationType,
)
from torch.ao.quantization.fake_quantize import FixedQParamsFakeQuantize
from torch.ao.quantization.fuser_method_mappings import reverse_sequential_wrapper2
from torch.ao.quantization.fx.quantization_patterns import _default_root_node_getter
from torch.ao.quantization.observer import default_fixed_qparams_range_0to1_observer
class TestBackendConfig(QuantizationTestCase):
# =============
# DTypeConfig
# =============
dtype_config1 = DTypeConfig(
input_dtype=torch.quint8,
output_dtype=torch.quint8,
weight_dtype=torch.qint8,
bias_dtype=torch.float
)
dtype_config2 = DTypeConfig(
input_dtype=torch.float16,
output_dtype=torch.float,
is_dynamic=True
)
dtype_config_dict1 = {
"input_dtype": torch.quint8,
"output_dtype": torch.quint8,
"weight_dtype": torch.qint8,
"bias_dtype": torch.float,
}
dtype_config_dict2 = {
"input_dtype": torch.float16,
"output_dtype": torch.float,
"is_dynamic": True,
}
def test_dtype_config_from_dict(self):
self.assertEqual(DTypeConfig.from_dict(self.dtype_config_dict1), self.dtype_config1)
self.assertEqual(DTypeConfig.from_dict(self.dtype_config_dict2), self.dtype_config2)
def test_dtype_config_to_dict(self):
self.assertEqual(self.dtype_config1.to_dict(), self.dtype_config_dict1)
self.assertEqual(self.dtype_config2.to_dict(), self.dtype_config_dict2)
# ======================
# BackendPatternConfig
# ======================
_fuser_method = reverse_sequential_wrapper2(nni.LinearReLU)
_num_tensor_args_to_observation_type = {
0: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
1: ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT,
2: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
}
_input_type_to_index = {
"bias": 0,
"input": 1,
"weight": 2,
}
_fake_quantize = FixedQParamsFakeQuantize.with_args(observer=default_fixed_qparams_range_0to1_observer)
def _extra_inputs_getter(self, p):
return (torch.rand(3, 3),)
def _get_backend_op_config1(self):
return BackendPatternConfig((torch.nn.ReLU, torch.nn.Linear)) \
.set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
.add_dtype_config(self.dtype_config1) \
.add_dtype_config(self.dtype_config2) \
.set_root_module(torch.nn.Linear) \
.set_qat_module(nnqat.Linear) \
.set_reference_quantized_module(nnqr.Linear) \
.set_fused_module(nni.LinearReLU) \
.set_fuser_method(self._fuser_method)
def _get_backend_op_config2(self):
return BackendPatternConfig(torch.add) \
.add_dtype_config(self.dtype_config2) \
._set_root_node_getter(_default_root_node_getter) \
._set_extra_inputs_getter(self._extra_inputs_getter) \
._set_num_tensor_args_to_observation_type(self._num_tensor_args_to_observation_type) \
._set_input_type_to_index(self._input_type_to_index) \
._set_input_output_observed(False) \
._set_overwrite_output_fake_quantize(self._fake_quantize) \
._set_overwrite_output_observer(default_fixed_qparams_range_0to1_observer)
def _get_backend_pattern_config_dict1(self):
return {
"pattern": (torch.nn.ReLU, torch.nn.Linear),
"observation_type": ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
"dtype_configs": [self.dtype_config_dict1, self.dtype_config_dict2],
"root_module": torch.nn.Linear,
"qat_module": nnqat.Linear,
"reference_quantized_module_for_root": nnqr.Linear,
"fused_module": nni.LinearReLU,
"fuser_method": self._fuser_method,
}
def _get_backend_pattern_config_dict2(self):
return {
"pattern": torch.add,
"observation_type": ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
"dtype_configs": [self.dtype_config_dict2],
"root_node_getter": _default_root_node_getter,
"extra_inputs_getter": self._extra_inputs_getter,
"num_tensor_args_to_observation_type": self._num_tensor_args_to_observation_type,
"input_type_to_index": self._input_type_to_index,
"input_output_observed": False,
"overwrite_output_fake_quantize": self._fake_quantize,
"overwrite_output_observer": default_fixed_qparams_range_0to1_observer
}
def test_backend_op_config_set_observation_type(self):
conf = BackendPatternConfig(torch.nn.Linear)
self.assertEqual(conf.observation_type, ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)
conf.set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT)
self.assertEqual(conf.observation_type, ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT)
def test_backend_op_config_add_dtype_config(self):
conf = BackendPatternConfig(torch.nn.Linear)
self.assertEqual(len(conf.dtype_configs), 0)
conf.add_dtype_config(self.dtype_config1)
conf.add_dtype_config(self.dtype_config2)
self.assertEqual(len(conf.dtype_configs), 2)
self.assertEqual(conf.dtype_configs[0], self.dtype_config1)
self.assertEqual(conf.dtype_configs[1], self.dtype_config2)
def test_backend_op_config_set_root_module(self):
conf = BackendPatternConfig(nni.LinearReLU)
self.assertTrue(conf.root_module is None)
conf.set_root_module(torch.nn.Linear)
self.assertEqual(conf.root_module, torch.nn.Linear)
def test_backend_op_config_set_qat_module(self):
conf = BackendPatternConfig(torch.nn.Linear)
self.assertTrue(conf.qat_module is None)
conf.set_qat_module(nnqat.Linear)
self.assertEqual(conf.qat_module, nnqat.Linear)
def test_backend_op_config_set_reference_quantized_module(self):
conf = BackendPatternConfig(torch.nn.Linear)
self.assertTrue(conf.reference_quantized_module is None)
conf.set_reference_quantized_module(nnqr.Linear)
self.assertEqual(conf.reference_quantized_module, nnqr.Linear)
def test_backend_op_config_set_fused_module(self):
conf = BackendPatternConfig((torch.nn.ReLU, torch.nn.Linear))
self.assertTrue(conf.fused_module is None)
conf.set_fused_module(nni.LinearReLU)
self.assertEqual(conf.fused_module, nni.LinearReLU)
def test_backend_op_config_set_fuser_method(self):
conf = BackendPatternConfig((torch.nn.ReLU, torch.nn.Linear))
self.assertTrue(conf.fuser_method is None)
conf.set_fuser_method(self._fuser_method)
self.assertEqual(conf.fuser_method, self._fuser_method)
def test_backend_op_config_set_root_node_getter(self):
conf = BackendPatternConfig((torch.nn.ReLU, torch.nn.Linear))
self.assertTrue(conf._root_node_getter is None)
conf._set_root_node_getter(_default_root_node_getter)
self.assertEqual(conf._root_node_getter, _default_root_node_getter)
def test_backend_op_config_set_extra_inputs_getter(self):
conf = BackendPatternConfig(torch.nn.Linear)
self.assertTrue(conf._extra_inputs_getter is None)
conf._set_extra_inputs_getter(self._extra_inputs_getter)
self.assertEqual(conf._extra_inputs_getter, self._extra_inputs_getter)
def test_backend_op_config_set_num_tensor_args_to_observation_type(self):
conf = BackendPatternConfig(torch.add)
self.assertEqual(len(conf._num_tensor_args_to_observation_type), 0)
conf._set_num_tensor_args_to_observation_type(self._num_tensor_args_to_observation_type)
self.assertEqual(conf._num_tensor_args_to_observation_type, self._num_tensor_args_to_observation_type)
def test_backend_op_config_set_input_type_to_index(self):
conf = BackendPatternConfig(torch.addmm)
self.assertEqual(len(conf._input_type_to_index), 0)
conf._set_input_type_to_index(self._input_type_to_index)
self.assertEqual(conf._input_type_to_index, self._input_type_to_index)
def test_backend_op_config_set_input_output_observed(self):
conf = BackendPatternConfig(torch.nn.Embedding)
self.assertTrue(conf._input_output_observed is None)
conf._set_input_output_observed(False)
self.assertEqual(conf._input_output_observed, False)
def test_backend_op_config_set_overwrite_output_fake_quantize(self):
conf = BackendPatternConfig(torch.sigmoid)
self.assertTrue(conf._overwrite_output_fake_quantize is None)
conf._set_overwrite_output_fake_quantize(self._fake_quantize)
self.assertEqual(conf._overwrite_output_fake_quantize, self._fake_quantize)
def test_backend_op_config_set_overwrite_output_observer(self):
conf = BackendPatternConfig(torch.sigmoid)
self.assertTrue(conf._overwrite_output_observer is None)
conf._set_overwrite_output_observer(default_fixed_qparams_range_0to1_observer)
self.assertEqual(conf._overwrite_output_observer, default_fixed_qparams_range_0to1_observer)
def test_backend_op_config_from_dict(self):
conf_dict1 = self._get_backend_pattern_config_dict1()
conf1 = BackendPatternConfig.from_dict(conf_dict1)
self.assertEqual(conf1.pattern, (torch.nn.ReLU, torch.nn.Linear))
self.assertEqual(conf1.observation_type, ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)
self.assertEqual(conf1.root_module, torch.nn.Linear)
self.assertEqual(conf1.qat_module, nnqat.Linear)
self.assertEqual(conf1.reference_quantized_module, nnqr.Linear)
self.assertEqual(conf1.fused_module, nni.LinearReLU)
self.assertEqual(conf1.fuser_method, self._fuser_method)
self.assertTrue(conf1._root_node_getter is None)
self.assertTrue(conf1._extra_inputs_getter is None)
self.assertEqual(len(conf1._num_tensor_args_to_observation_type), 0)
self.assertEqual(len(conf1._input_type_to_index), 0)
self.assertTrue(conf1._input_output_observed is None)
self.assertTrue(conf1._overwrite_output_fake_quantize is None)
self.assertTrue(conf1._overwrite_output_observer is None)
# Test temporary/internal keys
conf_dict2 = self._get_backend_pattern_config_dict2()
conf2 = BackendPatternConfig.from_dict(conf_dict2)
self.assertEqual(conf2.pattern, torch.add)
self.assertEqual(conf2.observation_type, ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)
self.assertTrue(conf2.root_module is None)
self.assertTrue(conf2.qat_module is None)
self.assertTrue(conf2.reference_quantized_module is None)
self.assertTrue(conf2.fused_module is None)
self.assertTrue(conf2.fuser_method is None)
self.assertEqual(conf2._root_node_getter, _default_root_node_getter)
self.assertEqual(conf2._extra_inputs_getter, self._extra_inputs_getter)
self.assertEqual(conf2._num_tensor_args_to_observation_type, self._num_tensor_args_to_observation_type)
self.assertEqual(conf2._input_type_to_index, self._input_type_to_index)
self.assertEqual(conf2._input_output_observed, False)
self.assertEqual(conf2._overwrite_output_fake_quantize, self._fake_quantize)
self.assertEqual(conf2._overwrite_output_observer, default_fixed_qparams_range_0to1_observer)
def test_backend_op_config_to_dict(self):
conf1 = self._get_backend_op_config1()
conf2 = self._get_backend_op_config2()
conf_dict1 = self._get_backend_pattern_config_dict1()
conf_dict2 = self._get_backend_pattern_config_dict2()
self.assertEqual(conf1.to_dict(), conf_dict1)
self.assertEqual(conf2.to_dict(), conf_dict2)
# ===============
# BackendConfig
# ===============
def test_backend_config_set_name(self):
conf = BackendConfig("name1")
self.assertEqual(conf.name, "name1")
conf.set_name("name2")
self.assertEqual(conf.name, "name2")
def test_backend_config_set_backend_pattern_config(self):
conf = BackendConfig("name1")
self.assertEqual(len(conf.configs), 0)
backend_op_config1 = self._get_backend_op_config1()
backend_op_config2 = self._get_backend_op_config2()
conf.set_backend_pattern_config(backend_op_config1)
self.assertEqual(conf.configs, {
(torch.nn.ReLU, torch.nn.Linear): backend_op_config1,
})
conf.set_backend_pattern_config(backend_op_config2)
self.assertEqual(conf.configs, {
(torch.nn.ReLU, torch.nn.Linear): backend_op_config1,
torch.add: backend_op_config2
})
def test_backend_config_from_dict(self):
op1 = self._get_backend_op_config1()
op2 = self._get_backend_op_config2()
op_dict1 = self._get_backend_pattern_config_dict1()
op_dict2 = self._get_backend_pattern_config_dict2()
conf_dict = {
"name": "name1",
"configs": [op_dict1, op_dict2],
}
conf = BackendConfig.from_dict(conf_dict)
self.assertEqual(conf.name, "name1")
self.assertEqual(len(conf.configs), 2)
key1 = (torch.nn.ReLU, torch.nn.Linear)
key2 = torch.add
self.assertTrue(key1 in conf.configs)
self.assertTrue(key2 in conf.configs)
self.assertEqual(conf.configs[key1].to_dict(), op_dict1)
self.assertEqual(conf.configs[key2].to_dict(), op_dict2)
def test_backend_config_to_dict(self):
op1 = self._get_backend_op_config1()
op2 = self._get_backend_op_config2()
op_dict1 = self._get_backend_pattern_config_dict1()
op_dict2 = self._get_backend_pattern_config_dict2()
conf = BackendConfig("name1").set_backend_pattern_config(op1).set_backend_pattern_config(op2)
conf_dict = {
"name": "name1",
"configs": [op_dict1, op_dict2],
}
        self.assertEqual(conf.to_dict(), conf_dict)
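# A brief, hypothetical sketch (not part of the test class above) of the fluent
# construction pattern these tests exercise: build a BackendPatternConfig, register
# it on a BackendConfig, and round-trip it through to_dict()/from_dict().
def _example_backend_config_roundtrip():
    pattern_config = BackendPatternConfig((torch.nn.ReLU, torch.nn.Linear)) \
        .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
        .add_dtype_config(DTypeConfig(input_dtype=torch.quint8, output_dtype=torch.quint8)) \
        .set_root_module(torch.nn.Linear) \
        .set_fused_module(nni.LinearReLU)
    backend_config = BackendConfig("example_backend").set_backend_pattern_config(pattern_config)
    # from_dict(to_dict(...)) is expected to reproduce an equivalent config.
    return BackendConfig.from_dict(backend_config.to_dict()).to_dict() == backend_config.to_dict()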
if __name__ == '__main__':
raise RuntimeError("This _test file is not meant to be run directly, use:\n\n"
"\tpython _test/_test_quantization.py TESTNAME\n\n"
"instead.")
|
pytorch-master
|
test/quantization/core/test_backend_config.py
|
# Owner(s): ["oncall: quantization"]
import numpy as np
import math
import torch
import io
import unittest
from copy import deepcopy
from hypothesis import given
from hypothesis import strategies as st
from torch.testing._internal.common_utils import TemporaryFileName
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import TestCase, TEST_WITH_ROCM
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
import itertools
import tempfile
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
self.qscheme = torch.per_tensor_symmetric
def _calculate_dynamic_qparams(X, dtype, reduce_range=False):
"""Calculate the dynamic quantization parameters (scale, zero_point)
according to the min and max element of the tensor"""
if isinstance(X, torch.Tensor):
X = X.cpu().data.numpy()
if dtype == torch.qint8:
if reduce_range:
qmin, qmax = -64, 63
else:
qmin, qmax = -128, 127
else: # dtype == torch.quint8
if reduce_range:
qmin, qmax = 0, 127
else:
qmin, qmax = 0, 255
min_val = X.min().astype(dtype=np.float32)
max_val = X.max().astype(dtype=np.float32)
min_val = min(0.0, min_val)
max_val = max(0.0, max_val)
scale = (np.float64(max_val) - min_val) / (qmax - qmin)
if scale == 0.0 or math.isinf(1.0 / scale):
scale = np.float64(0.1)
zero_point = 0
zero_point_from_min = qmin - min_val / float(scale)
zero_point_from_max = qmax - max_val / float(scale)
zero_point_from_min_error = abs(qmin) - abs(min_val / float(scale))
zero_point_from_max_error = abs(qmax) - abs(max_val / float(scale))
if zero_point_from_min_error < zero_point_from_max_error:
initial_zero_point = zero_point_from_min
else:
initial_zero_point = zero_point_from_max
nudged_zero_point = 0
if initial_zero_point < qmin:
nudged_zero_point = qmin
elif initial_zero_point > qmax:
nudged_zero_point = qmax
else:
nudged_zero_point = int(round(initial_zero_point))
return [scale.astype(np.float32), int(nudged_zero_point)]
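# A minimal, hypothetical usage sketch (not part of the original test suite): the
# (scale, zero_point) pair returned by `_calculate_dynamic_qparams` should let a
# static `torch.quantize_per_tensor` call reproduce `torch.quantize_per_tensor_dynamic`,
# since both derive their qparams from the tensor's observed min/max.
def _example_dynamic_qparams_roundtrip(dtype=torch.quint8):
    x = torch.randn(16, dtype=torch.float)
    scale, zero_point = _calculate_dynamic_qparams(x, dtype, reduce_range=False)
    q_dynamic = torch.quantize_per_tensor_dynamic(x, dtype, False)
    q_static = torch.quantize_per_tensor(x, scale, zero_point, dtype)
    # The dequantized values should agree up to roughly one quantization step.
    return torch.allclose(q_static.dequantize(), q_dynamic.dequantize(), atol=float(scale))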
def get_supported_device_types():
return ['cpu', 'cuda'] if torch.cuda.is_available() and not TEST_WITH_ROCM else ['cpu']
# Note we explicitly cast variables to np.float32 in a couple of places to avoid
# the default casting in Python often resulting in double precision and to make
# sure we're doing the same numerics as C++ code.
def param_search_greedy(x, bit_rate, n_bins=200, ratio=0.16):
xmin, xmax = np.min(x), np.max(x)
stepsize = (xmax - xmin) / np.float32(n_bins)
min_bins = np.float32(n_bins) * (np.float32(1) - np.float32(ratio))
xq, loss = _compress_uniform_simplified(x, bit_rate, xmin, xmax)
solutions = [] # [(left, right, loss)] # local optima solution
cur_min, cur_max, cur_loss = xmin, xmax, loss
thr = min_bins * stepsize
while cur_min + thr < cur_max:
# move left
xq, loss1 = _compress_uniform_simplified(
x, bit_rate, cur_min + stepsize, cur_max
)
# move right
xq, loss2 = _compress_uniform_simplified(
x, bit_rate, cur_min, cur_max - stepsize
)
if cur_loss < loss1 and cur_loss < loss2:
# found a local optima
solutions.append((cur_min, cur_max, cur_loss))
if loss1 < loss2:
cur_min, cur_max, cur_loss = cur_min + stepsize, cur_max, loss1
else:
cur_min, cur_max, cur_loss = cur_min, cur_max - stepsize, loss2
if len(solutions):
best = solutions[0]
for solution in solutions:
if solution[-1] < best[-1]:
best = solution
return best[1], best[0] # xmax, xmin
return xmax, xmin
def _compress_uniform_simplified(X, bit_rate, xmin, xmax, fp16_scale_bias=True):
# affine transform to put Xq in [0,2**bit_rate - 1]
    # Xq = (2 ** bit_rate - 1) * (X - xmin) / data_range
if fp16_scale_bias:
xmin = xmin.astype(np.float16).astype(np.float32)
data_range = xmax - xmin
scale = np.where(
data_range == 0, np.float32(1), data_range / np.float32(2 ** bit_rate - 1)
)
if fp16_scale_bias:
scale = scale.astype(np.float16).astype(np.float32)
inverse_scale = np.float32(1) / scale
Xq = np.clip(np.round((X - xmin) * inverse_scale), 0, np.float32(2 ** bit_rate - 1))
Xq = Xq * scale + xmin
# Manually compute loss instead of using np.linalg.norm to use the same
# accumulation order used by C++ code
vlen = 8
loss_v = np.zeros(vlen).astype(np.float32)
for i in range(len(Xq) // vlen * vlen):
loss_v[i % vlen] += (X[i] - Xq[i]) * (X[i] - Xq[i])
loss = np.float32(0)
for i in range(vlen):
loss += loss_v[i]
for i in range(len(Xq) // vlen * vlen, len(Xq)):
loss += (X[i] - Xq[i]) * (X[i] - Xq[i])
loss = np.sqrt(loss)
return Xq, loss
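# A small, hypothetical sketch (not used by the tests below) exercising the greedy
# range search above: the clipped [xmin, xmax] it returns should quantize no worse
# than the trivial full [min, max] range, because the first local optimum the walk
# records is reached before the loss ever increases.
def _example_param_search_greedy(bit_rate=4, n=512):
    x = np.random.randn(n).astype(np.float32)
    xmax, xmin = param_search_greedy(x, bit_rate, n_bins=200, ratio=0.16)
    _, loss_full = _compress_uniform_simplified(x, bit_rate, np.min(x), np.max(x))
    _, loss_best = _compress_uniform_simplified(x, bit_rate, xmin, xmax)
    return loss_best <= loss_full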
class TestQuantizedTensor(TestCase):
def test_per_tensor_qtensor_to_memory_format(self):
n = np.random.randint(1, 10)
c = np.random.randint(2, 10)
h = np.random.randint(2, 10)
w = np.random.randint(2, 10)
x = torch.rand(n, c, h, w)
scale = np.random.uniform(0.1, 1.0)
zero_point = np.random.randint(0.0, 10)
qints = [torch.qint8, torch.quint8, torch.qint32]
dtype = qints[np.random.randint(0, len(qints))]
qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=dtype)
x_nhwc = x.to(memory_format=torch.channels_last)
qx_nhwc_using_to = qx.to(memory_format=torch.channels_last)
qx_nhwc_using_contiguous = qx.contiguous(memory_format=torch.channels_last)
self.assertEqual(qx_nhwc_using_to.stride(), qx_nhwc_using_contiguous.stride())
self.assertEqual(qx_nhwc_using_to.stride(), x_nhwc.stride())
# When the last two dimensions of a 4D tensor are both size 1 or if c == 1, we have a degenerate case
# see https://pytorch.org/tutorials/intermediate/memory_format_tutorial.html
# In this case, the output of torch.Tensor.to and torch.Tensor.contiguous should not be the same
x = torch.rand(10, 2, 1, 1)
qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=dtype)
qx_nhwc_using_to = qx.to(memory_format=torch.channels_last)
qx_nhwc_using_contiguous = qx.contiguous(memory_format=torch.channels_last)
self.assertNotEqual(qx_nhwc_using_to.stride(), qx_nhwc_using_contiguous.stride())
x = torch.rand(10, 1, 2, 2)
qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=dtype)
qx_nhwc_using_to = qx.to(memory_format=torch.channels_last)
qx_nhwc_using_contiguous = qx.contiguous(memory_format=torch.channels_last)
self.assertNotEqual(qx_nhwc_using_to.stride(), qx_nhwc_using_contiguous.stride())
def test_per_channel_qtensor_to_memory_format(self):
n = np.random.randint(1, 10)
c = np.random.randint(2, 10)
h = np.random.randint(2, 10)
w = np.random.randint(2, 10)
x = torch.rand(n, c, h, w)
x_nhwc = x.to(memory_format=torch.channels_last)
scale = np.random.uniform(0.1, 1.0)
zero_point = np.random.randint(0.0, 10)
qints = [torch.qint8, torch.quint8, torch.qint32]
dtype = qints[np.random.randint(0, len(qints))]
for axis in range(x.ndim):
scales = torch.rand(x.size(axis)) + 0.00001
zero_points = torch.randint(low=0, high=10, size=(x.size(axis), ))
qx = torch.quantize_per_channel(x, scales=scales, zero_points=zero_points, dtype=dtype, axis=axis)
qx_nhwc_using_to = qx.to(memory_format=torch.channels_last)
self.assertEqual(qx_nhwc_using_to.stride(), x_nhwc.stride())
@unittest.skipIf(not TEST_CUDA, "No gpu is available.")
def test_qtensor_cuda(self):
self._test_qtensor(torch.device('cuda'))
self._test_qtensor_dynamic(torch.device('cuda'))
def test_qtensor_cpu(self):
self._test_qtensor(torch.device('cpu'))
self._test_qtensor_dynamic(torch.device('cpu'))
def _test_qtensor_dynamic(self, device):
# max number of tensor dimensions
max_tensor_order = 4
# max size for any tensor dimension
max_dim_sz = 20
num_dim = np.random.randint(low=1, high=max_tensor_order)
dims = np.random.randint(low=1, high=max_dim_sz, size=num_dim)
mat2quant = torch.randn(*dims, dtype=torch.float, device=device)
reduce_flag = False
for dtype in [torch.qint8, torch.quint8]:
q_d = torch.quantize_per_tensor_dynamic(mat2quant, dtype, reduce_flag)
scale, zero_pt = _calculate_dynamic_qparams(mat2quant, dtype, reduce_flag)
q_s = torch.quantize_per_tensor(mat2quant, scale, zero_pt, dtype)
self.assertEqual(q_d, q_s)
def _test_qtensor(self, device):
device = str(device)
num_elements = 10
scale = 1.0
zero_point = 2
for dtype in [torch.qint8, torch.quint8, torch.qint32]:
r = torch.ones(num_elements, dtype=torch.float, device=device)
qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)
self.assertEqual(qr.q_scale(), scale)
self.assertEqual(qr.q_zero_point(), zero_point)
self.assertTrue(qr.is_quantized)
self.assertFalse(r.is_quantized)
self.assertEqual(qr.qscheme(), torch.per_tensor_affine)
self.assertTrue(isinstance(qr.qscheme(), torch.qscheme))
# slicing and int_repr
int_repr = qr.int_repr()
for num in int_repr:
self.assertEqual(num, 3)
for num in qr[2:].int_repr():
self.assertEqual(num, 3)
# dequantize
rqr = qr.dequantize()
for i in range(num_elements):
self.assertEqual(r[i], rqr[i])
# we can also print a qtensor
empty_r = torch.ones((0, 1), dtype=torch.float, device=device)
empty_qr = torch.quantize_per_tensor(empty_r, scale, zero_point, dtype)
device_msg = "" if device == 'cpu' else "device='" + device + ":0', "
dtype_msg = str(dtype) + ", "
self.assertEqual(' '.join(str(empty_qr).split()),
"tensor([], " + device_msg + "size=(0, 1), dtype=" + dtype_msg +
"quantization_scheme=torch.per_tensor_affine, " +
"scale=1.0, zero_point=2)")
def test_qtensor_int_repr(self):
        # to catch the edge case when num elements * bit rate < 8, make sure we allocate at least one byte to hold the int repr
num_elements = 1
device = torch.device('cpu')
scale = 1.0
zero_point = 2
dtype = torch.quint2x4
r = torch.ones(num_elements, dtype=torch.float, device=device)
qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)
int_repr = qr.int_repr()
self.assertEqual(int_repr.numel(), 1)
# Packed one entry looks like 00000011
self.assertEqual(int_repr[0], 3)
def test_qtensor_sub_byte_aligned_cols(self):
        # Packed 4 entries, each of value 3, look like 00110011, 00110011 for torch.quint4x2, or 11111111 for torch.quint2x4
self._test_qtensor_sub_byte(1, 4, torch.quint4x2, 2, [51, 51])
self._test_qtensor_sub_byte(1, 4, torch.quint2x4, 4, [255])
def test_qtensor_sub_byte_not_aligned_cols(self):
        # Packed 5 entries, each of value 3, look like 00110011, 00110011, 00000011 for torch.quint4x2,
# or 11111111, 00000011 for torch.quint2x4
self._test_qtensor_sub_byte(1, 5, torch.quint4x2, 2, [51, 51, 3])
self._test_qtensor_sub_byte(1, 5, torch.quint2x4, 4, [255, 3])
def _test_qtensor_sub_byte(self, rows, cols, dtype, elements_per_byte, expected_packed_vals):
num_elements = rows * cols
scale = 1.0
zero_point = 2
r = torch.ones((rows, cols), dtype=torch.float)
qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)
self.assertEqual(qr.q_scale(), scale)
self.assertEqual(qr.q_zero_point(), zero_point)
self.assertTrue(qr.is_quantized)
self.assertFalse(r.is_quantized)
self.assertEqual(qr.storage().size(), rows * math.ceil(cols / elements_per_byte), f"with {dtype}, {elements_per_byte}")
int_repr = qr.int_repr()
self.assertEqual(int_repr.numel(), len(expected_packed_vals))
for num, expected in zip(int_repr, expected_packed_vals):
self.assertEqual(num, expected, f"with dtype={dtype}, elements_per_byte={elements_per_byte}, rows={rows}, cols={cols}")
# Test tensor creation
q = torch._empty_affine_quantized([num_elements], scale=scale, zero_point=zero_point, dtype=dtype)
self.assertEqual(q.storage().size(), math.ceil(num_elements / elements_per_byte), f"with {dtype}, {elements_per_byte}")
# Test save/load
with tempfile.NamedTemporaryFile() as f:
torch.save(qr, f)
f.seek(0)
loaded_q = torch.load(f)
loaded_int_repr = loaded_q.int_repr()
self.assertEqual(int_repr, loaded_int_repr)
def test_qtensor_channel_float_assignment(self):
t1 = torch.rand(2, 3, 5, 5)
t2 = torch.rand(2, 3, 5, 5)
for axis in range(t1.ndim):
scales = np.random.rand(t1.size()[axis])
zero_points = np.random.randint(low=0, high=50, size=t1.size()[axis])
for dtype in [torch.qint8, torch.quint8, torch.qint32]:
qt1 = torch.quantize_per_channel(t1, scales=torch.tensor(scales),
zero_points=torch.tensor(zero_points), dtype=dtype, axis=axis)
qt2 = torch.quantize_per_channel(t2, scales=torch.tensor(scales),
zero_points=torch.tensor(zero_points), dtype=dtype, axis=axis)
i = 0
j = 1
k = 2
l = 4
# scalar assignment verification
qt1[i][j][k][l] = t2[i][j][k][l]
self.assertEqual(qt1[i][j][k][l], qt2[i][j][k][l])
# 1D tensor assignment verification
qt1[i][j][k][2:l] = t2[i][j][k][2:l]
self.assertEqual(qt1[i][j][k][2:l], qt2[i][j][k][2:l])
qt1[i][j][k] = t2[i][j][k]
self.assertEqual(qt1[i][j][k], qt2[i][j][k])
# 2D tensor assignment verification
qt1[i][j][k:] = t2[i][j][k:]
self.assertEqual(qt1[i][j][k:], qt2[i][j][k:])
qt1[i][j] = t2[i][j]
self.assertEqual(qt1[i][j], qt2[i][j])
# 3D tensor assignment verification
qt1[i][j:] = t2[i][j:]
self.assertEqual(qt1[i][j:], qt2[i][j:])
qt1[i] = t2[i]
self.assertEqual(qt1[i], qt2[i])
# 4D tensor assignment verification
qt1[:1] = t2[:1]
self.assertEqual(qt1[:1], qt2[:1])
qt1[:] = t2[:]
self.assertEqual(qt1[:], qt2[:])
# non-contiguous case **this should raise an exception**
with self.assertRaisesRegex(RuntimeError, "Quantized copy only works with contiguous and NHWC Tensors"):
qt1[:, 0] = t2[:, 0]
def test_qtensor_float_assignment(self):
# Scalar Tensor
# item
scale = 1.0
zero_point = 2
devices = ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
for device in devices:
r = torch.ones(1, dtype=torch.float).to(device=device)
for dtype in [torch.qint8, torch.quint8, torch.qint32]:
qr = torch.quantize_per_tensor(r, scale, zero_point, dtype=dtype)
self.assertEqual(qr.item(), 1)
self.assertEqual(qr[0].item(), 1)
# assignment
self.assertTrue(qr[0].is_quantized)
qr[0] = torch.Tensor([11.3]).to(device=device) # float assignment
self.assertEqual(qr.item(), 11)
x = torch.ones(1, dtype=torch.float).to(device=device) * 15.3
# Copying from a float Tensor
qr[:] = x
self.assertEqual(qr.item(), 15)
dtype_msg = str(dtype) + ", "
if device == "cuda":
self.assertEqual(' '.join(str(qr).split()),
"tensor([15.], device='" + str(qr.device) + "', size=(1,), dtype=" + dtype_msg +
"quantization_scheme=torch.per_tensor_affine, " +
"scale=1.0, zero_point=2)")
else:
self.assertEqual(' '.join(str(qr).split()),
"tensor([15.], size=(1,), dtype=" + dtype_msg +
"quantization_scheme=torch.per_tensor_affine, " +
"scale=1.0, zero_point=2)")
def test_qtensor_quant_dequant(self):
scale = 0.02
zero_point = 2
for device in get_supported_device_types():
r = torch.rand(3, 2, 4, 5, dtype=torch.float, device=device) * 4 - 2
for memory_format in [torch.contiguous_format, torch.channels_last]:
r = r.contiguous(memory_format=memory_format)
for dtype in [torch.qint8, torch.quint8, torch.qint32]:
qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)
rqr = qr.dequantize()
self.assertTrue(np.allclose(r.cpu().numpy(), rqr.cpu().numpy(), atol=2 / scale))
# Also check 5D tensors work.
for device in get_supported_device_types():
r = torch.rand(3, 2, 4, 5, 6, dtype=torch.float, device=device) * 4 - 2
for dtype in [torch.qint8, torch.quint8, torch.qint32]:
qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)
rqr = qr.dequantize()
self.assertTrue(np.allclose(r.cpu().numpy(), rqr.cpu().numpy(), atol=2 / scale))
# legacy constructor/new doesn't support qtensors
def test_qtensor_legacy_new_failure(self):
r = torch.rand(3, 2, dtype=torch.float) * 4 - 2
scale = 0.02
zero_point = 2
qr = torch.quantize_per_tensor(r, scale, zero_point, torch.quint8)
self.assertRaises(RuntimeError, lambda: qr.new(device='cpu'))
self.assertRaises(RuntimeError, lambda: qr.new(r.storage()))
self.assertRaises(RuntimeError, lambda: qr.new(r))
self.assertRaises(RuntimeError, lambda: qr.new(torch.Size([2, 3])))
self.assertRaises(RuntimeError, lambda: qr.new([6]))
def test_per_channel_qtensor_creation_cpu(self):
self._test_per_channel_qtensor_creation(torch.device('cpu'))
def _test_dequantize_fp16(self, device):
data_orig = torch.randn(1, 2, 4, 4, dtype=torch.float, device=device)
data_fp16 = data_orig.to(torch.float16)
data_fp16_dequant = data_fp16.dequantize()
data_fp16_fp32 = data_fp16.to(torch.float)
self.assertTrue(data_fp16_dequant.dtype == torch.float)
self.assertTrue(torch.allclose(data_fp16_fp32, data_fp16_dequant))
def test_dequantize_fp16_cpu(self):
self._test_dequantize_fp16(torch.device('cpu'))
@unittest.skipIf(not TEST_CUDA, "No gpu is available.")
def test_dequantize_fp16_cuda(self):
self._test_dequantize_fp16(torch.device('cuda'))
@unittest.skipIf(not TEST_CUDA, "No gpu is available.")
def test_per_channel_qtensor_creation_cuda(self):
self._test_per_channel_qtensor_creation(torch.device('cuda'))
def _test_per_channel_qtensor_creation(self, device):
numel = 10
ch_axis = 0
scales = torch.rand(numel, device=device)
zero_points_int = torch.randint(0, 10, size=(numel,), device=device)
zero_points_float = torch.randn(numel, device=device)
for dtype, zero_points in itertools.product([torch.qint8, torch.quint8], [zero_points_float, zero_points_int]):
q = torch._empty_per_channel_affine_quantized(
[numel], scales=scales, zero_points=zero_points, axis=ch_axis, dtype=dtype, device=device)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(scales, q.q_per_channel_scales())
self.assertEqual(zero_points, q.q_per_channel_zero_points())
self.assertEqual(ch_axis, q.q_per_channel_axis())
# create Tensor from uint8_t Tensor, scales and zero_points
for zero_points in [zero_points_float, zero_points_int]:
int_tensor = torch.randint(0, 100, size=(numel,), dtype=torch.uint8, device=device)
q = torch._make_per_channel_quantized_tensor(int_tensor, scales, zero_points, ch_axis)
self.assertEqual(int_tensor, q.int_repr())
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(scales, q.q_per_channel_scales())
self.assertEqual(zero_points, q.q_per_channel_zero_points())
self.assertEqual(ch_axis, q.q_per_channel_axis())
def test_qtensor_creation(self):
scale = 0.5
zero_point = 10
numel = 10
for device in get_supported_device_types():
q = torch._empty_affine_quantized([numel], scale=scale, zero_point=zero_point,
device=device, dtype=torch.quint8)
self.assertEqual(scale, q.q_scale())
self.assertEqual(zero_point, q.q_zero_point())
# create Tensor from uint8_t Tensor, scale and zero_point
int_tensor = torch.randint(0, 100, size=(10,), device=device, dtype=torch.uint8)
q = torch._make_per_tensor_quantized_tensor(int_tensor, scale, zero_point)
self.assertEqual(int_tensor, q.int_repr())
self.assertEqual(scale, q.q_scale())
self.assertEqual(zero_point, q.q_zero_point())
# create via empty_like
q = torch._empty_affine_quantized([numel], scale=scale, zero_point=zero_point,
device=device, dtype=torch.quint8)
q_el = torch.empty_like(q)
self.assertEqual(q.q_scale(), q_el.q_scale())
self.assertEqual(q.q_zero_point(), q_el.q_zero_point())
self.assertEqual(q.dtype, q_el.dtype)
# create via empty_like but change the dtype (currently not supported)
with self.assertRaises(RuntimeError):
torch.empty_like(q, dtype=torch.qint8)
def test_qtensor_dtypes(self):
r = torch.rand(3, 2, dtype=torch.float) * 4 - 2
scale = 0.2
zero_point = 2
for dtype in [torch.qint8, torch.quint8, torch.qint32, torch.quint4x2, torch.quint2x4]:
qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)
rqr = qr.dequantize()
self.assertTrue(np.allclose(r.numpy(), rqr.numpy(), atol=2 / scale))
@unittest.skipIf(not TEST_CUDA, "No gpu is available.")
def test_per_tensor_to_device(self):
dtypes = [
torch.quint8,
torch.qint8,
torch.qint32,
]
device = torch.device('cuda')
for dtype in dtypes:
r = torch.rand(2, 2, dtype=torch.float) * 10
scale = torch.rand(2).abs().max().item()
zero_point = (torch.rand(2) * 10).round().to(torch.long).max().item()
qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)
qr = qr.to(device)
qr_cuda = torch.quantize_per_tensor(r.to(device), scale, zero_point, dtype)
qr_cuda = qr_cuda.to('cpu')
self.assertEqual('cuda', qr.device.type)
self.assertEqual('cpu', qr_cuda.device.type)
@unittest.skipIf(not TEST_CUDA, "No gpu is available.")
def test_per_channel_to_device(self):
dtype_and_zero_types = [
(torch.quint8, torch.float),
(torch.qint8, torch.float),
# (torch.qint32, torch.float) not supported for quantize_per_channel
(torch.quint8, torch.long),
(torch.qint8, torch.long),
(torch.qint32, torch.long),
]
axis = 1
device = torch.device('cuda')
for dtype, zero_type in dtype_and_zero_types:
r = torch.rand(2, 2, dtype=torch.float) * 10
scales = torch.rand(2).abs()
zero_points = (torch.rand(2) * 10).round().to(zero_type)
dqr = torch.quantize_per_channel(r, scales, zero_points, axis, dtype)
dqr = dqr.to(device)
dqr_cuda = torch.quantize_per_channel(r.to(device), scales.to(
device), zero_points.to(device), axis, dtype)
dqr_cuda = dqr_cuda.to('cpu')
self.assertEqual('cuda', dqr.device.type)
self.assertEqual('cuda', dqr.q_per_channel_scales().device.type)
self.assertEqual('cuda', dqr.q_per_channel_zero_points().device.type)
self.assertEqual('cpu', dqr_cuda.device.type)
self.assertEqual('cpu', dqr_cuda.q_per_channel_scales().device.type)
self.assertEqual('cpu', dqr_cuda.q_per_channel_zero_points().device.type)
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_compare_per_tensor_device_numerics(self):
dtypes = [
torch.quint8,
torch.qint8,
torch.qint32,
]
device = torch.device('cuda')
for dtype in dtypes:
r = torch.rand(2, 2) * 10
r[0, 0] = 2.5
scale = torch.rand(2).abs().max().item()
zero_point = (torch.rand(2) * 10).round().to(torch.long).max().item()
qtr = torch.quantize_per_tensor(r, scale, zero_point, dtype)
dqtr = qtr.dequantize()
qtr_cuda = torch.quantize_per_tensor(r.to(device), scale, zero_point, dtype)
dqtr_cuda = qtr_cuda.dequantize()
self.assertEqual(qtr.int_repr(), qtr_cuda.int_repr())
self.assertTrue(np.allclose(dqtr, dqtr_cuda.cpu()))
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_compare_per_channel_device_numerics(self):
dtype_and_zero_types = [
(torch.quint8, torch.float),
(torch.qint8, torch.float),
# (torch.qint32, torch.float) not supported for quantize_per_channel
(torch.quint8, torch.long),
(torch.qint8, torch.long),
(torch.qint32, torch.long),
]
axis = 1
device = torch.device('cuda')
for i in range(20):
for dtype, zero_type in dtype_and_zero_types:
r = torch.rand(2, 2) * 10
r[0, 0] = 2.5
scales = torch.rand(2).abs()
zero_points = (torch.rand(2) * 10).round().to(zero_type)
qr = torch.quantize_per_channel(r, scales, zero_points, axis, dtype)
dqr = qr.dequantize()
qr_cuda = torch.quantize_per_channel(r.to(device), scales.to(
device), zero_points.to(device), axis, dtype)
dqr_cuda = qr_cuda.dequantize()
self.assertEqual(qr.int_repr(), qr_cuda.int_repr())
self.assertTrue(np.allclose(dqr, dqr_cuda.cpu()))
def _test_quantize_per_channel(self, r, scales, zero_points, axis, float_params):
def _quantize_per_channel_ref_nd(data, scales, zero_points, float_params):
dims = data.size()
data = data.view(-1, dims[axis], np.prod(dims[axis + 1:]))
res = torch.empty_like(data)
quant_min, quant_max = 0, 255
for i in range(res.size()[0]):
for j in range(res.size()[1]):
for k in range(res.size()[2]):
if float_params:
inv_scale = 1.0 / scales[j]
res[i][j][k] = np.clip(
np.round(data[i][j][k] * inv_scale + zero_points[j]), quant_min, quant_max)
else:
res[i][j][k] = np.clip(
np.round(data[i][j][k] / scales[j]) + zero_points[j], quant_min, quant_max)
res = res.view(*dims)
return res
contig_format = torch.channels_last if r.ndim == 4 else torch.channels_last_3d
for memory_format in [torch.contiguous_format, contig_format]:
ref_res = _quantize_per_channel_ref_nd(r, scales, zero_points, float_params)
r_contig = r.contiguous(memory_format=memory_format)
qr = torch.quantize_per_channel(r_contig, scales, zero_points, axis, torch.quint8)
rqr = qr.dequantize()
self.assertTrue(np.allclose(qr.int_repr(), ref_res))
self.assertTrue(np.allclose(r.numpy(), rqr.numpy(), atol=2 / np.min(scales.numpy())))
def test_qtensor_quantize_per_channel(self):
r = torch.rand(3, 2, dtype=torch.float) * 4 - 2
scales = torch.tensor([0.2, 0.03], dtype=torch.double)
zero_points = torch.tensor([5, 10], dtype=torch.long)
axis = 1
def quantize_c(data, scales, zero_points):
res = torch.empty((3, 2))
quant_min, quant_max = 0, 255
for i in range(3):
for j in range(2):
res[i][j] = np.clip(np.round(data[i][j] / scales[j]) + zero_points[j], quant_min, quant_max)
return res
qr = torch.quantize_per_channel(r, scales, zero_points, axis, torch.quint8)
rqr = qr.dequantize()
self.assertTrue(np.allclose(qr.int_repr(), quantize_c(r, scales, zero_points)))
self.assertTrue(np.allclose(r.numpy(), rqr.numpy(), atol=2 / np.min(scales.numpy())))
# Check 4D tensor with 2 different memory formats.
r = torch.rand(3, 2, 4, 5, dtype=torch.float) * 4 - 2
scales = torch.tensor([0.2, 0.03], dtype=torch.double)
zero_points = torch.tensor([5, 10], dtype=torch.long)
        self._test_quantize_per_channel(r, scales, zero_points, 1, False)
scales = torch.tensor([0.2, 0.03, 0.5], dtype=torch.double)
zero_points = torch.tensor([5, 10, 7], dtype=torch.long)
self._test_quantize_per_channel(r, scales, zero_points, 0, False)
# Check 5D tensor.
r = torch.rand(3, 2, 4, 5, 7, dtype=torch.float) * 4 - 2
scales = torch.tensor([0.2, 0.03], dtype=torch.double)
zero_points = torch.tensor([5, 10], dtype=torch.long)
self._test_quantize_per_channel(r, scales, zero_points, 1, False)
scales = torch.tensor([0.2, 0.03, 0.5], dtype=torch.double)
zero_points = torch.tensor([5, 10, 7], dtype=torch.long)
self._test_quantize_per_channel(r, scales, zero_points, 0, False)
def test_quantize_per_channel_float_qparams(self):
r = torch.rand(3, 2, dtype=torch.float) * 4
scales = torch.tensor([0.2, 0.03], dtype=torch.float)
zero_points = torch.tensor([0.1, 0.2], dtype=torch.float)
axis = 1
# Reference quantize function with FP zero_point.
def quantize_ref(data, scales, zero_points):
res = torch.empty((3, 2))
quant_min, quant_max = 0, 255
for i in range(3):
for j in range(2):
inv_scale = 1.0 / scales[j]
res[i][j] = np.clip(np.round(data[i][j] * inv_scale + zero_points[j]), quant_min, quant_max)
return res
qr = torch.quantize_per_channel(r, scales, zero_points, axis, torch.quint8)
dequant_tensor = qr.dequantize()
ref = quantize_ref(r, scales, zero_points)
self.assertTrue(np.allclose(qr.int_repr(), ref))
self.assertTrue(np.allclose(r.numpy(), dequant_tensor.numpy(), atol=1))
# Check 4D tensor with 2 different memory formats.
r = torch.rand(3, 2, 4, 5, dtype=torch.float) * 4
scales = torch.tensor([0.2, 0.03], dtype=torch.float)
zero_points = torch.tensor([0.1, 0.2], dtype=torch.float)
self._test_quantize_per_channel(r, scales, zero_points, 1, True)
scales = torch.tensor([0.2, 0.03, 0.5], dtype=torch.float)
zero_points = torch.tensor([0.1, 0.2, 1.], dtype=torch.float)
self._test_quantize_per_channel(r, scales, zero_points, 0, True)
# Check 5D tensor.
r = torch.rand(3, 2, 4, 5, 7, dtype=torch.float) * 4 - 2
scales = torch.tensor([0.2, 0.03], dtype=torch.float)
zero_points = torch.tensor([0.1, 0.2], dtype=torch.float)
self._test_quantize_per_channel(r, scales, zero_points, 1, True)
scales = torch.tensor([0.2, 0.03, 0.5], dtype=torch.float)
zero_points = torch.tensor([0.1, 0.2, 1.], dtype=torch.float)
self._test_quantize_per_channel(r, scales, zero_points, 0, True)
def test_quantize_per_channel_sub_byte(self):
""" Tests the per channel quantization scheme for 4-bit qtensors.
The scale and zero point for this have to be in floating point. """
r = torch.rand(3, 2, dtype=torch.float) * 4
scales = torch.tensor([0.2, 0.3, 0.1], dtype=torch.float)
zero_points = torch.tensor([0.1, 0.2, 0.3], dtype=torch.float)
qr = torch.quantize_per_channel(r, scales, zero_points, 0, torch.quint4x2)
dequant_tensor = qr.dequantize()
def _get_qranges(bit_width):
if bit_width == 4:
return 0, 15
def _quantize_per_channel_sub_byte_ref(data, scales, zero_points, axis, bit_width):
dims = data.size()
data = data.view(-1, dims[axis], np.prod(dims[axis + 1:]))
qtensor_size = math.ceil(data.numel() / 2)
res = torch.empty(qtensor_size, dtype=torch.uint8)
elem_per_byte = 8 // bit_width
quant_min, quant_max = _get_qranges(bit_width)
for i in range(data.size()[0]):
for j in range(data.size()[1]):
for k in range(data.size()[2]):
inv_scale = 1.0 / scales[j]
index = i * data.size()[1] * data.size()[2] + j * data.size()[2] + k
qvalue = np.clip(
np.round(data[i][j][k] * inv_scale + zero_points[j]), quant_min, quant_max).to(dtype=torch.int)
res_idx = int(index / elem_per_byte)
if (index % elem_per_byte == 0):
res[res_idx] = qvalue
else:
res[res_idx] |= (qvalue << ((index % elem_per_byte) * bit_width))
return res
ref_res = _quantize_per_channel_sub_byte_ref(r, scales, zero_points, 0, 4)
self.assertTrue(np.allclose(qr.int_repr(), ref_res))
self.assertTrue(np.allclose(r.numpy(), dequant_tensor.numpy(), atol=1 / np.min(scales.numpy())))
# Check 4D tensor with non-zero axis.
r = torch.rand(3, 2, 4, 5, dtype=torch.float) * 4
scales = torch.tensor([0.2, 0.03], dtype=torch.float)
zero_points = torch.tensor([0.1, 0.2], dtype=torch.float)
qr = torch.quantize_per_channel(r, scales, zero_points, axis=1, dtype=torch.quint4x2)
ref_res = _quantize_per_channel_sub_byte_ref(r, scales, zero_points, 1, 4)
self.assertTrue(np.allclose(qr.int_repr(), ref_res))
def test_qtensor_permute(self):
scale = 0.02
zero_point = 1
for device in get_supported_device_types():
r = torch.rand(10, 30, 2, 2, device=device, dtype=torch.float) * 4 - 2
for dtype in [torch.qint8, torch.quint8, torch.qint32]:
qr = torch.quantize_per_tensor(r, scale, zero_point, dtype=dtype)
qr = qr.transpose(0, 1)
rqr = qr.dequantize()
                # compare transpose + dequantized result with original transposed result
self.assertTrue(np.allclose(r.cpu().numpy().transpose([1, 0, 2, 3]), rqr.cpu().numpy(), atol=2 / scale))
qr = torch.quantize_per_tensor(r, scale, zero_point, dtype=dtype)
qr1 = qr.permute([1, 0, 2, 3])
qr2 = qr.transpose(0, 1)
# compare int representation after transformations
self.assertEqual(qr1.int_repr(), qr2.int_repr())
self.assertEqual(qr1.q_scale(), qr2.q_scale())
self.assertEqual(qr1.q_zero_point(), qr2.q_zero_point())
# compare dequantized result
self.assertEqual(qr1.dequantize(), qr2.dequantize())
# compare permuted + dequantized result with original transposed result
self.assertTrue(np.allclose(qr2.dequantize().cpu().numpy(),
r.cpu().numpy().transpose([1, 0, 2, 3]), atol=2 / scale))
# make permuted result contiguous
self.assertEqual(qr2.contiguous().int_repr(), qr2.int_repr())
# change memory format
qlast = qr.contiguous(memory_format=torch.channels_last)
self.assertEqual(qr.stride(), list(reversed(sorted(qr.stride()))))
self.assertNotEqual(qlast.stride(), list(reversed(sorted(qlast.stride()))))
self.assertEqual(qr.int_repr(), qlast.int_repr())
self.assertEqual(qr.q_scale(), qlast.q_scale())
self.assertEqual(qr.q_zero_point(), qlast.q_zero_point())
self.assertEqual(qlast.dequantize(), qr.dequantize())
# permuting larger tensors
x = torch.randn(64, 64, device=device)
qx = torch.quantize_per_tensor(x, 1.0, 0, dtype)
# should work
qx.permute([1, 0])
def test_qtensor_per_channel_permute(self):
for device in get_supported_device_types():
r = torch.rand(20, 10, 2, 2, dtype=torch.float, device=device) * 4 - 2
dtype = torch.qint8
scales = torch.rand(10, device=device) * 0.02 + 0.01
zero_points = torch.round(torch.rand(10, device=device) * 2 - 1).to(torch.long)
qr = torch.quantize_per_channel(r, scales, zero_points, 1, dtype)
# we can't reorder the axis
with self.assertRaises(RuntimeError):
qr.transpose(0, 1)
# but we can change memory format
qlast = qr.contiguous(memory_format=torch.channels_last)
self.assertEqual(qr.stride(), list(reversed(sorted(qr.stride()))))
self.assertNotEqual(qlast.stride(), list(reversed(sorted(qlast.stride()))))
self.assertEqual(qr.int_repr(), qlast.int_repr())
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(scales, qlast.q_per_channel_scales())
self.assertEqual(zero_points, qlast.q_per_channel_zero_points())
self.assertEqual(1, qlast.q_per_channel_axis())
self.assertEqual(qlast.dequantize(), qr.dequantize())
def test_qtensor_load_save(self):
scale = 0.2
zero_point = 10
        # storage is not accessible on cuda right now
device = "cpu"
r = torch.rand(15, 2, dtype=torch.float32, device=device) * 2
for dtype in [torch.qint8, torch.quint8, torch.qint32]:
qr = torch.quantize_per_tensor(r, scale, zero_point, dtype=dtype)
qrv = qr[:, 1]
with tempfile.NamedTemporaryFile() as f:
# Serializing and Deserializing Tensor
torch.save((qr, qrv), f)
f.seek(0)
qr2, qrv2 = torch.load(f)
self.assertEqual(qr, qr2)
self.assertEqual(qrv, qrv2)
self.assertEqual(qr2.storage().data_ptr(), qrv2.storage().data_ptr())
def test_qtensor_per_channel_load_save(self):
r = torch.rand(20, 10, dtype=torch.float) * 4 - 2
scales = torch.rand(10, dtype=torch.double) * 0.02 + 0.01
zero_points = torch.round(torch.rand(10) * 20 + 1).to(torch.long)
# quint32, cuda is not supported yet
for dtype in [torch.quint8, torch.qint8, torch.quint4x2]:
if dtype == torch.quint4x2:
zero_points = torch.ones(10, dtype=torch.float)
qr = torch.quantize_per_channel(r, scales, zero_points, 1, dtype)
with tempfile.NamedTemporaryFile() as f:
# Serializing and Deserializing Tensor
torch.save(qr, f)
f.seek(0)
qr2 = torch.load(f)
self.assertEqual(qr, qr2)
def test_qtensor_copy(self):
scale = 0.5
zero_point = 10
numel = 10
for dtype in [torch.qint8, torch.quint8, torch.qint32]:
for device in get_supported_device_types():
# copy from same scale and zero_point
q = torch._empty_affine_quantized([numel], scale=scale,
zero_point=zero_point, device=device, dtype=dtype)
q2 = torch._empty_affine_quantized([numel], scale=scale,
zero_point=zero_point, device=device, dtype=dtype)
q.copy_(q2)
self.assertEqual(q.int_repr(), q2.int_repr())
self.assertEqual(q.q_scale(), q2.q_scale())
self.assertEqual(q.q_zero_point(), q2.q_zero_point())
# copying from different scale and zero_point
new_scale = 3.2
new_zero_point = 5
q = torch._empty_affine_quantized([numel], scale=new_scale,
zero_point=new_zero_point, device=device, dtype=dtype)
# check original scale and zero_points are set correctly
self.assertEqual(q.q_scale(), new_scale)
self.assertEqual(q.q_zero_point(), new_zero_point)
q.copy_(q2)
# check scale and zero_points has been copied
self.assertEqual(q, q2)
# can't copy from quantized tensor to non-quantized tensor
r = torch.empty([numel], dtype=torch.float)
q = torch._empty_affine_quantized([numel], scale=scale, zero_point=zero_point, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "please use dequantize"):
r.copy_(q)
# copy from float doesn't support cuda
device = 'cpu'
# check copy from non-quantized to quantized
r = torch.randn([numel], dtype=torch.float, device=device)
q = torch._empty_affine_quantized([numel], scale=scale, zero_point=zero_point, dtype=dtype, device=device)
q.copy_(r)
qr = torch.quantize_per_tensor(r, scale=scale, zero_point=zero_point, dtype=dtype)
self.assertEqual(q, qr)
def test_torch_qtensor_deepcopy(self):
# cuda is not supported yet
device = "cpu"
q_int = torch.randint(0, 100, [3, 5], device=device, dtype=torch.uint8)
scale, zero_point = 2.0, 3
q = torch._make_per_tensor_quantized_tensor(q_int, scale=scale, zero_point=zero_point)
qc = deepcopy(q)
self.assertEqual(qc, q)
def test_clone(self):
numel = 10
scale = 0.5
zero_point = 10
options = itertools.product(
get_supported_device_types(),
[torch.qint8, torch.quint8, torch.qint32])
for device, dtype in options:
per_tensor_quantized = torch._empty_affine_quantized(
[numel], scale=scale, zero_point=zero_point,
device=device, dtype=dtype)
per_channel_quantized = torch._empty_per_channel_affine_quantized(
[numel],
scales=torch.tensor([scale] * numel, device=device),
zero_points=torch.tensor([zero_point] * numel, device=device),
axis=0,
device=device,
dtype=dtype
)
qtensors = [per_tensor_quantized, per_channel_quantized]
for q in qtensors:
q2 = q.clone()
# Check to make sure the scale and zero_point has been copied.
self.assertEqual(q, q2)
def test_qtensor_fill_per_tensor(self):
numel = 10
scale = 0.5
zero_point = 10
ones = torch.ones(numel).to(torch.float)
qtypes = [torch.qint8, torch.quint8, torch.qint32]
vals2fill = [-1, 1, 2**32] # positive, negative, overflow
devices = get_supported_device_types()
for qtype, val2fill, device in itertools.product(qtypes, vals2fill, devices):
ones = ones.to(device)
q_filled = torch._empty_affine_quantized(
[numel], scale=scale, zero_point=zero_point, device=device,
dtype=qtype)
q_filled.fill_(val2fill)
# reference tensor for comparing q_filled
q_ref = torch.quantize_per_tensor(ones * val2fill, scale,
zero_point, qtype)
self.assertEqual(q_filled.int_repr(), q_ref.int_repr())
self.assertEqual(q_filled.dequantize(), q_ref.dequantize())
# Make sure the scale and zero_point don't change
self.assertEqual(q_filled.q_scale(), scale)
self.assertEqual(q_filled.q_zero_point(), zero_point)
    # Adapted from test_qtensor_fill_per_tensor but for an NHWC tensor (requires 4D)
def test_qtensor_fill_per_tensor_nhwc(self):
dims = torch.randint(low=1, high=10, size=(4, )).tolist()
scale = 0.5
zero_point = 10
ones = torch.ones(dims).to(torch.float)
qtypes = [torch.qint8, torch.quint8, torch.qint32]
vals2fill = [-1, 1, 2**32] # positive, negative, overflow
memory_formats = [torch.contiguous_format, torch.channels_last]
devices = get_supported_device_types()
for qtype, val2fill, memory_format, device in itertools.product(qtypes, vals2fill, memory_formats, devices):
q_filled = torch._empty_affine_quantized(
dims, scale=scale, zero_point=zero_point, device=device,
dtype=qtype, memory_format=memory_format)
q_filled.fill_(val2fill)
# reference tensor for comparing q_filled
q_ref = torch.quantize_per_tensor(ones * val2fill, scale,
zero_point, qtype)
self.assertEqual(q_filled.int_repr(), q_ref.int_repr())
self.assertEqual(q_filled.dequantize(), q_ref.dequantize())
# Make sure the scale and zero_point don't change
self.assertEqual(q_filled.q_scale(), scale)
self.assertEqual(q_filled.q_zero_point(), zero_point)
# adapted from test_qtensor_fill_per_tensor
def test_qtensor_fill_per_channel(self):
dims = [4, 5]
axis = 0
# adding a constant to avoid too small of a scale
scales = torch.rand(dims[axis], dtype=torch.float64) + 0.1
zero_points = torch.randint(low=0, high=10, size=(dims[axis], ))
ones = torch.ones(dims).to(torch.float)
qtypes = [torch.qint8, torch.quint8, torch.qint32]
vals2fill = [-1, 1, 2**32] # positive, negative, overflow
devices = get_supported_device_types()
for qtype, val2fill, device in itertools.product(qtypes, vals2fill, devices):
scales = scales.to(device)
zero_points = zero_points.to(device)
ones = ones.to(device)
q_filled = torch._empty_per_channel_affine_quantized(
dims, scales=scales, zero_points=zero_points, device=device,
axis=axis, dtype=qtype)
q_filled.fill_(val2fill)
# reference tensor for comparing q_filled
q_ref = torch.quantize_per_channel(ones * val2fill, scales=scales,
zero_points=zero_points, axis=axis, dtype=qtype)
self.assertEqual(q_filled.int_repr(), q_ref.int_repr())
self.assertEqual(q_filled.dequantize(), q_ref.dequantize())
# Make sure the scale and zero_point don't change
self.assertEqual(q_filled.q_per_channel_scales(), scales)
self.assertEqual(q_filled.q_per_channel_zero_points(), zero_points)
# adapted from test_qtensor_fill_per_tensor
def test_qtensor_masked_fill(self):
numel = 10
scale = 0.5
zero_point = 10
ones = torch.ones(numel).to(torch.float)
types = [torch.qint8, torch.quint8, torch.qint32]
fills = [-1, 1, 2**32] # positive, negative, overflow
device = 'cpu'
ones = ones.to(device)
for qtype, fill_with in itertools.product(types, fills):
q_filled = torch._empty_affine_quantized(
[numel], scale=scale, zero_point=zero_point, device=device,
dtype=qtype)
q_filled.fill_(fill_with)
q_masked_fill = torch._empty_affine_quantized(
[numel], scale=scale, zero_point=zero_point, device=device,
dtype=qtype)
# mask fill the whole tensor, equivalent to calling plain vanilla fill
mask = torch.tensor(True)
torch.tensor(fill_with)
q_masked_fill.masked_fill_(mask, fill_with)
int_repr = torch.quantize_per_tensor(ones * fill_with, scale,
zero_point, qtype)
fill_with = int_repr.dequantize()
int_repr = int_repr.int_repr()
self.assertEqual(q_filled, q_masked_fill)
self.assertEqual(q_masked_fill.int_repr(), int_repr)
self.assertEqual(q_masked_fill.dequantize(), fill_with)
# Make sure the scale and zero_point don't change
self.assertEqual(q_masked_fill.q_scale(), scale)
self.assertEqual(q_masked_fill.q_zero_point(), zero_point)
# the above loop does the same test as test_qtensor_fill
# now we will check masked_fill for subset of indices
mask = torch.randint(0, 2, (numel, ))
mask = mask.bool()
x = torch.rand(numel)
qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=qtype)
for qtype, fill_with in itertools.product(types, fills):
q_masked_fill = qx.clone()
q_masked_fill.masked_fill_(mask, fill_with)
ref = qx.clone()
for i in range(numel):
if mask[i]:
# this assignment doesn't end up calling masked_fill, allowing us to compare the different implementations
ref[i] = fill_with
self.assertEqual(q_masked_fill, ref)
self.assertEqual(q_masked_fill.int_repr(), ref.int_repr())
self.assertEqual(q_masked_fill.dequantize(), ref.dequantize())
def test_qtensor_index_put(self):
n = 10
m = 10
x_orig = torch.rand(n, m)
indices = tuple(torch.tensor([[0, 0], [1, 1], [5, 5], [7, 3], [0, 5], [6, 9], [-1, -1]]).t())
# for the scalar tensor case, index_put routes to masked_fill
values_list = [torch.tensor(2.5), torch.rand(len(indices[0])) * 1000]
scale = 0.5
zero_point = 10
types = [torch.qint8, torch.quint8, torch.qint32]
fills = [-1, 1, 2**32] # positive, negative, overflow
for qtype, values in itertools.product(types, values_list):
x_ref = x_orig.clone()
x_ref[indices] = values.to(dtype=x_ref.dtype)
qx_ref = torch.quantize_per_tensor(x_ref, scale=scale, zero_point=zero_point, dtype=qtype)
x = x_orig.clone()
qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=qtype)
qx[indices] = values
self.assertEqual(qx_ref, qx)
# adapted from test_qtensor_fill_per_channel and test_qtensor_fill_per_tensor_nhwc
def test_qtensor_fill_per_channel_nhwc(self):
dims = torch.randint(low=1, high=10, size=(4, )).tolist()
axis = 0
# adding a constant to avoid too small of a scale
scales = torch.rand(dims[axis], dtype=torch.float64) + 0.1
zero_points = torch.randint(low=0, high=10, size=(dims[axis], ))
ones = torch.ones(dims).to(torch.float)
qtypes = [torch.qint8, torch.quint8, torch.qint32]
vals2fill = [-1, 1, 2**32] # positive, negative, overflow
memory_formats = [torch.contiguous_format, torch.channels_last]
devices = get_supported_device_types()
for qtype, val2fill, memory_format, device in itertools.product(qtypes, vals2fill, memory_formats, devices):
scales = scales.to(device)
zero_points = zero_points.to(device)
ones = ones.to(device)
q_filled = torch._empty_per_channel_affine_quantized(
dims, scales=scales, zero_points=zero_points, device=device,
axis=axis, dtype=qtype, memory_format=memory_format)
q_filled.fill_(val2fill)
# reference tensor for comparing q_filled
q_ref = torch.quantize_per_channel(ones * val2fill, scales=scales,
zero_points=zero_points, axis=axis, dtype=qtype)
self.assertEqual(q_filled.int_repr(), q_ref.int_repr())
self.assertEqual(q_filled.dequantize(), q_ref.dequantize())
# Make sure the scale and zero_point don't change
self.assertEqual(q_filled.q_per_channel_scales(), scales)
self.assertEqual(q_filled.q_per_channel_zero_points(), zero_points)
@unittest.skipIf(not TEST_CUDA, "No gpu is available.")
def test_qtensor_index_select_cuda(self):
self._test_qtensor_index_select('cuda')
def test_qtensor_index_select_cpu(self):
self._test_qtensor_index_select('cpu')
def _test_qtensor_index_select(self, device):
for quant_type in [torch.quint8, torch.qint8]:
dims = 3
index = torch.randint(dims, [1]).item()
selected = torch.randperm(dims)[:2].to(device)
scale = 1
zp = 0
x = torch.randn([3] * dims, device=device) * 10
x_selected = torch.index_select(x, index, selected)
x_selected_quantized = torch.quantize_per_tensor(x_selected, scale, zp, quant_type)
x_quantized = torch.quantize_per_tensor(x, scale, zp, quant_type)
x_quantized_selected = torch.index_select(x_quantized, index, selected)
self.assertEqual(x_quantized_selected, x_selected_quantized)
def test_qtensor_view(self):
scale, zero_point, dtype = 1.0, 2, torch.uint8
for device in get_supported_device_types():
q_int = torch.randint(0, 100, [1, 2, 3], device=device, dtype=dtype)
q = torch._make_per_tensor_quantized_tensor(q_int, scale=scale, zero_point=zero_point)
q2 = q.view(1, 3, 2)
self.assertEqual(q.numel(), q2.numel())
# testing -1
self.assertEqual(q, q2.view(1, -1, 3))
a_int = torch.randint(0, 100, [1, 2, 3, 4], device=device, dtype=dtype)
a = torch._make_per_tensor_quantized_tensor(a_int, scale=scale, zero_point=zero_point)
b = a.transpose(1, 2) # swaps 2nd and 3rd dimension
c = a.view(1, 3, 2, 4) # does not change tensor layout in memory
self.assertEqual(b.size(), c.size())
self.assertEqual(b.q_scale(), c.q_scale())
self.assertEqual(b.q_zero_point(), c.q_zero_point())
self.assertNotEqual(b.stride(), c.stride())
# size is the same but the underlying data is different
self.assertNotEqual(b.int_repr(), c.int_repr())
# torch.equal is not supported for the cuda backend
if device == 'cpu':
self.assertFalse(torch.equal(b, c))
            # a case where we can't view a non-contiguous Tensor
a_int = torch.randint(0, 100, [1, 2, 3, 4], device=device, dtype=dtype)
a = torch._make_per_tensor_quantized_tensor(a_int, scale=scale, zero_point=zero_point)
b = a.transpose(1, 2) # swaps 2nd and 3rd dimension
err_str = "view size is not compatible with input tensor's size and stride*"
with self.assertRaisesRegex(RuntimeError, err_str):
b.view(1, 4, 2, 3)
# view on contiguous tensor is fine
b.contiguous().view(1, 4, 2, 3)
def test_qtensor_resize(self):
for device in get_supported_device_types():
scale, zero_point, dtype = 1.0, 2, torch.uint8
sizes1 = [1, 2, 3, 4]
sizes2 = [1 * 2, 3 * 4]
sizes3 = [1, 2 * 3, 4]
sizes4 = [1 * 2 * 3 * 4]
sizes5 = [1, 2, 1, 3, 1, 4]
q1_int = torch.randint(0, 100, sizes1, dtype=dtype, device=device)
q1 = torch._make_per_tensor_quantized_tensor(q1_int, scale=scale, zero_point=zero_point)
q2 = q1.resize(*sizes2)
q3 = q2.resize(*sizes3)
q4 = q3.resize(*sizes4)
q5 = q4.resize(*sizes5)
self.assertEqual(q1.numel(), q2.numel())
self.assertEqual(q1.numel(), q3.numel())
self.assertEqual(q1.numel(), q4.numel())
self.assertEqual(q1.numel(), q5.numel())
# Compare original and post-transpose
a_int = torch.randint(0, 100, sizes1, dtype=dtype, device=device)
a = torch._make_per_tensor_quantized_tensor(a_int, scale=scale, zero_point=zero_point)
b = a.transpose(1, 2) # swaps 2nd and 3rd dimension
c = b.resize(*sizes1) # Change the sizes back to the original
self.assertEqual(a.size(), c.size())
self.assertEqual(b.q_scale(), c.q_scale())
self.assertEqual(b.q_zero_point(), c.q_zero_point())
self.assertNotEqual(b.stride(), c.stride())
# size is the same but the underlying data is different
self.assertNotEqual(b.int_repr(), c.int_repr())
# torch.equal is not supported for the cuda backend
if device == 'cpu':
self.assertFalse(torch.equal(b, c))
# Throws an error if numel is wrong
q1_int = torch.randint(0, 100, sizes1, dtype=dtype, device=device)
q1 = torch._make_per_tensor_quantized_tensor(q1_int, scale=scale, zero_point=zero_point)
err_str = "requested resize to*"
with self.assertRaisesRegex(RuntimeError, err_str):
q2 = q1.resize(*sizes1[:-1])
# resize on both contiguous and non-contiguous tensor should be fine
q3 = q1.resize(*sizes2)
q4 = q1.contiguous().resize(*sizes2)
def test_qtensor_reshape(self):
scale, zero_point, dtype = 1.0, 2, torch.uint8
for device in get_supported_device_types():
q_int = torch.randint(0, 100, [3, 5], dtype=dtype, device=device)
q = torch._make_per_tensor_quantized_tensor(q_int, scale=scale, zero_point=zero_point)
q2 = q.reshape([15])
self.assertEqual(q.numel(), q2.numel())
self.assertEqual(q2.size(), [15])
# testing -1
self.assertEqual(q, q2.reshape([3, -1]))
a_int = torch.randint(0, 100, [1, 2, 3, 4], dtype=dtype, device=device)
a = torch._make_per_tensor_quantized_tensor(a_int, scale=scale, zero_point=zero_point)
b = a.transpose(1, 2) # swaps 2nd and 3rd dimension
c = a.reshape(1, 3, 2, 4) # does not change tensor layout
self.assertEqual(b.size(), c.size())
self.assertEqual(b.q_scale(), c.q_scale())
self.assertEqual(b.q_zero_point(), c.q_zero_point())
self.assertNotEqual(b.stride(), c.stride())
self.assertNotEqual(b.int_repr(), c.int_repr())
# torch.equal is not supported for the cuda backend
if device == 'cpu':
self.assertFalse(torch.equal(b, c))
# we can use reshape for non-contiguous Tensor
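# (unlike view, reshape is free to return a copy when the requested shape is not
# view-compatible with the tensor's strides, so no error is expected here)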
a_int = torch.randint(0, 100, [1, 2, 3, 4], dtype=dtype, device=device)
a = torch._make_per_tensor_quantized_tensor(a_int, scale=scale, zero_point=zero_point)
b = a.transpose(1, 2) # swaps 2nd and 3rd dimension
c = b.reshape(1, 4, 2, 3)
def test_qtensor_unsqueeze(self):
for device in get_supported_device_types():
x = torch.randn((1, 3, 4), device=device)
qx = torch.quantize_per_tensor(x, scale=1.0, zero_point=0, dtype=torch.quint8)
qy = qx.unsqueeze(2)
self.assertEqual(qy.size(), (1, 3, 1, 4))
qy = qy.squeeze(2)
self.assertEqual(qy.size(), qx.size())
# Per channel qtensor
scales = torch.tensor([1.0], device=device)
zero_points = torch.tensor([0], device=device)
qx = torch.quantize_per_channel(x, scales=scales, zero_points=zero_points, dtype=torch.quint8, axis=0)
qy = qx.unsqueeze(0)
self.assertEqual(qy.size(), (1, 1, 3, 4))
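# unsqueeze(0) prepends a dimension, so the per-channel axis is expected to shift from 0 to 1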
self.assertEqual(qy.q_per_channel_axis(), 1)
qz = qy.squeeze(0)
self.assertEqual(qz.size(), x.size())
self.assertEqual(qz.q_per_channel_axis(), 0)
with self.assertRaisesRegex(RuntimeError, "Squeeze is only possible on non-axis dimension for Per-Channel"):
qz = qy.squeeze(1)
# squeeze without dim specified
x = torch.randn((3, 1, 2, 1, 4), device=device)
scales = torch.tensor([1.0, 1.0], device=device)
zero_points = torch.tensor([0, 0], device=device)
qx = torch.quantize_per_channel(x, scales=scales, zero_points=zero_points, dtype=torch.quint8, axis=2)
qz = qx.squeeze()
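# squeeze() drops both size-1 dims (1 and 3); only dim 1 precedes the channel axis,
# so the axis is expected to move from 2 to 1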
self.assertEqual(qz.size(), (3, 2, 4))
self.assertEqual(qz.q_per_channel_axis(), 1)
with self.assertRaisesRegex(RuntimeError, "Squeeze is only possible on non-axis dimension for Per-Channel"):
qz = qy.squeeze()
def test_repeat(self):
scale, zero_point, dtype = 1.0, 2, torch.uint8
for device in get_supported_device_types():
q_int = torch.randint(0, 100, [3], dtype=dtype, device=device)
q_int_repeat = q_int.repeat(4, 2)
q_ref = torch._make_per_tensor_quantized_tensor(q_int_repeat, scale=scale, zero_point=zero_point)
q = torch._make_per_tensor_quantized_tensor(q_int, scale=scale, zero_point=zero_point)
q_repeat = q.repeat(4, 2)
self.assertEqual(q_ref, q_repeat)
def test_qscheme_pickle(self):
f = Foo()
buf = io.BytesIO()
torch.save(f, buf)
buf.seek(0)
f2 = torch.load(buf)
self.assertEqual(f2.qscheme, torch.per_tensor_symmetric)
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=2, max_dims=4,
min_side=1, max_side=10),
qparams=hu.qparams()),
reduce_range=st.booleans()
)
@unittest.skip(
"this is broken without changes to any relevant code, "
"we need to remove hypothesis testing in CI")
def test_choose_qparams(self, X, reduce_range):
X, (scale, zero_point, torch_type) = X
X = torch.from_numpy(X)
X_scale, X_zp = _calculate_dynamic_qparams(X, torch.quint8, reduce_range=reduce_range)
qparams = torch._choose_qparams_per_tensor(X, reduce_range)
np.testing.assert_array_almost_equal(X_scale, qparams[0], decimal=3)
self.assertEqual(X_zp, qparams[1])
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_cuda_quantization_does_not_pin_memory(self):
# Context - https://github.com/pytorch/pytorch/issues/41115
x = torch.randn(3)
self.assertEqual(x.is_pinned(), False)
q_int = torch.randint(0, 100, [1, 2, 3], device="cuda", dtype=torch.uint8)
q = torch._make_per_tensor_quantized_tensor(q_int, scale=0.1, zero_point=0)
x = torch.randn(3)
self.assertEqual(x.is_pinned(), False)
# There's no way to actually pin the memory of a quantized tensor
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_quant_pin_memory(self):
x = torch.randn(3).pin_memory()
self.assertEqual(x.is_pinned(), True)
x_q = torch.quantize_per_tensor(x, 1, 0, torch.quint8)
self.assertEqual(x_q.is_pinned(), False)
x_pin = torch.empty_quantized([3], x_q, pin_memory=True, dtype=torch.quint8)
self.assertEqual(x_pin.is_pinned(), False)
self.assertRaises(RuntimeError, lambda: x_q.pin_memory())
def test_fp16_saturate_op(self):
x = torch.ones(5, 5, dtype=torch.float32) * 65532
x[0] = torch.ones(5) * -65532
# the representable range of fp16 is [-65504, +65504]
ref = torch.ones(5, 5) * 65504
ref[0] = torch.ones(5) * -65504
y = torch._saturate_weight_to_fp16(x)
self.assertEqual(y, ref)
def test_choose_qparams_optimized(self):
for bit_width in [4, 2]:
x = torch.randn(64, dtype=torch.float)
y = torch.choose_qparams_optimized(x, numel=64, n_bins=200, ratio=0.16, bit_width=bit_width)
ref = param_search_greedy(x.numpy(), bit_rate=bit_width)
self.assertEqual(y[0].numpy(), ref[0])
self.assertEqual(y[1].numpy(), ref[1])
def _test_pickle_checkpoint_qtensor(self, device):
with TemporaryFileName() as fname:
class M(torch.jit.ScriptModule):
__constants__ = ['fname']
def __init__(self):
super(M, self).__init__()
self.fname = fname
@torch.jit.script_method
def forward(self, x, y):
torch.save((x, y), self.fname)
return y
q = torch.quantize_per_tensor(
torch.rand(2, 3, dtype=torch.float), scale=0.1, zero_point=10, dtype=torch.quint8).to(device)
qc = torch.quantize_per_channel(
torch.rand(2, 3, dtype=torch.float),
scales=torch.tensor([0.1, 0.5, 0.01]),
zero_points=torch.tensor([10, 0, 20]),
axis=1, dtype=torch.quint8).to(device)
m = M()
m(q, qc)
with open(fname, "rb") as handle:
loaded_q, loaded_qc = torch.load(fname)
self.assertEqual(loaded_q, q)
self.assertEqual(loaded_qc, qc)
def test_pickle_checkpoint_qtensor(self):
self._test_pickle_checkpoint_qtensor('cpu')
def test_jit_serialization(self):
class SimpleQTensor(torch.jit.ScriptModule):
def __init__(self, per_channel):
super(SimpleQTensor, self).__init__()
x = torch.rand(5, 5).float()
if not per_channel:
x_q = torch.quantize_per_tensor(x, 0.2, 10, torch.quint8)
else:
s = torch.rand(5, dtype=torch.float64) + 0.1
zp = torch.randint(5, 15, (5,))
x_q = torch.quantize_per_channel(x, s, zp, 1, torch.quint8)
self.register_buffer('x', x_q)
@torch.jit.script_method
def forward(self):
return self.x
for per_channel in [False, True]:
model = SimpleQTensor(per_channel)
buffer = io.BytesIO()
torch.jit.save(model, buffer)
buffer.seek(0)
model_loaded = torch.jit.load(buffer)
self.assertEqual(model_loaded(), model())
def test_bfp16_quantize(self):
X = torch.randn(5, 10)
quantized_X = X.to(torch.bfloat16)
dequantized_X = quantized_X.to(torch.float32)
torch.testing.assert_allclose(X, dequantized_X, rtol=1e-4, atol=5e-3)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_quantization.py TESTNAME\n\n"
"instead.")
|
pytorch-master
|
test/quantization/core/test_quantized_tensor.py
|
# Owner(s): ["oncall: quantization"]
import torch
from torch.testing._internal.common_utils import TestCase
from torch.ao.quantization.utils import get_fqn_to_example_inputs
class TestUtils(TestCase):
def _test_get_fqn_to_example_inputs(self, M, example_inputs, expected_fqn_to_dim):
m = M().eval()
fqn_to_example_inputs = get_fqn_to_example_inputs(m, example_inputs)
for fqn, expected_dims in expected_fqn_to_dim.items():
assert fqn in expected_fqn_to_dim
example_inputs = fqn_to_example_inputs[fqn]
for example_input, expected_dim in zip(example_inputs, expected_dims):
assert example_input.dim() == expected_dim
def test_get_fqn_to_example_inputs_simple(self):
class Sub(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = torch.nn.Linear(5, 5)
self.linear2 = torch.nn.Linear(5, 5)
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
return x
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = torch.nn.Linear(5, 5)
self.linear2 = torch.nn.Linear(5, 5)
self.sub = Sub()
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
x = self.sub(x)
return x
expected_fqn_to_dim = {
"": (2,),
"linear1": (2,),
"linear2": (2,),
"sub": (2,),
"sub.linear1": (2,),
"sub.linear2": (2,)
}
example_inputs = (torch.rand(1, 5),)
self._test_get_fqn_to_example_inputs(M, example_inputs, expected_fqn_to_dim)
def test_get_fqn_to_example_inputs_default_kwargs(self):
""" Test that we can get example inputs for functions with default keyword arguments
"""
class Sub(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = torch.nn.Linear(5, 5)
self.linear2 = torch.nn.Linear(5, 5)
def forward(self, x, key1=torch.rand(1), key2=torch.rand(1)):
x = self.linear1(x)
x = self.linear2(x)
return x
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = torch.nn.Linear(5, 5)
self.linear2 = torch.nn.Linear(5, 5)
self.sub = Sub()
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
# only override `key2`, `key1` will use default
x = self.sub(x, key2=torch.rand(1, 2))
return x
expected_fqn_to_dim = {
"": (2,),
"linear1": (2,),
"linear2": (2,),
# second arg is `key1`, which is using default argument
# third arg is `key2`, override by callsite
"sub": (2, 1, 2),
"sub.linear1": (2,),
"sub.linear2": (2,)
}
example_inputs = (torch.rand(1, 5),)
self._test_get_fqn_to_example_inputs(M, example_inputs, expected_fqn_to_dim)
def test_get_fqn_to_example_inputs_complex_args(self):
""" Test that we can record complex example inputs such as lists and dicts
"""
class Sub(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = torch.nn.Linear(5, 5)
self.linear2 = torch.nn.Linear(5, 5)
def forward(self, x, list_arg, dict_arg):
x = self.linear1(x)
x = self.linear2(x)
return x
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = torch.nn.Linear(5, 5)
self.linear2 = torch.nn.Linear(5, 5)
self.sub = Sub()
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
x = self.sub(x, [x], {"3": x})
return x
example_inputs = (torch.rand(1, 5),)
m = M().eval()
fqn_to_example_inputs = get_fqn_to_example_inputs(m, example_inputs)
assert "sub" in fqn_to_example_inputs
assert isinstance(fqn_to_example_inputs["sub"][1], list)
assert isinstance(fqn_to_example_inputs["sub"][2], dict) and \
"3" in fqn_to_example_inputs["sub"][2]
|
pytorch-master
|
test/quantization/core/test_utils.py
|
# Owner(s): ["oncall: quantization"]
import torch
import torch.nn as nn
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
import torch.nn.quantized._reference as nnqr
import torch.ao.quantization
from torch.ao.quantization import (
get_default_static_quant_module_mappings,
default_float_qparams_observer,
PerChannelMinMaxObserver,
)
from torch.package import PackageExporter, PackageImporter
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
prepare_dynamic,
_make_conv_test_input,
skipIfNoFBGEMM,
lengths_to_offsets
)
from torch.testing._internal.common_quantized import (
_calculate_dynamic_qparams,
override_quantized_engine,
override_qengines,
qengine_is_qnnpack,
qengine_is_onednn,
)
from hypothesis import assume, given
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
import copy
import io
import numpy as np
import itertools
"""
Note that tests in this file are just API test, to make sure we wrapped the
quantized operator implementations correctly in the user facing APIs, these are
not correctness test for the underlying quantized operators. For correctness
test please see `test/quantization/test_quantized_op.py`.
"""
class TestStaticQuantizedModule(QuantizationTestCase):
def test_relu(self):
relu_module = nn.ReLU()
relu6_module = nnq.ReLU6()
x = torch.arange(-10, 10, dtype=torch.float)
y_ref = torch.relu(x)
y6_ref = torch.nn.modules.ReLU6()(x)
qx = torch.quantize_per_tensor(x, 1.0, 0, dtype=torch.qint32)
qy = relu_module(qx)
qy6 = relu6_module(qx)
self.assertEqual(y_ref, qy.dequantize(),
msg="ReLU module API failed")
self.assertEqual(y6_ref, qy6.dequantize(),
msg="ReLU6 module API failed")
@override_qengines
def test_linear_api(self):
"""test API functionality for nn.quantized.linear and nn.intrinsic.quantized.linear_relu"""
options = itertools.product(
[1, 5],
[16, 32],
[4, 8],
[True, False],
[True, False],
[True, False])
for (batch_size, in_features, out_features, use_bias,
use_fused, per_channel) in options:
self._test_linear_api_impl(
batch_size, in_features, out_features, use_bias, use_fused,
per_channel)
def _test_linear_api_impl(self, batch_size, in_features, out_features, use_bias, use_fused, per_channel):
if torch.backends.quantized.engine == 'qnnpack':
per_channel = False
# use_fused -> quantized class
class_map = {
True: nniq.LinearReLU,
False: nnq.Linear,
}
W = torch.rand(out_features, in_features).float()
if per_channel:
scale_tensor = torch.ones(out_features, dtype=torch.double)
zero_point_tensor = torch.zeros(out_features, dtype=torch.long)
for i in range(len(scale_tensor)):
scale_tensor[i] = (i + 1.0) / 255.0
W_q = torch.quantize_per_channel(W, scales=scale_tensor,
zero_points=zero_point_tensor,
axis=0, dtype=torch.qint8)
else:
# ONEDNN only supports symmetric quantization of weights
W_zp = 0 if qengine_is_onednn() else 4
W_q = torch.quantize_per_tensor(W, 0.1, W_zp, torch.qint8)
X = torch.rand(batch_size, in_features).float()
X_q = torch.quantize_per_tensor(X, 0.2, 10, torch.quint8)
B = torch.rand(out_features).float() if use_bias else None
scale = 0.5
zero_point = 3
qlinear = class_map[use_fused](in_features, out_features)
qlinear_copy = copy.deepcopy(qlinear)
# set random quantized weight and bias before test torch scriptable
qlinear_copy.set_weight_bias(W_q, B)
self.checkScriptable(qlinear_copy, [[X_q]], check_save_load=True)
# Run module with default-initialized parameters.
# This tests that the constructor is correct.
qlinear(X_q)
qlinear.set_weight_bias(W_q, B)
# Simple round-trip test to ensure weight()/set_weight() API
self.assertEqual(qlinear.weight(), W_q, atol=1e-5, rtol=0)
# testing packed param implementation
qlinear.scale = float(scale)
qlinear.zero_point = int(zero_point)
Z_q = qlinear(X_q)
# Check if the module implementation matches calling the
# ops directly
W_pack = qlinear._packed_params._packed_params
if use_fused:
Z_ref = torch.ops.quantized.linear_relu(X_q, W_pack, scale, zero_point)
else:
Z_ref = torch.ops.quantized.linear(X_q, W_pack, scale, zero_point)
self.assertEqual(Z_ref, Z_q)
self.assertTrue(
("QuantizedLinearReLU" if use_fused else "QuantizedLinear") in str(qlinear))
# Test serialization of quantized Linear Module using state_dict
model_dict = qlinear.state_dict()
b = io.BytesIO()
torch.save(model_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
for key in model_dict:
if isinstance(model_dict[key], torch._C.ScriptObject):
assert isinstance(loaded_dict[key], torch._C.ScriptObject)
w_model, b_model = torch.ops.quantized.linear_unpack(model_dict[key])
w_loaded, b_loaded = torch.ops.quantized.linear_unpack(loaded_dict[key])
self.assertEqual(w_model, w_loaded)
self.assertEqual(b_model, b_loaded)
else:
self.assertEqual(model_dict[key], loaded_dict[key])
loaded_qlinear = class_map[use_fused](
in_features, out_features)
loaded_qlinear.load_state_dict(loaded_dict)
linear_unpack = torch.ops.quantized.linear_unpack
self.assertEqual(linear_unpack(qlinear._packed_params._packed_params),
linear_unpack(loaded_qlinear._packed_params._packed_params))
self.assertEqual(qlinear.scale, loaded_qlinear.scale)
self.assertEqual(qlinear.zero_point, loaded_qlinear.zero_point)
# scripting will add __overloads__ to __dict__, which is why we script a copy
# to be able to do the check in the next line
self.checkScriptable(copy.deepcopy(loaded_qlinear), [[X_q]], check_save_load=True)
self.assertTrue(dir(qlinear) == dir(loaded_qlinear))
self.assertEqual(qlinear._weight_bias(), loaded_qlinear._weight_bias())
self.assertEqual(qlinear._weight_bias(), torch.ops.quantized.linear_unpack(qlinear._packed_params._packed_params))
Z_q2 = loaded_qlinear(X_q)
self.assertEqual(Z_q, Z_q2)
# Test serialization
b = io.BytesIO()
torch.save(qlinear, b)
b.seek(0)
loaded = torch.load(b)
self.assertEqual(qlinear.weight(), loaded.weight())
self.assertEqual(qlinear.scale, loaded.scale)
self.assertEqual(qlinear.zero_point, loaded.zero_point)
# Test torch.package
buffer = io.BytesIO()
with PackageExporter(buffer) as pe:
pe.save_pickle("module", "qlinear.pkl", qlinear)
buffer.seek(0)
importer = PackageImporter(buffer)
loaded_from_package = importer.load_pickle("module", "qlinear.pkl")
self.assertEqual(qlinear.weight(), loaded_from_package.weight())
self.assertEqual(qlinear.scale, loaded_from_package.scale)
self.assertEqual(qlinear.zero_point, loaded_from_package.zero_point)
for name, module in loaded_from_package.named_modules():
# noop, just make sure attribute "_modules" is restored correctly during torch.package import
assert name is not None
# Test copy and deepcopy
copied_linear = copy.copy(qlinear)
self.assertEqual(copied_linear.bias(), qlinear.bias())
self.assertEqual(copied_linear.scale, qlinear.scale)
self.assertEqual(copied_linear.zero_point,
qlinear.zero_point)
Y_copied = copied_linear(X_q)
np.testing.assert_array_almost_equal(
Z_q.int_repr().numpy(), Y_copied.int_repr().numpy(), decimal=0)
deepcopied_linear = copy.deepcopy(qlinear)
self.assertEqual(deepcopied_linear.bias(), qlinear.bias())
self.assertEqual(deepcopied_linear.scale, qlinear.scale)
self.assertEqual(deepcopied_linear.zero_point,
qlinear.zero_point)
Y_deepcopied = copied_linear(X_q)
np.testing.assert_array_almost_equal(
Z_q.int_repr().numpy(), Y_deepcopied.int_repr().numpy(), decimal=0)
# Test JIT
self.checkScriptable(qlinear, [[X_q]], check_save_load=True)
# Make sure `from_float` works for all linear variants
modules_under_test = [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear]
for mut in modules_under_test:
# Test from_float.
float_linear = mut(in_features, out_features).float()
float_linear.qconfig = torch.ao.quantization.default_qconfig
torch.ao.quantization.prepare(float_linear, inplace=True)
float_linear(X.float())
# Sequential allows swapping using "convert".
quantized_float_linear = torch.nn.Sequential(float_linear)
quantized_float_linear = torch.ao.quantization.convert(quantized_float_linear, inplace=True)
# Smoke test to make sure the module actually runs
quantized_float_linear(X_q)
# Smoke test extra_repr
self.assertTrue('QuantizedLinear' in str(quantized_float_linear))
def test_quant_dequant_api(self):
r = torch.tensor([[1., -1.], [1., -1.]], dtype=torch.float)
scale, zero_point, dtype = 1.0, 2, torch.qint8
# testing Quantize API
qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)
quant_m = nnq.Quantize(scale, zero_point, dtype)
qr2 = quant_m(r)
self.assertEqual(qr, qr2)
# testing Dequantize API
rqr = qr.dequantize()
dequant_m = nnq.DeQuantize()
rqr2 = dequant_m(qr2)
self.assertEqual(rqr, rqr2)
def _test_conv_api_impl(
self, module_name, qconv_module, conv_module, batch_size,
in_channels_per_group, input_feature_map_size, out_channels_per_group,
groups, kernel_size, stride, padding, padding_mode, dilation,
X_scale, X_zero_point, W_scale, W_zero_point, Y_scale, Y_zero_point,
use_bias, use_fused, use_channelwise):
for i in range(len(kernel_size)):
assume(input_feature_map_size[i] + 2 * padding[i]
>= dilation[i] * (kernel_size[i] - 1) + 1)
in_channels = in_channels_per_group * groups
out_channels = out_channels_per_group * groups
(X, X_q, W, W_q, b) = _make_conv_test_input(
batch_size, in_channels_per_group, input_feature_map_size,
out_channels_per_group, groups, kernel_size, X_scale, X_zero_point,
W_scale, W_zero_point, use_bias, use_channelwise)
# Make sure the weight shape is correct
self.assertTrue(qconv_module.weight().shape == W_q.shape)
qconv_module.set_weight_bias(W_q, b)
qconv_module.scale = Y_scale
qconv_module.zero_point = Y_zero_point
if use_fused:
conv_module[0].weight.data = W
if use_bias:
conv_module[0].bias.data = b
else:
conv_module.weight.data = W
if use_bias:
conv_module.bias.data = b
# Test members
self.assertTrue(module_name == qconv_module._get_name(), module_name + " " + qconv_module._get_name())
self.assertTrue(hasattr(qconv_module, '_packed_params'))
self.assertTrue(hasattr(qconv_module, 'scale'))
self.assertTrue(hasattr(qconv_module, 'zero_point'))
# Test properties
self.assertEqual(W_q, qconv_module.weight())
if use_bias:
self.assertEqual(b, qconv_module.bias())
self.assertEqual(Y_scale, qconv_module.scale)
self.assertEqual(Y_zero_point, qconv_module.zero_point)
# Test forward
Y_exp = conv_module(X)
Y_exp = torch.quantize_per_tensor(
Y_exp, scale=Y_scale, zero_point=Y_zero_point, dtype=torch.quint8)
Y_act = qconv_module(X_q)
# Make sure the results match
# assert_array_almost_equal compares using the following formula:
# abs(desired-actual) < 1.5 * 10**(-decimal)
# (https://docs.scipy.org/doc/numpy/reference/generated/numpy.testing.assert_almost_equal.html)
# We use decimal = 0 to ignore off-by-1 differences between reference
# and test. Off-by-1 differences arise due to the order of round and
# zero_point addition operation, i.e., if addition followed by round is
# used by reference and round followed by addition is used by test, the
# results may differ by 1.
# For example, the result of round(2.5) + 1 is 3 while round(2.5 + 1) is
# 4 assuming the rounding mode is round-to-nearest, ties-to-even.
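# As a quick illustration (not part of the test logic):
#   torch.round(torch.tensor(2.5)) + 1 -> 3.0  (2.5 rounds down to 2 under ties-to-even)
#   torch.round(torch.tensor(2.5) + 1) -> 4.0  (3.5 rounds up to 4)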
# skip numerics checking for reference module
np.testing.assert_array_almost_equal(
Y_exp.int_repr().numpy(), Y_act.int_repr().numpy(), decimal=0)
# Test serialization of quantized Conv Module using state_dict
model_dict = qconv_module.state_dict()
self.assertEqual(model_dict['weight'], W_q)
if use_bias:
self.assertEqual(model_dict['bias'], b)
bytes_io = io.BytesIO()
torch.save(model_dict, bytes_io)
bytes_io.seek(0)
loaded_dict = torch.load(bytes_io)
for key in loaded_dict:
self.assertEqual(model_dict[key], loaded_dict[key])
loaded_qconv_module = type(qconv_module)(
in_channels, out_channels, kernel_size, stride, padding, dilation,
groups, use_bias, padding_mode=padding_mode)
loaded_qconv_module.load_state_dict(loaded_dict)
self.assertTrue(dir(loaded_qconv_module) == dir(qconv_module))
self.assertTrue(module_name == loaded_qconv_module._get_name())
self.assertTrue(hasattr(loaded_qconv_module, '_packed_params'))
self.assertTrue(hasattr(loaded_qconv_module, '_weight_bias'))
self.assertEqual(qconv_module.weight(), loaded_qconv_module.weight())
if use_bias:
self.assertEqual(qconv_module.bias(), loaded_qconv_module.bias())
self.assertEqual(qconv_module.scale, loaded_qconv_module.scale)
self.assertEqual(qconv_module.zero_point,
loaded_qconv_module.zero_point)
Y_loaded = loaded_qconv_module(X_q)
np.testing.assert_array_almost_equal(
Y_exp.int_repr().numpy(), Y_loaded.int_repr().numpy(), decimal=0)
# Test serialization
b = io.BytesIO()
torch.save(qconv_module, b)
b.seek(0)
loaded_conv = torch.load(b)
self.assertEqual(loaded_conv.bias(), qconv_module.bias())
self.assertEqual(loaded_conv.scale, qconv_module.scale)
self.assertEqual(loaded_conv.zero_point,
qconv_module.zero_point)
# Test copy and deepcopy
copied_conv = copy.copy(qconv_module)
self.assertEqual(copied_conv.bias(), qconv_module.bias())
self.assertEqual(copied_conv.scale, qconv_module.scale)
self.assertEqual(copied_conv.zero_point,
qconv_module.zero_point)
Y_copied = copied_conv(X_q)
np.testing.assert_array_almost_equal(
Y_exp.int_repr().numpy(), Y_copied.int_repr().numpy(), decimal=0)
deepcopied_conv = copy.deepcopy(qconv_module)
self.assertEqual(deepcopied_conv.bias(), qconv_module.bias())
self.assertEqual(deepcopied_conv.scale, qconv_module.scale)
self.assertEqual(deepcopied_conv.zero_point,
qconv_module.zero_point)
Y_deepcopied = copied_conv(X_q)
np.testing.assert_array_almost_equal(
Y_exp.int_repr().numpy(), Y_deepcopied.int_repr().numpy(), decimal=0)
# JIT testing
self.checkScriptable(
qconv_module, [[X_q]],
check_save_load=True)
# Test from_float
fused_conv_module = torch.nn.intrinsic._FusedModule(conv_module)
fused_conv_module.qconfig = torch.ao.quantization.default_qconfig
torch.ao.quantization.prepare(fused_conv_module, inplace=True)
fused_conv_module(X.float())
converted_qconv_module = fused_conv_module
reference_mapping = get_default_static_quant_module_mappings()
reference_mapping[type(conv_module)] = type(qconv_module)
torch.ao.quantization.convert(converted_qconv_module, mapping=reference_mapping, inplace=True)
# Smoke test to make sure the module actually runs
if use_bias:
if use_fused:
self.assertEqual(conv_module[0].bias,
converted_qconv_module[0].bias())
else:
self.assertEqual(conv_module.bias,
converted_qconv_module[0].bias())
# Smoke test extra_repr
self.assertTrue(module_name == converted_qconv_module[0]._get_name())
@override_qengines
def test_conv1d_api(self):
options = itertools.product(
["zeros", "reflect"], # pad_mode
[True, False], # use_bias
[True, False], # use_fused
[True, False], # use_channelwise
)
for pad_mode, use_bias, use_fused, use_channelwise in options:
if torch.backends.quantized.engine == "qnnpack":
use_channelwise = False
batch_size = 2
in_channels_per_group = 2
length = 8
out_channels_per_group = 2
groups = 3
kernel = 3
stride = 2
pad = 1
dilation = 1
# Tests the correctness of the conv1d module.
in_channels = in_channels_per_group * groups
out_channels = out_channels_per_group * groups
input_feature_map_size = (length,)
kernel_size = (kernel, )
stride = (stride, )
pad = (pad, )
dilation = (dilation, )
X_scale = 1.3
X_zero_point = 2
W_scale = [0.5]
W_zero_point = [0] if qengine_is_onednn() else [3]
Y_scale = 5.0
Y_zero_point = 4
if torch.backends.quantized.engine == 'qnnpack':
use_channelwise = False
# use_fused -> quantized class
class_map = {
True: (nniq.ConvReLU1d, "QuantizedConvReLU1d"),
False: (nnq.Conv1d, "QuantizedConv1d")
}
qconv_cls, module_name = class_map[use_fused]
qconv_module = qconv_cls(
in_channels, out_channels, kernel, stride, pad,
dilation, groups, use_bias, padding_mode=pad_mode
)
conv_module = nn.Conv1d(
in_channels, out_channels, kernel, stride, pad,
dilation, groups, use_bias, padding_mode=pad_mode)
if use_fused:
relu_module = nn.ReLU()
conv_module = nni.ConvReLU1d(conv_module, relu_module)
conv_module = conv_module.float()
self._test_conv_api_impl(
module_name, qconv_module, conv_module, batch_size,
in_channels_per_group, input_feature_map_size,
out_channels_per_group, groups, kernel_size, stride, pad, pad_mode,
dilation, X_scale, X_zero_point, W_scale, W_zero_point, Y_scale,
Y_zero_point, use_bias, use_fused, use_channelwise)
@override_qengines
def test_conv2d_api(self):
options = itertools.product(
["zeros", "reflect"], # pad_mode
[True, False], # use_bias
[True, False], # use_fused
[True, False], # use_channelwise
)
for pad_mode, use_bias, use_fused, use_channelwise in options:
if torch.backends.quantized.engine == "qnnpack":
use_channelwise = False
batch_size = 2
in_channels_per_group = 2
H = 8
W = 8
out_channels_per_group = 2
groups = 3
kernel_h = 3
kernel_w = 3
stride_h = 2
stride_w = 2
pad_h = 1
pad_w = 1
dilation = 1
# Tests the correctness of the conv2d module.
in_channels = in_channels_per_group * groups
out_channels = out_channels_per_group * groups
input_feature_map_size = (H, W)
kernel_size = (kernel_h, kernel_w)
stride = (stride_h, stride_w)
padding = (pad_h, pad_w)
dilation = (dilation, dilation)
X_scale = 1.3
X_zero_point = 2
W_scale = [0.5]
W_zero_point = [0] if qengine_is_onednn() else [3]
Y_scale = 5.0
Y_zero_point = 4
# use_fused -> quantized class
class_map = {
True: (nniq.ConvReLU2d, "QuantizedConvReLU2d"),
False: (nnq.Conv2d, "QuantizedConv2d")
}
qconv_cls, module_name = class_map[use_fused]
qconv_module = qconv_cls(
in_channels, out_channels, kernel_size, stride, padding,
dilation, groups, use_bias, padding_mode=pad_mode
)
conv_module = nn.Conv2d(
in_channels, out_channels, kernel_size, stride, padding,
dilation, groups, use_bias, padding_mode=pad_mode)
if use_fused:
relu_module = nn.ReLU()
conv_module = nni.ConvReLU2d(conv_module, relu_module)
conv_module = conv_module.float()
self._test_conv_api_impl(
module_name, qconv_module, conv_module, batch_size,
in_channels_per_group, input_feature_map_size,
out_channels_per_group, groups, kernel_size, stride, padding,
pad_mode, dilation, X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, use_fused, use_channelwise)
@skipIfNoFBGEMM
def test_conv3d_api(self):
options = itertools.product(
[True, False], # use_bias
[True, False], # use_fused
[True, False], # use_channelwise
)
for use_bias, use_fused, use_channelwise in options:
if torch.backends.quantized.engine == "qnnpack":
use_channelwise = False
batch_size = 2
in_channels_per_group = 2
H = 8
W = 8
D = 8
out_channels_per_group = 2
groups = 3
kernel_h = 3
kernel_w = 3
kernel_d = 3
stride_h = 2
stride_w = 2
stride_d = 2
pad_mode = "zeros" # 3d doesn't support reflect padding
pad_h = 1
pad_w = 1
pad_d = 1
dilation = 1
# Tests the correctness of the conv3d module.
in_channels = in_channels_per_group * groups
out_channels = out_channels_per_group * groups
input_feature_map_size = (D, H, W)
kernel_size = (kernel_d, kernel_h, kernel_w)
stride = (stride_d, stride_h, stride_w)
padding = (pad_d, pad_h, pad_w)
dilation = (dilation, dilation, dilation)
X_scale = 1.3
X_zero_point = 2
W_scale = [0.5]
W_zero_point = [0] if qengine_is_onednn() else [3]
Y_scale = 5.0
Y_zero_point = 4
# use_fused -> quantized class
class_map = {
True: (nniq.ConvReLU3d, "QuantizedConvReLU3d"),
False: (nnq.Conv3d, "QuantizedConv3d")
}
with override_quantized_engine('fbgemm'):
qconv_cls, module_name = class_map[use_fused]
qconv_module = qconv_cls(
in_channels, out_channels, kernel_size, stride, padding,
dilation, groups, use_bias, padding_mode=pad_mode
)
conv_module = nn.Conv3d(
in_channels, out_channels, kernel_size, stride, padding,
dilation, groups, use_bias, padding_mode=pad_mode)
if use_fused:
relu_module = nn.ReLU()
conv_module = nni.ConvReLU3d(conv_module, relu_module)
conv_module = conv_module.float()
self._test_conv_api_impl(
module_name, qconv_module, conv_module, batch_size,
in_channels_per_group, input_feature_map_size,
out_channels_per_group, groups, kernel_size, stride, padding,
pad_mode, dilation, X_scale, X_zero_point, W_scale,
W_zero_point, Y_scale, Y_zero_point, use_bias, use_fused,
use_channelwise)
def test_pool_api(self):
"""Tests the correctness of the pool module.
The correctness is defined against the functional implementation.
"""
N, C, H, W = 10, 10, 10, 3
kwargs = {
'kernel_size': 2,
'stride': None,
'padding': 0,
'dilation': 1
}
scale, zero_point = 1.0 / 255, 128
X = torch.randn(N, C, H, W, dtype=torch.float32)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
qX_expect = torch.nn.functional.max_pool2d(qX, **kwargs)
pool_under_test = torch.nn.quantized.MaxPool2d(**kwargs)
qX_hat = pool_under_test(qX)
self.assertEqual(qX_expect, qX_hat)
# JIT Testing
self.checkScriptable(pool_under_test, [[X]])
def test_dropout(self):
"""Tests the correctness of the dropout module.
The correctness is defined against the functional implementation.
"""
x = torch.randn((2, 4, 6, 8), dtype=torch.float)
float_mod = torch.nn.Dropout(p=0.5)
float_mod.training = False
y_ref = float_mod(x)
quant_ref = torch.quantize_per_tensor(y_ref, 1.0, 0, dtype=torch.quint8)
quant_mod = nnq.Dropout(p=0.5)
qx = torch.quantize_per_tensor(x, 1.0, 0, dtype=torch.quint8)
qy = quant_mod(qx)
self.assertEqual(quant_ref.int_repr().numpy(), qy.int_repr().numpy(),
msg="Dropout module API failed")
def _test_dropout_serialization(self, get_model, data1, data2):
m1 = get_model()
m1.qconfig = torch.ao.quantization.default_qconfig
mp1 = torch.ao.quantization.prepare(m1)
mp1(data1)
mq1 = torch.ao.quantization.convert(mp1)
ref1 = mq1(data2)
m2 = get_model()
m2.qconfig = torch.ao.quantization.default_qconfig
mp2 = torch.ao.quantization.prepare(m2)
mq2 = torch.ao.quantization.convert(mp2)
mq2.load_state_dict(mq1.state_dict())
ref2 = mq2(data2)
self.assertTrue(torch.allclose(ref1, ref2))
def test_dropout_serialization(self):
data1 = torch.randn(2, 4, 6, 8)
data2 = torch.randn(2, 4, 6, 8)
def _get_model():
return nn.Sequential(
torch.ao.quantization.QuantStub(),
nn.Dropout(p=0.5),
torch.ao.quantization.DeQuantStub()
).eval()
self._test_dropout_serialization(_get_model, data1, data2)
def test_batch_norm2d(self):
"""Tests the correctness of the batchnorm2d module.
The correctness is defined against the functional implementation.
"""
x = torch.randn((2, 4, 6, 8), dtype=torch.float)
float_mod = torch.nn.BatchNorm2d(4)
float_mod.training = False
y_ref = float_mod(x)
quant_ref = torch.quantize_per_tensor(y_ref, 1.0, 0, dtype=torch.quint8)
quant_mod = nnq.BatchNorm2d(4)
qx = torch.quantize_per_tensor(x, 1.0, 0, dtype=torch.quint8)
qy = quant_mod(qx)
self.assertEqual(quant_ref.int_repr().numpy(), qy.int_repr().numpy(),
msg="BatchNorm2d module API failed")
def test_batch_norm3d(self):
"""Tests the correctness of the batchnorm3d module.
The correctness is defined against the functional implementation.
"""
x = torch.randn((2, 4, 6, 8, 10), dtype=torch.float)
float_mod = torch.nn.BatchNorm3d(4)
float_mod.training = False
y_ref = float_mod(x)
quant_ref = torch.quantize_per_tensor(y_ref, 1.0, 0, dtype=torch.quint8)
quant_mod = nnq.BatchNorm3d(4)
qx = torch.quantize_per_tensor(x, 1.0, 0, dtype=torch.quint8)
qy = quant_mod(qx)
self.assertEqual(quant_ref.int_repr().numpy(), qy.int_repr().numpy(),
msg="BatchNorm3d module API failed")
def _test_batch_norm_serialization(self, get_model, data1, data2):
m1 = get_model()
m1.qconfig = torch.ao.quantization.default_qconfig
mp1 = torch.ao.quantization.prepare(m1)
mp1(data1)
mq1 = torch.ao.quantization.convert(mp1)
ref1 = mq1(data2)
m2 = get_model()
m2.qconfig = torch.ao.quantization.default_qconfig
mp2 = torch.ao.quantization.prepare(m2)
mq2 = torch.ao.quantization.convert(mp2)
mq2.load_state_dict(mq1.state_dict())
ref2 = mq2(data2)
self.assertTrue(torch.allclose(ref1, ref2))
def test_batch_norm2d_serialization(self):
data1 = torch.randn(2, 4, 6, 8)
data2 = torch.randn(2, 4, 6, 8)
def _get_model():
return nn.Sequential(
torch.ao.quantization.QuantStub(),
nn.BatchNorm2d(4),
torch.ao.quantization.DeQuantStub()
).eval()
self._test_batch_norm_serialization(_get_model, data1, data2)
def test_batch_norm3d_serialization(self):
data1 = torch.randn(2, 4, 6, 8, 1)
data2 = torch.randn(2, 4, 6, 8, 1)
def _get_model():
return nn.Sequential(
torch.ao.quantization.QuantStub(),
nn.BatchNorm3d(4),
torch.ao.quantization.DeQuantStub()
).eval()
self._test_batch_norm_serialization(_get_model, data1, data2)
def test_layer_norm(self):
"""Tests the correctness of the layernorm module.
The correctness is defined against the functional implementation.
"""
x_scale = 10.0 / 256
x_zero_point = 0
y_scale = 5.0 / 256
y_zero_point = 127
dims = (1, 4, 8)
X = (torch.randn(dims, dtype=torch.float) - 0.5) * 10
qX = torch.quantize_per_tensor(X, x_scale, x_zero_point, dtype=torch.quint8)
dqX = qX.dequantize()
float_mod = torch.nn.LayerNorm(dqX.size()[1:]).float()
float_mod.weight = torch.nn.Parameter(torch.rand(*dims[1:]))
float_mod.bias = torch.nn.Parameter(torch.rand(*dims[1:]))
dqY_ref = float_mod(dqX)
qY_ref = torch.quantize_per_tensor(
dqY_ref, y_scale, y_zero_point, dtype=torch.quint8)
quant_mod = nnq.LayerNorm(
qX.size()[1:], float_mod.weight, float_mod.bias, y_scale, y_zero_point)
qY = quant_mod(qX)
self.assertEqual(qY_ref.int_repr().numpy(), qY.int_repr().numpy(),
msg="LayerNorm module API failed, qY_ref\n{} vs qY\n{}"
.format(qY_ref, qY))
def test_group_norm(self):
"""Tests the correctness of the groupnorm module.
The correctness is defined against the functional implementation.
"""
x_scale = 10.0 / 256
x_zero_point = 0
y_scale = 5.0 / 256
y_zero_point = 127
dims = (1, 4, 8)
X = (torch.randn(dims, dtype=torch.float) - 0.5) * 10
qX = torch.quantize_per_tensor(X, x_scale, x_zero_point, dtype=torch.quint8)
dqX = qX.dequantize()
float_mod = torch.nn.GroupNorm(2, 4).float()
float_mod.weight = torch.nn.Parameter(torch.rand(dims[1]))
float_mod.bias = torch.nn.Parameter(torch.rand(dims[1]))
dqY_ref = float_mod(dqX)
qY_ref = torch.quantize_per_tensor(
dqY_ref, y_scale, y_zero_point, dtype=torch.quint8)
quant_mod = nnq.GroupNorm(
2, 2, float_mod.weight, float_mod.bias, y_scale, y_zero_point)
qY = quant_mod(qX)
self.assertEqual(qY_ref.int_repr().numpy(), qY.int_repr().numpy(),
msg="GroupNorm module API failed, qY_ref\n{} vs qY\n{}"
.format(qY_ref, qY))
def test_instance_norm(self):
"""Tests the correctness of the instancenorm{n}d modules.
The correctness is defined against the functional implementation.
"""
x_scale = 10.0 / 256
x_zero_point = 0
y_scale = 5.0 / 256
y_zero_point = 127
dims_to_modules = [
((1, 4, 8), torch.nn.InstanceNorm1d, nnq.InstanceNorm1d),
((1, 4, 8, 1), torch.nn.InstanceNorm2d, nnq.InstanceNorm2d),
((1, 4, 8, 1, 1), torch.nn.InstanceNorm3d, nnq.InstanceNorm3d),
]
for dim_to_modules in dims_to_modules:
dims, float_cls, q_cls = dim_to_modules
X = (torch.randn(dims, dtype=torch.float) - 0.5) * 10
qX = torch.quantize_per_tensor(
X, x_scale, x_zero_point, dtype=torch.quint8)
dqX = qX.dequantize()
float_mod = float_cls(dims[1]).float()
float_mod.weight = torch.nn.Parameter(torch.rand(dims[1]))
float_mod.bias = torch.nn.Parameter(torch.rand(dims[1]))
dqY_ref = float_mod(dqX)
qY_ref = torch.quantize_per_tensor(
dqY_ref, y_scale, y_zero_point, dtype=torch.quint8)
quant_mod = q_cls(
dims[1], float_mod.weight, float_mod.bias, y_scale,
y_zero_point)
qY = quant_mod(qX)
self.assertEqual(
qY_ref.int_repr().numpy(), qY.int_repr().numpy(),
msg="InstanceNorm module API failed, qY_ref\n{} vs qY\n{}"
.format(qY_ref, qY))
def _test_activation_module_impl(self, name, float_module_class, quantized_module_class, extra_kwargs):
"""Tests the correctness of the ELU module.
The correctness is defined against the functional implementation.
"""
x_scale = 10.0 / 256
x_zero_point = 0
y_scale = 5.0 / 256
y_zero_point = 127
alpha = 1.5
dims = (1, 4, 8)
X = (torch.randn(dims, dtype=torch.float) - 0.5) * 10
qX = torch.quantize_per_tensor(X, x_scale, x_zero_point, dtype=torch.quint8)
dqX = qX.dequantize()
float_mod = float_module_class(**extra_kwargs).float()
dqY_ref = float_mod(dqX)
qY_ref = torch.quantize_per_tensor(
dqY_ref, y_scale, y_zero_point, dtype=torch.quint8)
quant_mod = quantized_module_class(y_scale, y_zero_point, **extra_kwargs)
qY = quant_mod(qX)
self.assertEqual(qY_ref.int_repr().numpy(), qY.int_repr().numpy(),
msg="{} module API failed, qY_ref\n{} vs qY\n{}"
.format(name, qY_ref, qY))
def _test_leaky_relu_serialization(self):
scale_original = 10.0 / 256
zero_point_original = 1.0
quant_mod_original = nnq.LeakyReLU(scale_original, zero_point_original)
state_dict = quant_mod_original.state_dict()
scale_new = 5.0 / 256
zero_point_new = 2.0
quant_mod_new = nnq.LeakyReLU(scale_new, zero_point_new)
quant_mod_new.load_state_dict(state_dict)
self.assertEqual(quant_mod_original.scale, quant_mod_new.scale)
self.assertEqual(quant_mod_original.zero_point, quant_mod_new.zero_point)
def test_elu(self):
"""Tests the correctness of the ELU module.
The correctness is defined against the functional implementation.
"""
self._test_activation_module_impl("ELU", nn.ELU, nnq.ELU, {"alpha": 1.5})
def test_leaky_relu(self):
self._test_activation_module_impl("LeakyReLU", nn.LeakyReLU, nnq.LeakyReLU, {"negative_slope": 0.2})
self._test_leaky_relu_serialization()
def test_sigmoid(self):
self._test_activation_module_impl("Sigmoid", nn.Sigmoid, nnq.Sigmoid, {})
@given(
num_embeddings=st.integers(10, 50),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 4 == 0),
set_qconfig=st.booleans(),
)
@skipIfNoFBGEMM
def test_embedding_api(self, num_embeddings, embedding_dim, set_qconfig):
num_lengths = np.random.randint(1, 6)
lengths = np.random.randint(0, 21, size=num_lengths).astype(np.int32)
num_indices = np.sum(lengths)
indices = torch.from_numpy(np.random.randint(low=0, high=num_embeddings, size=num_indices, dtype=np.int64))
weights = torch.from_numpy((np.random.random_sample((num_embeddings, embedding_dim)) + 1).astype(np.float32))
obs = default_float_qparams_observer()
obs(weights)
qparams = obs.calculate_qparams()
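# the default float-qparams observer yields per-row (axis=0) scales and float
# zero_points, which is the layout quantized embedding weights expect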
dtypes = [torch.quint4x2, torch.quint8]
embedding_funcs = [torch.ops.quantized.embedding_4bit, torch.ops.quantized.embedding_byte]
for dtype, embedding_func in zip(dtypes, embedding_funcs):
# Quantize the weights
qweight = torch.quantize_per_channel(weights, qparams[0], qparams[1], axis=0, dtype=dtype)
qemb = nnq.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim, dtype=dtype)
qemb.set_weight(qweight)
qemb(indices)
# Ensure the module has the correct weights
self.assertEqual(qweight, qemb.weight())
w_packed = qemb._packed_params._packed_weight
module_out = qemb(indices)
# Call the bit qembedding operator directly
ref = embedding_func(w_packed, indices, pruned_weights=False)
self.assertEqual(module_out, ref)
self.checkEmbeddingSerialization(qemb, num_embeddings, embedding_dim, indices, None, set_qconfig=False,
is_emb_bag=False, dtype=dtype)
@given(
num_embeddings=st.integers(10, 50),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 4 == 0),
num_offsets=st.integers(1, 20),
set_qconfig=st.booleans(),
)
@skipIfNoFBGEMM
def test_embedding_bag_api(self, num_embeddings, embedding_dim, num_offsets, set_qconfig):
r"""Test execution and serialization for dynamic quantized embedding_bag modules on int8
"""
num_lengths = np.random.randint(1, 6)
lengths = np.random.randint(0, 21, size=num_lengths).astype(np.int32)
num_indices = np.sum(lengths)
indices = torch.from_numpy(np.random.randint(low=0, high=num_embeddings, size=num_indices, dtype=np.int64))
offsets = lengths_to_offsets(lengths)
# include the last offset
offsets = torch.cat((offsets, torch.tensor([indices.size(0)], dtype=torch.long)), 0)
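# with include_last_offset=True the offsets tensor holds num_bags + 1 entries,
# the final entry being the total number of indices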
weights = torch.from_numpy((np.random.random_sample((num_embeddings, embedding_dim)) + 1).astype(np.float32))
for qdtype in [torch.quint8, torch.quint4x2]:
obs = PerChannelMinMaxObserver(dtype=qdtype, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0)
obs(weights)
# Get the scale and zero point for the weight tensor
qparams = obs.calculate_qparams()
# Quantize the weights
qweight = torch.quantize_per_channel(weights, qparams[0], qparams[1], axis=0, dtype=qdtype)
qemb = nnq.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim,
include_last_offset=True, mode='sum', _weight=qweight, dtype=qdtype)
qemb(indices, offsets)
# Ensure the module has the correct weights
self.assertEqual(qweight, qemb.weight())
w_packed = qemb._packed_params._packed_weight
module_out = qemb(indices, offsets)
# Call the qembedding_bag operator directly
if qdtype == torch.quint8:
ref = torch.ops.quantized.embedding_bag_byte(w_packed, indices, offsets, mode=0,
per_sample_weights=None,
include_last_offset=True)
else:
ref = torch.ops.quantized.embedding_bag_4bit(w_packed, indices, offsets, mode=0,
per_sample_weights=None,
include_last_offset=True)
self.assertEqual(module_out, ref)
self.checkEmbeddingSerialization(qemb, num_embeddings, embedding_dim, indices,
offsets, set_qconfig, is_emb_bag=True, dtype=qdtype)
def test_prelu(self):
x = torch.randn((4, 4, 4, 4), dtype=torch.float)
qx = torch.quantize_per_tensor(x, 1.0, 0, dtype=torch.quint8)
# num_parameters = 1
prelu_module = nnq.PReLU(output_scale=1.0, output_zero_point=0, num_parameters=1)
w = torch.randn(1, dtype=torch.float)
qw = torch.quantize_per_tensor(w, 1.0, 0, dtype=torch.quint8)
prelu_module.set_weight(qw)
qy = prelu_module(qx)
qy_ref = torch.prelu(qx, qw)
self.assertEqual(qy_ref, qy,
msg="PReLU module API failed")
# num_parameters = num_channels
prelu_module = nnq.PReLU(output_scale=1.0, output_zero_point=0, num_parameters=4)
w = torch.randn(4, dtype=torch.float)
qw = torch.quantize_per_tensor(w, 1.0, 0, dtype=torch.quint8)
prelu_module.set_weight(qw)
qy = prelu_module(qx)
qy_ref = torch.prelu(qx, qw)
self.assertEqual(qy_ref, qy,
msg="PReLU module API failed")
class TestDynamicQuantizedModule(QuantizationTestCase):
def _test_qconv_impl(self, q_mod, dq_mod, dim, dtype, bias):
in_channels = 3
out_channels = 10
kernel_size = 2
stride = 1
padding = 0
dilation = 1
groups = 1
padding_mode = 'zeros'
if qengine_is_qnnpack():
reduce_range = False
else:
reduce_range = True
X_fp32 = torch.randn(*([in_channels] * dim))
s, z = _calculate_dynamic_qparams(X_fp32, dtype, reduce_range)
X_q = torch.quantize_per_tensor(X_fp32, s, z, dtype)
X_dq = torch.dequantize(X_q)
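# X_dq is the fp32 tensor corresponding to X_q, so the dynamic module (fp32 input)
# and the static module (quantized input) see numerically equivalent data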
quantized_module = q_mod(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias, padding_mode=padding_mode)
dynamic_module = dq_mod(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias, padding_mode=padding_mode)
quantized_module.scale, quantized_module.zero_point = s, z
dynamic_module.set_weight_bias(*quantized_module._weight_bias())
Y_q_ref = quantized_module(X_q)
Y_ref = torch.dequantize(Y_q_ref)
Y = dynamic_module(X_dq, reduce_range)
self.assertEqual(Y, Y_ref)
# Test serialization of quantized Conv Module using state_dict
W_q, b = dynamic_module._weight_bias()
model_dict = dynamic_module.state_dict()
self.assertEqual(model_dict['weight'], W_q)
self.assertEqual(model_dict['bias'], b)
bytes_io = io.BytesIO()
torch.save(model_dict, bytes_io)
bytes_io.seek(0)
loaded_dict = torch.load(bytes_io)
for key in loaded_dict:
self.assertEqual(model_dict[key], loaded_dict[key])
loaded_qconv_module = type(dynamic_module)(
in_channels, out_channels, kernel_size, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias, padding_mode=padding_mode)
loaded_qconv_module.load_state_dict(loaded_dict)
self.assertTrue(dir(loaded_qconv_module) == dir(dynamic_module))
self.assertTrue(dynamic_module._get_name() == loaded_qconv_module._get_name())
self.assertTrue(hasattr(loaded_qconv_module, '_packed_params'))
self.assertTrue(hasattr(loaded_qconv_module, '_weight_bias'))
self.assertEqual(dynamic_module.weight(), loaded_qconv_module.weight())
if bias:
self.assertEqual(dynamic_module.bias(), loaded_qconv_module.bias())
self.assertEqual(dynamic_module.scale, loaded_qconv_module.scale)
self.assertEqual(dynamic_module.zero_point,
loaded_qconv_module.zero_point)
Y_loaded = loaded_qconv_module(X_fp32, reduce_range)
np.testing.assert_array_almost_equal(
Y.numpy(), Y_loaded.numpy(), decimal=0)
# Test serialization
b = io.BytesIO()
torch.save(dynamic_module, b)
b.seek(0)
loaded_conv = torch.load(b)
self.assertEqual(loaded_conv.bias(), dynamic_module.bias())
self.assertEqual(loaded_conv.scale, dynamic_module.scale)
self.assertEqual(loaded_conv.zero_point,
dynamic_module.zero_point)
# Test copy and deepcopy
copied_conv = copy.copy(dynamic_module)
self.assertEqual(copied_conv.bias(), dynamic_module.bias())
self.assertEqual(copied_conv.scale, dynamic_module.scale)
self.assertEqual(copied_conv.zero_point,
dynamic_module.zero_point)
Y_copied = copied_conv(X_fp32, reduce_range)
np.testing.assert_array_almost_equal(
Y.numpy(), Y_copied.numpy(), decimal=0)
deepcopied_conv = copy.deepcopy(dynamic_module)
self.assertEqual(deepcopied_conv.bias(), dynamic_module.bias())
self.assertEqual(deepcopied_conv.scale, dynamic_module.scale)
self.assertEqual(deepcopied_conv.zero_point,
dynamic_module.zero_point)
Y_deepcopied = copied_conv(X_fp32, reduce_range)
np.testing.assert_array_almost_equal(
Y.numpy(), Y_deepcopied.numpy(), decimal=0)
# need to fix this
# JIT testing
self.checkScriptable(
dynamic_module, [[X_dq]],
check_save_load=True)
# Test from_float
conv_module = dynamic_module._FLOAT_MODULE(in_channels, out_channels, kernel_size)
conv_module.qconfig = torch.ao.quantization.default_dynamic_qconfig # type: ignore[assignment]
prepare_dynamic(conv_module)
conv_module(X_dq)
quantized_conv_module = dq_mod.from_float(conv_module)
# Smoke test to make sure the module actually runs
quantized_conv_module(X_dq)
# Smoke test extra_repr
self.assertEqual(dynamic_module._get_name(), quantized_conv_module._get_name())
@override_qengines
def test_dynamic_conv1d(self):
q_mod = torch.nn.quantized.Conv1d
dq_mod = torch.nn.quantized.dynamic.Conv1d
dim = 3
dtype = torch.quint8
for bias in [True, False]:
self._test_qconv_impl(q_mod, dq_mod, dim, dtype, bias)
@override_qengines
def test_dynamic_conv2d(self):
q_mod = torch.nn.quantized.Conv2d
dq_mod = torch.nn.quantized.dynamic.Conv2d
dim = 4
dtype = torch.quint8
for bias in [True, False]:
self._test_qconv_impl(q_mod, dq_mod, dim, dtype, bias)
@override_qengines
def test_dynamic_conv3d(self):
q_mod = torch.nn.quantized.Conv3d
dq_mod = torch.nn.quantized.dynamic.Conv3d
dim = 5
dtype = torch.quint8
if qengine_is_qnnpack():
return # qnnpack doesn't support unpacking conv3d
for bias in [True, False]:
self._test_qconv_impl(q_mod, dq_mod, dim, dtype, bias)
@override_qengines
def test_dynamic_convtranspose1d(self):
q_mod = torch.nn.quantized.ConvTranspose1d
dq_mod = torch.nn.quantized.dynamic.ConvTranspose1d
dim = 3
dtype = torch.quint8
for bias in [True, False]:
self._test_qconv_impl(q_mod, dq_mod, dim, dtype, bias)
@override_qengines
def test_dynamic_convtranspose2d(self):
q_mod = torch.nn.quantized.ConvTranspose2d
dq_mod = torch.nn.quantized.dynamic.ConvTranspose2d
dim = 4
dtype = torch.quint8
for bias in [True, False]:
self._test_qconv_impl(q_mod, dq_mod, dim, dtype, bias)
@override_qengines
def test_dynamic_convtranspose3d(self):
q_mod = torch.nn.quantized.ConvTranspose3d
dq_mod = torch.nn.quantized.dynamic.ConvTranspose3d
dim = 5
dtype = torch.quint8
if qengine_is_qnnpack():
return # qnnpack doesn't support unpacking conv3d
for bias in [True, False]:
self._test_qconv_impl(q_mod, dq_mod, dim, dtype, bias)
@given(
batch_size=st.integers(1, 5),
in_features=st.integers(16, 32),
out_features=st.integers(4, 8),
use_bias=st.booleans(),
use_default_observer=st.booleans(),
)
@override_qengines
def test_linear_api(self, batch_size, in_features, out_features, use_bias, use_default_observer):
"""test API functionality for nn.quantized.dynamic.Linear"""
W = torch.rand(out_features, in_features).float()
qscheme = torch.per_tensor_symmetric if qengine_is_onednn() else torch.per_tensor_affine
W_scale, W_zp = _calculate_dynamic_qparams(W, torch.qint8, qscheme=qscheme)
W_q = torch.quantize_per_tensor(W, W_scale, W_zp, torch.qint8)
X = torch.rand(batch_size, in_features).float()
B = torch.rand(out_features).float() if use_bias else None
qlinear = nnqd.Linear(in_features, out_features)
# Run module with default-initialized parameters.
# This tests that the constructor is correct.
qlinear.set_weight_bias(W_q, B)
qlinear(X)
# Simple round-trip test to ensure weight()/set_weight() API
self.assertEqual(qlinear.weight(), W_q)
W_pack = qlinear._packed_params._packed_params
Z_dq = qlinear(X)
# Check if the module implementation matches calling the
# ops directly
Z_ref = torch.ops.quantized.linear_dynamic(X, W_pack, reduce_range=True)
self.assertEqual(Z_ref, Z_dq)
# Test serialization of dynamic quantized Linear Module using state_dict
model_dict = qlinear.state_dict()
b = io.BytesIO()
torch.save(model_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
for key in model_dict:
if isinstance(model_dict[key], torch._C.ScriptObject):
assert isinstance(loaded_dict[key], torch._C.ScriptObject)
w_model, b_model = torch.ops.quantized.linear_unpack(model_dict[key])
w_loaded, b_loaded = torch.ops.quantized.linear_unpack(loaded_dict[key])
self.assertEqual(w_model, w_loaded)
self.assertEqual(b_model, b_loaded)
else:
self.assertEqual(model_dict[key], loaded_dict[key])
loaded_qlinear = nnqd.Linear(in_features, out_features)
loaded_qlinear.load_state_dict(loaded_dict)
linear_unpack = torch.ops.quantized.linear_unpack
self.assertEqual(linear_unpack(qlinear._packed_params._packed_params),
linear_unpack(loaded_qlinear._packed_params._packed_params))
if use_bias:
self.assertEqual(qlinear.bias(), loaded_qlinear.bias())
self.assertTrue(dir(qlinear) == dir(loaded_qlinear))
self.assertTrue(hasattr(qlinear, '_packed_params'))
self.assertTrue(hasattr(loaded_qlinear, '_packed_params'))
self.assertTrue(hasattr(qlinear, '_weight_bias'))
self.assertTrue(hasattr(loaded_qlinear, '_weight_bias'))
self.assertEqual(qlinear._weight_bias(), loaded_qlinear._weight_bias())
self.assertEqual(qlinear._weight_bias(), torch.ops.quantized.linear_unpack(qlinear._packed_params._packed_params))
Z_dq2 = qlinear(X)
self.assertEqual(Z_dq, Z_dq2)
b = io.BytesIO()
torch.save(qlinear, b)
b.seek(0)
loaded = torch.load(b)
self.assertEqual(qlinear.weight(), loaded.weight())
self.assertEqual(qlinear.zero_point, loaded.zero_point)
# Test JIT
self.checkScriptable(qlinear, [[X]], check_save_load=True)
modules_under_test = [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear]
for mut in modules_under_test:
# Test from_float
float_linear = mut(in_features, out_features).float()
if use_default_observer:
float_linear.qconfig = torch.ao.quantization.default_dynamic_qconfig
prepare_dynamic(float_linear)
float_linear(X.float())
quantized_float_linear = nnqd.Linear.from_float(float_linear)
# Smoke test to make sure the module actually runs
quantized_float_linear(X)
# Smoke test extra_repr
self.assertTrue('QuantizedLinear' in str(quantized_float_linear))
@given(
dtype=st.sampled_from([torch.qint8, torch.float16]),
bidirectional=st.booleans(),
)
@override_qengines
def test_lstm_api(self, dtype, bidirectional):
r"""Test execution and serialization for dynamic quantized lstm modules on int8 and fp16
"""
# Check that module matches the numerics of the op and ensure that module can be
# instantiated for all engines and dtypes
seq_len = 4
batch = 2
input_size = 3
hidden_size = 7
num_layers = 2
bias = True
weight_keys = []
bias_keys = []
num_directions = 2 if bidirectional else 1
for layer in range(num_layers):
for direction in range(num_directions):
suffix = '_reverse' if direction == 1 else ''
key_name1 = 'weight_ih_l{layer_idx}{suffix}'.format(layer_idx=layer, suffix=suffix)
key_name2 = 'weight_hh_l{layer_idx}{suffix}'.format(layer_idx=layer, suffix=suffix)
weight_keys.append(key_name1)
weight_keys.append(key_name2)
key_name1 = 'bias_ih_l{layer_idx}{suffix}'.format(layer_idx=layer, suffix=suffix)
key_name2 = 'bias_hh_l{layer_idx}{suffix}'.format(layer_idx=layer, suffix=suffix)
bias_keys.append(key_name1)
bias_keys.append(key_name2)
if not (dtype == torch.float16 and torch.backends.quantized.engine in ("qnnpack", "onednn")):
# fp16 dynamic quant is not supported for qnnpack or onednn
x = torch.randn(seq_len, batch, input_size)
h = torch.randn(num_layers * (bidirectional + 1), batch, hidden_size)
c = torch.randn(num_layers * (bidirectional + 1), batch, hidden_size)
cell_dq = torch.nn.quantized.dynamic.LSTM(input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=False,
dropout=0.0,
bidirectional=bidirectional,
dtype=dtype)
ref_dq = torch.nn.quantized.dynamic.LSTM(input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=False,
dropout=0.0,
bidirectional=bidirectional,
dtype=dtype)
_all_params = ([m.param for m in cell_dq._all_weight_values])
result = torch.quantized_lstm(x, (h, c),
_all_params,
cell_dq.bias,
cell_dq.num_layers,
float(cell_dq.dropout),
False,
bidirectional,
False,
dtype=dtype,
use_dynamic=True)
y, (h, c) = cell_dq(x, (h, c))
self.assertEqual(result[0], y)
self.assertEqual(result[1], h)
self.assertEqual(result[2], c)
x = torch.randn(10, 20, 3)
self.check_eager_serialization(cell_dq, ref_dq, [x])
self.check_weight_bias_api(cell_dq, weight_keys, bias_keys)
@override_qengines
def test_gru_api(self):
r"""Test execution and serialization for dynamic quantized lstm modules on int8 and fp16
"""
# Check that module matches the numerics of the op and ensure that module can be
# instantiated for all engines and dtypes
for dtype in [torch.qint8, torch.float16]:
if dtype == torch.float16 and torch.backends.quantized.engine in ("qnnpack", "onednn"):
# fp16 dynamic quant is not supported for qnnpack or onednn
continue
# Test default instantiation
seq_len = 4
batch = 2
input_size = 3
hidden_size = 7
num_layers = 2
bias = True
bidirectional = False
x = torch.rand(seq_len, batch, input_size)
h = torch.rand(num_layers * (bidirectional + 1), batch, hidden_size)
cell_dq = torch.nn.quantized.dynamic.GRU(input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=False,
dropout=0.0,
bidirectional=bidirectional,
dtype=dtype)
_all_params = ([m.param for m in cell_dq._all_weight_values])
result = torch.quantized_gru(x,
h,
_all_params,
cell_dq.bias,
cell_dq.num_layers,
float(cell_dq.dropout),
False,
bidirectional,
False)
y, h = cell_dq(x, h)
self.assertEqual(result[0], y, msg="GRU module API failed")
self.assertEqual(result[1], h, msg="GRU module API failed")
@given(
dtype=st.sampled_from([torch.qint8, torch.float16]),
)
@override_qengines
def test_cell_api(self, dtype):
r"""Test execution and serialization for dynamic quantized lstm modules on int8 and fp16
"""
# Check that module matches the numerics of the op and ensure that module can be
# instantiated for all engines and dtypes
batch = 7
input_size = 3
hidden_size = 7
bias = True
x = torch.rand(batch, input_size)
h = torch.rand(batch, hidden_size)
cell_dict = {'LSTMCell': torch.nn.quantized.dynamic.LSTMCell,
'GRUCell': torch.nn.quantized.dynamic.GRUCell,
'RNNTanh': torch.nn.quantized.dynamic.RNNCell,
'RNNReLU': torch.nn.quantized.dynamic.RNNCell
}
state = {'LSTMCell': (h, h),
'GRUCell': h,
'RNNTanh': h,
'RNNReLU': h}
qfn_dict = {'LSTMCell': torch.ops.quantized.quantized_lstm_cell_dynamic,
'GRUCell': torch.ops.quantized.quantized_gru_cell_dynamic,
'RNNTanh': torch.ops.quantized.quantized_rnn_tanh_cell_dynamic,
'RNNReLU': torch.ops.quantized.quantized_rnn_relu_cell_dynamic}
for rnn_type in cell_dict.keys():
if not (dtype == torch.float16 and torch.backends.quantized.engine in ("qnnpack", "onednn")):
# fp16 dynamic quant is not supported for qnnpack or onednn
kwargs = {'input_size': input_size, 'hidden_size': hidden_size, 'bias': bias, 'dtype': dtype}
if rnn_type == 'RNNReLU':
kwargs['nonlinearity'] = "relu"
elif rnn_type == 'RNNTanh':
kwargs['nonlinearity'] = "tanh"
cell_dq = cell_dict[rnn_type](**kwargs)
result = qfn_dict[rnn_type](x, state[rnn_type],
cell_dq._packed_weight_ih, cell_dq._packed_weight_hh,
cell_dq.bias_ih, cell_dq.bias_hh)
result_module = cell_dq(x, state[rnn_type])
self.assertEqual(result[0], result_module[0], msg="RNNCell module API failed")
self.assertEqual(result[1], result_module[1], msg="RNNCell module API failed")
weight_keys = ['weight_ih', 'weight_hh']
bias_keys = ['bias_ih', 'bias_hh']
self.check_eager_serialization(cell_dq, cell_dict[rnn_type](**kwargs), [x])
self.check_weight_bias_api(cell_dq, weight_keys, bias_keys)
class TestReferenceQuantizedModule(QuantizationTestCase):
def _quant_dequant_weight(self, weight, weight_qparams):
qscheme = weight_qparams["qscheme"]
scale = weight_qparams["scale"]
zero_point = weight_qparams["zero_point"]
dtype = weight_qparams["dtype"]
if qscheme == torch.per_tensor_affine:
weight = torch.quantize_per_tensor(weight, scale, zero_point, dtype)
else:
# per channel affine
axis = weight_qparams["axis"]
weight = torch.quantize_per_channel(weight, scale, zero_point, axis, dtype)
weight = weight.dequantize()
return weight
# TODO: add tests for conv and linear
def test_rnn_cell(self):
""" Checks the rnn cell reference quantized modules has correct numerics
This includes LSTMCell, GRUCell, RNNCell
"""
batch = 7
input_size = 3
hidden_size = 7
bias = True
x = torch.rand(batch, input_size)
h = torch.rand(batch, hidden_size)
cell_dict = {'LSTMCell': torch.nn.LSTMCell,
'GRUCell': torch.nn.GRUCell,
'RNNTanh': torch.nn.RNNCell,
'RNNReLU': torch.nn.RNNCell
}
state = {'LSTMCell': (h, h),
'GRUCell': h,
'RNNTanh': h,
'RNNReLU': h}
qfn_dict = {'LSTMCell': nnqr.LSTMCell,
'GRUCell': nnqr.GRUCell,
'RNNTanh': nnqr.RNNCell,
'RNNReLU': nnqr.RNNCell}
for rnn_type in cell_dict.keys():
kwargs = {'input_size': input_size, 'hidden_size': hidden_size, 'bias': bias}
if rnn_type == 'RNNReLU':
kwargs['nonlinearity'] = "relu"
elif rnn_type == 'RNNTanh':
kwargs['nonlinearity'] = "tanh"
fp_cell = cell_dict[rnn_type](**kwargs)
# initialize ref rnn cell module
weight_qparams = {
'qscheme': torch.per_tensor_affine,
'dtype': torch.quint8,
'scale': 2.0,
'zero_point': 5
}
weight_qparams_dict = {
"weight_ih": weight_qparams,
"weight_hh": weight_qparams,
}
ref_kwargs = kwargs.copy()
ref_kwargs["weight_qparams_dict"] = weight_qparams_dict
ref_cell = qfn_dict[rnn_type](**ref_kwargs)
# reassign the weights from the fp32 rnn cell module
ref_cell.weight_ih = fp_cell.weight_ih
ref_cell.weight_hh = fp_cell.weight_hh
ref_cell.bias_ih = fp_cell.bias_ih
ref_cell.bias_hh = fp_cell.bias_hh
ref_res = ref_cell(x, state[rnn_type])
# before computing fp_res, run a quantize-dequantize pass
# on the weights of fp_cell
fp_cell.weight_ih = torch.nn.Parameter(self._quant_dequant_weight(fp_cell.weight_ih, weight_qparams_dict["weight_ih"]))
fp_cell.weight_hh = torch.nn.Parameter(self._quant_dequant_weight(fp_cell.weight_hh, weight_qparams_dict["weight_hh"]))
fp_res = fp_cell(x, state[rnn_type])
self.assertEqual(ref_res[0], fp_res[0], msg="RNNCell module API failed")
self.assertEqual(ref_res[1], fp_res[1], msg="RNNCell module API failed")
def test_rnn(self):
""" Checks the rnn reference quantized modules has correct numerics
This includes LSTM
"""
seq_len = 4
batch = 2
input_size = 3
hidden_size = 7
num_layers = 2
bias = True
for bidirectional in [True, False]:
x = torch.randn(seq_len, batch, input_size)
h = torch.randn(num_layers * (bidirectional + 1), batch, hidden_size)
c = torch.randn(num_layers * (bidirectional + 1), batch, hidden_size)
fp32_rnn = torch.nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=False,
dropout=0.0,
bidirectional=bidirectional)
# initialize ref rnn module
weight_qparams = {
'qscheme': torch.per_tensor_affine,
'dtype': torch.qint8,
'scale': 2.0,
'zero_point': 5
}
weight_qparams_dict = {key: weight_qparams for key in fp32_rnn._flat_weights_names if key.startswith("weight")}
ref_rnn = nnqr.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=False,
dropout=0.0,
bidirectional=bidirectional,
weight_qparams_dict=weight_qparams_dict)
for wn in fp32_rnn._flat_weights_names:
setattr(ref_rnn, wn, copy.deepcopy(getattr(fp32_rnn, wn)))
ref_rnn._flat_weights = copy.deepcopy(fp32_rnn._flat_weights)
# quantize and dequantize the weights for fp32_rnn module
flat_weights = []
for wn in fp32_rnn._flat_weights_names:
if wn.startswith("weight"):
weight = self._quant_dequant_weight(getattr(fp32_rnn, wn), weight_qparams)
else:
weight = getattr(fp32_rnn, wn)
flat_weights.append(weight)
fp32_rnn._flat_weights = flat_weights
fp32_res = fp32_rnn(x, (h, c))
ref_res = ref_rnn(x, (h, c))
self.assertEqual(fp32_res, ref_res)
def test_sparse(self):
""" Embedding and EmbeddingBag
"""
num_embeddings = 10
embedding_dim = 3
# embedding input
ex = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])
# embedding bag input
ebx = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
offsets = torch.tensor([0, 4], dtype=torch.long)
fp_to_ref = {
nn.Embedding: (nnqr.Embedding, (ex,)),
nn.EmbeddingBag: (nnqr.EmbeddingBag, (ebx, offsets)),
}
per_tensor_weight_qparams = {
'qscheme': torch.per_tensor_affine,
'dtype': torch.quint8,
'scale': 2.0,
'zero_point': 5,
}
per_channel_weight_qparams = {
'qscheme': torch.per_channel_affine,
'dtype': torch.quint8,
'scale': torch.randn(10),
'zero_point': torch.randint(0, 255, (10,)),
'axis': 0,
}
per_channel_weight_qparams_quint4x2 = {
'qscheme': torch.per_channel_affine_float_qparams,
'dtype': torch.quint4x2,
'scale': torch.randn(10),
'zero_point': torch.randint(0, 255, (10,)),
'axis': 0,
}
weight_qparams_options = [
per_tensor_weight_qparams,
per_channel_weight_qparams,
per_channel_weight_qparams_quint4x2,
]
for fp_cls, weight_qparams in itertools.product([nn.Embedding, nn.EmbeddingBag], weight_qparams_options):
# TODO: torch.quint4x2 not supported in quantize_per_channel, need to add support
if weight_qparams['dtype'] == torch.quint4x2:
continue
ref_cls, args = fp_to_ref[fp_cls]
fp32_embedding = fp_cls(num_embeddings, embedding_dim)
ref_embedding = ref_cls(num_embeddings, embedding_dim, weight_qparams=weight_qparams)
ref_embedding.weight = fp32_embedding.weight
# quantize and dequantize the weight for fp32 module
fp32_embedding.weight = torch.nn.Parameter(self._quant_dequant_weight(fp32_embedding.weight, weight_qparams))
fp32_res = fp32_embedding(*args)
ref_res = ref_embedding(*args)
self.assertEqual(fp32_res, ref_res)
|
pytorch-master
|
test/quantization/core/test_quantized_module.py
|
# Owner(s): ["oncall: quantization"]
import re
import contextlib
from pathlib import Path
import torch
# import torch.nn.quantized as nnq
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
SingleLayerLinearModel,
)
from torch.testing._internal.common_quantized import override_quantized_engine
from torch.testing._internal.common_utils import IS_ARM64
class TestQuantizationDocs(QuantizationTestCase):
r"""
The tests in this section import code from the quantization docs and check that
it actually runs without errors. In cases where objects are undefined in a code snippet,
they must be provided by the test. The imports seem to behave a bit inconsistently:
they can either be done in the test file or passed in as global inputs.
"""
def run(self, result=None):
with override_quantized_engine("qnnpack") if IS_ARM64 else contextlib.nullcontext():
super(TestQuantizationDocs, self).run(result)
def _get_code(
self, path_from_pytorch, unique_identifier, offset=2, short_snippet=False
):
r"""
This function reads in the code from the docs given a unique identifier.
Most code snippets have a 2-space indentation; for other indentation levels,
change the `offset` arg. The `short_snippet` arg can be set to allow for testing
of smaller snippets; the check that this arg controls is used to make sure that
we are not accidentally importing only a blank line or something.
"""
def get_correct_path(path_from_pytorch):
r"""
The current working directory when CI runs the test seems to vary; this function
looks for the docs directory, and if it finds it, looks for the path to the
file. If the file exists it returns that path, otherwise it keeps looking. Will
only work if the cwd contains pytorch or docs, or a parent contains docs.
"""
# get cwd
cur_dir_path = Path(".").resolve()
# check if cwd contains pytorch, use that if it does
if (cur_dir_path / "pytorch").is_dir():
cur_dir_path = (cur_dir_path / "pytorch").resolve()
# need to find the file, so we check current directory
# and all parent directories to see if the path leads to it
check_dir = cur_dir_path
while not check_dir == check_dir.parent:
file_path = (check_dir / path_from_pytorch).resolve()
if file_path.is_file():
return file_path
check_dir = check_dir.parent.resolve()
# fail the test explicitly when the file cannot be found
raise FileNotFoundError("could not find {}".format(path_from_pytorch))
path_to_file = get_correct_path(path_from_pytorch)
if path_to_file:
file = open(path_to_file)
content = file.readlines()
# lines read from the file end with a newline, so the identifier needs one too
if "\n" not in unique_identifier:
unique_identifier += "\n"
assert unique_identifier in content, "could not find {} in {}".format(
unique_identifier, path_to_file
)
# get index of first line of code
line_num_start = content.index(unique_identifier) + 1
# next find where the code chunk ends.
# this regex matches lines that do not start
# with a \n or with `offset` leading spaces
r = re.compile("^[^\n," + " " * offset + "]")
# this will return the line of first line that matches regex
line_after_code = next(filter(r.match, content[line_num_start:]))
last_line_num = content.index(line_after_code)
# remove the first `offset` chars of each line and gather it all together
code = "".join(
[x[offset:] for x in content[line_num_start + 1 : last_line_num]]
)
# make sure we are actually getting some code
assert last_line_num - line_num_start > 3 or short_snippet, (
"The code in {} identified by {} seems suspiciously short:"
"\n\n###code-start####\n{}###code-end####".format(
path_to_file, unique_identifier, code
)
)
return code
return None
def _test_code(self, code, global_inputs=None):
r"""
This function runs `code` using any vars in `global_inputs`
"""
# if the code snippet could not be found, `code` is None and there is nothing to run
if code is not None:
expr = compile(code, "test", "exec")
exec(expr, global_inputs)
def test_quantization_doc_ptdq(self):
path_from_pytorch = "docs/source/quantization.rst"
unique_identifier = "PTDQ API Example::"
code = self._get_code(path_from_pytorch, unique_identifier)
self._test_code(code)
def test_quantization_doc_ptsq(self):
path_from_pytorch = "docs/source/quantization.rst"
unique_identifier = "PTSQ API Example::"
code = self._get_code(path_from_pytorch, unique_identifier)
self._test_code(code)
def test_quantization_doc_qat(self):
path_from_pytorch = "docs/source/quantization.rst"
unique_identifier = "QAT API Example::"
def _dummy_func(*args, **kwargs):
return None
input_fp32 = torch.randn(1, 1, 1, 1)
global_inputs = {"training_loop": _dummy_func, "input_fp32": input_fp32}
code = self._get_code(path_from_pytorch, unique_identifier)
self._test_code(code, global_inputs)
def test_quantization_doc_fx(self):
path_from_pytorch = "docs/source/quantization.rst"
unique_identifier = "FXPTQ API Example::"
input_fp32 = SingleLayerLinearModel().get_example_inputs()
global_inputs = {"UserModel": SingleLayerLinearModel, "input_fp32": input_fp32}
code = self._get_code(path_from_pytorch, unique_identifier)
self._test_code(code, global_inputs)
def test_quantization_doc_custom(self):
path_from_pytorch = "docs/source/quantization.rst"
unique_identifier = "Custom API Example::"
global_inputs = {"nnq": torch.nn.quantized}
code = self._get_code(path_from_pytorch, unique_identifier)
self._test_code(code, global_inputs)
|
pytorch-master
|
test/quantization/core/test_docs.py
|
pytorch-master
|
test/quantization/core/__init__.py
|
|
# Owner(s): ["oncall: quantization"]
from builtins import round
import copy
import itertools
import numpy as np
import unittest
import operator
import random
import torch
from torch import _VF
import torch.jit
import torch.nn.functional as F
from torch.nn.modules.utils import _single, _pair
from hypothesis import settings, HealthCheck
from hypothesis import assume, given, note
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from torch.testing._internal.common_utils import TestCase, skipIfSlowGradcheckEnv
from torch.testing._internal.common_utils import IS_PPC, TEST_WITH_UBSAN, IS_MACOS, BUILD_WITH_CAFFE2
from torch.testing._internal.common_quantization import skipIfNoFBGEMM, skipIfNoQNNPACK
from torch.testing._internal.common_quantized import _quantize, _dequantize, _calculate_dynamic_qparams, \
override_quantized_engine, supported_qengines, override_qengines, _snr
from torch.testing._internal.common_quantized import (
qengine_is_qnnpack,
qengine_is_onednn,
)
from torch.ao.quantization import PerChannelMinMaxObserver
from torch.testing._internal.common_cuda import TEST_CUDNN, TEST_CUDA
import torch.backends.xnnpack
from typing import Optional
np_dtype = {
torch.quint8 : np.uint8,
torch.qint8 : np.int8,
torch.qint32 : np.int32
}
# Make sure we won't have overflows from the vpmaddubsw instruction used in FBGEMM.
# On the current Intel x86 architecture, we need to utilize the vpmaddubsw instruction
# for the 8-bit int multiplication. This instruction vertically multiplies each
# unsigned 8-bit integer from a with the corresponding signed 8-bit integer from
# b, producing intermediate signed 16-bit integers. This function modifies the
# weights to eliminate overflow of the signed 16-bit integers.
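# As an illustrative bound (not part of the original comment): each u8*s8
# product can be as large as 255 * 127 = 32385, so a pair of adjacent products
# can sum to about 64770 and overflow the signed 16-bit range of [-32768, 32767].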
def avoid_vpmaddubsw_overflow_linear(
batch_size, input_channels, output_channels, X, X_min, X_max, W, W_min, W_max
):
for i, j in np.ndindex((batch_size, output_channels)):
for k in range(0, input_channels // 2 * 2, 2):
x0 = X[i, k] - X_min
x1 = X[i, k + 1] - X_min
w0 = W[j, k] - 128 - W_min
w1 = W[j, k + 1] - 128 - W_min
if x0 * w0 + x1 * w1 < -(1 << 15):
w1_adjusted = (-(1 << 15) - float(x0) * w0) / x1
W[j, k + 1] = int(w1_adjusted) + 128 + W_min
elif x0 * w0 + x1 * w1 > (1 << 15) - 1:
w1_adjusted = ((1 << 15) - 1 - float(x0) * w0) / x1
W[j, k + 1] = int(w1_adjusted) + 128 + W_min
# Go through the same loop again to double check we don't have any overflow
for i, j in np.ndindex((batch_size, output_channels)):
for k in range(0, input_channels // 2 * 2, 2):
x0 = X[i, k] - X_min
x1 = X[i, k + 1] - X_min
w0 = W[j, k] - 128 - W_min
w1 = W[j, k + 1] - 128 - W_min
assert -(1 << 15) <= x0 * w0 + x1 * w1 < (1 << 15)
# Reference quantized Linear operator
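# For reference (derived from the code below): the integer accumulator equals
#     (X_q - X_zp) @ (W_q - W_zp).T
#       = X_q @ W_q.T - W_zp * row_offsets - X_zp * col_offsets
#         + input_channels * X_zp * W_zp
# where row_offsets are the per-row sums of X_q and col_offsets the per-row
# sums of W_q; the bias (if any) is added in this accumulator domain before
# requantizing to the output scale and zero point via _quantize.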
def qlinear_ref(X_q, X_scale, X_zp, W_q, W_scale, W_zp, b_q, Y_scale, Y_zp, dtype=np.uint8):
X_q = np.reshape(X_q, (-1, X_q.shape[X_q.ndim - 1]))
row_offsets_ref = X_q.sum(axis=1).astype(np.int32).reshape((-1, 1))
col_offsets_ref = W_q.sum(axis=1).astype(np.int32).reshape((1, -1))
assert X_q.ndim == 2
batch_size, input_channels = X_q.shape
Prod_XqWq_ref = (
np.matmul(X_q.astype(np.int32), W_q.astype(np.int32).T)
- W_zp * row_offsets_ref
- X_zp * col_offsets_ref
+ input_channels * X_zp * W_zp
)
if b_q is not None:
Prod_XqWq_ref += b_q
Y_q_ref = _quantize(Prod_XqWq_ref, Y_scale / (X_scale * W_scale), Y_zp, dtype=dtype)
return Y_q_ref
"""Computes the output shape given pooling parameters."""
def pool_output_shape(input_size, kernel_size, padding, stride,
dilation, ceiling_mode=False):
if stride is None:
stride = kernel_size
output_size = (
(input_size + 2 * padding - dilation * (kernel_size - 1) - 1
+ (stride - 1 if ceiling_mode else 0)) // stride + 1)
if (ceiling_mode and
((output_size - 1) * stride >= input_size + padding)):
output_size -= 1
return output_size
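# Illustrative check of the formula above (hypothetical values): for
# input_size=7, kernel_size=3, padding=1, stride=2, dilation=1 and
# ceiling_mode=False, the output size is (7 + 2 - 2 - 1) // 2 + 1 = 4, i.e.
# pool_output_shape(7, 3, 1, 2, 1) == 4.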
"""
Util for creating a random tensor and quantization params when Hypothesis
is undesirable.
"""
def _get_random_tensor_and_q_params(shapes, rand_scale, torch_type):
X = (torch.rand(*shapes, dtype=torch.float) - 0.5) * rand_scale
# Calculate reasonable quantization params
min_val = torch.min(X)
max_val = torch.max(X)
if torch_type == torch.qint32:
X_zero_point = int(torch.randint(-1 * (2 ** 31), 2 ** 31 - 1, (1,)))
num_bins = 2 ** 32
X_scale = float(max_val - min_val) / num_bins
elif torch_type == torch.qint8:
X_zero_point = int(torch.randint(-128, 127, (1,)))
num_bins = 2 ** 8
X_scale = float(max_val - min_val) / num_bins
else: # torch.quint8
X_zero_point = 127
num_bins = 2 ** 8
X_scale = float(max_val - min_val) / num_bins
if X_scale == 0:
X_scale = 1e-10
return X, X_scale, X_zero_point
@skipIfSlowGradcheckEnv
class TestQuantizedOps(TestCase):
"""Helper function to test quantized activation functions."""
def _test_activation_function(self, X, fn_name, test_configs):
r"""
When writing a unit test for an activation function,
instead of re-implementing the test routines for each activation function,
use _test_activation_function, which provides general-purpose testing.
To use the helper function, a test config must be provided.
A test config is a list that contains metadata about the quantized activation
functions that will be tested and how the tests need to be set up; it allows simpler and
more concise unit tests to be written by specifying the configurations needed
and calling the provided helper function _test_activation_function.
Inside the list, each config (as a dictionary) represents a suite of tests that assert the
correctness of various quantization functions.
You can check out the test_qrelu, test_qrelu6, test_qsigmoid, and test_qhardsigmoid for
how their test configs are specified.
Here's a list of the fields that can be included in a test config:
quantized_fn: a list of the quantized functions to be tested
reference_fn: the original reference function to be called on
the dequantized X
extra_kwargs: the additional keyword arguments
for each test entry in ops_under_test, it must have at least the fields
for quantized_fn and reference_fn.
output_range: the output range the operator will map to. By default, if it is
not specified, the range will not be controlled and depends on Xmin and Xmax.
change_zero_point: a boolean flag indicating if the zero point parameter should
be determined based on torch_type during quantization (see sigmoid/hardsigmoid for
examples). By default, if it is not specified, change_zero_point is assumed to be
False and zero point will just take on the default value from X.
`output_is_observed`: if specified and is True, we'll append extra
output_scale/output_zero_point keyword argument when calling quantized op
"""
# Retrieves the default parameters from X.
X, (scale, zero_point, torch_type) = X
if not isinstance(X, torch.Tensor):
X = torch.from_numpy(X)
# Quantizes the reference to account for max error.
# q_min and q_max only depend on the initial torch_type.
q_min, q_max = torch.iinfo(torch_type).min, torch.iinfo(torch_type).max
for op_group in test_configs:
ref_op = op_group['reference_fn']
for q_op in op_group['quantized_fn']:
for memory_format in (torch.channels_last, torch.contiguous_format):
if memory_format == torch.channels_last and len(X.shape) != 4:
continue
X = X.to(memory_format=memory_format)
# Retrieves the inplace keyword arguments
# some functions require inplace=True to test in-place.
# copy.copy is needed because these are modified in place
extra_kwargs = \
copy.copy(op_group.get('extra_kwargs', dict()))
output_is_observed = \
copy.copy(op_group.get('output_is_observed', False))
# Quantizes and dequantizes to account for max error.
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
dqX = qX.dequantize()
dqY_hat = ref_op(dqX.clone(), **extra_kwargs)
# Adjusts output_scale if needed.
# The output_scale determines the quantization scale for functions that
# have a constrained output range, e.g. sigmoid ranges from 0 to 1.
output_scale = scale
if 'output_range' in op_group:
(f_min, f_max) = op_group['output_range']
output_scale = (f_max - f_min) / (q_max - q_min + 1.0)
# Adjusts output_zero_point if needed (see explanation for the
# change_zero_point parameter above).
# output_zero_point determines the additional offset that will be
# added to a scaled value during quantization.
if op_group.get('change_zero_point', False):
output_zero_point = 0 if torch_type == torch.qint32 else q_min
else:
output_zero_point = zero_point
# Quantizes the dequantized version of Y_hat.
qY_hat = torch.quantize_per_tensor(dqY_hat, scale=output_scale,
zero_point=output_zero_point,
dtype=torch_type)
if output_is_observed:
extra_kwargs.update({'output_scale': output_scale, 'output_zero_point': output_zero_point})
# Finds qY using in-place or non-in-place quantized operators.
qY = q_op(qX, **extra_kwargs)
self.assertEqual(qY, qY_hat, msg='{} - {} failed: ({} vs. {})'.format(
fn_name, q_op, qY, qY_hat
))
"""Tests the correctness of the quantized::relu op."""
@override_qengines
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams()))
def test_qrelu(self, X):
relu_test_configs = [
{
'quantized_fn': [
torch.relu,
torch.relu_,
torch.nn.functional.relu,
torch.nn.functional.relu,
],
'reference_fn': torch.nn.functional.relu
},
{
'quantized_fn': [
torch.nn.functional.relu,
torch.nn.functional.relu,
],
'reference_fn': torch.nn.functional.relu,
'extra_kwargs': {
'inplace': True
}
}
]
self._test_activation_function(X, 'relu', relu_test_configs)
"""Tests the correctness of the quantized::relu6 op."""
def test_qrelu6(self):
relu6_test_configs = [
{
'quantized_fn': [
torch.ops.quantized.relu6,
torch.nn.quantized.ReLU6(inplace=False),
torch.nn.quantized.ReLU6(inplace=True)
],
'reference_fn': torch.nn.functional.relu6
}
]
shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
dtypes = (torch.quint8, torch.qint8)
scales = (0.05, 0.1)
zero_points = (0, 5)
test_cases = itertools.product(shapes, dtypes, scales, zero_points)
for shape, dtype, scale, zero_point in test_cases:
X = torch.randn(*shape) * 10
X = (X, (scale, zero_point, dtype))
self._test_activation_function(X, 'relu6', relu6_test_configs)
"""Tests the correctness of the quantized::sigmoid op."""
@override_qengines
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams()))
def test_sigmoid_non_observed(self, X):
sigmoid_test_configs = [
{
'quantized_fn': [
torch.sigmoid
],
'reference_fn': torch.sigmoid,
'output_range': (0.0, 1.0),
'change_zero_point': True
}
]
self._test_activation_function(X, 'sigmoid', sigmoid_test_configs)
"""Tests the correctness of the quantized::sigmoid op."""
# TODO: enable after observed output is supported in qnnpack
# @override_qengines
@skipIfNoFBGEMM
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams()))
def test_sigmoid(self, X):
sigmoid_test_configs = [
{
'quantized_fn': [
torch.ops.quantized.sigmoid
],
'reference_fn': torch.sigmoid,
'output_range': (0.0, 1.0),
'change_zero_point': True,
'output_is_observed': True,
}
]
self._test_activation_function(X, 'sigmoid', sigmoid_test_configs)
"""Tests the correctness of the quantized::hardsigmoid op."""
@override_qengines
def test_qhardsigmoid(self):
hardsigmoid_test_configs = [
{
'quantized_fn': [
torch.nn.quantized.functional.hardsigmoid
],
'reference_fn': torch.nn.functional.hardsigmoid,
'output_range': (0.0, 1.0),
'change_zero_point': True,
},
{
'quantized_fn': [
torch.nn.quantized.functional.hardsigmoid
],
'reference_fn': torch.nn.functional.hardsigmoid,
'output_range': (0.0, 1.0),
'change_zero_point': True,
'extra_kwargs': {
'inplace': True,
},
},
]
shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
dtypes = (torch.quint8, torch.qint8)
test_cases = itertools.product(shapes, dtypes)
for shape, dtype in test_cases:
X = (np.random.rand(*shape).astype(np.float32), (1.0, 0, dtype))
self._test_activation_function(X, 'hardsigmoid', hardsigmoid_test_configs)
@override_qengines
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams()))
def test_leaky_relu_observed_output(self, X):
leaky_relu_test_configs = [
{
'quantized_fn': [
torch.ops.quantized.leaky_relu
],
'reference_fn': torch.nn.functional.leaky_relu,
'extra_kwargs': {
'negative_slope': 0.1,
'inplace': False,
},
'output_is_observed': True,
}
]
self._test_activation_function(X, 'leaky_relu', leaky_relu_test_configs)
"""Tests the correctness of the quantized::relu op."""
def test_leaky_relu(self):
shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
dtypes = (torch.quint8, torch.qint8)
memory_formats = (torch.channels_last, torch.contiguous_format)
test_cases = itertools.product(shapes, dtypes, memory_formats)
for shape, dtype, memory_format in test_cases:
if memory_format == torch.channels_last and len(shape) != 4:
continue
X, scale, zero_point, torch_type, alpha = \
torch.randn(*shape), 0.1, 0, dtype, 0.01
X = X.to(memory_format=memory_format)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
dqX = qX.dequantize()
# torch.nn.functional
op = torch.nn.functional.leaky_relu
dqY = op(dqX, negative_slope=alpha)
qY = torch.quantize_per_tensor(dqY, scale=scale, zero_point=zero_point,
dtype=torch_type)
qY_hat = op(qX, negative_slope=alpha)
self.assertEqual(qY.dequantize(), qY_hat.dequantize(),
msg="F.leaky_relu failed ({} vs {})".format(qY, qY_hat))
"""Tests the correctness of the quantized::elu op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams()),
alpha=st.floats(0.01, 10.0, allow_nan=False, allow_infinity=False))
def test_qelu(self, X, alpha):
X, (scale, zero_point, torch_type) = X
output_scale = 0.5
output_zero_point = 1
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
# calculate ELU(dqX) and quantize
dqX = qX.dequantize()
dqY_hat = dqX.clone()
dqY_hat = torch.nn.functional.elu(dqX, alpha)
qY_hat = torch.quantize_per_tensor(dqY_hat, scale=output_scale, zero_point=output_zero_point,
dtype=torch_type)
qY = torch.nn.quantized.functional.elu(qX, output_scale, output_zero_point, alpha=alpha)
self.assertEqual(qY, qY_hat,
msg="F.elu failed ({} vs {})".format(qY, qY_hat))
"""Tests the correctness of the quantized::celu op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
elements=hu.floats(-1e2, 1e2, allow_nan=False, allow_infinity=False),
qparams=hu.qparams(scale_max=9.999999747378752e-06)),
alpha=st.floats(0.01, 100.0, allow_nan=False, allow_infinity=False))
def test_qcelu(self, X, alpha):
X, (scale, zero_point, torch_type) = X
output_scale = 0.5
output_zero_point = 1
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
# calculate CELU(dqX) and quantize
dqX = qX.dequantize()
dqY_hat = torch.nn.functional.celu(dqX, alpha)
qY_hat = torch.quantize_per_tensor(dqY_hat, scale=output_scale, zero_point=output_zero_point,
dtype=torch_type)
# test regular
qY = torch.ops.quantized.celu(qX, output_scale, output_zero_point, alpha=alpha)
self.assertEqual(qY, qY_hat,
msg="F.celu failed ({} vs {})".format(qY, qY_hat))
"""Tests the correctness of the quantized::gelu op."""
def test_qgelu(self):
shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
dtypes = (torch.quint8, torch.qint8)
memory_formats = (torch.channels_last, torch.contiguous_format)
approximation = ['none', 'tanh']
test_cases = itertools.product(shapes, dtypes, memory_formats, approximation)
devices = ["cpu", "cuda"] if TEST_CUDA else ["cpu"]
for shape, dtype, memory_format, approximate in test_cases:
if memory_format == torch.channels_last and len(shape) != 4:
continue
X, scale, zero_point, torch_type = \
torch.randn(*shape), 0.1, 0, dtype
X = X.to(memory_format=memory_format)
for device in devices:
X = X.to(device=device)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
dqX = qX.dequantize()
op = torch.nn.functional.gelu
dqY = op(dqX, approximate=approximate)
qY = torch.quantize_per_tensor(dqY, scale=scale, zero_point=zero_point,
dtype=torch_type)
qY_hat = op(qX)
self.assertEqual(qY.dequantize(), qY_hat.dequantize(),
msg="F.gelu failed ({} vs {})".format(qY, qY_hat))
"""Tests the correctness of the quantized::prelu op."""
def test_qprelu(self):
shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
num_params = (0, 1) # 0: num_parameter = num_channels
dtypes = (torch.quint8, torch.qint8)
memory_formats = (torch.channels_last, torch.contiguous_format)
test_cases = itertools.product(shapes, num_params, dtypes, memory_formats)
for shape, num_param, dtype, memory_format in test_cases:
if memory_format == torch.channels_last and len(shape) != 4:
continue
X, scale, zero_point, torch_type = \
torch.randn(*shape), 0.1, 0, dtype
X = X.to(memory_format=memory_format)
num_parameter = 1 if num_param == 1 or len(shape) == 1 else shape[1]
W = torch.randn(num_parameter)
W, w_scale, w_zero_point = \
torch.randn(num_parameter), 0.2, 0
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
dqX = qX.dequantize()
qW = torch.quantize_per_tensor(W, scale=w_scale, zero_point=w_zero_point,
dtype=torch_type)
dqW = qW.dequantize()
op = torch.nn.functional.prelu
qop = torch.ops.quantized.prelu
dqY = op(dqX, dqW)
qY = torch.quantize_per_tensor(dqY, scale=scale, zero_point=zero_point,
dtype=torch_type)
qY_hat = qop(qX, qW, scale, zero_point)
self.assertEqual(qY.dequantize(), qY_hat.dequantize(),
msg="F.prelu failed ({} vs {})".format(qY, qY_hat))
"""Tests the correctness of the quantized::qlayer_norm op."""
@skipIfNoFBGEMM
def test_qlayer_norm(self):
# hypothesis is flaky for this test, create test cases manually
side_lens = (1, 8, 11)
torch_types = (torch.qint8, torch.quint8)
y_scales = (0.1, 4.23)
y_zero_points = (0, 1)
channels_last_list = (True, False)
affine_list = (True, False)
combined = [side_lens, torch_types, y_scales, y_zero_points,
channels_last_list, affine_list]
test_cases = itertools.product(*combined)
with override_quantized_engine("fbgemm"):
for test_case in test_cases:
side_len, torch_type, Y_scale, Y_zero_point, channels_last, \
affine = test_case
shapes = [side_len] * 4
# In the FP kernel, mean and variance are calculated in floating point.
# In the quantized kernel, they are calculated in integer arithmetic.
# Because of this, the numerics do not always match exactly which is
# expected and acceptable. We do two things to allow this failure
# in this test:
# 1. do not use Hypothesis to generate the input tensor. Hypothesis
# favors homogeneous inputs in its search strategies which isn't
# representative of the inputs we care about, and tends to maximize
# this particular numerics difference.
# 2. allow a small % of off by Y_scale errors. Even when the
# variance of the input is high, there can be off by one errors
# in the result if the input value happens to fall exactly on
# the bin boundary of the output scale.
#
# If we want the numerics to match we could switch to calculating
# mean+var in floating point in the future, at the cost of speed.
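# Illustrative note (not from the original comment): with Y_scale = 0.1, a
# normalized value that lands essentially on a bin boundary can round to either
# of two adjacent quantized levels, whose dequantized values differ by exactly
# Y_scale = 0.1; the checks below tolerate a small fraction of such cases.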
X, X_scale, X_zero_point = \
_get_random_tensor_and_q_params(shapes, 1.0, torch_type)
qX = torch.quantize_per_tensor(X, scale=X_scale,
zero_point=X_zero_point,
dtype=torch_type)
if channels_last:
qX = qX.contiguous(memory_format=torch.channels_last)
dqX = qX.dequantize()
# Enforce non-homogeneous inputs
enough_unique_vals_in_each_layer = sum(
1 if (
dqX[i].shape[0] < 5 or
float(torch.unique(dqX[i]).shape[0]) / dqX[i].shape[0] > 0.01
) else 0
for i in range(dqX.shape[0])
) == dqX.shape[0]
assume(enough_unique_vals_in_each_layer)
# Initialize the weights non-randomly for reproducibility, to avoid
# flaky tests
if affine:
weight = torch.ones(*qX.size()[1:], dtype=torch.float) * 0.5
bias = torch.ones(*qX.size()[1:], dtype=torch.float) * 1
else:
weight = None
bias = None
epsilon = 1e-5
qY = torch.ops.quantized.layer_norm(
qX, qX.size()[1:], weight=weight, bias=bias, eps=epsilon,
output_scale=Y_scale, output_zero_point=Y_zero_point)
Y_hat = F.layer_norm(
dqX, dqX.size()[1:], weight=weight, bias=bias, eps=epsilon)
qY_hat = torch.quantize_per_tensor(
Y_hat, scale=Y_scale, zero_point=Y_zero_point, dtype=torch_type)
# Due to the numerics difference mentioned above between calculating
# the variance in float vs int, the results can still be slightly
# different.
dqY = qY.dequantize()
dqY_hat = qY_hat.dequantize()
diff = dqY - dqY_hat
# off-by-one errors are magnitude of Y_scale
num_diff = torch.sum(diff > Y_scale * 1.0001)
pct_diff = float(num_diff) / (diff.numel() + 1e-5)
num_diff_off_by_one = torch.sum((diff > 0) * (diff <= Y_scale))
pct_diff_off_by_one = float(num_diff_off_by_one) / (diff.numel() + 1e-5)
self.assertTrue(pct_diff < 1e-6)
self.assertTrue(pct_diff_off_by_one < 0.01)
"""Tests the correctness of the quantized::qnnpack_tanh op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams()))
@unittest.skip(
"this is broken without changes to any relevant code, "
"we need to remove hypothesis testing in CI")
def test_qtanh(self, X):
# Note: QNNPACK is tested separately in TestQNNPackOps
X, (scale, zero_point, torch_type) = X
X = torch.from_numpy(X)
Y = torch.tanh(X)
qX = torch.quantize_per_tensor(X, scale=scale,
zero_point=zero_point,
dtype=torch_type)
# Quantize the reference to account for max error.
# Note that the output scale has +1, because we use scale of 2.0/2^BITS
# in the implementations.
f_min, f_max = -1.0, 1.0
q_min, q_max = torch.iinfo(torch_type).min, torch.iinfo(torch_type).max
output_scale = (f_max - f_min) / (q_max - q_min + 1.0)
output_zero_point = int(round((q_max + q_min) / 2.0))
qY = torch.quantize_per_tensor(Y, scale=output_scale,
zero_point=output_zero_point,
dtype=torch_type)
qY_hat = torch.tanh(qX)
self.assertEqual(qY, qY_hat,
msg="TanH failed: {} vs. {}".format(qY, qY_hat))
"""Tests the correctness of the quantized::threshold op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
elements=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
qparams=hu.qparams()),
threshold=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False),
value=hu.floats(-1e3, 1e3, allow_nan=False, allow_infinity=False))
def test_qthreshold(self, X, threshold, value):
X, (scale, zero_point, torch_type) = X
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
# calculate threshold(dqX) and quantize
dqX = qX.dequantize()
dqY_hat = dqX.clone()
dqY_hat = torch.nn.functional.threshold(dqY_hat, threshold, value)
qY_hat = torch.quantize_per_tensor(dqY_hat, scale=scale, zero_point=zero_point,
dtype=torch_type)
ops_under_test = {
'native': torch.threshold,
'nn.functional': torch.nn.functional.threshold,
'nn.quantized.functional': torch.nn.quantized.functional.threshold
}
for name, op in ops_under_test.items():
qY = op(qX, threshold, value)
self.assertEqual(qY, qY_hat, msg="{} qthreshold failed".format(name))
"""Tests the correctness of the quantized::clamp op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 8, 1, 8, max_numel=10**5),
elements=hu.floats(-1e6, 1e6, allow_nan=False),
qparams=hu.qparams()),
min_val=hu.floats(-1e6, 1e6, allow_nan=False),
max_val=hu.floats(-1e6, 1e6, allow_nan=False))
def test_qclamp(self, X, min_val, max_val):
X, (scale, zero_point, torch_type) = X
assume(min_val <= max_val)
Y_clamp = torch.clamp(torch.from_numpy(X), min=min_val, max=max_val)
qY_clamp = torch.quantize_per_tensor(Y_clamp, scale=scale,
zero_point=zero_point, dtype=torch_type)
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
ops_under_test = {
'ops.quantized': torch.ops.quantized.clamp,
}
for name, op in ops_under_test.items():
qY_clamp_hat = op(qX, min=min_val, max=max_val)
self.assertEqual(qY_clamp, qY_clamp_hat, msg="{} qclamp failed".format(name))
if torch.backends.quantized.engine == 'fbgemm':
with override_quantized_engine('fbgemm'):
Y_min_clamp = torch.clamp(X, min=min_val)
Y_max_clamp = torch.clamp(X, max=max_val)
qY_min_clamp = torch.quantize_per_tensor(Y_min_clamp, scale=scale,
zero_point=zero_point, dtype=torch_type)
qY_max_clamp = torch.quantize_per_tensor(Y_max_clamp, scale=scale,
zero_point=zero_point, dtype=torch_type)
for name, op in ops_under_test.items():
qY_min_clamp_hat = op(qX, min=min_val)
self.assertEqual(qY_min_clamp, qY_min_clamp_hat, msg="{} qclamp failed".format(name))
qY_max_clamp_hat = op(qX, max=max_val)
self.assertEqual(qY_max_clamp, qY_max_clamp_hat, msg="{} qclamp failed".format(name))
"""Tests the correctness of the quantized::hardtanh op."""
@skipIfNoFBGEMM
@given(X=hu.tensor(shapes=hu.array_shapes(1, 8, 1, 8, max_numel=10**5),
elements=hu.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False),
qparams=hu.qparams()),
min_val=hu.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False),
max_val=hu.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False))
def test_hardtanh(self, X, min_val, max_val):
with override_quantized_engine('fbgemm'):
X, (scale, zero_point, torch_type) = X
assume(min_val <= max_val)
Y = X.copy()
Y[Y < min_val] = min_val
Y[Y > max_val] = max_val
qY = torch.quantize_per_tensor(torch.from_numpy(Y), scale=scale,
zero_point=zero_point, dtype=torch_type)
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
ops_under_test = {
'nn.quantized.functional.hardtanh':
torch.nn.quantized.functional.hardtanh,
}
for name, op in ops_under_test.items():
qY_hat = op(qX, min_val, max_val)
self.assertEqual(qY, qY_hat, msg="{} hardtanh failed".format(name))
ops_under_test_inplace = {
'inplace nn.quantized.functional.hardtanh':
torch.nn.quantized.functional.hardtanh,
}
for name, op_ in ops_under_test_inplace.items():
qY_hat = qX.clone()
op_(qY_hat, min_val, max_val, inplace=True)
self.assertEqual(qY, qY_hat, msg="{} hardtanh failed".format(name))
"""Tests the correctness of the quantized::hardswish op."""
@override_qengines
def test_hardswish(self):
max_sides = (3, 4)
side_lens = (1, 7)
torch_types = (torch.quint8, torch.qint8)
y_scales = (0.1, )
y_zero_points = (1,)
combined = [max_sides, side_lens, torch_types, y_scales, y_zero_points]
test_cases = itertools.product(*combined)
for test_case in test_cases:
max_side, side_len, torch_type, Y_scale, Y_zero_point = test_case
if torch.backends.quantized.engine == 'qnnpack' and torch_type != torch.quint8:
continue
shapes = [side_len] * max_side
X, X_scale, X_zero_point = \
_get_random_tensor_and_q_params(shapes, 2.0, torch_type)
for memory_format in torch.channels_last, torch.contiguous_format:
if memory_format == torch.channels_last and len(shapes) == 4:
X = X.to(memory_format=memory_format)
qX = torch.quantize_per_tensor(X, scale=X_scale, zero_point=X_zero_point,
dtype=torch_type)
dqX = qX.dequantize()
dqY_hat = F.hardswish(dqX)
qY_hat = torch.quantize_per_tensor(dqY_hat, scale=Y_scale,
zero_point=Y_zero_point,
dtype=torch_type)
qY = torch.nn.quantized.functional.hardswish(
qX, scale=Y_scale, zero_point=Y_zero_point)
self.assertEqual(
qY, qY_hat,
msg="Hardswish failed: {} vs {}, {}".format(qY, qY_hat, torch.backends.quantized.engine))
"""Tests the correctness of the binary op + scalar."""
def _test_binary_op_scalar_relu(self, A, b, binary_op_name, binary_op, quantized_op, quantized_op_relu):
import copy
op_scalar = quantized_op
op_scalar_relu = quantized_op_relu
A, (scale, zero_point, dtype) = A
A = A.astype(np.float32)
qA = torch.quantize_per_tensor(torch.from_numpy(A), scale, zero_point, dtype)
if binary_op_name == 'add':
C = binary_op(qA.dequantize(), round(b / scale) * scale)
else:
C = binary_op(qA.dequantize(), b)
C_relu = copy.deepcopy(C)
C_relu[C_relu < 0] = 0
C_hat = op_scalar(qA, b)
C_ref = torch.quantize_per_tensor(C, C_hat.q_scale(), C_hat.q_zero_point(), dtype)
C_relu_hat = op_scalar_relu(qA, b)
C_relu_ref = torch.quantize_per_tensor(
C_relu, C_relu_hat.q_scale(), C_relu_hat.q_zero_point(), dtype)
self.assertEqual(C_ref.dequantize(), C_hat.dequantize(),
msg="{}_scalar results don't match: "
"{} vs {}".format(binary_op_name, C_ref.dequantize(), C_hat.dequantize()))
self.assertEqual(C_relu_ref.dequantize(), C_relu_hat.dequantize(),
msg="{}_scalar_relu results don't match: "
"{} vs {}".format(binary_op_name, C_relu_ref.dequantize(), C_relu_hat.dequantize()))
@unittest.skipIf(IS_MACOS, "skipping macos test")
@given(A=hu.tensor(shapes=hu.array_shapes(1, 4, 1, 5),
elements=hu.floats(-1e6, 1e6, allow_nan=False),
qparams=hu.qparams()),
b=hu.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False))
def test_add_scalar_relu(self, A, b):
self._test_binary_op_scalar_relu(A, b, "add", operator.add, torch.ops.quantized.add, torch.ops.quantized.add_relu)
@unittest.skipIf(IS_MACOS, "skipping macos test")
@given(A=hu.tensor(shapes=hu.array_shapes(1, 4, 1, 5),
elements=hu.floats(-1e6, 1e6, allow_nan=False),
qparams=hu.qparams()),
b=hu.floats(-1e6, 1e6, allow_nan=False, allow_infinity=False))
def test_mul_scalar_relu(self, A, b):
self._test_binary_op_scalar_relu(A, b, "mul", operator.mul, torch.ops.quantized.mul, torch.ops.quantized.mul_relu)
"""Tests the correctness of the add and add_relu op."""
def test_qadd_relu_same_qparams(self):
for dtype in [torch.quint8, torch.qint8, torch.qint32]:
add_relu = torch.ops.quantized.add_relu
add = torch.ops.quantized.add
add_out = torch.ops.quantized.add
add_relu_out = torch.ops.quantized.add_relu
# NB: This is a strange size so that we exercise both the vectorized
# implementation (64-element chunks at a time) as well as the scalar
# implementation
A = torch.arange(-128, 130, dtype=torch.float)
B = torch.arange(-128, 130, dtype=torch.float)
scale = 2.0
zero_point = 127
qA = torch.quantize_per_tensor(A, scale=scale, zero_point=zero_point,
dtype=dtype)
qB = torch.quantize_per_tensor(B, scale=scale, zero_point=zero_point,
dtype=dtype)
# Add ReLU ground truth
C = (qA.dequantize() + qB.dequantize()).numpy()
qC = _quantize(C, scale, zero_point, dtype=np_dtype[dtype])
qC_hat = add(qA, qB, scale=scale, zero_point=zero_point)
np.testing.assert_equal(qC, qC_hat.int_repr(),
"Quantized addition failed.")
qC_out_hat = torch._empty_affine_quantized(qC.shape,
scale=scale,
zero_point=zero_point,
dtype=dtype)
add_out(qA, qB, out=qC_out_hat)
self.assertEqual(qC_hat, qC_out_hat, msg="Add.out failed")
# Add + ReLU ground truth
Crelu = C.copy()
Crelu[C < 0] = 0
qCrelu = _quantize(Crelu, scale, zero_point, dtype=np_dtype[dtype])
qCrelu_hat = add_relu(qA, qB, scale=scale, zero_point=zero_point)
np.testing.assert_equal(qCrelu, qCrelu_hat.int_repr(),
"Quantized addition with ReLU failed.")
qCrelu_out_hat = torch._empty_affine_quantized(qCrelu.shape,
scale=scale,
zero_point=zero_point,
dtype=dtype)
add_relu_out(qA, qB, out=qCrelu_out_hat)
self.assertEqual(qCrelu_hat, qCrelu_out_hat,
msg="AddReLU.out failed")
"""Tests the correctness of the cudnn add and add_relu op
(Similar to test_qadd_relu_different_qparams, will probably merge in the future)"""
@unittest.skipIf(not TEST_CUDNN, "cudnn is not enabled.")
@unittest.skip("Local only - currently the test_qadd_relu_cudnn op is bulid "
"with USE_EXPERIMENTAL_CUDNN_V8_API, we can enable the test "
"after it is built by default")
def test_qadd_relu_cudnn(self):
dtype = torch.qint8
add_relu = torch.ops.quantized.add_relu
add = torch.ops.quantized.add
A = torch.arange(-128, 130, dtype=torch.float).to(torch.device("cuda"))
B = torch.arange(-128, 130, dtype=torch.float).to(torch.device("cuda"))
scale_A = 2.5
scale_B = 6.3
scale_C = 12.9
zero_point = 0
qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point,
dtype=dtype)
qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point,
dtype=dtype)
# Add ground truth
C = (qA.dequantize() + qB.dequantize()).to(device="cpu").numpy()
qC = _quantize(C, scale_C, zero_point, dtype=np_dtype[dtype])
qC_hat = add(qA, qB, scale=scale_C, zero_point=zero_point).to(device="cpu")
np.testing.assert_equal(qC, qC_hat.int_repr(),
"Quantized addition failed.")
# Add + ReLU ground truth
Crelu = C.copy()
Crelu[C < 0] = 0
qCrelu = _quantize(Crelu, scale_C, zero_point, dtype=np_dtype[dtype])
qCrelu_hat = add_relu(qA, qB, scale=scale_C, zero_point=zero_point).to(device="cpu")
np.testing.assert_equal(qCrelu, qCrelu_hat.int_repr(),
"Quantized addition with ReLU failed.")
"""Tests the correctness of the cudnn add and add_relu op for nhwc format"""
@unittest.skipIf(not TEST_CUDNN, "cudnn is not enabled.")
@unittest.skip("Local only - currently the test_qadd_relu_cudnn_nhwc op is bulid "
"with USE_EXPERIMENTAL_CUDNN_V8_API, we can enable the test "
"after it is built by default")
def test_qadd_relu_cudnn_nhwc(self):
dtype = torch.qint8
add_relu = torch.ops.quantized.add_relu
add = torch.ops.quantized.add
A = torch.rand(16, 8, 4, 12).to(device="cuda")
B = torch.rand(16, 8, 4, 12).to(device="cuda")
scale_A = 2.5
scale_B = 6.3
scale_C = 12.9
zero_point = 0
qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point,
dtype=dtype)
qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point,
dtype=dtype)
# Add ground truth
C = (qA.dequantize() + qB.dequantize()).to(device="cpu").numpy()
qC = _quantize(C, scale_C, zero_point, dtype=np_dtype[dtype])
qC_hat = add(qA, qB, scale=scale_C, zero_point=zero_point).to(device="cpu")
np.testing.assert_equal(qC, qC_hat.int_repr(),
"Quantized addition failed.")
# Add + ReLU ground truth
Crelu = C.copy()
Crelu[C < 0] = 0
qCrelu = _quantize(Crelu, scale_C, zero_point, dtype=np_dtype[dtype])
qCrelu_hat = add_relu(qA, qB, scale=scale_C, zero_point=zero_point).to(device="cpu")
np.testing.assert_equal(qCrelu, qCrelu_hat.int_repr(),
"Quantized addition with ReLU failed.")
"""Tests the correctness of the add and add_relu op."""
def test_qadd_relu_different_qparams(self):
for dtype in [torch.quint8, torch.qint8, torch.qint32]:
add_relu = torch.ops.quantized.add_relu
add = torch.ops.quantized.add
add_out = torch.ops.quantized.add
add_relu_out = torch.ops.quantized.add_relu
# NB: This is a strange size so that we exercise both the vectorized
# implementation (64-element chunks at a time) as well as the scalar
# implementation
A = torch.arange(-128, 130, dtype=torch.float)
B = torch.arange(-128, 130, dtype=torch.float)
scale_A = 3.0
zero_point_A = 7
scale_B = 5.0
zero_point_B = 127
scale_C = 0.5
zero_point_C = 5
qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point_A,
dtype=dtype)
qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point_B,
dtype=dtype)
# Add ground truth
C = (qA.dequantize() + qB.dequantize()).numpy()
qC = _quantize(C, scale_C, zero_point_C, dtype=np_dtype[dtype])
qC_hat = add(qA, qB, scale=scale_C, zero_point=zero_point_C)
np.testing.assert_equal(qC, qC_hat.int_repr(),
"Quantized addition failed.")
qC_out_hat = torch._empty_affine_quantized(qC.shape,
scale=scale_C,
zero_point=zero_point_C,
dtype=dtype)
add_out(qA, qB, out=qC_out_hat)
self.assertEqual(qC_hat, qC_out_hat, msg="Add.out failed")
# Add + ReLU ground truth
Crelu = C.copy()
Crelu[C < 0] = 0
qCrelu = _quantize(Crelu, scale_C, zero_point_C, dtype=np_dtype[dtype])
qCrelu_hat = add_relu(qA, qB, scale=scale_C, zero_point=zero_point_C)
np.testing.assert_equal(qCrelu, qCrelu_hat.int_repr(),
"Quantized addition with ReLU failed.")
qCrelu_out_hat = torch._empty_affine_quantized(qCrelu.shape,
scale=scale_C,
zero_point=zero_point_C,
dtype=dtype)
add_relu_out(qA, qB, out=qCrelu_out_hat)
self.assertEqual(qCrelu_hat, qCrelu_out_hat,
msg="AddReLU.out failed")
"""Tests the correctness of the mul and mul_relu op."""
def test_qmul_relu_same_qparams(self):
for dtype in [torch.quint8, torch.qint8, torch.qint32]:
mul_relu = torch.ops.quantized.mul_relu
mul = torch.ops.quantized.mul
mul_out = torch.ops.quantized.mul
mul_relu_out = torch.ops.quantized.mul_relu
A = torch.arange(-100, 100, dtype=torch.float)
B = torch.arange(-100, 100, dtype=torch.float)
scale = 2.0
zero_point = 127
qA = torch.quantize_per_tensor(A, scale=scale, zero_point=zero_point,
dtype=dtype)
qB = torch.quantize_per_tensor(B, scale=scale, zero_point=zero_point,
dtype=dtype)
# mul ReLU ground truth
C = (qA.dequantize() * qB.dequantize()).numpy()
qC = _quantize(C, scale, zero_point, dtype=np_dtype[dtype])
qC_hat = mul(qA, qB, scale=scale, zero_point=zero_point)
np.testing.assert_equal(qC, qC_hat.int_repr(),
"Quantized mulition failed.")
qC_out_hat = torch._empty_affine_quantized(qC.shape,
scale=scale,
zero_point=zero_point,
dtype=dtype)
mul_out(qA, qB, out=qC_out_hat)
self.assertEqual(qC_hat, qC_out_hat, msg="mul.out failed")
# mul + ReLU ground truth
Crelu = C.copy()
Crelu[C < 0] = 0
qCrelu = _quantize(Crelu, scale, zero_point, dtype=np_dtype[dtype])
qCrelu_hat = mul_relu(qA, qB, scale=scale, zero_point=zero_point)
np.testing.assert_equal(qCrelu, qCrelu_hat.int_repr(),
"Quantized mulition with ReLU failed.")
qCrelu_out_hat = torch._empty_affine_quantized(qCrelu.shape,
scale=scale,
zero_point=zero_point,
dtype=dtype)
mul_relu_out(qA, qB, out=qCrelu_out_hat)
self.assertEqual(qCrelu_hat, qCrelu_out_hat,
msg="mulReLU.out failed")
# Scalar multiplication
for b in B:
C_ref = qA.dequantize().numpy() * b.item()
qC_hat = torch.ops.quantized.mul(qA, b.item())
self.assertEqual(C_ref, qC_hat.dequantize())
# Scalar multiplication + relu
for b in B:
C_ref = qA.dequantize().numpy() * b.item()
C_ref[C_ref < 0] = 0
qC_hat = torch.ops.quantized.mul_relu(qA, b.item())
self.assertEqual(C_ref, qC_hat.dequantize())
"""Tests the correctness of the mul and mul_relu op."""
def test_qmul_relu_different_qparams(self):
for dtype in [torch.quint8, torch.qint8, torch.qint32]:
mul_relu = torch.ops.quantized.mul_relu
mul = torch.ops.quantized.mul
mul_out = torch.ops.quantized.mul
mul_relu_out = torch.ops.quantized.mul_relu
A = torch.arange(-100, 100, dtype=torch.float)
B = torch.arange(-100, 100, dtype=torch.float)
scale_A = 3.0
zero_point_A = 7
scale_B = 5.0
zero_point_B = 127
scale_C = 0.5
zero_point_C = 5
qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point_A,
dtype=dtype)
qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point_B,
dtype=dtype)
# mul ground truth
C = (qA.dequantize() * qB.dequantize()).numpy()
qC = _quantize(C, scale_C, zero_point_C, dtype=np_dtype[dtype])
qC_hat = mul(qA, qB, scale=scale_C, zero_point=zero_point_C)
np.testing.assert_equal(qC, qC_hat.int_repr(),
"Quantized multiplication failed.")
qC_out_hat = torch._empty_affine_quantized(qC.shape,
scale=scale_C,
zero_point=zero_point_C,
dtype=dtype)
mul_out(qA, qB, out=qC_out_hat)
self.assertEqual(qC_hat, qC_out_hat, msg="mul.out failed")
# mul + ReLU ground truth
Crelu = C.copy()
Crelu[C < 0] = 0
qCrelu = _quantize(Crelu, scale_C, zero_point_C, dtype=np_dtype[dtype])
qCrelu_hat = mul_relu(qA, qB, scale=scale_C, zero_point=zero_point_C)
np.testing.assert_equal(qCrelu, qCrelu_hat.int_repr(),
"Quantized multiplication with ReLU failed.")
qCrelu_out_hat = torch._empty_affine_quantized(qCrelu.shape,
scale=scale_C,
zero_point=zero_point_C,
dtype=dtype)
mul_relu_out(qA, qB, out=qCrelu_out_hat)
self.assertEqual(qCrelu_hat, qCrelu_out_hat,
msg="mulReLU.out failed")
"""Tests the correctness of the matmul op."""
@given(num_dims=st.integers(2, 5),
outer_dims=st.lists(st.integers(2, 6), min_size=3, max_size=3),
m=st.integers(2, 6),
k=st.integers(2, 6),
n=st.integers(2, 6),
dtypes=st.sampled_from(((torch.qint8, np.int8),
(torch.quint8, np.uint8))))
def test_qmatmul(self, num_dims, outer_dims, m, k, n, dtypes):
(torch_dtype, np_dtype) = dtypes
size_a = outer_dims[:num_dims - 2] + [m, k]
size_b = outer_dims[:num_dims - 2] + [k, n]
A = torch.randn(size=size_a, dtype=torch.float32) * 3
B = torch.randn(size=size_b, dtype=torch.float32) * 3
scale_A = 3.1
zero_point_A = 7
scale_B = 5.3
zero_point_B = 127
scale_C = 1.3
zero_point_C = 5
qA = torch.quantize_per_tensor(A,
scale=scale_A,
zero_point=zero_point_A,
dtype=torch_dtype)
qB = torch.quantize_per_tensor(B,
scale=scale_B,
zero_point=zero_point_B,
dtype=torch_dtype)
# matmul ground truth
C = torch.matmul(qA.dequantize(), qB.dequantize()).numpy()
qC = _quantize(C, scale_C, zero_point_C, dtype=(np_dtype))
qC_hat = torch.ops.quantized.matmul(qA,
qB,
scale=scale_C,
zero_point=zero_point_C)
np.testing.assert_equal(qC, qC_hat.int_repr(),
"Quantized multiplication failed.")
# Using per channel quantization fails
axis = 0
scales_A = torch.rand(size=(A.shape[axis],))
zero_points_A = torch.randint(low=0, high=5, size=(A.shape[axis],))
scales_B = torch.rand(size=(B.shape[axis],))
zero_points_B = torch.randint(low=0, high=5, size=(B.shape[axis],))
qA = torch.quantize_per_channel(A,
scales=scales_A,
zero_points=zero_points_A,
axis=axis,
dtype=torch.qint8)
qB = torch.quantize_per_channel(B,
scales=scales_B,
zero_points=zero_points_B,
axis=axis,
dtype=torch.qint8)
np.testing.assert_raises_regex(RuntimeError,
".*per-tensor.*",
torch.ops.quantized.matmul,
qA,
qB,
scale_C,
zero_point_C)
"""Tests the correctness of the quantized softmax op."""
@given(dims=st.lists(st.integers(2, 5), min_size=5, max_size=5))
def test_qsoftmax(self, dims):
for (num_dims, dim, memory_format) in [
(2, 1, torch.contiguous_format), # 2d softmax over last dim
(4, 3, torch.contiguous_format), # >2 dims, softmax along last dim
(5, 2, torch.contiguous_format), # >2 dims, softmax along not last dim (requires permute)
(4, 3, torch.channels_last), # >2 dims, softmax along last dim, but not contiguous
(4, 1, torch.channels_last), # Channels Last, doesn't require permute
(5, 1, torch.channels_last_3d), # Channels Last 3D, doesn't require permute
]:
size = dims[:num_dims]
torch_dtype = torch.quint8
np_dtype = np.uint8
scale_X = 1.3
zero_point_X = 5
X = torch.rand(size=size, dtype=torch.float32) * 8 + zero_point_X
X = X.to(memory_format=memory_format)
scale_Y = 1 / 256
zero_point_Y = 0
qX = torch.quantize_per_tensor(X,
scale=scale_X,
zero_point=zero_point_X,
dtype=torch_dtype)
# softmax ground truth
Y = torch.softmax(qX.dequantize(), dim=dim).numpy()
qY = _quantize(Y, scale_Y, zero_point_Y, dtype=np_dtype)
qY_hat = torch.ops.quantized.softmax(qX,
dim=dim,
output_scale=scale_Y,
output_zero_point=zero_point_Y)
np.testing.assert_equal(qY, qY_hat.int_repr(),
"Quantized softmax failed.")
"""Tests the correctness of the quantized softmax op using qnnpack."""
@skipIfNoQNNPACK
def test_qsoftmax_qnnpack(self):
with override_quantized_engine('qnnpack'):
self.test_qsoftmax()
"""Tests the correctness of the mul and mul_relu op."""
def test_qmul_broadcast(self):
mul_relu = torch.ops.quantized.mul_relu
mul = torch.ops.quantized.mul
mul_out = torch.ops.quantized.mul
mul_relu_out = torch.ops.quantized.mul_relu
# A = torch.arange(-25, 25, dtype=torch.float)
# B = torch.arange(-25, 25, dtype=torch.float)
A = torch.randn(8, 1, 6, 1)
B = torch.randn(7, 1, 5)
scale_A = 3.0
zero_point_A = 7
scale_B = 5.0
zero_point_B = 127
scale_C = 0.5
zero_point_C = 5
qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point_A,
dtype=torch.quint8)
qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point_B,
dtype=torch.quint8)
# mul ground truth
C = (qA.dequantize() * qB.dequantize()).numpy()
qC = _quantize(C, scale_C, zero_point_C)
qC_hat = mul(qA, qB, scale=scale_C, zero_point=zero_point_C)
np.testing.assert_equal(qC, qC_hat.int_repr(),
"Quantized multiplication failed.")
"""Tests that quantized add works with broadcasting"""
def test_qadd_broadcast(self):
A = torch.randn(1, 1, 4, 4)
B = torch.randn(2, 1, 4, 4)
qA = torch.quantize_per_tensor(A, 0.02, 0, torch.quint8)
qB = torch.quantize_per_tensor(B, 0.04, 2, torch.quint8)
output_scale = 0.01
output_zp = 1
# ground truth
C = qA.dequantize() + qB.dequantize()
qC = torch.quantize_per_tensor(C, output_scale, output_zp, torch.quint8)
# quantized
qC_hat_1 = torch.ops.quantized.add(qA, qB, output_scale, output_zp)
qC_hat_2 = torch.ops.quantized.add(qB, qA, output_scale, output_zp)
self.assertTrue(torch.allclose(qC.dequantize(), qC_hat_1.dequantize()))
self.assertTrue(torch.allclose(qC.dequantize(), qC_hat_2.dequantize()))
"""Tests channel shuffle operation on quantized tensors."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
min_side=2, max_side=32, max_numel=10**5),
qparams=hu.qparams(dtypes=[torch.quint8])),
groups=st.integers(2, 6))
def test_channel_shuffle(self, X, groups):
X, (scale, zero_point, torch_type) = X
channels = X.shape[-3]
iH, iW = X.shape[-2:]
assume(channels % groups == 0)
a = torch.from_numpy(X)
a = torch.rand(a.shape)
a_out = torch.nn.functional.channel_shuffle(a, groups)
a_ref = torch.quantize_per_tensor(a_out, scale=scale,
zero_point=zero_point, dtype=torch_type)
a_ref = a_ref.dequantize()
qa = torch.quantize_per_tensor(a, scale=scale, zero_point=zero_point,
dtype=torch_type)
a_hat = torch.nn.functional.channel_shuffle(qa, groups)
self.assertEqual(a_ref, a_hat.dequantize(),
msg="torch.nn.functional.channel_shuffle results are off")
"""Tests 1D max pool operation on quantized tensors."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=2, max_dims=3,
min_side=1, max_side=10),
qparams=hu.qparams()),
kernel=st.sampled_from((3, 5, 7)),
stride=st.sampled_from((None, 1, 2)),
dilation=st.integers(1, 2),
padding=st.integers(0, 2),
ceil_mode=st.booleans())
def test_max_pool1d(self, X, kernel, stride, dilation, padding, ceil_mode):
X, (scale, zero_point, torch_type) = X
# Check constraints
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iW = X.shape[-1]
oW = pool_output_shape(iW, kernel, padding, stride, dilation, ceil_mode)
assume(oW > 0)
a = torch.from_numpy(X)
a_pool = torch.nn.functional.max_pool1d(a, kernel_size=kernel,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode)
a_ref = torch.quantize_per_tensor(a_pool, scale=scale,
zero_point=zero_point, dtype=torch_type)
a_ref = a_ref.dequantize()
qa = torch.quantize_per_tensor(a, scale=scale, zero_point=zero_point,
dtype=torch_type)
ops_under_test = {
"torch": torch.max_pool1d,
"nn.functional": torch.nn.functional.max_pool1d,
"nn.quantized.functional": torch.nn.quantized.functional.max_pool1d
}
for name, op in ops_under_test.items():
a_hat = op(qa, kernel_size=kernel, stride=stride, padding=padding,
dilation=dilation, ceil_mode=ceil_mode)
self.assertEqual(a_ref, a_hat.dequantize(),
msg="{} results are off".format(name))
# Test the ops.quantized variant separately, because it does not accept None for stride.
a_hat = torch.ops.quantized.max_pool1d(
qa, kernel_size=_single(kernel),
stride=_single(kernel if stride is None else stride),
padding=_single(padding), dilation=_single(dilation),
ceil_mode=ceil_mode)
self.assertEqual(a_ref, a_hat.dequantize(),
msg="ops.quantized.max_pool1d results are off")
# TODO: merge this test with test_max_pool2d when USE_EXPERIMENTAL_CUDNN_V8_API flag is enabled in CI
"""Tests 2D cudnn max pool operation on quantized tensors."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=4,
min_side=1, max_side=10),
# cudnn's support for quantized pooling is limited to
# int8 currently
qparams=hu.qparams(dtypes=[torch.qint8])),
kernel=st.sampled_from((3, 5, 7)),
stride=st.sampled_from((None, 1, 2)),
# currently there is no support for dilation for cudnn
# pooling
dilation=st.integers(1, 1),
padding=st.integers(0, 2),
ceil_mode=st.booleans())
@unittest.skipIf(not TEST_CUDNN, "cudnn is not enabled.")
@unittest.skip("Local only - currently the qconv2d_cudnn op is bulid "
"with USE_EXPERIMENTAL_CUDNN_V8_API, we can enable the test "
"after it is built by default")
def test_max_pool2d_cudnn(self, X, kernel, stride, dilation, padding, ceil_mode):
X, (scale, zero_point, torch_type) = X
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iH, iW = X.shape[-2:]
oH = pool_output_shape(iH, kernel, padding, stride, dilation, ceil_mode)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation, ceil_mode)
assume(oW > 0)
a = torch.from_numpy(X).to(device="cuda")
a_pool = torch.nn.functional.max_pool2d(a, kernel_size=kernel,
stride=stride,
padding=padding, dilation=dilation,
ceil_mode=ceil_mode)
a_ref = torch.quantize_per_tensor(a_pool, scale=scale,
zero_point=zero_point, dtype=torch_type)
a_ref = a_ref.dequantize()
qa = torch.quantize_per_tensor(a, scale=scale, zero_point=zero_point,
dtype=torch_type)
# Test the ops.quantized variant separately, because it does not accept None for stride.
a_hat = torch.ops.quantized.max_pool2d(
qa, kernel_size=_pair(kernel),
stride=_pair(kernel if stride is None else stride),
padding=_pair(padding), dilation=_pair(dilation), ceil_mode=ceil_mode)
self.assertEqual(a_ref, a_hat.dequantize(),
msg="ops.quantized.max_pool2d results are off")
"""Tests 2D max pool operation on quantized tensors."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=4,
min_side=1, max_side=10),
qparams=hu.qparams()),
kernel=st.sampled_from((3, 5, 7)),
stride=st.sampled_from((None, 1, 2)),
dilation=st.integers(1, 2),
padding=st.integers(0, 2),
ceil_mode=st.booleans())
def test_max_pool2d(self, X, kernel, stride, dilation, padding, ceil_mode):
X, (scale, zero_point, torch_type) = X
# Check constraints
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iH, iW = X.shape[-2:]
oH = pool_output_shape(iH, kernel, padding, stride, dilation, ceil_mode)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation, ceil_mode)
assume(oW > 0)
a = torch.from_numpy(X)
a_pool = torch.nn.functional.max_pool2d(a, kernel_size=kernel,
stride=stride,
padding=padding, dilation=dilation,
ceil_mode=ceil_mode)
a_ref = torch.quantize_per_tensor(a_pool, scale=scale,
zero_point=zero_point, dtype=torch_type)
a_ref = a_ref.dequantize()
qa = torch.quantize_per_tensor(a, scale=scale, zero_point=zero_point,
dtype=torch_type)
ops_under_test = {
"torch": torch.max_pool2d,
"nn.functional": torch.nn.functional.max_pool2d,
"nn.quantized.functional": torch.nn.quantized.functional.max_pool2d
}
for name, op in ops_under_test.items():
a_hat = op(qa, kernel_size=kernel, stride=stride, padding=padding,
dilation=dilation, ceil_mode=ceil_mode)
self.assertEqual(a_ref, a_hat.dequantize(),
msg="{} results are off".format(name))
# Test the ops.quantized variant separately, because it does not accept None for stride.
a_hat = torch.ops.quantized.max_pool2d(
qa, kernel_size=_pair(kernel),
stride=_pair(kernel if stride is None else stride),
padding=_pair(padding), dilation=_pair(dilation), ceil_mode=ceil_mode)
self.assertEqual(a_ref, a_hat.dequantize(),
msg="ops.quantized.max_pool2d results are off")
"""Tests max pool operation on NHWC quantized tensors."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
min_side=1, max_side=10),
qparams=hu.qparams()),
kernel=st.sampled_from((3, 5, 7)),
stride=st.sampled_from((None, 1, 2)),
dilation=st.integers(1, 2),
padding=st.integers(0, 2),
ceil_mode=st.booleans())
def test_max_pool2d_nhwc(self, X, kernel, stride, dilation, padding, ceil_mode):
X, (scale, zero_point, torch_type) = X
# Ensure we hit the vectorized paths
# 176 = 128 + 32 + 16
# 128 hits the interleaved path
# 32 hits the non-interleaved path
# 16 hits the scalar path
if X.shape[1] < 176:
X = np.repeat(X, 176 // X.shape[1], 1)
# Check constraints
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iH, iW = X.shape[-2:]
oH = pool_output_shape(iH, kernel, padding, stride, dilation, ceil_mode)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation, ceil_mode)
assume(oW > 0)
X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 1]))
a = torch.from_numpy(X_nchw).permute([0, 3, 1, 2])
a_pool = torch.nn.functional.max_pool2d(a, kernel_size=kernel,
stride=stride,
padding=padding, dilation=dilation,
ceil_mode=ceil_mode)
a_ref = torch.quantize_per_tensor(a_pool, scale=scale,
zero_point=zero_point, dtype=torch_type)
a_ref = a_ref.dequantize()
qa = torch.quantize_per_tensor(torch.from_numpy(X_nchw), scale=scale, zero_point=zero_point,
dtype=torch_type).permute([0, 3, 1, 2])
self.assertTrue(qa.stride() != sorted(qa.stride()))
ops_under_test = {
"torch": torch.max_pool2d,
"nn.functional": torch.nn.functional.max_pool2d,
"nn.quantized.functional": torch.nn.quantized.functional.max_pool2d
}
for name, op in ops_under_test.items():
a_hat = op(qa, kernel_size=kernel, stride=stride, padding=padding,
dilation=dilation, ceil_mode=ceil_mode)
self.assertTrue(a_hat.stride() != sorted(a_hat.stride()))
self.assertEqual(a_ref, a_hat.dequantize(),
msg="{} results are off".format(name))
# Test the ops.quantized variant separately, because it does not accept None for stride.
a_hat = torch.ops.quantized.max_pool2d(
qa, kernel_size=_pair(kernel),
stride=_pair(kernel if stride is None else stride),
padding=_pair(padding), dilation=_pair(dilation), ceil_mode=ceil_mode)
self.assertEqual(a_ref, a_hat.dequantize(),
msg="ops.quantized.max_pool2d results are off")
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=4,
min_side=5, max_side=10),
qparams=hu.qparams(dtypes=torch.quint8)),
kernel=st.sampled_from((3, 5)),
stride=st.sampled_from((None, 1, 2)),
padding=st.integers(0, 2),
ceil_mode=st.sampled_from((True, False)),
count_include_pad=st.sampled_from((True, False)),
divisor_override=st.sampled_from((None, None)))
def test_avg_pool2d(self, X, kernel, stride, padding, ceil_mode, count_include_pad, divisor_override):
"""
Note: we currently cannot test the divisor_override, because quantized op will clamp the result
within range. However, the float op will not.
"""
X, (scale, zero_point, torch_type) = X
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iH, iW = X.shape[-2:]
oH = pool_output_shape(iH, kernel, padding, stride, dilation=1)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation=1)
assume(oW > 0)
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
X = qX.dequantize()
# Run reference on float tensor and then quantize the result for comparison
X_ref = torch.nn.functional.avg_pool2d(
X, kernel_size=kernel, stride=stride, padding=padding,
ceil_mode=ceil_mode, count_include_pad=count_include_pad, divisor_override=divisor_override)
ops_under_test = {
"nn.functional": torch.nn.functional.avg_pool2d,
"nn.quantized.functional": torch.nn.quantized.functional.avg_pool2d
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
qX_hat = op(qX, kernel_size=kernel, stride=stride, padding=padding, ceil_mode=ceil_mode,
count_include_pad=count_include_pad, divisor_override=divisor_override)
qX_ref = torch.quantize_per_tensor(X_ref, scale=qX_hat.q_scale(), zero_point=qX_hat.q_zero_point(),
dtype=torch_type)
self.assertEqual(qX_ref.int_repr().to(torch.double), qX_hat.int_repr().to(torch.double), atol=1.0, rtol=0,
msg=error_message.format(name, qX_ref.int_repr(), qX_hat.int_repr()))
self.assertEqual(scale, qX_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, qX_hat.q_scale()))
self.assertEqual(zero_point, qX_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', zero_point,
qX_hat.q_zero_point()))
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
min_side=5, max_side=10),
qparams=hu.qparams(dtypes=torch.qint8)),
kernel=st.sampled_from((4, 5)),
stride=st.sampled_from((None, 1, 2)),
padding=st.integers(0, 2),
ceil_mode=st.sampled_from((True, False)),
count_include_pad=st.sampled_from((True, False)),
divisor_override=st.sampled_from((None, None)))
def test_avg_pool2d_nhwc(self, X, kernel, stride, padding, ceil_mode, count_include_pad, divisor_override):
"""
Note: 1) we currently cannot test the divisor_override, because quantized op will clamp the result
within range. However, the float op will not.
2) we cannot test the qint32, since the float point precision is much lower than int32 for big number,
which will make the test be very flaky.
"""
X, (scale, zero_point, torch_type) = X
H, W = X.shape[-2:]
if X.shape[1] < 176:
X = np.repeat(X, 176 // X.shape[1], 1)
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iH, iW = X.shape[-2:]
oH = pool_output_shape(iH, kernel, padding, stride, dilation=1)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation=1)
assume(oW > 0)
X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 1]))
qX = torch.quantize_per_tensor(torch.from_numpy(X_nchw), scale=scale,
zero_point=zero_point, dtype=torch_type).permute([0, 3, 1, 2])
X = qX.dequantize()
# Run reference on the dequantized float tensor and quantize the result for comparison.
X_ref = torch.nn.functional.avg_pool2d(
X, kernel_size=kernel, stride=stride, padding=padding,
ceil_mode=ceil_mode, count_include_pad=count_include_pad, divisor_override=divisor_override)
self.assertTrue(qX.stride() != sorted(qX.stride()))
ops_under_test = {
"nn.functional": torch.nn.functional.avg_pool2d,
"nn.quantized.functional": torch.nn.quantized.functional.avg_pool2d
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
X_hat = op(qX, kernel_size=kernel, stride=stride, padding=padding, ceil_mode=ceil_mode,
count_include_pad=count_include_pad, divisor_override=divisor_override)
self.assertTrue(X_hat.stride() != sorted(X_hat.stride()))
qX_ref = torch.quantize_per_tensor(X_ref, scale=X_hat.q_scale(), zero_point=X_hat.q_zero_point(),
dtype=torch_type)
self.assertEqual(qX_ref.int_repr().to(torch.double), X_hat.int_repr().to(torch.double), atol=1.0, rtol=0,
msg=error_message.format(name, qX_ref.int_repr(), X_hat.int_repr()))
self.assertEqual(scale, X_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, X_hat.q_scale()))
self.assertEqual(zero_point, X_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', zero_point,
X_hat.q_zero_point()))
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=5, max_dims=5,
min_side=5, max_side=10),
qparams=hu.qparams(dtypes=torch.quint8)),
kernel=st.sampled_from((3, 5)),
stride=st.sampled_from((None, 1, 2)),
padding=st.integers(0, 2),
ceil_mode=st.sampled_from((True, False)),
count_include_pad=st.sampled_from((True, False)),
divisor_override=st.sampled_from((None, None)))
def test_avg_pool3d(self, X, kernel, stride, padding, ceil_mode, count_include_pad, divisor_override):
"""
Note: we currently cannot test the divisor_override, because quantized op will clamp the result
within range. However, the float op will not.
"""
X, (scale, zero_point, torch_type) = X
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iD, iH, iW = X.shape[-3:]
oD = pool_output_shape(iD, kernel, padding, stride, dilation=1)
assume(oD > 0)
oH = pool_output_shape(iH, kernel, padding, stride, dilation=1)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation=1)
assume(oW > 0)
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
X = qX.dequantize()
# Run reference on float tensor and then quantize the result for comparison
X_ref = torch.nn.functional.avg_pool3d(
X, kernel_size=kernel, stride=stride, padding=padding,
ceil_mode=ceil_mode, count_include_pad=count_include_pad, divisor_override=divisor_override)
ops_under_test = {
"nn.functional": torch.nn.functional.avg_pool3d,
"nn.quantized.functional": torch.nn.quantized.functional.avg_pool3d
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
qX_hat = op(qX, kernel_size=kernel, stride=stride, padding=padding, ceil_mode=ceil_mode,
count_include_pad=count_include_pad, divisor_override=divisor_override)
qX_ref = torch.quantize_per_tensor(X_ref, scale=qX_hat.q_scale(), zero_point=qX_hat.q_zero_point(),
dtype=torch_type)
self.assertEqual(qX_ref.int_repr().to(torch.double), qX_hat.int_repr().to(torch.double), atol=1.0, rtol=0,
msg=error_message.format(name, qX_ref.int_repr(), qX_hat.int_repr()))
self.assertEqual(scale, qX_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, qX_hat.q_scale()))
self.assertEqual(zero_point, qX_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', zero_point,
qX_hat.q_zero_point()))
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=5, max_dims=5,
min_side=5, max_side=10),
qparams=hu.qparams(dtypes=torch.qint8)),
kernel=st.sampled_from((4, 5)),
stride=st.sampled_from((None, 1, 2)),
padding=st.integers(0, 2),
ceil_mode=st.sampled_from((True, False)),
count_include_pad=st.sampled_from((True, False)),
divisor_override=st.sampled_from((None, None)))
def test_avg_pool3d_nhwc(self, X, kernel, stride, padding, ceil_mode, count_include_pad, divisor_override):
"""
Note: 1) we currently cannot test the divisor_override, because quantized op will clamp the result
within range. However, the float op will not.
2) we cannot test the qint32, since the float point precision is much lower than int32 for big number,
which will make the test be very flaky.
"""
X, (scale, zero_point, torch_type) = X
D, H, W = X.shape[-3:]
if X.shape[1] < 176:
X = np.repeat(X, 176 // X.shape[1], 1)
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iD, iH, iW = X.shape[-3:]
oD = pool_output_shape(iD, kernel, padding, stride, dilation=1)
assume(oD > 0)
oH = pool_output_shape(iH, kernel, padding, stride, dilation=1)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation=1)
assume(oW > 0)
X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 4, 1]))
qX = torch.quantize_per_tensor(torch.from_numpy(X_nchw), scale=scale,
zero_point=zero_point, dtype=torch_type).permute([0, 4, 1, 2, 3])
X = qX.dequantize()
# Run reference on the dequantized float tensor and quantize the result for comparison.
X_ref = torch.nn.functional.avg_pool3d(
X, kernel_size=kernel, stride=stride, padding=padding,
ceil_mode=ceil_mode, count_include_pad=count_include_pad, divisor_override=divisor_override)
self.assertTrue(qX.stride() != sorted(qX.stride()))
ops_under_test = {
"nn.functional": torch.nn.functional.avg_pool3d,
"nn.quantized.functional": torch.nn.quantized.functional.avg_pool3d
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
X_hat = op(qX, kernel_size=kernel, stride=stride, padding=padding, ceil_mode=ceil_mode,
count_include_pad=count_include_pad, divisor_override=divisor_override)
self.assertTrue(X_hat.stride() != sorted(X_hat.stride()))
qX_ref = torch.quantize_per_tensor(X_ref, scale=X_hat.q_scale(), zero_point=X_hat.q_zero_point(),
dtype=torch_type)
self.assertEqual(qX_ref.int_repr().to(torch.double), X_hat.int_repr().to(torch.double), atol=1.0, rtol=0,
msg=error_message.format(name, qX_ref.int_repr(), X_hat.int_repr()))
self.assertEqual(scale, X_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, X_hat.q_scale()))
self.assertEqual(zero_point, X_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', zero_point,
X_hat.q_zero_point()))
"""Tests adaptive average pool operation on NHWC quantized tensors."""
def test_adaptive_avg_pool2d_nhwc(self):
side_lens = (range(1, 10))
dim_lens = (range(3, 4))
torch_type = torch.qint8
zero_points = (0, 1)
combined = [side_lens, dim_lens, zero_points]
test_cases = itertools.product(*combined)
for test_case in test_cases:
output_size_h = random.randint(1, 10)
output_size_w = random.randint(1, 10)
side_len, dim_len, zero_point = test_case
shapes = [side_len] * dim_len
X, X_scale, X_zero_point = \
_get_random_tensor_and_q_params(shapes, 1.0, zero_point)
X = np.array(X)
scale = 1
H, W = X.shape[-2:]
output_size_h = output_size_h if (output_size_h <= H) else H
output_size_w = output_size_w if (output_size_w <= W) else W
if output_size_h == output_size_w:
output_size = output_size_h
else:
output_size = (output_size_h, output_size_w)
if X.shape[1] < 176:
X = np.repeat(X, 176 // X.shape[1], 1)
if X.ndim == 4:
X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 1]))
X = torch.from_numpy(X_nchw).permute([0, 3, 1, 2])
qX = torch.quantize_per_tensor(torch.from_numpy(X_nchw),
scale=scale,
zero_point=zero_point,
dtype=torch_type).permute([0, 3, 1, 2])
else: # ndim == 3
X_nchw = np.ascontiguousarray(X.transpose([1, 2, 0]))
X = torch.from_numpy(X_nchw).permute([2, 0, 1])
qX = torch.quantize_per_tensor(torch.from_numpy(X_nchw),
scale=scale,
zero_point=zero_point,
dtype=torch_type).permute([2, 0, 1])
# Run reference on int_repr + round to avoid double rounding error.
X_ref = torch.nn.functional.adaptive_avg_pool2d(qX.int_repr().to(torch.double), output_size).round()
self.assertTrue(qX.stride() != sorted(qX.stride()))
ops_under_test = {
"nn.functional": torch.nn.functional.adaptive_avg_pool2d,
"nn.quantized.functional":
torch.nn.quantized.functional.adaptive_avg_pool2d
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
X_hat = op(qX, output_size=output_size)
self.assertTrue(X_hat.stride() != sorted(X_hat.stride()))
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(X_ref, X_hat.int_repr(), atol=1.0, rtol=0,
msg=error_message.format(name, X_ref, X_hat.int_repr()))
self.assertEqual(scale, X_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, X_hat.q_scale()))
self.assertEqual(zero_point, X_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', zero_point,
X_hat.q_zero_point()))
def test_adaptive_avg_pool(self):
side_lens = (range(1, 10))
dim_lens = (range(3, 5))
torch_type = torch.qint8
zero_points = (0, 1)
combined = [side_lens, dim_lens, zero_points]
test_cases = itertools.product(*combined)
for test_case in test_cases:
output_size_d = random.randint(1, 10)
output_size_h = random.randint(1, 10)
output_size_w = random.randint(1, 10)
side_len, dim_len, zero_point = test_case
shapes = [side_len] * dim_len
X, X_scale, X_zero_point = \
_get_random_tensor_and_q_params(shapes, 1.0, zero_point)
X = np.array(X)
scale = 1
ndim = X.ndim
dim_to_check = []
if ndim <= 4:
dim_to_check.append(2)
if ndim >= 4:
dim_to_check.append(3)
D, H, W = X.shape[-3:]
output_size_d = output_size_d if (output_size_d <= D) else D
output_size_h = output_size_h if (output_size_h <= H) else H
output_size_w = output_size_w if (output_size_w <= W) else W
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
for dim in dim_to_check:
if dim == 2:
if output_size_h == output_size_w:
output_size = output_size_h
else:
output_size = (output_size_h, output_size_w)
elif dim == 3:
if output_size_d == output_size_h == output_size_w:
output_size = output_size_h
else:
output_size = (output_size_d, output_size_h, output_size_w)
# Run reference on int_repr + round to avoid double rounding error.
ref_op = getattr(torch.nn.functional, 'adaptive_avg_pool{}d'.format(dim))
X_ref = ref_op(qX.int_repr().to(torch.float), output_size).round()
ops_under_test = {
"nn.functional":
getattr(torch.nn.functional, 'adaptive_avg_pool{}d'.format(dim)),
"nn.quantized.functional":
getattr(torch.nn.quantized.functional, 'adaptive_avg_pool{}d'.format(dim))
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
# TODO: torch.cuda.is_available() should be swapped for a flag that checks if cudnn
# is enabled in the build when cudnn supports adaptive average pooling
devices = ["cpu", "cuda"] if (dim == 2 and torch.cuda.is_available()) else ["cpu"]
for device in devices:
qX_hat = op(qX.to(device=device), output_size=output_size)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
X_ref, qX_hat.int_repr(), atol=1.0,
rtol=0, msg=error_message.format(name, X_ref, qX_hat))
self.assertEqual(
scale, qX_hat.q_scale(),
msg=error_message.format(name + '.scale', scale,
qX_hat.q_scale()))
self.assertEqual(
zero_point, qX_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', zero_point,
qX_hat.q_zero_point()))
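# The adaptive-pool tests above compute their reference on qX.int_repr() and
# round exactly once, rather than dequantizing, pooling in float, and
# re-quantizing; the latter rounds twice and can shift values that land near a
# .5 boundary by one quantization step. A hedged sketch of the pattern:
import torch

def _adaptive_pool_int_reference(qx, output_size):
    # Single rounding step on the integer domain; compared with atol=1.0 above.
    return torch.nn.functional.adaptive_avg_pool2d(
        qx.int_repr().to(torch.double), output_size).round()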
"""Tests adaptive average pool operation on NHWC quantized tensors."""
def test_adaptive_avg_pool3d_ndhwc(self):
side_lens = (range(1, 10))
dim_lens = (range(4, 5))
torch_type = torch.qint8
zero_point = 0
combined = [side_lens, dim_lens]
test_cases = itertools.product(*combined)
for test_case in test_cases:
output_size_d = random.randint(1, 10)
output_size_h = random.randint(1, 10)
output_size_w = random.randint(1, 10)
side_len, dim_len = test_case
shapes = [side_len] * dim_len
X, X_scale, X_zero_point = \
_get_random_tensor_and_q_params(shapes, 1.0, zero_point)
X = np.array(X)
scale = 1
D, H, W = X.shape[-3:]
output_size_d = output_size_d if (output_size_d <= D) else D
output_size_h = output_size_h if (output_size_h <= H) else H
output_size_w = output_size_w if (output_size_w <= W) else W
if output_size_d == output_size_h == output_size_w:
output_size = output_size_h
else:
output_size = (output_size_d, output_size_h, output_size_w)
if X.shape[1] < 176:
X = np.repeat(X, 176 // X.shape[1], 1)
if X.ndim == 5:
X_ncdhw = np.ascontiguousarray(X.transpose([0, 2, 3, 4, 1]))
X = torch.from_numpy(X_ncdhw).permute([0, 4, 1, 2, 3])
qX = torch.quantize_per_tensor(torch.from_numpy(X_ncdhw),
scale=scale,
zero_point=zero_point,
dtype=torch_type).permute([0, 4, 1, 2, 3])
else: # ndim == 4
X_ncdhw = np.ascontiguousarray(X.transpose([1, 2, 3, 0]))
X = torch.from_numpy(X_ncdhw).permute([3, 0, 1, 2])
qX = torch.quantize_per_tensor(torch.from_numpy(X_ncdhw),
scale=scale,
zero_point=zero_point,
dtype=torch_type).permute([3, 0, 1, 2])
# Run reference on int_repr + round to avoid double rounding error.
X_ref = torch.nn.functional.adaptive_avg_pool3d(
qX.int_repr().to(torch.double), output_size).round()
self.assertTrue(qX.stride() != sorted(qX.stride()))
ops_under_test = {
"nn.functional": torch.nn.functional.adaptive_avg_pool3d,
"nn.quantized.functional":
torch.nn.quantized.functional.adaptive_avg_pool3d
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
X_hat = op(qX, output_size=output_size)
self.assertTrue(X_hat.stride() != sorted(X_hat.stride()))
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(X_ref, X_hat.int_repr(), atol=1.0, rtol=0,
msg=error_message.format(name, X_ref, X_hat.int_repr()))
self.assertEqual(scale, X_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, X_hat.q_scale()))
self.assertEqual(zero_point, X_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', zero_point,
X_hat.q_zero_point()))
def test_qtopk(self):
x_dims = [3, 4] # Num elements in the shape
sides = [3, 5] # Side of the tensor generated
dims = [0, 1, 2, 3] # dimension over which to perform topk
largest = [False, True] # Return largest or smallest element
sorted = [False, True] # Return sorted or not
dtypes = [torch.qint8, torch.quint8]
is_nhwc = [False, True] # Is input in the NHWC format?
test_cases = itertools.product(x_dims, sides, dims, largest, sorted, dtypes, is_nhwc)
k = 2
for x_dim, side, dim, larg, sort, dtype, nhwc in test_cases:
if nhwc and x_dim != 4: # NHWC requires 4 dimensions
continue
if dim >= x_dim: # Dimension to find top-k for should exist
continue
shape = [side] * x_dim
X, scale, zp = _get_random_tensor_and_q_params(shape, 1.0, dtype)
qX = torch.quantize_per_tensor(X, scale, zp, dtype)
if nhwc:
qX = qX.permute([0, 3, 1, 2])
X = np.transpose(X, [0, 3, 1, 2])
unquantized_out = torch.topk(qX.dequantize(), k, dim=dim, largest=larg, sorted=sort)
values = torch.quantize_per_tensor(X, scale, zp, dtype)
indices = torch.tensor(X).long()
quantized_out = torch.topk(qX, k, dim=dim, largest=larg, sorted=sort)
assert(len(unquantized_out) == len(quantized_out))
torch.testing.assert_close(quantized_out[0].dequantize(), unquantized_out[0])
torch.testing.assert_close(quantized_out[1], unquantized_out[1])
"""Tests quantize concatenation (both fused and not)."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=3, max_dims=4,
min_side=1, max_side=10),
qparams=hu.qparams()),
num=st.integers(1, 4),
dim=st.integers(1, 4),
relu=st.booleans())
def test_cat(self, X, num, dim, relu):
tensors_q = []
tensors_ref = []
X, (scale, zero_point, torch_type) = X
assume(dim < X.ndim)
X = torch.from_numpy(X)
new_shape = np.array(X.shape)
new_shape[dim] = 0
for idx in range(num):
tensors_q.append(torch.quantize_per_tensor(X, scale, zero_point,
torch_type))
tensors_ref.append(X)
new_shape[dim] += tensors_ref[-1].shape[dim]
cat_ref = torch.cat(tensors_ref, dim=dim)
cat_ref = torch.quantize_per_tensor(cat_ref, scale, zero_point, torch_type)
cat_ref = cat_ref.dequantize()
if relu:
cat_ref = F.relu(cat_ref)
q_cat_op = torch.ops.quantized.cat_relu
q_cat_out_op = torch.ops.quantized.cat_relu_out
else:
q_cat_op = torch.ops.quantized.cat
q_cat_out_op = torch.ops.quantized.cat_out
cat_q = q_cat_op(tensors_q, dim=dim, scale=scale,
zero_point=zero_point)
cat_q = cat_q.dequantize()
np.testing.assert_equal(cat_ref.numpy(), cat_q.numpy())
cat_q_out = torch._empty_affine_quantized(
list(new_shape), scale=scale,
zero_point=zero_point, dtype=torch_type)
q_cat_out_op(tensors_q, dim=dim, out=cat_q_out)
cat_q_out = cat_q_out.dequantize()
np.testing.assert_equal(cat_ref.numpy(), cat_q_out.numpy())
# Test the cat on per-channel quantized tensor.
ch_axis = 1
scales = torch.from_numpy(np.array([1.0] * X.shape[ch_axis]))
scales = scales.to(torch.float64)
zero_points = torch.from_numpy(np.array([0] * X.shape[ch_axis]))
zero_points = zero_points.to(torch.long)
tensors_q[0] = torch.quantize_per_channel(
X, scales, zero_points, axis=ch_axis, dtype=torch_type)
with self.assertRaisesRegex(RuntimeError, "supported.*cat"):
cat_q = q_cat_op(tensors_q, dim=ch_axis, scale=scale,
zero_point=zero_point)
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
min_side=5, max_side=10),
qparams=hu.qparams()),
size=st.sampled_from((1, 3, 5, 10)),
mode=st.sampled_from(("bilinear", "nearest", "nearest-exact")),
scale_factor=st.sampled_from((None, 1.5, 2.0)),
align_corners=st.sampled_from((True, False)),
nhwc_layout=st.sampled_from((True, False)))
def test_interpolate(self, X, size, mode, scale_factor, align_corners, nhwc_layout):
"""
This test cover upsample_nearest2d and upsample_bilinear2d
"""
X, (scale, zero_point, torch_type) = X
if scale_factor is not None:
size = None
if mode in ("nearest", "nearest-exact"):
align_corners = None
if nhwc_layout:
if X.shape[1] < 176:
X = np.repeat(X, 176 // X.shape[1], 1)
X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 1]))
X = torch.from_numpy(X_nchw).permute([0, 3, 1, 2])
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type).permute([0, 3, 1, 2])
else:
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
X_ref = torch.nn.functional.interpolate(
qX.int_repr().to(torch.float), size=size, scale_factor=scale_factor,
mode=mode, align_corners=align_corners)
ops_under_test = {
"nn.functional": torch.nn.functional.interpolate,
"nn.quantized.functional": torch.nn.quantized.functional.interpolate
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
qX_hat = op(qX, size=size, scale_factor=scale_factor,
mode=mode, align_corners=align_corners)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(X_ref, qX_hat.int_repr(), atol=1.0, rtol=0,
msg="{} results are off: qX_hat={} X_ref={}"
.format(name, qX_hat.int_repr(), X_ref))
self.assertEqual(scale, qX_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, qX_hat.q_scale()))
self.assertEqual(zero_point, qX_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', zero_point,
qX_hat.q_zero_point()))
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=5, max_dims=5,
min_side=5, max_side=10),
qparams=hu.qparams()),
size=st.sampled_from((1, 3, 5, 5, 10)),
mode=st.sampled_from(("nearest", "nearest-exact")),
scale_factor=st.sampled_from((None, 1.5, 2.0)),
align_corners=st.sampled_from((True, False)),
nhwc_layout=st.sampled_from((True, False)))
def test_interpolate3d(self, X, size, mode, scale_factor, align_corners, nhwc_layout):
"""
This test cover upsample_nearest3d
"""
X, (scale, zero_point, torch_type) = X
if scale_factor is not None:
size = None
align_corners = None
if nhwc_layout:
if X.shape[1] < 176:
X = np.repeat(X, 176 // X.shape[1], 1)
X_nchw = np.ascontiguousarray(X.transpose([0, 2, 3, 4, 1]))
X = torch.from_numpy(X_nchw).permute([0, 4, 1, 2, 3])
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type).permute([0, 4, 1, 2, 3])
else:
X = torch.from_numpy(X)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
X_ref = torch.nn.functional.interpolate(
qX.int_repr().to(torch.float), size=size, scale_factor=scale_factor,
mode=mode, align_corners=align_corners)
ops_under_test = {
"nn.functional": torch.nn.functional.interpolate,
"nn.quantized.functional": torch.nn.quantized.functional.interpolate
}
error_message = r"Results are off for {}:\n\tExpected:\n{}\n\tGot:\n{}"
for name, op in ops_under_test.items():
qX_hat = op(qX, size=size, scale_factor=scale_factor,
mode=mode, align_corners=align_corners)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(X_ref, qX_hat.int_repr(), atol=1.0, rtol=0,
msg="{} results are off: qX_hat={}, X_ref={}"
.format(name, qX_hat.int_repr(), X_ref))
self.assertEqual(scale, qX_hat.q_scale(),
msg=error_message.format(name + '.scale', scale, qX_hat.q_scale()))
self.assertEqual(zero_point, qX_hat.q_zero_point(),
msg=error_message.format(name + '.zero_point', zero_point,
qX_hat.q_zero_point()))
"""Tests quantize concatenation (both fused and not)."""
@given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=4,
min_side=1, max_side=10),
qparams=hu.qparams()),
relu=st.booleans())
def test_cat_nhwc(self, X, relu):
# X is NHWC
X, (scale, zero_point, torch_type) = X
# Tile out X so the number of channels is > 64
X = np.repeat(X, 70 // X.shape[3], 3)
X = torch.from_numpy(np.ascontiguousarray(X))
Y = X.clone()
Y = torch.from_numpy(np.ascontiguousarray(Y))
# qcat has a fast path: when the inputs share the same scale and zero_point
# it can do a direct memcpy instead of dequantize-cat-requantize.
for scaleX, scaleY in ((scale, scale), (scale, scale * 1.1)):
# Here, we quantize and get quantized tensors in NHWC for both dims and
# strides. The permute switches it so that the tensor looks like NCHW but
# is laid out in memory as NHWC.
qX = torch.quantize_per_tensor(X, scaleX, zero_point, torch_type).permute([0, 3, 1, 2])
qY = torch.quantize_per_tensor(Y, scaleY, zero_point, torch_type).permute([0, 3, 1, 2])
ref = torch.cat([qX.dequantize(), qY.dequantize()], dim=1)
if relu:
ref[ref < 0] = 0.0
ref = torch.quantize_per_tensor(ref, scale=scale, zero_point=zero_point, dtype=torch_type)
if relu:
out = torch.ops.quantized.cat_relu(
[qX, qY], dim=1, scale=scale, zero_point=zero_point)
else:
out = torch.ops.quantized.cat([qX, qY], dim=1, scale=scale, zero_point=zero_point)
torch.testing.assert_close(out.dequantize(), ref.dequantize())
self.assertNotEqual(out.stride(), sorted(out.stride()))
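# test_cat_nhwc exercises both qcat paths: when the inputs already share the
# output's scale and zero_point the kernel can copy raw integer data, otherwise
# it must requantize. Either way it should agree with this slow-path reference
# (a hedged sketch of dequantize -> cat -> quantize):
import torch

def _qcat_reference(q_tensors, dim, out_scale, out_zero_point):
    floats = [q.dequantize() for q in q_tensors]
    return torch.quantize_per_tensor(
        torch.cat(floats, dim=dim), out_scale, out_zero_point, torch.quint8)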
@override_qengines
def test_mean(self):
scale_list = (1, 0.25)
zero_point_list = (0, 2)
shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4), (4, 4, 4, 4, 4))
dtypes = (torch.quint8, torch.qint8)
dims = ((), (-1,), (0,), (1,), (2,), (3,), (0, 1), (1, 2), (3, 4))
test_cases = itertools.product(scale_list, zero_point_list, shapes, dtypes, dims)
op = torch.mean
for scale, zp, shape, dtype, dim in test_cases:
if not all([d < len(shape) for d in dim]):
continue
X = torch.randn(*shape) * 10
qX = torch.quantize_per_tensor(X, scale, zp, dtype)
Y = op(qX.dequantize(), dim)
Y = torch.quantize_per_tensor(Y, scale, zp, dtype).dequantize()
qY = op(qX, dim)
self.assertEqual(Y, qY.dequantize())
@skipIfNoQNNPACK
@given(keep=st.booleans())
def test_quantized_mean_qnnpack(self, keep):
with override_quantized_engine("qnnpack"):
# Use sizes that are multiples of 4 to satisfy the 4-byte alignment requirement of pytorch_q8gavgpool_ukernel_up8xm__sse2() under ASAN.
in_dim = (4, 4, 4, 4)
if keep:
out_dim = (4, 4, 1, 1)
else:
out_dim = (4, 4)
X = torch.ones(in_dim)
Y = torch.ones(out_dim)
XQ = torch.quantize_per_tensor(X, scale=0.2, zero_point=0, dtype=torch.quint8)
YQ = torch.quantize_per_tensor(Y, scale=0.2, zero_point=0, dtype=torch.quint8)
MQ = XQ.mean((2, 3), keepdim=keep)
self.assertTrue(torch.equal(MQ, YQ))
@override_qengines
def test_std(self):
scale_list = (1, 0.25)
zero_point_list = (0, 2)
shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4), (4, 4, 4, 4, 4))
dtypes = (torch.quint8, torch.qint8)
dims = ((), (-1,), (0,), (1,), (2,), (3,), (0, 1), (1, 2), (3, 4))
unbiased_list = (True, False)
keep_dim_list = (True, False)
test_cases = itertools.product(scale_list, zero_point_list, shapes,
dtypes, dims, unbiased_list, keep_dim_list)
op = torch.std
for scale, zp, shape, dtype, dim, unbiased, keep_dim in test_cases:
if not all([d < len(shape) for d in dim]):
continue
X = torch.randn(*shape) * 10
qX = torch.quantize_per_tensor(X, scale, zp, dtype)
Y = op(qX.dequantize(), dim, unbiased, keep_dim)
Y = torch.quantize_per_tensor(Y, scale, zp, dtype).dequantize()
qY = op(qX, dim, unbiased, keep_dim)
self.assertEqual(Y, qY.dequantize())
"""Tests the correctness of the quantized equal op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams()),
X2=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams()),
X_per_channel=st.booleans(),
X2_per_channel=st.booleans())
def test_equal(self, X, X2, X_per_channel, X2_per_channel):
X, X_params = X
(scale, zero_point, torch_type) = X_params
X2, X2_params = X2
(scale2, zero_point2, torch_type2) = X2_params
X = torch.from_numpy(X)
if X_per_channel:
X_scheme = 'per_channel'
channels = X.shape[-1]
qX = torch.quantize_per_channel(
X,
scales=torch.tensor([scale] * channels),
zero_points=torch.tensor([zero_point] * channels),
dtype=torch_type,
axis=X.ndim - 1)
else:
X_scheme = 'per_tensor'
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
X2 = torch.from_numpy(X2)
if X2_per_channel:
X2_scheme = 'per_channel'
channels = X2.shape[-1]
qX2 = torch.quantize_per_channel(
X2,
scales=torch.tensor([scale2] * channels),
zero_points=torch.tensor([zero_point2] * channels),
dtype=torch_type2,
axis=X2.ndim - 1)
else:
X2_scheme = 'per_tensor'
qX2 = torch.quantize_per_tensor(X2, scale=scale2, zero_point=zero_point2,
dtype=torch_type2)
def equal_ref(qX, qX2):
if qX.qscheme() != qX2.qscheme():
return False
if qX.shape != qX2.shape:
return False
if qX.dtype != qX2.dtype:
return False
if qX.qscheme() == torch.per_tensor_affine:
if qX.q_scale() != qX2.q_scale():
return False
if qX.q_zero_point() != qX2.q_zero_point():
return False
elif qX.qscheme() == torch.per_channel_affine:
if (qX.q_per_channel_scales() !=
qX2.q_per_channel_scales()).any():
return False
if (qX.q_per_channel_zero_points() !=
qX2.q_per_channel_zero_points()).any():
return False
else:
raise NotImplementedError("Don't know what to do with",
qX.qscheme())
if (qX.int_repr().to(float) != qX2.int_repr().to(float)).any():
return False
return True
self.assertEqual(qX.equal(qX), equal_ref(qX, qX))
self.assertEqual(qX.equal(qX2), equal_ref(qX, qX2))
@skipIfNoFBGEMM
def test_group_norm(self):
# hypothesis is flaky for this test, create test cases manually
batches_list = (1, 7)
num_groups_list = (1, 4)
channels_per_groups = (1, 36, 72)
elements_per_channels = (8, 128, 1024)
torch_types = (torch.qint8, torch.quint8)
y_scales = (0.1, 4.23)
y_zero_points = (0, 1)
channels_last_list = [True, False]
affine_list = [True, False]
combined = [batches_list, num_groups_list, channels_per_groups, elements_per_channels,
torch_types, y_scales, y_zero_points, channels_last_list, affine_list]
test_cases = itertools.product(*combined)
with override_quantized_engine("fbgemm"):
for test_case in test_cases:
batches, num_groups, channels_per_group, elements_per_channel, \
torch_type, Y_scale, Y_zero_point, channels_last, \
affine = test_case
num_channels = num_groups * channels_per_group
# minimum rank for channels_last
shapes = (batches, num_channels, elements_per_channel, 1)
# In the FP kernel, sums and sums of squares are calculated in floating point.
# In the int8 and uint8 versions of the quantized kernel, they are
# calculated in integer arithmetic (which is exact).
# Because of this, the numerics do not always match exactly which is
# expected and acceptable. We do the following to allow this failure
# in this test:
# 1. do not use Hypothesis to generate the input tensor. Hypothesis
# favors homogeneous inputs in its search strategies which isn't
# representative of the inputs we care about, and tends to maximize
# this particular numerics difference.
# 2. allow a small % of off by Y_scale errors. Even when the
# variance of the input is high, there can be off by one errors
# in the result if the input value happens to fall exactly on
# the bin boundary of the output scale.
#
# If we want the numerics to match we could switch to calculating
# mean+var in floating point in the future, at the cost of speed.
X, X_scale, X_zero_point = \
_get_random_tensor_and_q_params(shapes, 1.0, torch_type)
# Initialize the weights non-randomly for reproducibility
if affine:
weight = torch.ones(num_channels).float() * 0.5
bias = torch.ones(num_channels).float()
for i in range(num_channels):
weight[i] *= i
bias[i] *= i
else:
weight = None
bias = None
eps = 0.001
qX = torch.quantize_per_tensor(X, X_scale, X_zero_point, torch_type)
if channels_last:
qX = qX.contiguous(memory_format=torch.channels_last)
dqX = qX.dequantize()
# Enforce non-homogeneous inputs
for batch_idx in range(batches):
for group_idx in range(num_groups):
ch_start = group_idx * channels_per_group
ch_end = ch_start + channels_per_group
group_vals = dqX[batch_idx][ch_start:ch_end]
assume(
float(torch.unique(group_vals).shape[0]) / group_vals.numel() > 0.001
or group_vals.numel() < 5)
qY = torch.ops.quantized.group_norm(qX, num_groups, weight, bias, eps, Y_scale, Y_zero_point)
dqY_hat = F.group_norm(dqX, num_groups=num_groups, weight=weight, bias=bias, eps=eps)
qY_hat = torch.quantize_per_tensor(dqY_hat, Y_scale, Y_zero_point, torch_type)
# Due to the numerics difference mentioned above between calculating
# the variance in float vs int, the results can still be slightly
# different.
dqY = qY.dequantize()
dqY_hat = qY_hat.dequantize()
diff = dqY - dqY_hat
# off-by-one errors are magnitude of Y_scale
num_diff = torch.sum(diff > Y_scale * 1.0001)
pct_diff = float(num_diff) / (diff.numel() + 1e-5)
num_diff_off_by_one = torch.sum((diff > 0) * (diff <= Y_scale))
pct_diff_off_by_one = float(num_diff_off_by_one) / (diff.numel() + 1e-5)
self.assertTrue(pct_diff < 1e-6)
self.assertTrue(pct_diff_off_by_one < 0.01)
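# The long comment in test_group_norm tolerates a small fraction of off-by-one
# (one Y_scale step) differences because float and integer accumulation of
# mean/variance can round a value sitting exactly on a bin boundary to either
# side. A hand-picked illustration (the numbers are assumptions, not test data):
#
#   Y_scale = 0.1, zero_point = 0
#   float-accumulated result = 0.2500000 -> round(2.5) -> 2 (round half to even)
#   int-accumulated result   = 0.2500001 -> round(2.500001) -> 3
#
# Both land within one quantization step, which is exactly what the
# pct_diff_off_by_one < 0.01 check above permits.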
@skipIfNoFBGEMM
def test_instance_norm(self):
max_sides = (4, 5)
shape_list = ([2, 2, 2, 2], [8, 8, 8, 8], [11, 11, 11, 11])
torch_types = (torch.qint8, torch.quint8)
y_scales = (0.1, 4.23)
y_zero_points = (0, 1)
channels_last_list = (True, False)
affine_list = (True, False)
combined = [shape_list, torch_types, y_scales, y_zero_points, channels_last_list, affine_list]
test_cases_product = itertools.product(*combined)
test_cases = list(test_cases_product)
# add just one test case to test overflow
test_cases.append([
[1, 4, 224, 224, 160], # shape,
torch.qint8, # torch_type
0.1, # scale
0, # zero_point
False, # channels_last
True, # affine
])
with override_quantized_engine("fbgemm"):
for test_case in test_cases:
shapes, torch_type, Y_scale, Y_zero_point, channels_last, affine = test_case
if channels_last and len(shapes) >= 5:
# channels_last format requires a rank-4 tensor, so skip higher-rank shapes
continue
# In the FP kernel, sums and sums of squares are calculated in floating point.
# In the int8 and uint8 versions of the quantized kernel, they are
# calculated in integer arithmetic (which is exact).
# Because of this, the numerics do not always match exactly which is
# expected and acceptable. We do the following to allow this failure
# in this test:
# 1. do not use Hypothesis to generate the input tensor. Hypothesis
# favors homogeneous inputs in its search strategies which isn't
# representative of the inputs we care about, and tends to maximize
# this particular numerics difference.
# 2. allow a small % of off by Y_scale errors. Even when the
# variance of the input is high, there can be off by one errors
# in the result if the input value happens to fall exactly on
# the bin boundary of the output scale.
#
# If we want the numerics to match we could switch to calculating
# mean+var in floating point in the future, at the cost of speed.
X, X_scale, X_zero_point = \
_get_random_tensor_and_q_params(shapes, 1.0, torch_type)
num_channels = shapes[1]
if affine:
weight = torch.rand(num_channels).float() * 0.5
bias = torch.rand(num_channels).float()
for i in range(num_channels):
weight[i] *= i
bias[i] *= i
else:
weight = None
bias = None
eps = 0.001
qX = torch.quantize_per_tensor(X, X_scale, X_zero_point, torch_type)
if channels_last:
qX = qX.contiguous(memory_format=torch.channels_last)
dqX = qX.dequantize()
# Enforce non-homogeneous inputs
batches = shapes[0]
for batch_idx in range(batches):
for ch_idx in range(num_channels):
ch_vals = dqX[batch_idx][ch_idx]
assume(
float(torch.unique(ch_vals).shape[0]) / ch_vals.numel() > 0.01
or ch_vals.numel() < 5 or ch_vals.numel() > 25600)
qY = torch.ops.quantized.instance_norm(qX, weight, bias, eps, Y_scale, Y_zero_point)
dqY_hat = F.instance_norm(dqX, weight=weight, bias=bias, eps=eps)
qY_hat = torch.quantize_per_tensor(dqY_hat, Y_scale, Y_zero_point, torch_type)
# Due to the numerics difference mentioned above between calculating
# the variance in float vs int, the results can still be slightly
# different.
dqY = qY.dequantize()
dqY_hat = qY_hat.dequantize()
diff = dqY - dqY_hat
# off-by-one errors are magnitude of Y_scale
num_diff = torch.sum(diff > Y_scale * 1.0001)
pct_diff = float(num_diff) / (diff.numel() + 1e-5)
num_diff_off_by_one = torch.sum((diff > 0) * (diff <= Y_scale))
pct_diff_off_by_one = float(num_diff_off_by_one) / (diff.numel() + 1e-5)
self.assertTrue(pct_diff < 1e-6)
self.assertTrue(pct_diff_off_by_one < 0.01)
@skipIfNoFBGEMM
def test_batch_norm_relu(self):
# hypothesis too slow for this test, create test cases manually
max_sides = (2, 3, 4, 5)
side_lens = (1, 8, 11)
torch_types = (torch.qint8, torch.quint8)
combined = [max_sides, side_lens, torch_types]
test_cases = itertools.product(*combined)
with override_quantized_engine("fbgemm"):
for test_case in test_cases:
max_side, side_len, torch_type = test_case
Y_zero_point = 1
Y_scale = 0.5
shapes = [side_len] * max_side
X, scale_x, zero_point_x = \
_get_random_tensor_and_q_params(shapes, 1.0, torch_type)
dtype_x = torch_type
c = X.shape[1]
mean = torch.rand(c).float()
var = torch.rand(c).float()
weight = torch.rand(c).float()
bias = torch.rand(c).float()
eps = 0.001
qx = torch.quantize_per_tensor(X, scale_x, zero_point_x, dtype_x)
if len(X.shape) == 2 or len(X.shape) == 3:
qy = torch.ops.quantized.batch_norm1d_relu(
qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)
elif len(X.shape) == 4:
qy = torch.ops.quantized.batch_norm2d_relu(
qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)
else:
qy = torch.ops.quantized.batch_norm3d_relu(
qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)
float_ref = F.batch_norm(qx.dequantize(), weight=weight, bias=bias,
running_mean=mean, running_var=var,
training=False, momentum=0, eps=eps).numpy()
float_ref_relu = float_ref.copy()
float_ref_relu[float_ref < 0] = 0
quantize_ref = torch.quantize_per_tensor(
torch.from_numpy(float_ref_relu), Y_scale, Y_zero_point, dtype_x)
self.assertEqual(
qy.int_repr().numpy(),
quantize_ref.int_repr().numpy(),
msg="{} vs {}".format(qy, quantize_ref))
@skipIfNoFBGEMM
def test_batch_norm(self):
# hypothesis too slow for this test, create test cases manually
max_sides = (2, 3, 4, 5)
side_lens = (1, 8, 11)
torch_types = (torch.qint8, torch.quint8)
combined = [max_sides, side_lens, torch_types]
test_cases = itertools.product(*combined)
with override_quantized_engine("fbgemm"):
for test_case in test_cases:
max_side, side_len, torch_type = test_case
Y_zero_point = 1
Y_scale = 0.5
shapes = [side_len] * max_side
X, scale_x, zero_point_x = \
_get_random_tensor_and_q_params(shapes, 1.0, torch_type)
dtype_x = torch_type
c = X.shape[1]
mean = torch.rand(c).float()
var = torch.rand(c).float()
weight = torch.rand(c).float()
bias = torch.rand(c).float()
eps = 0.001
qx = torch.quantize_per_tensor(X, scale_x, zero_point_x, dtype_x)
if len(X.shape) == 2 or len(X.shape) == 3:
qy = torch.ops.quantized.batch_norm1d(
qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)
elif len(X.shape) == 4:
qy = torch.ops.quantized.batch_norm2d(
qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)
elif len(X.shape) == 5:
qy = torch.ops.quantized.batch_norm3d(
qx, weight, bias, mean, var, eps, Y_scale, Y_zero_point)
float_ref = F.batch_norm(qx.dequantize(), weight=weight, bias=bias,
running_mean=mean, running_var=var, training=False,
momentum=0, eps=eps)
quantize_ref = torch.quantize_per_tensor(float_ref, Y_scale, Y_zero_point, dtype_x)
self.assertEqual(
qy.int_repr().numpy(), quantize_ref.int_repr().numpy(),
msg="{} vs {}".format(qy, quantize_ref))
@override_qengines
def test_empty_batch(self):
scale = 1.0
zero_point = 0
X = torch.ones((0, 2, 4, 4), dtype=torch.float32)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
# upsample_nearest2d
qY = torch.nn.functional.upsample_nearest(qX, scale_factor=2)
np.testing.assert_equal(qY.size(), (0, 2, 8, 8),
"Quantized upsample_nearsest2d with batch size 0 failed.")
# relu
qY = torch.nn.functional.relu(qX)
np.testing.assert_equal(qY.size(), qX.size(),
"Quantized relu with batch size 0 failed.")
# tanh
qY = torch.tanh(qX)
np.testing.assert_equal(qY.size(), qX.size(),
"Quantized tanh with batch size 0 failed.")
# sigmoid
qY = torch.sigmoid(qX)
np.testing.assert_equal(qY.size(), qX.size(),
"Quantized sigmoid with batch size 0 failed.")
# interpolate
op = torch.nn.quantized.functional.interpolate
for mode in ["nearest", "bilinear", "nearest-exact"]:
qY = op(qX, scale_factor=2, mode=mode)
np.testing.assert_equal(qY.size(), (0, 2, 8, 8),
"Quantized interpolate with batch size 0 failed.")
# avg_pool
kernel = (2, 2)
stride = (1, 1)
padding = (0, 0)
op = torch.nn.quantized.functional.avg_pool2d
qY = op(qX, kernel, stride, padding)
np.testing.assert_equal(qY.size(), (0, 2, 3, 3),
"Quantized avg_pool2d with batch size 0 failed.")
# adaptive_avg_pool
op = torch.nn.quantized.functional.adaptive_avg_pool2d
qY = op(qX, (3, 3))
np.testing.assert_equal(qY.size(), (0, 2, 3, 3),
"Quantized adaptive_avg_pool2d with batch size 0 failed.")
# max_pool
dilation = (1, 1)
qY = torch.ops.quantized.max_pool2d(qX, kernel, stride, padding, dilation, ceil_mode=False)
oH = pool_output_shape(4, 2, 0, 1, 1)
oW = pool_output_shape(4, 2, 0, 1, 1)
np.testing.assert_equal(qY.size(), (0, 2, oH, oW),
"Quantized maxpool2d with batch size 0 failed.")
# hardtanh
qY = torch.nn.quantized.functional.hardtanh(qX, -1, 6)
np.testing.assert_equal(qY.size(), qX.size(),
"Quantized hardtanh with batch size 0 failed.")
# mul
qY = torch.ops.quantized.mul(qX, qX, 1.0, 0)
np.testing.assert_equal(qY.size(), qX.size(),
"Quantized mul with batch size 0 failed.")
# add
qY = torch.ops.quantized.add(qX, qX, 1.0, 0)
np.testing.assert_equal(qY.size(), qX.size(),
"Quantized addition with batch size 0 failed.")
# conv
w = torch.randn((2, 2, 2, 2), dtype=torch.float)
qw = torch.quantize_per_tensor(w, scale=1.0, zero_point=0, dtype=torch.qint8)
bias_float = torch.ones(2, dtype=torch.float)
strides = [1, 1]
pads = [0, 0]
dilations = [1, 1]
w_packed = torch.ops.quantized.conv2d_prepack(qw, bias_float, strides, pads, dilations, 1)
result = torch.ops.quantized.conv2d(qX, w_packed, 1.0, 0)
self.assertEqual(result.shape, (0, 2, 3, 3))
# linear
X = torch.ones((0, 2), dtype=torch.float32)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
w = torch.randn((2, 2), dtype=torch.float)
qw = torch.quantize_per_tensor(w, scale=1.0, zero_point=0, dtype=torch.qint8)
w_packed = torch.ops.quantized.linear_prepack(qw, bias_float)
result = torch.ops.quantized.linear(qX, w_packed, 1.0, 0)
self.assertEqual(result.shape, (0, 2))
# dynamic linear
result = torch.ops.quantized.linear_dynamic(X, w_packed)
self.assertEqual(result.shape, (0, 2))
@override_qengines
def test_linear_bias_unpack(self):
"""
Verifies the correctness of bias() and unpack() API for LinearPackedParamBase.
"""
bias_float = torch.ones(2, dtype=torch.float)
w = torch.randn((2, 2), dtype=torch.float)
qw = torch.quantize_per_tensor(w, scale=1.0, zero_point=0, dtype=torch.qint8)
w_packed = torch.ops.quantized.linear_prepack(qw, bias_float)
# test bias()
self.assertEqual(w_packed.bias(), bias_float)
# test unpack()
self.assertEqual(w_packed.unpack()[0], qw)
def test_advanced_indexing(self):
"""
Verifies that the x[:, [0], :, :] syntax works for quantized tensors.
"""
for dtype in (torch.qint8, torch.quint8, torch.qint32):
scale = 0.1
zp = 0
x_q = torch.quantize_per_tensor(
torch.randn(1, 4, 4, 4), scale, zp, dtype)
# reference
x_fp32 = x_q.dequantize()
# single dim, single index
x_q_s1 = x_q[:, [0], :, :]
x_fp32_s1 = x_fp32[:, [0], :, :]
x_fp32_s1_ref = \
torch.quantize_per_tensor(x_fp32_s1, scale, zp, dtype)
self.assertEqual(x_q_s1, x_fp32_s1_ref)
# multiple dim, single index
x_q_s2 = x_q[:, [0], [2], :]
x_fp32_s2 = x_fp32[:, [0], [2], :]
x_fp32_s2_ref = \
torch.quantize_per_tensor(x_fp32_s2, scale, zp, dtype)
self.assertEqual(x_q_s2, x_fp32_s2_ref)
# single dim, multiple indices
x_q_s3 = x_q[:, [2, 0, 1], :, :]
x_fp32_s3 = x_fp32[:, [2, 0, 1], :, :]
x_fp32_s3_ref = \
torch.quantize_per_tensor(x_fp32_s3, scale, zp, dtype)
self.assertEqual(x_q_s3, x_fp32_s3_ref)
# multiple dim, multiple indices
x_q_s4 = x_q[:, [2, 0, 1], :, [1]]
x_fp32_s4 = x_fp32[:, [2, 0, 1], :, [1]]
x_fp32_s4_ref = \
torch.quantize_per_tensor(x_fp32_s4, scale, zp, dtype)
self.assertEqual(x_q_s4, x_fp32_s4_ref)
@override_qengines
def test_custom_module_lstm(self):
qengine = torch.backends.quantized.engine
batch_size = 4
seq_len = 8
input_size = 12
hidden_size = 8
num_layers = 2
dropout = 0 # This is not supported
Bias = [False, True]
Batch_first = [False, True]
Bidirectional = [False, True]
dtype = np.uint8
qtype = torch.quint8
x = np.random.randn(seq_len, batch_size, input_size)
scale, zero_point = _calculate_dynamic_qparams(x, dtype=dtype)
x = torch.from_numpy(x).to(torch.float)
qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point,
dtype=qtype)
x = qx.dequantize()
with torch.no_grad():
for bias, batch_first, bidirectional in itertools.product(
Bias, Batch_first, Bidirectional):
# Assume ~10 dB SNR is sufficient for functional equivalence
# (only ~5 dB without the bias, where linear performs poorly).
min_power = 10 if bias else 5
max_mse = 5e-6 if bias else 5e-1
if batch_first:
x = x.reshape(batch_size, seq_len, input_size)
qx = qx.reshape(batch_size, seq_len, input_size)
else:
x = x.reshape(seq_len, batch_size, input_size)
qx = qx.reshape(seq_len, batch_size, input_size)
lstm = torch.nn.Sequential(
torch.nn.LSTM(input_size, hidden_size,
num_layers=num_layers,
bias=bias, batch_first=batch_first,
dropout=dropout,
bidirectional=bidirectional))
lstm.eval()
y_ref = lstm(x)
# Prepare
lstm.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
lstm_prepared = torch.ao.quantization.prepare(lstm)
self.assertTrue(hasattr(lstm_prepared[0], 'layers'))
self.assertEqual(num_layers, len(lstm_prepared[0].layers))
assert type(lstm_prepared[0]) == torch.nn.quantizable.LSTM
# Calibrate
y = lstm_prepared(x)
self.assertEqual(y_ref, y)
# Quantize
lstm_quantized = torch.ao.quantization.convert(lstm_prepared)
assert type(lstm_quantized[0]) == torch.nn.quantized.LSTM
qy = lstm_quantized(qx)
snr = _snr(y, qy)
snr = [snr[0]] + snr[1]
for signal, mse, power in snr:
self.assertTrue(
power > min_power or mse < max_mse,
msg=(f"Error is too high: SNR(dB): {power}, "
f"Signal: {signal}, MSE: {mse}"))
# Trace
jit_qmodule = torch.jit.trace(lstm_quantized, qx)
# Script
jit_qmodule = torch.jit.script(lstm_quantized)
@override_qengines
def test_custom_module_multi_head_attention(self):
class MultiheadAttentionModel(torch.nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self.layer = torch.nn.MultiheadAttention(*args, **kwargs)
def forward(
self,
query,
key,
value,
key_padding_mask: Optional[torch.Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[torch.Tensor] = None,
):
return self.layer(query, key, value, key_padding_mask, need_weights, attn_mask)
qengine = torch.backends.quantized.engine
min_power = 30
max_mse = 2
num_heads = 16
batch_size = 4
target_seq_length = 128
source_seq_length = 64
qembed_dim = 512 # Must be divisible by the number of heads
kembed_dim = 128
vembed_dim = 256
dropout = 0.0 # This is not supported
Bias = [False, True]
Add_bias_kv = [False, True]
Add_zero_attn = [False, True]
dtype = np.uint8
qtype = torch.quint8
for kdim, vdim in ((kembed_dim, vembed_dim), (None, None)):
fp_data = [
torch.randn(target_seq_length, batch_size, qembed_dim), # Q
torch.randn(source_seq_length, batch_size,
qembed_dim if kdim is None else kembed_dim), # K
torch.randn(source_seq_length, batch_size,
qembed_dim if vdim is None else vembed_dim) # V
]
q_data = []
reduce_range = (qengine in ('fbgemm', 'onednn'))
for idx, x in enumerate(fp_data):
scale, zero_point = _calculate_dynamic_qparams(
x, dtype=dtype, reduce_range=reduce_range)
x = x.to(torch.float)
qx = torch.quantize_per_tensor(x, scale=scale,
zero_point=zero_point, dtype=qtype)
q_data.append(qx)
# Dequantize the data back for reference
fp_data[idx] = qx.dequantize()
with torch.no_grad():
for bias, add_bias_kv, add_zero_attn in itertools.product(
Bias, Add_bias_kv, Add_zero_attn):
mha = MultiheadAttentionModel(qembed_dim, num_heads, dropout,
bias, add_bias_kv, add_zero_attn,
kdim=kdim, vdim=vdim)
mha.eval()
# Prepare
if qengine_is_onednn():
# `reduce_range` is False by default for ONEDNN backend
# but the test fails on earlier CPUs without VNNI.
# So we use a default qconfig with `reduce_range=True` here
mha.qconfig = torch.ao.quantization.get_default_qconfig()
else:
mha.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
mha_prepared = torch.ao.quantization.prepare(
mha)
# Calibrate
y = mha_prepared(*fp_data)
y_ref = mha(*fp_data)
# Check the result of the prepare
self.assertEqual(y_ref[0], y[0]) # Attention
self.assertEqual(y_ref[1], y[1]) # Weight
# Quantize
mha_quantized = torch.ao.quantization.convert(mha_prepared)
qy = mha_quantized(*q_data)
# Reference result
mha.layer = mha_quantized.layer.dequantize()
y_ref = mha(*fp_data)
snr = _snr(y, qy)
for signal, mse, power in snr:
self.assertTrue(
power > min_power or mse < max_mse,
msg=(f"Error is too high: SNR(dB): {power}, "
f"Signal: {signal}, MSE: {mse}; "
f"Run with bias={bias}, "
f"add_bias_kv={add_bias_kv}, "
f"add_zero_attn={add_zero_attn}"))
# Verify the result is scriptable
mha_quantized_scripted = torch.jit.script(mha_quantized)
class TestDynamicQuantizedOps(TestCase):
"""Tests the correctness of the dynamic quantized linear and linear_relu op."""
@override_qengines
@given(
batch_size=st.integers(1, 4),
input_channels=st.integers(16, 32),
output_channels=st.integers(4, 8),
use_bias=st.booleans(),
use_relu=st.booleans(),
use_multi_dim_input=st.booleans(),
use_channelwise=st.booleans(),
reduce_range=st.booleans())
def test_qlinear(self, batch_size, input_channels, output_channels,
use_bias, use_relu, use_multi_dim_input, use_channelwise, reduce_range):
if torch.backends.quantized.engine == 'qnnpack':
reduce_range = False
qlinear_prepack = torch.ops.quantized.linear_prepack
if use_relu:
qlinear_dynamic = torch.ops.quantized.linear_relu_dynamic
else:
qlinear_dynamic = torch.ops.quantized.linear_dynamic
if use_multi_dim_input:
batch_size *= 3 # Test the multi-dim input tensor
X_scale = 1.0
X_zp = 0
X_value_min = 0
X_value_max = 255
if reduce_range:
X_value_max = 127
X_q0 = np.round(np.random.rand(batch_size, input_channels) *
(X_value_max - X_value_min) + X_value_min).astype(np.uint8)
X_q0[0, 0] = X_value_min
X_q0[0, 1] = X_value_max
# W_scale = 1.0
# W_zp = 0
W_scales = np.ones(output_channels)
W_zps = np.zeros(output_channels).astype(np.int64)
W_value_min = -128
W_value_max = 127
W_q0 = np.round(
np.random.rand(output_channels, input_channels)
* (W_value_max - W_value_min)
+ W_value_min
).astype(np.int8)
W_q0[0, 0] = W_value_min
W_q0[1, 0] = W_value_max
b_value_min = -10
b_value_max = 10
b_q0 = np.round(
np.random.rand(output_channels) *
(b_value_max - b_value_min) + b_value_min
).astype(np.int32) if use_bias else None
if torch.backends.quantized.engine in ('fbgemm', 'onednn'):
avoid_vpmaddubsw_overflow_linear(
batch_size,
input_channels,
output_channels,
X_q0,
X_value_min,
X_value_max,
W_q0,
W_value_min,
W_value_max,
)
X_fp32 = torch.from_numpy(_dequantize(X_q0, X_scale, X_zp)).to(dtype=torch.float)
if use_multi_dim_input:
X_fp32 = X_fp32.view(3, int(batch_size / 3), input_channels)
# W_scale, W_zp = _calculate_dynamic_qparams(W_fp32, torch.qint8)
# We currently only check the case where W_scale = 1.0, W_zp = 0.
if use_channelwise:
W_fp32 = torch.from_numpy(_dequantize(W_q0, W_scales.reshape(
(-1, 1)), W_zps.reshape((-1, 1)))).to(dtype=torch.float)
W_q = torch.quantize_per_channel(W_fp32, scales=torch.from_numpy(W_scales),
zero_points=torch.from_numpy(W_zps), axis=0, dtype=torch.qint8)
b_fp32 = torch.from_numpy(
_dequantize(b_q0, X_scale * W_scales, 0)
).to(dtype=torch.float) if use_bias else None
else:
W_fp32 = torch.from_numpy(_dequantize(
W_q0, W_scales[0], W_zps[0])).to(dtype=torch.float)
W_q = torch.quantize_per_tensor(W_fp32, scale=W_scales[0], zero_point=(
W_zps[0].astype(int).item()), dtype=torch.qint8)
b_fp32 = torch.from_numpy(
_dequantize(b_q0, X_scale * int(W_scales[0].item()), 0)
).to(dtype=torch.float) if use_bias else None
# Observe X_fp32 and determine X_scale and X_zero_point; this should match
# the internals of the dynamic linear op.
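# Sketch of the usual asymmetric scheme (an assumption about the helper,
# not taken from it): scale = (max(X) - min(X)) / (qmax - qmin) and
# zero_point = qmin - round(min(X) / scale), with (qmin, qmax) = (0, 255)
# for quint8 (or a reduced range when reduce_range is set).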
X_scale, X_zp = _calculate_dynamic_qparams(X_fp32, torch.quint8, reduce_range)
X_q = torch.quantize_per_tensor(X_fp32, scale=X_scale, zero_point=X_zp, dtype=torch.quint8)
# Weight prepacking operator for dynamic quantized Linear
W_prepack = qlinear_prepack(W_q, b_fp32)
# Dynamic quantized Linear operator with prepacked weight
Y_fp32 = qlinear_dynamic(X_q.dequantize(), W_prepack, reduce_range)
# Y_fp32 = qlinear_dynamic(X_fp32, W_prepack, b_fp32)
Y_fp32_ref = F.linear(X_q.dequantize(), W_q.dequantize(), b_fp32)
# Y_fp32_ref = F.linear(X_fp32, W_fp32, b_fp32)
# if use_multi_dim_input:
# Y_fp32_ref = Y_fp32_ref.view(3, int(batch_size / 3), output_channels)
if use_relu:
Y_fp32_ref[Y_fp32_ref < 0.0] = 0.0
self.assertEqual(Y_fp32, Y_fp32_ref,
msg="torch.ops.quantized.linear_dynamic results are off")
@skipIfNoFBGEMM
@given(
batch_size=st.integers(1, 4),
input_channels=st.integers(16, 32),
output_channels=st.integers(4, 8),
)
def test_qlinear_legacy(self, batch_size, input_channels, output_channels):
X_scale = 1.0
X_zp = 0
X_value_min = 0
X_value_max = 255
X_q0 = np.round(np.random.rand(batch_size, input_channels) * (
X_value_max - X_value_min) + X_value_min
).astype(np.uint8)
X_q0[0, 0] = X_value_min
X_q0[0, 1] = X_value_max
W_scale = 1.0
W_zp = 0
W_value_min = -128
W_value_max = 127
W_q0 = np.round(
np.random.rand(output_channels, input_channels)
* (W_value_max - W_value_min)
+ W_value_min
).astype(np.int8)
W_q0[0, 0] = W_value_min
W_q0[1, 0] = W_value_max
b_value_min = -10
b_value_max = 10
b_q0 = np.round(
np.random.rand(output_channels) * (b_value_max - b_value_min) +
b_value_min
).astype(np.int32)
avoid_vpmaddubsw_overflow_linear(
batch_size,
input_channels,
output_channels,
X_q0,
X_value_min,
X_value_max,
W_q0,
W_value_min,
W_value_max,
)
X_fp32 = torch.from_numpy(_dequantize(X_q0, X_scale, X_zp)).to(dtype=torch.float)
W_fp32 = torch.from_numpy(_dequantize(W_q0, W_scale, W_zp)).to(dtype=torch.float)
b_fp32 = torch.from_numpy(
_dequantize(b_q0, X_scale * W_scale, 0)
).to(dtype=torch.float)
W_scale, W_zp = _calculate_dynamic_qparams(W_fp32, torch.qint8)
W_q = torch.quantize_per_tensor(W_fp32, scale=W_scale, zero_point=W_zp, dtype=torch.qint8)
# Observe X_fp32 and determine X_scale and X_zero_point; this should match
# the internals of the dynamic linear op.
X_scale, X_zp = _calculate_dynamic_qparams(X_fp32, torch.quint8)
X_q = torch.quantize_per_tensor(X_fp32, scale=X_scale, zero_point=X_zp, dtype=torch.quint8)
W_int8, col_offsets, W_scale, W_zp = torch.fbgemm_linear_quantize_weight(W_q.dequantize())
W_prepack = torch.fbgemm_pack_quantized_matrix(W_int8.clone(), W_int8.size(1), W_int8.size(0))
# Quantized Linear operator with prepacked weight
Y_fp32 = torch.fbgemm_linear_int8_weight(
X_q.dequantize(), W_q.dequantize(), W_prepack, col_offsets,
W_scale, W_zp, b_fp32)
Y_fp32_ref = F.linear(X_q.dequantize(), W_q.dequantize(), b_fp32)
# Y_fp32_ref = F.linear(X_fp32, W_fp32, b_fp32)
self.assertEqual(Y_fp32, Y_fp32_ref,
msg="torch.ops.quantized.fbgemm_linear_dynamic results are off")
@skipIfNoFBGEMM
@given(
input_channels=st.integers(16, 32),
output_channels=st.integers(4, 8),
exponent=st.integers(0, 8))
def test_linear_prepack_fp16_numerics(self, input_channels, output_channels, exponent):
w = torch.randn(output_channels, input_channels) * 10**exponent
bias = None
w_packed_fp16 = torch.ops.quantized.linear_prepack_fp16(w, bias)
w_unpacked_fp16 = torch.ops.quantized.linear_unpack_fp16(w_packed_fp16)
w_fp16 = w.to(torch.float16).to(torch.float32)
self.assertTrue(torch.equal(w_fp16, w_unpacked_fp16[0]))
@skipIfNoFBGEMM
def test_qlinear_dynamic_fp16(self):
options = itertools.product(
(2, 4), # batch_size
(4, 5, 12), # input_channels
(4, 7, 8), # output_channels
(True, False), # use_bias
(True, False), # use_relu
)
for batch_size, input_channels, output_channels, use_bias, use_relu in options:
qlinear_prepack = torch.ops.quantized.linear_prepack_fp16
if use_relu:
qlinear_dynamic = torch.ops.quantized.linear_relu_dynamic_fp16
else:
qlinear_dynamic = torch.ops.quantized.linear_dynamic_fp16
x = torch.randn(batch_size, input_channels)
w = torch.randn(output_channels, input_channels)
bias = torch.randn(output_channels) if use_bias else None
w_packed = qlinear_prepack(w, bias)
out = qlinear_dynamic(x, w_packed)
# qlinear_dynamic_fp16 uses FP32 activation tensors and FP16 weight tensors
# output is FP32
w_fp16 = w.to(torch.float16).to(torch.float32)
ref = F.linear(x, w_fp16, bias)
if use_relu:
ref.relu_()
self.assertEqual(out, ref)
"""Tests the correctness of the dynamic quantized lstm/gru."""
def _get_rnn_inputs(self, seq_len, num_batches, input_size, hidden_size, num_directions, reduce_range):
# For Input (seq_len, batch, input_size)
X = torch.randn(seq_len, num_batches, input_size)
s, z = _calculate_dynamic_qparams(X, torch.quint8, reduce_range)
Xq = torch.quantize_per_tensor(X, s, z, torch.quint8)
# For H and C: (num_layers(1) * num_directions, batch, hidden_size)
if num_directions == 1:
H = torch.randn(num_directions, num_batches, hidden_size)
C = torch.randn(num_directions, num_batches, hidden_size)
else:
H = torch.zeros(num_directions, num_batches, hidden_size)
C = torch.zeros(num_directions, num_batches, hidden_size)
s, z = _calculate_dynamic_qparams(H, torch.quint8, reduce_range)
Hq = torch.quantize_per_tensor(H, s, z, torch.quint8)
s, z = _calculate_dynamic_qparams(C, torch.quint8, reduce_range)
Cq = torch.quantize_per_tensor(C, s, z, torch.quint8)
return Xq, Hq, Cq
def _get_rnn_weights_and_bias(self, input_size, hidden_size, num_directions, per_channel_quant, rnn_type):
hidden_mult_map = {'LSTM': 4, 'LSTMCell': 4, 'GRU': 3, 'GRUCell': 3, 'RNNTanh': 2, 'RNNReLU': 2}
hidden_mult = hidden_mult_map[rnn_type]
weights1 = torch.randn(hidden_mult * hidden_size, input_size)
weights2 = torch.randn(hidden_mult * hidden_size, hidden_size)
scale1 = 0.1 * torch.ones([weights1.size()[0]])
scale2 = 0.3 * torch.ones([weights2.size()[0]])
zero_point1 = torch.zeros(scale1.size()).to(int)
zero_point2 = torch.zeros(scale2.size()).to(int)
b1 = torch.zeros(hidden_mult * hidden_size)
if per_channel_quant:
Wq1 = torch.quantize_per_channel(weights1, scale1, zero_point1, 0, torch.qint8)
Wq2 = torch.quantize_per_channel(weights2, scale2, zero_point2, 0, torch.qint8)
else:
Wq1 = torch.quantize_per_tensor(weights1, float(scale1[0]), int(zero_point1[0]), torch.qint8)
Wq2 = torch.quantize_per_tensor(weights2, float(scale2[0]), int(zero_point2[0]), torch.qint8)
return Wq1, Wq2, b1, b1
@given(
num_batches=st.integers(1, 4),
input_size=st.integers(16, 32),
hidden_size=st.integers(4, 8),
num_directions=st.integers(1, 2),
per_channel_quant=st.booleans())
@override_qengines
def test_qlstmGRU(self, num_batches, input_size, hidden_size,
num_directions, per_channel_quant):
# We test only with a sequence length of 1 and a single layer, since dynamic quantization
# occurs multiple times within the LSTM/GRU op and we do not model the quantization between
# the multiple calls of the linear op inside it.
seq_len = 1
for rnn_type in ['LSTM', 'GRU']:
for dtype in [torch.qint8, torch.float16]:
# Fp16 quantization is not supported for qnnpack or onednn
if torch.backends.quantized.engine in ('qnnpack', 'onednn') and dtype == torch.float16:
continue
if torch.backends.quantized.engine == 'qnnpack':
reduce_range = False
else:
reduce_range = True
Xq, Hq, Cq = self._get_rnn_inputs(seq_len, num_batches, input_size,
hidden_size, num_directions, reduce_range)
Wq1, Wq2, b1, b2 = self._get_rnn_weights_and_bias(input_size,
hidden_size,
num_directions,
per_channel_quant,
rnn_type)
if dtype == torch.qint8:
packed_ih = torch.ops.quantized.linear_prepack(Wq1, b1)
packed_hh = torch.ops.quantized.linear_prepack(Wq2, b2)
cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic(
packed_ih, packed_hh, b1, b2, reduce_range)
W_ref1 = Wq1.dequantize()
W_ref2 = Wq2.dequantize()
else:
packed_ih = torch.ops.quantized.linear_prepack_fp16(Wq1.dequantize(), b1)
packed_hh = torch.ops.quantized.linear_prepack_fp16(Wq2.dequantize(), b2)
cell_params = torch.ops.quantized.make_quantized_cell_params_fp16(packed_ih, packed_hh)
W_ref1 = Wq1.dequantize().to(torch.float16).to(torch.float32)
W_ref2 = Wq2.dequantize().to(torch.float16).to(torch.float32)
if rnn_type == 'LSTM':
if num_directions > 1:
result_ref = _VF.lstm(Xq.dequantize(),
(Hq.dequantize(), Cq.dequantize()),
[W_ref1, W_ref2, b1, b2, W_ref1, W_ref2, b1, b2],
True,
1,
0,
False,
num_directions > 1,
False)
result_dynamic = torch.quantized_lstm(Xq.dequantize(),
(Hq.dequantize(), Cq.dequantize()),
([cell_params, cell_params]),
True,
1,
0,
False,
True,
False,
dtype=torch.qint8,
use_dynamic=True)
else:
result_ref = _VF.lstm(Xq.dequantize(),
(Hq.dequantize(), Cq.dequantize()),
[W_ref1, W_ref2, b1, b2],
True,
1,
0,
False,
num_directions > 1,
False)
result_dynamic = torch.quantized_lstm(Xq.dequantize(),
(Hq.dequantize(), Cq.dequantize()),
([cell_params]),
True,
1,
0,
False,
num_directions > 1,
False,
dtype=torch.qint8,
use_dynamic=True)
if rnn_type == 'GRU':
if num_directions > 1:
result_ref = _VF.gru(Xq.dequantize(),
Hq.dequantize(),
[W_ref1, W_ref2, b1, b2, W_ref1, W_ref2, b1, b2],
True,
1,
0,
False,
True,
False)
result_dynamic = torch.quantized_gru(Xq.dequantize(),
Hq.dequantize(),
([cell_params, cell_params]),
True,
1,
0,
False,
True,
False)
else:
result_ref = _VF.gru(Xq.dequantize(),
Hq.dequantize(),
[W_ref1, W_ref2, b1, b2],
True,
1,
0,
False,
False,
False)
result_dynamic = torch.quantized_gru(Xq.dequantize(),
Hq.dequantize(),
([cell_params]),
True,
1,
0,
False,
False,
False)
self.assertEqual(result_ref[0], result_dynamic[0], msg="torch.quantized_lstm results are off")
@given(
num_batches=st.integers(1, 4),
input_size=st.integers(16, 32),
hidden_size=st.integers(4, 8),
per_channel_quant=st.booleans())
@override_qengines
def test_qrnncell(self, num_batches, input_size, hidden_size, per_channel_quant):
# We test only with a sequence length of 1 and a single layer, since dynamic quantization
# occurs multiple times within the cell op and we do not model the quantization between
# the multiple calls of the linear op inside it.
seq_len = 1
for rnn_type in ['LSTMCell', 'GRUCell', 'RNNTanh', 'RNNReLU']:
for dtype in [torch.qint8, torch.float16]:
# Fp16 quantization is not supported for qnnpack or onednn
if torch.backends.quantized.engine in ('qnnpack', 'onednn') and dtype == torch.float16:
continue
if torch.backends.quantized.engine == 'qnnpack':
reduce_range = False
else:
reduce_range = True
Xq, Hq, Cq = self._get_rnn_inputs(seq_len, num_batches, input_size, hidden_size, 1, reduce_range)
Wq1, Wq2, b1, b2 = self._get_rnn_weights_and_bias(
input_size, hidden_size, 1, per_channel_quant, rnn_type)
if dtype == torch.qint8:
packed_ih = torch.ops.quantized.linear_prepack(Wq1, b1)
packed_hh = torch.ops.quantized.linear_prepack(Wq2, b2)
W_ref1 = Wq1.dequantize()
W_ref2 = Wq2.dequantize()
else:
packed_ih = torch.ops.quantized.linear_prepack_fp16(Wq1.dequantize(), b1)
packed_hh = torch.ops.quantized.linear_prepack_fp16(Wq2.dequantize(), b2)
W_ref1 = Wq1.dequantize().to(torch.float16).to(torch.float32)
W_ref2 = Wq2.dequantize().to(torch.float16).to(torch.float32)
state = {'LSTMCell': (Hq.dequantize()[0], Cq.dequantize()[0]),
'GRUCell': Hq.dequantize()[0],
'RNNTanh': Hq.dequantize()[0],
'RNNReLU': Hq.dequantize()[0]}
fn_dict = {'LSTMCell': torch._VF.lstm_cell,
'GRUCell': torch._VF.gru_cell,
'RNNTanh': torch._VF.rnn_tanh_cell,
'RNNReLU': torch._VF.rnn_relu_cell}
qfn_dict = {'LSTMCell': torch.ops.quantized.quantized_lstm_cell_dynamic,
'GRUCell': torch.ops.quantized.quantized_gru_cell_dynamic,
'RNNTanh': torch.ops.quantized.quantized_rnn_tanh_cell_dynamic,
'RNNReLU': torch.ops.quantized.quantized_rnn_relu_cell_dynamic}
W_ref_dict = {torch.float16: (Wq1.dequantize().to(torch.float16).to(torch.float32),
Wq2.dequantize().to(torch.float16).to(torch.float32)),
torch.qint8: (Wq1.dequantize(), Wq2.dequantize())}
result_ref = fn_dict[rnn_type](Xq.dequantize()[0], state[rnn_type], W_ref1, W_ref2, b1, b2)
result_dynamic = qfn_dict[rnn_type](Xq.dequantize()[0], state[rnn_type], packed_ih, packed_hh, b1, b2)
self.assertEqual(result_ref[0], result_dynamic[0], msg="torch.quantized_rnncell results are off")
def _test_qconv_op_impl(self, q_mod, dq_op, dim, dtype):
# The goal here is to show that the dynamic op is the same as
# calc params->quantize_input->quantized op->dequantize output
if qengine_is_qnnpack() and (IS_PPC or TEST_WITH_UBSAN):
return # not supported by QNNPACK
if qengine_is_qnnpack():
reduce_range = False
else:
reduce_range = True
X_fp32 = torch.randn(*([2] * dim))
s, z = _calculate_dynamic_qparams(X_fp32, dtype, reduce_range)
quantized_module = q_mod(2, 3, 1)
packed_params = quantized_module._packed_params
quantized_module.scale, quantized_module.zero_point = s, z
X_q = torch.quantize_per_tensor(X_fp32, s, z, dtype)
Y_q_ref = quantized_module(X_q)
Y_ref = torch.dequantize(Y_q_ref)
X_dq = torch.dequantize(X_q)
Y = dq_op(X_dq, packed_params, reduce_range)
self.assertEqual(Y, Y_ref)
@override_qengines
def test_dynamic_conv1d(self):
q_mod = torch.nn.quantized.Conv1d
dq_op = torch.ops.quantized.conv1d_dynamic
dim = 3
dtype = torch.quint8
self._test_qconv_op_impl(q_mod, dq_op, dim, dtype)
@override_qengines
def test_dynamic_conv2d(self):
q_mod = torch.nn.quantized.Conv2d
dq_op = torch.ops.quantized.conv2d_dynamic
dim = 4
dtype = torch.quint8
self._test_qconv_op_impl(q_mod, dq_op, dim, dtype)
@override_qengines
def test_dynamic_conv3d(self):
q_mod = torch.nn.quantized.Conv3d
dq_op = torch.ops.quantized.conv3d_dynamic
dim = 5
dtype = torch.quint8
self._test_qconv_op_impl(q_mod, dq_op, dim, dtype)
@override_qengines
def test_dynamic_convtranspose1d(self):
q_mod = torch.nn.quantized.ConvTranspose1d
dq_op = torch.ops.quantized.conv_transpose1d_dynamic
dim = 3
dtype = torch.quint8
self._test_qconv_op_impl(q_mod, dq_op, dim, dtype)
@override_qengines
def test_dynamic_convtranspose2d(self):
q_mod = torch.nn.quantized.ConvTranspose2d
dq_op = torch.ops.quantized.conv_transpose2d_dynamic
dim = 4
dtype = torch.quint8
self._test_qconv_op_impl(q_mod, dq_op, dim, dtype)
@override_qengines
def test_dynamic_convtranspose3d(self):
q_mod = torch.nn.quantized.ConvTranspose3d
dq_op = torch.ops.quantized.conv_transpose3d_dynamic
dim = 5
dtype = torch.quint8
if qengine_is_qnnpack():
return # TODO: fix MakeDeConvOutputShape overflowing for convT3d with qnnpack
self._test_qconv_op_impl(q_mod, dq_op, dim, dtype)
class TestQuantizedLinear(TestCase):
"""Tests the correctness of the quantized linear and linear_relu op."""
@given(batch_size=st.integers(1, 4),
input_channels=st.integers(16, 32),
output_channels=st.integers(4, 8),
use_bias=st.booleans(),
use_relu=st.booleans(),
use_multi_dim_input=st.booleans(),
use_channelwise=st.booleans())
@override_qengines
def test_qlinear(self, batch_size, input_channels, output_channels, use_bias,
use_relu, use_multi_dim_input, use_channelwise):
decimal_val = 4
dtypes = [torch.quint8]
if torch.backends.quantized.engine == 'qnnpack':
# QNNPACK supports uint8 in the kernels. In the op we shift the int8
# weight values to uint8 to be on par with fbgemm. However, this causes
# some rounding issues in rare cases. So, we relax the check to allow
# off by one results.
decimal_val = 0
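# Illustrative note (an assumption about the kernel internals, not taken from
# the source): shifting an int8 weight w in [-128, 127] to uint8 presumably
# maps it to w + 128 in [0, 255] with the zero point shifted likewise; a
# one-unit rounding difference in that path is why decimal_val is relaxed to 0.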
# only qnnpack qengine supports qint8 when xnnpack is available
if torch.backends.xnnpack.enabled:
dtypes.append(torch.qint8)
for dtype in dtypes:
# No support for channelwise in xnnpack (int8)
# ONEDNN does not support qint8
if dtype == torch.qint8 and (use_channelwise or qengine_is_onednn()):
return
nptype = np_dtype[dtype]
qlinear_prepack = torch.ops.quantized.linear_prepack
if use_relu:
qlinear = torch.ops.quantized.linear_relu
else:
qlinear = torch.ops.quantized.linear
if use_multi_dim_input:
batch_size *= 3 # Test the multi-dim input tensor
X_scale = 1.5
X_zp = 5
X_value_min = -128 if dtype == torch.qint8 else 0
X_value_max = 127 if dtype == torch.qint8 else 255
X_q0 = np.round(
np.random.rand(batch_size, input_channels) *
(X_value_max - X_value_min)
+ X_value_min
).astype(nptype)
W_scales = np.random.rand(output_channels)
# xnnpack forces W_zp to 0 when using symmetric quantization
# ONEDNN only supports symmetric quantization of weight
if dtype == torch.qint8 or qengine_is_onednn():
W_zps = np.zeros(output_channels).astype(np.int64)
else:
W_zps = np.round(np.random.rand(output_channels) * 100 - 50).astype(np.int64)
# When using symmetric quantization, the xnnpack fully connected op restricts
# the weight range to [-127, 127] instead of [-128, 127].
W_value_min = -127 if dtype == torch.qint8 else -128
W_value_max = 127
W_q0 = np.round(
np.random.rand(output_channels, input_channels)
* (W_value_max - W_value_min)
+ W_value_min
).astype(np.int8) # weight is always int8_t
b_value_min = -10
b_value_max = 10
b_q0 = np.round(
np.random.rand(output_channels) *
(b_value_max - b_value_min) + b_value_min
).astype(np.int32) if use_bias else None
if torch.backends.quantized.engine in ('fbgemm', 'onednn'):
avoid_vpmaddubsw_overflow_linear(
batch_size,
input_channels,
output_channels,
X_q0,
X_value_min,
X_value_max,
W_q0,
W_value_min,
W_value_max,
)
X = torch.from_numpy(_dequantize(
X_q0, X_scale, X_zp)).to(dtype=torch.float)
X_q = torch.quantize_per_tensor(
X, scale=X_scale, zero_point=X_zp, dtype=dtype)
if use_channelwise:
W = torch.from_numpy(_dequantize(W_q0, W_scales.reshape(
(-1, 1)), W_zps.reshape((-1, 1)))).to(dtype=torch.float)
W_q = torch.quantize_per_channel(W, scales=torch.from_numpy(W_scales),
zero_points=torch.from_numpy(W_zps), axis=0, dtype=torch.qint8)
b = torch.from_numpy(_dequantize(
b_q0, X_scale * W_scales, 0)).to(dtype=torch.float) if use_bias else None
b_q = torch.quantize_per_channel(b, scales=torch.from_numpy(X_scale * W_scales),
zero_points=torch.zeros(output_channels, dtype=torch.long),
axis=0, dtype=torch.qint32) if use_bias else None
else:
W = torch.from_numpy(_dequantize(
W_q0, W_scales[0], W_zps[0])).to(dtype=torch.float)
W_q = torch.quantize_per_tensor(W, scale=W_scales[0], zero_point=(
W_zps[0].astype(int).item()), dtype=torch.qint8)
b = torch.from_numpy(_dequantize(
b_q0, X_scale * (W_scales[0].item()), 0)).to(dtype=torch.float) if use_bias else None
b_q = torch.quantize_per_tensor(
b, scale=X_scale * (W_scales[0].item()), zero_point=0, dtype=torch.qint32) if use_bias else None
# Compare X_scale * W_scale * input_channels * X_value_max * W_value_max with
# Y_scale * 255 (max for uint8).
Y_scale = 125.1234
Y_zp = 5
# Weight prepacking operator for quantized Linear
float_bias = b if use_bias else None
W_prepack = qlinear_prepack(W_q, float_bias)
if use_multi_dim_input:
X_q = X_q.view(3, int(batch_size / 3), input_channels)
# Quantized Linear operator with prepacked weight
Y_q = qlinear(X_q, W_prepack, Y_scale, Y_zp)
if not use_channelwise:
# Test the per-tensor quantization only
# Reference quantized Linear operator
Y_q_ref = qlinear_ref(X_q0, X_scale, X_zp, W_q0,
W_scales[0], W_zps[0], b_q0, Y_scale, Y_zp, dtype=nptype)
if use_relu:
Y_q_ref[Y_q_ref < Y_zp] = Y_zp
if use_multi_dim_input:
Y_q_ref = np.reshape(
Y_q_ref, (3, int(batch_size / 3), output_channels))
# Assert equal
np.testing.assert_array_almost_equal(Y_q_ref, Y_q.int_repr().numpy(), decimal=decimal_val)
# Test both per-tensor and per-channel quantization
# Reference quantized result from PyTorch Linear operator
W_fp32 = W_q.dequantize().to(dtype=torch.float)
X_fp32 = X_q.dequantize().to(dtype=torch.float)
b_fp32 = b_q.dequantize().to(dtype=torch.float) if use_bias else None
Y_fp32_ref = F.linear(X_fp32, W_fp32, b_fp32)
if use_relu:
Y_fp32_ref[Y_fp32_ref < 0.0] = 0.0
Y_q_ref2 = torch.quantize_per_tensor(
Y_fp32_ref, Y_scale, Y_zp, dtype)
# Assert equal
np.testing.assert_array_almost_equal(
Y_q_ref2.int_repr().numpy(), Y_q.int_repr().numpy(), decimal=decimal_val)
@given(batch_size=st.integers(1, 4),
# In cuDNN v8.4.0, input channels must be a multiple of 4 for int8
# tensors; in cuDNN v8.3.3 they must be a multiple of 16.
input_channels=st.sampled_from([4, 8, 12, 16, 32]),
# Constraints on output channels appear to be relaxed: any positive integer seems
# to work except 1. It is not clear why 1 does not work. TODO: check with Yang
output_channels=st.integers(2, 36),
use_bias=st.booleans(),
use_relu=st.booleans(),
use_multi_dim_input=st.booleans(),
use_channelwise=st.sampled_from([False])) # channelwise currently not supported for qlinear cudnn
@skipIfNoFBGEMM
@unittest.skipIf(not TEST_CUDNN, "cudnn is not enabled.")
@unittest.skip("Local only - currently the qlinear_cudnn op is bulid "
"with USE_EXPERIMENTAL_CUDNN_V8_API, we can enable the test "
"after it is built by default")
# TODO: check with yang regarding CUDNN flags
def test_qlinear_cudnn(self, batch_size, input_channels, output_channels, use_bias,
use_relu, use_multi_dim_input, use_channelwise):
qlinear_prepack = torch.ops.quantized.linear_prepack
if use_relu:
qlinear_op = torch.ops.quantized.linear_relu
else:
qlinear_op = torch.ops.quantized.linear
X_scale = 1.5
X_zp = 0
X_value_min = -128
X_value_max = 127
X_q0 = np.round(
np.random.rand(batch_size, input_channels) *
(X_value_max - X_value_min)
+ X_value_min).astype(np.int8)
W_scale = 2.5
W_zp = 0
W_value_min = -128
W_value_max = 127
W_q0 = np.round(
np.random.rand(output_channels, input_channels)
* (W_value_max - W_value_min)
+ W_value_min
).astype(np.int8)
b_value_min = -10
b_value_max = 10
b_q0 = np.round(
np.random.rand(output_channels) *
(b_value_max - b_value_min) + b_value_min
).astype(np.int32) if use_bias else None
avoid_vpmaddubsw_overflow_linear(
batch_size,
input_channels,
output_channels,
X_q0,
X_value_min,
X_value_max,
W_q0,
W_value_min,
W_value_max,
)
quant_dtype = torch.qint8
X = torch.from_numpy(_dequantize(
X_q0, X_scale, X_zp)).to(dtype=torch.float).to(device="cuda")
X_q = torch.quantize_per_tensor(
X, scale=X_scale, zero_point=X_zp, dtype=quant_dtype)
W = torch.from_numpy(_dequantize(
W_q0, W_scale, W_zp)).to(dtype=torch.float).to(device="cuda")
W_q = torch.quantize_per_tensor(W, scale=W_scale, zero_point=W_zp, dtype=quant_dtype)
# Dequantize the bias with scale X_scale * W_scale, consistent with the other linear tests
b = torch.from_numpy(_dequantize(
b_q0, X_scale * W_scale, 0)).to(dtype=torch.float).to(device="cuda") if use_bias else None
b_q = torch.quantize_per_tensor(
b, scale=X_scale * W_scale, zero_point=0, dtype=quant_dtype) if use_bias else None
Y_scale = 0.5
Y_zp = 0
# Weight prepacking operator for quantized Linear
float_bias = b if use_bias else None
W_prepack = qlinear_prepack(W_q, float_bias)
# Quantized Linear operator with prepacked weight
Y_q = qlinear_op(X_q, W_prepack, Y_scale, Y_zp).to(device="cpu")
Y_q_ref = qlinear_ref(X_q0, X_scale, X_zp, W_q0,
W_scale, W_zp, b_q0, Y_scale, Y_zp, dtype=np.int8)
if use_relu:
Y_q_ref[Y_q_ref < Y_zp] = Y_zp
decimal_val = 0
np.testing.assert_array_almost_equal(Y_q_ref, Y_q.int_repr().numpy(), decimal=decimal_val)
"""Tests the correctness of the quantized::linear_unpack op."""
@given(W=hu.tensor(shapes=hu.array_shapes(2, 2,),
qparams=hu.qparams(dtypes=torch.qint8)),
use_channelwise=st.booleans())
@override_qengines
def test_qlinear_unpack(self, W, use_channelwise):
W, (W_scale, W_zp, torch_type) = W
if use_channelwise:
output_channels = W.shape[0]
W_scales = torch.rand(output_channels).to(torch.double)
W_zps = torch.round(torch.rand(output_channels)
* 100 - 50).to(torch.int64)
qlinear_prepack = torch.ops.quantized.linear_prepack
qlinear_unpack = torch.ops.quantized.linear_unpack
# ONEDNN only supports symmetric quantization of weight
if qengine_is_onednn():
if use_channelwise:
W_zps = torch.zeros(output_channels).to(torch.int64)
else:
W_zp = 0
W = torch.from_numpy(W)
if use_channelwise:
W_q = torch.quantize_per_channel(
W, W_scales, W_zps, 0, dtype=torch_type)
else:
W_q = torch.quantize_per_tensor(W, scale=W_scale, zero_point=W_zp,
dtype=torch_type)
# Weight prepacking operator for quantized Linear
W_prepack = qlinear_prepack(W_q)
# Weight unpack operator for quantized Linear (Used for serialization)
W_q_origin = qlinear_unpack(W_prepack)[0]
# Assert equal
np.testing.assert_equal(W_q.int_repr(), W_q_origin.int_repr().numpy())
if use_channelwise:
np.testing.assert_array_almost_equal(np.float32(W_q.q_per_channel_scales().numpy()),
np.float32(
W_q_origin.q_per_channel_scales().numpy()),
decimal=4)
np.testing.assert_equal(W_q.q_per_channel_zero_points(
).numpy(), W_q_origin.q_per_channel_zero_points().numpy())
else:
np.testing.assert_equal(np.float32(
W_q.q_scale()), np.float32(W_q_origin.q_scale()))
np.testing.assert_equal(
W_q.q_zero_point(), W_q_origin.q_zero_point())
@unittest.skipIf(IS_MACOS, "Known test failure on Mac.")
class TestQuantizedEmbeddingOps(TestCase):
def _test_embedding_bag_unpack_impl(self, pack_fn, unpack_fn, bit_rate, optimized_qparams, weights):
data_type = weights.dtype
qtype = torch.quint8
if bit_rate == 8:
w_packed = pack_fn(weights)
else:
w_packed = pack_fn(weights, optimized_qparams=optimized_qparams)
w_unpacked = unpack_fn(w_packed)
if (bit_rate == 8 or bit_rate == 4) and data_type != torch.float16:
# torch.quantize_per_channel does not support float16 yet.
obs_weights = weights
# Combine 3D embeddings (e.g. stacked combination of embeddings)
# in a dimension orthogonal to channels.
if (len(obs_weights.shape) > 2):
stacked_shape = list(weights.size())
stacked_shape[1] *= stacked_shape[0]
obs_weights = weights.reshape(stacked_shape[1:])
# Check numerics of prepack function that accepts qtensor as input.
# We use min-max observer to mimic the quantization performed in the original function.
obs = PerChannelMinMaxObserver(dtype=torch.quint8, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0)
obs(obs_weights)
# Get the scale and zero point for the weight tensor
qparams = obs.calculate_qparams()
if bit_rate == 4:
qtype = torch.quint4x2
# Quantize the weights (8-bit, or 4-bit packed when bit_rate == 4)
qweight = torch.quantize_per_channel(obs_weights, qparams[0], qparams[1], axis=0, dtype=qtype)
real_packed_weight = torch.ops.quantized.embedding_bag_prepack(qweight)
self.assertTrue(isinstance(real_packed_weight, torch._C.ScriptObject))
unpacked_weight = torch.ops.quantized.embedding_bag_unpack(real_packed_weight)
self.assertEqual(unpacked_weight.int_repr().numpy(), qweight.int_repr().numpy())
self.assertEqual(unpacked_weight.q_per_channel_scales(), qweight.q_per_channel_scales())
self.assertEqual(unpacked_weight.q_per_channel_zero_points(), qweight.q_per_channel_zero_points())
# compare against C2 to ensure numerical equivalency.
from caffe2.python import core, workspace
conversion_op = "FloatToFused8BitRowwiseQuantized" if data_type == torch.float32 else "HalfFloatToFused8BitRowwiseQuantized"
reverse_conversion_op = None
if bit_rate == 4:
conversion_op = "FloatToFused4BitRowwiseQuantized" if data_type == torch.float32 else "HalfToFused4BitRowwiseQuantized"
reverse_conversion_op = "Fused4BitRowwiseQuantizedToFloat"
elif bit_rate == 2:
conversion_op = "FloatToFused2BitRowwiseQuantized" if data_type == torch.float32 else "HalfToFused2BitRowwiseQuantized"
reverse_conversion_op = "Fused2BitRowwiseQuantizedToFloat"
def get_c2_weights(weights, engine_str):
workspace.ResetWorkspace()
workspace.FeedBlob("weights", weights)
workspace.RunOperatorOnce(
core.CreateOperator(
conversion_op, ["weights"], ["quantized_weights"], engine=engine_str
)
)
emb_q = workspace.FetchBlob("quantized_weights")
if bit_rate == 4 or bit_rate == 2:
workspace.RunOperatorOnce(
core.CreateOperator(
reverse_conversion_op, ["quantized_weights"], ["dequantized_weights"]
)
)
dequantized_data = torch.from_numpy(workspace.FetchBlob("dequantized_weights"))
else:
dequantized_data = torch.ops._caffe2.Fused8BitRowwiseQuantizedToFloat(
torch.tensor(emb_q)
)
return torch.from_numpy(emb_q), dequantized_data
if optimized_qparams:
engine = "GREEDY"
else:
engine = ""
# C2 quantization requires the tensor's memory format to be contiguous,
# otherwise it throws an exception. torch.clone() produces a contiguous copy.
c2_copy = torch.clone(weights)
w_packed_c2, w_unpacked_c2 = get_c2_weights(c2_copy, engine)
# Compare packed weights against C2.
np.testing.assert_allclose(w_packed.numpy(), w_packed_c2.numpy(), atol=1e-6, rtol=1e-6)
# Compare unpacked weights against C2
np.testing.assert_allclose(w_unpacked.numpy(), w_unpacked_c2.numpy(), atol=1e-6, rtol=1e-6)
def _test_embedding_bag_unpack_fn(self, pack_fn, unpack_fn, num_embeddings, embedding_dim, bit_rate,
optimized_qparams, num_batches, data_type=np.float32):
# when num_batches = 1, it will create a 2D tensor
unsplit_weight = torch.from_numpy((np.random.random_sample((
num_batches, num_embeddings, embedding_dim)).squeeze() + 1).astype(np.float32))
# test unsplit weight (memory format is `contiguous`)
self._test_embedding_bag_unpack_impl(pack_fn, unpack_fn, bit_rate, optimized_qparams, unsplit_weight)
# test split weights (memory format is not `contiguous`)
split_dim = len(unsplit_weight.shape) - 2
split_weights = torch.split(unsplit_weight, 1, dim=split_dim)
for weight in split_weights:
self._test_embedding_bag_unpack_impl(pack_fn, unpack_fn, bit_rate, optimized_qparams, weight)
""" Tests the correctness of the embedding_bag_8bit pack/unpack op against C2 """
@unittest.skipIf(not BUILD_WITH_CAFFE2, "Test needs Caffe2")
@given(num_embeddings=st.integers(10, 100),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 4 == 0),
num_batches=st.integers(1, 5),
data_type=st.sampled_from([np.float32, np.float16]),)
def test_embedding_bag_byte_unpack(self, num_embeddings, embedding_dim, num_batches, data_type):
pack_fn = torch.ops.quantized.embedding_bag_byte_prepack
unpack_fn = torch.ops.quantized.embedding_bag_byte_unpack
self._test_embedding_bag_unpack_fn(
pack_fn, unpack_fn, num_embeddings, embedding_dim, 8, False, num_batches, data_type=data_type)
""" Tests the correctness of the embedding_bag_4bit pack/unpack op against C2 """
@unittest.skipIf(not BUILD_WITH_CAFFE2, "Test needs Caffe2")
@given(num_embeddings=st.integers(10, 100),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 4 == 0),
optimized_qparams=st.booleans(),
data_type=st.sampled_from([np.float32, np.float16]),)
def test_embedding_bag_4bit_unpack(self, num_embeddings, embedding_dim, optimized_qparams, data_type):
pack_fn = torch.ops.quantized.embedding_bag_4bit_prepack
unpack_fn = torch.ops.quantized.embedding_bag_4bit_unpack
# 4-bit and 2-bit quantization currently only work for 2D tensors, so we set num_batches to 1
self._test_embedding_bag_unpack_fn(
pack_fn, unpack_fn, num_embeddings, embedding_dim, 4, optimized_qparams, 1, data_type=data_type)
""" Tests the correctness of the embedding_bag_2bit pack/unpack op against C2 """
@unittest.skipIf(not BUILD_WITH_CAFFE2, "Test needs Caffe2")
@given(num_embeddings=st.integers(10, 100),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 8 == 0),
optimized_qparams=st.booleans(),
data_type=st.sampled_from([np.float32, np.float16]),)
def test_embedding_bag_2bit_unpack(self, num_embeddings, embedding_dim, optimized_qparams, data_type):
pack_fn = torch.ops.quantized.embedding_bag_2bit_prepack
unpack_fn = torch.ops.quantized.embedding_bag_2bit_unpack
# 4-bit and 2-bit quantization currently only work for 2D tensors, so we set num_batches to 1
self._test_embedding_bag_unpack_fn(
pack_fn, unpack_fn, num_embeddings, embedding_dim, 2, optimized_qparams, 1, data_type=data_type)
def embedding_bag_rowwise_offsets_run(
self, bit_rate, num_embeddings,
embedding_dim, num_offsets,
use_32bit_indices, use_32bit_offsets,
enable_per_sample_weights,
include_last_offset, fallback_to_no_sparse, sparsity, atol, rtol):
pt_op = torch.ops.quantized.embedding_bag_byte_rowwise_offsets
pt_prepack_op = torch.ops.quantized.embedding_bag_byte_prepack
if bit_rate == 4:
pt_op = torch.ops.quantized.embedding_bag_4bit_rowwise_offsets
pt_prepack_op = torch.ops.quantized.embedding_bag_4bit_prepack
elif bit_rate == 2:
pt_op = torch.ops.quantized.embedding_bag_2bit_rowwise_offsets
pt_prepack_op = torch.ops.quantized.embedding_bag_2bit_prepack
weights = torch.from_numpy((np.random.random_sample((
num_embeddings, embedding_dim)) + 1).astype(np.float32))
max_segments = 5
max_segment_length = 20
num_lengths = np.random.randint(1, max_segments + 1)
lengths = np.random.randint(0, max_segment_length + 1,
size=num_lengths).astype(np.int32)
num_indices = np.sum(lengths)
def lengths_to_offsets(t, offset_type=np.int64, use_begin_offset=True):
"""
Convert lengths to offsets
"""
tt = np.zeros((t.shape[0] + 1,), dtype=offset_type)
tt[1:] = t
tt = torch.from_numpy(np.cumsum(tt, dtype=offset_type))
if use_begin_offset:
return tt[:-1]
return tt[1:]
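# Worked example (illustrative only, not used by the test): for lengths
# [2, 3, 1] the cumulative sums are [0, 2, 5, 6], so lengths_to_offsets
# returns [0, 2, 5] with use_begin_offset=True and [2, 5, 6] otherwise.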
offsets = lengths_to_offsets(lengths)
indices = torch.from_numpy(np.random.randint(
low=0, high=num_embeddings, size=num_indices, dtype=np.int64))
q_weights = pt_prepack_op(weights)
per_sample_weights = torch.from_numpy(np.random.uniform(
low=0.01, high=0.5, size=[len(indices)]).astype(np.float32)) if \
enable_per_sample_weights else None
if include_last_offset:
offsets = torch.cat(
(offsets, torch.tensor([indices.size(0)], dtype=torch.long)), 0
)
# Reference result will be the floating point torch.nn.EmbeddingBag.
def get_reference_result(
num_embeddings, embedding_dim,
include_last_offset, weights, per_sample_weights,
indices, offsets):
embedding_bag = torch.nn.EmbeddingBag(
num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
include_last_offset=include_last_offset, _weight=weights,
scale_grad_by_freq=False, mode='sum'
)
return embedding_bag(indices, offsets,
per_sample_weights=per_sample_weights)
mapping_table = np.zeros(num_embeddings, dtype=np.int32)
pruned_weights = weights
prune_weights = sparsity > 0
if prune_weights:
if fallback_to_no_sparse:
# Test that pruned weights with a mapping_table of {0} fall back
# to the non-sparse embedding lookup kernel.
mapping_table = np.zeros(1, dtype=np.int32)
else:
# Prune and generate mapping table
num_compressed_rows = 0
unpruned_ids = []
for i in range(num_embeddings):
if np.random.uniform() < sparsity:
mapping_table[i] = -1
q_weights[i, :] = 0
weights[i, :] = 0
else:
mapping_table[i] = num_compressed_rows
num_compressed_rows += 1
unpruned_ids.append(i)
q_weights = q_weights[unpruned_ids]
pruned_weights = weights[unpruned_ids]
result = pt_op(q_weights,
indices.int() if use_32bit_indices else indices,
offsets.int() if use_32bit_offsets else offsets,
mode=0,
pruned_weights=prune_weights,
per_sample_weights=per_sample_weights,
compressed_indices_mapping=torch.tensor(mapping_table),
include_last_offset=include_last_offset)
reference_result = get_reference_result(
num_embeddings, embedding_dim, include_last_offset, weights,
per_sample_weights, indices, offsets)
torch.testing.assert_close(reference_result, result, atol=atol, rtol=rtol)
if bit_rate == 8 or bit_rate == 4:
# Test operator that accepts TorchBind packed weights.
if bit_rate == 4:
qdtype = torch.quint4x2
op = torch.ops.quantized.embedding_bag_4bit
else:
qdtype = torch.quint8
op = torch.ops.quantized.embedding_bag_byte
obs = PerChannelMinMaxObserver(dtype=qdtype, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0)
obs(pruned_weights)
# Get the scale and zero point for the weight tensor
qparams = obs.calculate_qparams()
# Quantize the weights (8-bit, or 4-bit packed when bit_rate == 4)
qweight = torch.quantize_per_channel(pruned_weights, qparams[0], qparams[1], axis=0, dtype=qdtype)
packed_weight = torch.ops.quantized.embedding_bag_prepack(qweight)
result = op(packed_weight, indices, offsets, mode=0,
pruned_weights=prune_weights,
per_sample_weights=per_sample_weights,
compressed_indices_mapping=torch.tensor(mapping_table),
include_last_offset=include_last_offset)
torch.testing.assert_close(reference_result, result, atol=atol, rtol=rtol)
""" Tests the correctness of the embedding_bag_8bit quantized operator """
@given(num_embeddings=st.integers(10, 100),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 4 == 0),
num_offsets=st.integers(1, 20),
use_32bit_indices=st.booleans(),
use_32bit_offsets=st.booleans(),
enable_per_sample_weights=st.booleans(),
include_last_offset=st.booleans(),
fallback_to_no_sparse=st.booleans(),
sparsity=st.sampled_from([0.0, 0.5, 0.7]))
def test_embedding_bag_byte(self, num_embeddings,
embedding_dim, num_offsets,
use_32bit_indices,
use_32bit_offsets,
enable_per_sample_weights,
include_last_offset,
fallback_to_no_sparse,
sparsity):
self.embedding_bag_rowwise_offsets_run(
8, num_embeddings, embedding_dim, num_offsets,
use_32bit_indices, use_32bit_offsets,
enable_per_sample_weights, include_last_offset,
fallback_to_no_sparse,
sparsity=sparsity, atol=0.005, rtol=1e-3)
""" Tests the correctness of the embedding_bag_4bit quantized operator """
@given(num_embeddings=st.integers(10, 100),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 4 == 0),
num_offsets=st.integers(1, 20),
use_32bit_indices=st.booleans(),
use_32bit_offsets=st.booleans(),
enable_per_sample_weights=st.booleans(),
include_last_offset=st.booleans(),
fallback_to_no_sparse=st.booleans(),
sparsity=st.sampled_from([0.0, 0.5, 0.7]))
def test_embedding_bag_4bit(self, num_embeddings,
embedding_dim, num_offsets,
use_32bit_indices,
use_32bit_offsets,
enable_per_sample_weights,
include_last_offset,
fallback_to_no_sparse,
sparsity):
self.embedding_bag_rowwise_offsets_run(4, num_embeddings,
embedding_dim, num_offsets,
use_32bit_indices, use_32bit_offsets,
enable_per_sample_weights,
include_last_offset,
fallback_to_no_sparse,
sparsity=sparsity,
atol=0.1, rtol=1e-2)
""" Tests the correctness of the embedding_bag_2bit quantized operator """
@given(num_embeddings=st.integers(10, 100),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 8 == 0),
num_offsets=st.integers(1, 20),
use_32bit_indices=st.booleans(),
use_32bit_offsets=st.booleans(),
enable_per_sample_weights=st.booleans(),
include_last_offset=st.booleans(),
fallback_to_no_sparse=st.booleans(),
sparsity=st.sampled_from([0.0, 0.5, 0.7]))
def test_embedding_bag_2bit(self, num_embeddings,
embedding_dim, num_offsets,
use_32bit_indices,
use_32bit_offsets,
enable_per_sample_weights,
include_last_offset,
fallback_to_no_sparse,
sparsity):
self.embedding_bag_rowwise_offsets_run(2, num_embeddings,
embedding_dim, num_offsets,
use_32bit_indices, use_32bit_offsets,
enable_per_sample_weights,
include_last_offset,
fallback_to_no_sparse,
sparsity=sparsity,
atol=1.0, rtol=1e-1)
""" Tests the correctness of the quantized 8 bit embedding lookup operator """
@given(num_embeddings=st.integers(10, 100),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 4 == 0))
def test_embedding(self, num_embeddings, embedding_dim):
dtypes = [torch.quint8, torch.quint4x2]
quant_ops = [torch.ops.quantized.embedding_byte, torch.ops.quantized.embedding_4bit]
atols = [0.005, 0.1]
rtols = [1e-3, 1e-2]
prepack_op = torch.ops.quantized.embedding_bag_prepack
for quant_op, dtype, atol, rtol in zip(quant_ops, dtypes, atols, rtols):
weights = torch.from_numpy((np.random.random_sample((
num_embeddings, embedding_dim)) + 1).astype(np.float32))
obs = PerChannelMinMaxObserver(dtype=dtype, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0)
obs(weights)
# Get the scale and zero point for the weight tensor
qparams = obs.calculate_qparams()
# Quantize the weights to the target dtype (8-bit or 4-bit packed)
qweight = torch.quantize_per_channel(weights, qparams[0], qparams[1], axis=0, dtype=dtype)
max_segments = 5
max_segment_length = 20
num_lengths = np.random.randint(1, max_segments + 1)
lengths = np.random.randint(1, max_segment_length + 1,
size=num_lengths).astype(np.int32)
num_indices = np.sum(lengths)
indices = torch.from_numpy(np.random.randint(
low=0, high=num_embeddings, size=num_indices, dtype=np.int64))
packed_weight = prepack_op(qweight)
qresult = quant_op(packed_weight, indices, pruned_weights=False)
ref = torch.embedding(weights, indices, padding_idx=-1, scale_grad_by_freq=False, sparse=False)
torch.testing.assert_close(ref, qresult, atol=atol, rtol=rtol)
def test_embedding_2d_indices(self):
"""
Tests the case where 2D indices are passed into the operator
In this case the operator computes the correct offsets argument.
Output shape is dependent on the indices dimension.
"""
quant_op = torch.ops.quantized.embedding_byte
prepack_op = torch.ops.quantized.embedding_bag_prepack
indices = torch.tensor([[9, 6, 5, 7, 8, 8, 9, 2, 8, 6, 6, 9, 1, 6, 8, 8], [3, 2, 3, 6, 3, 6, 5, 7, 0, 8, 4, 6, 5, 8, 2, 3]])
weights = torch.randn(10, 12, dtype=torch.float32)
ref = torch.embedding(weights, indices, padding_idx=-1, scale_grad_by_freq=False, sparse=False)
obs = PerChannelMinMaxObserver(dtype=torch.quint8, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0)
obs(weights)
qparams = obs.calculate_qparams()
qweight = torch.quantize_per_channel(weights, qparams[0], qparams[1], axis=0, dtype=torch.quint8)
packed_weight = prepack_op(qweight)
qresult = quant_op(packed_weight, indices, pruned_weights=False)
torch.testing.assert_close(ref, qresult, atol=0.05, rtol=1e-3)
def test_embedding_bag_2d_indices(self):
"""
Tests the case where 2D indices are passed into the operator
In this case the operator computes the correct offsets argument.
"""
indices = torch.tensor([[9, 6, 5, 7, 8, 8, 9, 2, 8, 6, 6, 9, 1, 6, 8, 8], [3, 2, 3, 6, 3, 6, 5, 7, 0, 8, 4, 6, 5, 8, 2, 3]])
weights = torch.randn(10, 12, dtype=torch.float32)
embedding_bag = torch.nn.EmbeddingBag(
num_embeddings=10,
embedding_dim=12,
include_last_offset=False, _weight=weights,
scale_grad_by_freq=False, mode='sum'
)
result = embedding_bag(indices)
pt_op = torch.ops.quantized.embedding_bag_byte_rowwise_offsets
pt_prepack_op = torch.ops.quantized.embedding_bag_byte_prepack
q_weights = pt_prepack_op(weights)
qresult = pt_op(q_weights, indices, mode=0, pruned_weights=False)
torch.testing.assert_close(result, qresult, atol=0.05, rtol=1e-3)
# Test TorchBind based embedding_bag operator
obs = PerChannelMinMaxObserver(dtype=torch.quint8, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0)
obs(weights)
# Get the scale and zero point for the weight tensor
qparams = obs.calculate_qparams()
# Quantize the weights to 8bits
qweight = torch.quantize_per_channel(weights, qparams[0], qparams[1], axis=0, dtype=torch.quint8)
packed_weight = torch.ops.quantized.embedding_bag_prepack(qweight)
qresult = torch.ops.quantized.embedding_bag_byte(packed_weight, indices, mode=0)
torch.testing.assert_close(result, qresult, atol=0.05, rtol=1e-3)
class TestQuantizedConv(TestCase):
def _test_qconv_unpack_impl(self, qconv_prepack_fn, qconv_unpack_fn, inputs,
strides, i_pads, o_pads, channelwise):
(X_data, W_data, bias_data, groups, transposed) = inputs
(X, (X_scale, X_zero_point, X_qtype)) = X_data
(W, (W_scale, W_zero_point, W_qtype)) = W_data
(bias, (bias_scale, bias_zero_point, bias_qtype)) = bias_data
W = torch.from_numpy(W).float()
bias = torch.from_numpy(bias).float()
if channelwise and transposed:
# currently transposed conv with per-channel quantization does not work
return
# ONEDNN only supports symmetric quantization of weight and zero output padding
if qengine_is_onednn():
W_zero_point = 0
o_pads = len(o_pads) * [0] if o_pads is not None else None
if channelwise:
if transposed:
output_channels = W.shape[1] # IC OC/G
else:
output_channels = W.shape[0] # OC IC/G
W_scale = torch.tensor([W_scale] * output_channels)
W_zero_point = torch.tensor([W_zero_point] * output_channels)
W_q = torch.quantize_per_channel(
W, scales=W_scale, zero_points=W_zero_point,
axis=int(transposed), dtype=W_qtype)
else:
W_q = torch.quantize_per_tensor(
W, scale=W_scale, zero_point=W_zero_point, dtype=W_qtype)
if isinstance(strides, int):
dilations = [1]
else:
dilations = (1,) * len(strides)
if transposed:
W_packed = qconv_prepack_fn(W_q, bias, strides, i_pads, o_pads,
dilations, groups)
else:
W_packed = qconv_prepack_fn(W_q, bias, strides, i_pads, dilations,
groups)
(W_unpacked, bias) = qconv_unpack_fn(W_packed)
# Assert equal
np.testing.assert_equal(W_q.int_repr().numpy(),
W_unpacked.int_repr().numpy())
if channelwise:
np.testing.assert_array_almost_equal(
np.float32(W_q.q_per_channel_scales().numpy()),
np.float32(W_unpacked.q_per_channel_scales().numpy()),
decimal=4)
np.testing.assert_equal(W_q.q_per_channel_zero_points(
).numpy(), W_unpacked.q_per_channel_zero_points().numpy())
else:
np.testing.assert_equal(np.float32(
W_q.q_scale()), np.float32(W_unpacked.q_scale()))
np.testing.assert_equal(
W_q.q_zero_point(), W_unpacked.q_zero_point())
def _make_qconv_tensors(
self, batch_size, input_channels_per_group, input_feature_map_shape,
output_channels_per_group, groups, kernels, strides, pads, dilations,
X_scale, X_zero_point, W_scale, W_zero_point,
use_bias, use_channelwise, use_transpose,
device=torch.device("cpu"),
input_dtype=torch.quint8,
weight_dtype=torch.qint8,
):
assert not (use_channelwise and use_transpose), \
"Cannot generate channelwise qconv_transpose_tensors "
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
# Padded input size should be at least as big as dilated kernel
kernels = _single(kernels)
strides = _single(strides)
pads = _single(pads)
dilations = _single(dilations)
for i in range(len(kernels)):
assume(input_feature_map_shape[i] + 2 * pads[i]
>= dilations[i] * (kernels[i] - 1) + 1)
W_scale = W_scale * output_channels
W_zero_point = W_zero_point * output_channels
# Truncate the W_scale and W_zero_point lists to exactly output_channels entries
W_scale = W_scale[:output_channels]
W_zero_point = W_zero_point[:output_channels]
# For testing, we use small values for weights and activations so that
# no overflow occurs in the vpmaddubsw instruction. If overflow occurs
# in the qconv implementation but not in the reference, the results
# cannot be matched exactly against the reference.
# Please see the comment in the qconv implementation file
# aten/src/ATen/native/quantized/cpu/qconv.cpp for more details.
(W_value_min, W_value_max) = (-5, 5)
# The operator expects the weights in the format
# (output_channels, input_channels/groups, kernel_d, kernel_h, kernel_w) for regular conv
# and (input_channels, output_channels/groups, kernel_d, kernel_h, kernel_w) for transposed conv.
if use_transpose:
output_shape = (input_channels, output_channels_per_group,)
else:
output_shape = (output_channels, input_channels_per_group,)
W_init = torch.randint(
W_value_min,
W_value_max,
output_shape + kernels,
device=device,
)
b_init = torch.randint(0, 10, (output_channels,), device=device)
(X_value_min, X_value_max) = (0, 4)
X_init = torch.randint(
X_value_min,
X_value_max,
(batch_size, input_channels,) + input_feature_map_shape,
device=device
)
X = X_scale * (X_init - X_zero_point).float()
if use_channelwise:
W_shape = (-1, 1) + (1,) * len(kernels)
W_scales_tensor = torch.tensor(W_scale, dtype=torch.float, device=device)
W_zero_points_tensor = torch.tensor(W_zero_point, dtype=torch.float, device=device)
W = W_scales_tensor.reshape(*W_shape) * (
W_init.float() - W_zero_points_tensor.reshape(*W_shape)).float()
b = X_scale * W_scales_tensor * b_init.float()
else:
W = W_scale[0] * (W_init - W_zero_point[0]).float()
b = X_scale * W_scale[0] * b_init.float()
X_q = torch.quantize_per_tensor(
X, scale=X_scale, zero_point=X_zero_point, dtype=input_dtype)
if use_channelwise:
W_q = torch.quantize_per_channel(
W, W_scales_tensor, W_zero_points_tensor.long(), 0,
dtype=weight_dtype)
else:
W_q = torch.quantize_per_tensor(
W, scale=W_scale[0], zero_point=W_zero_point[0],
dtype=weight_dtype)
bias_float = b if use_bias else None
return (X, W), (X_q, W_q), bias_float
def _test_qconv_impl(
self, qconv_fn, qconv_prepack_fn, conv_op, batch_size,
input_channels_per_group, input_feature_map_shape,
output_channels_per_group, groups, kernels, strides, pads, o_pads,
dilations, X_scale, X_zero_point, W_scale, W_zero_point, Y_scale,
Y_zero_point, use_bias, use_relu, use_channelwise, use_transpose,
device=torch.device("cpu"),
input_dtype=torch.quint8,
weight_dtype=torch.qint8,
output_dtype=torch.quint8,
):
# ONEDNN only supports symmetric quantization of weight
if qengine_is_onednn() and W_zero_point is not None:
W_zero_point = len(W_zero_point) * [0]
(X, W), (X_q, W_q), bias_float = self._make_qconv_tensors(
batch_size, input_channels_per_group, input_feature_map_shape,
output_channels_per_group, groups, kernels,
strides, pads, dilations, X_scale, X_zero_point, W_scale,
W_zero_point, use_bias, use_channelwise, use_transpose,
device=device, input_dtype=input_dtype, weight_dtype=weight_dtype)
if bias_float is not None:
bias_float = bias_float.to(device)
# Assign weights
W = W_q.dequantize()
X = X_q.dequantize()
conv_op.weight = torch.nn.Parameter(W, requires_grad=False)
conv_op.bias = torch.nn.Parameter(
bias_float, requires_grad=False) if use_bias else None
result_ref = conv_op(X)
if use_relu:
assert not use_transpose, "Cannot fuse ReLU with ConvTranspose"
relu = torch.nn.ReLU()
result_ref = relu(result_ref)
# Quantize reference results for comparison
result_ref_q = torch.quantize_per_tensor(
result_ref, scale=Y_scale, zero_point=Y_zero_point,
dtype=output_dtype)
if qconv_prepack_fn is not None:
if use_transpose:
W_prepack = qconv_prepack_fn(
W_q, bias_float, strides, pads, o_pads, dilations, groups)
else:
W_prepack = qconv_prepack_fn(
W_q, bias_float, strides, pads, dilations, groups)
Y_q = qconv_fn(
X_q,
W_prepack,
Y_scale,
Y_zero_point,
)
else:
# quantized conv op without prepacking
Y_q = qconv_fn(X_q, W_q, bias_float, strides, pads, dilations, groups, Y_scale, Y_zero_point)
# Make sure the results match
# assert_array_almost_equal compares using the following formula:
# abs(desired-actual) < 1.5 * 10**(-decimal)
# (https://docs.scipy.org/doc/numpy/reference/generated/numpy.testing.assert_almost_equal.html)
# We use decimal = 0 to ignore off-by-1 differences between
# reference and test. Off-by-1 differences arise due to the order of
# round and zero_point addition operation, i.e., if addition
# followed by round is used by reference and round followed by
# addition is used by test, the results may differ by 1.
# For example, the result of round(2.5) + 1 is 3 while
# round(2.5 + 1) is 4 assuming the rounding mode is
# round-to-nearest, ties-to-even.
np.testing.assert_array_almost_equal(
result_ref_q.int_repr().cpu().numpy(), Y_q.int_repr().cpu().numpy(), decimal=0,
err_msg=f'''X: {X_q}, W: {W_q}, b: {bias_float}, strides: {strides},
pads: {pads}, o_pads: {o_pads}, dilations: {dilations},
groups: {groups}, y_s: {Y_scale}, y_zp: {Y_zero_point}''')
# Return the quantized data for later reuse
return X_q, W_q, bias_float
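# A small standalone sketch (not used by the tests) of the off-by-1 effect
# that motivates decimal=0 above, assuming round-to-nearest, ties-to-even:
#     x = torch.tensor([2.5])
#     torch.round(x) + 1    # round(2.5) -> 2.0, so the result is 3.0
#     torch.round(x + 1)    # round(3.5) -> 4.0
# The two orderings differ by exactly 1 whenever the value lands on a tie.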
"""Tests the correctness of quantized convolution op."""
@given(batch_size=st.integers(1, 3),
input_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
height=st.integers(10, 16),
width=st.integers(7, 14),
output_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
groups=st.integers(1, 3),
kernel_h=st.integers(1, 7),
kernel_w=st.integers(1, 7),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_relu=st.booleans(),
use_channelwise=st.booleans())
@override_qengines
def test_qconv2d(
self,
batch_size,
input_channels_per_group,
height,
width,
output_channels_per_group,
groups,
kernel_h,
kernel_w,
stride_h,
stride_w,
pad_h,
pad_w,
dilation,
X_scale,
X_zero_point,
W_scale,
W_zero_point,
Y_scale,
Y_zero_point,
use_bias,
use_relu,
use_channelwise,
):
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
kernels = (kernel_h, kernel_w)
strides = (stride_h, stride_w)
pads = (pad_h, pad_w)
dilations = (dilation, dilation)
qconv = torch.ops.quantized.conv2d
if use_relu:
qconv = torch.ops.quantized.conv2d_relu
qconv_prepack = torch.ops.quantized.conv2d_prepack
conv_op = torch.nn.Conv2d(
input_channels,
output_channels,
kernels,
strides,
pads,
dilations,
groups,
)
act_qdtypes = [torch.quint8]
# Only the qnnpack qengine supports qint8
if qengine_is_qnnpack() and torch.backends.xnnpack.enabled:
act_qdtypes.append(torch.qint8)
for X_qdtype in act_qdtypes:
if X_qdtype == torch.qint8:
W_zero_point = [0 for i in range(len(W_zero_point))]
self._test_qconv_impl(
qconv, qconv_prepack, conv_op, batch_size,
input_channels_per_group, (height, width),
output_channels_per_group, groups, kernels, strides, pads, None,
dilations, X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, use_relu, use_channelwise, False, input_dtype=X_qdtype, output_dtype=X_qdtype)
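# Standalone usage sketch (illustrative shapes and qparams, assuming an
# fbgemm/qnnpack build is available) of the prepack-then-run flow exercised above:
#     x_q = torch.quantize_per_tensor(torch.randn(1, 3, 8, 8), 0.1, 0, torch.quint8)
#     w_q = torch.quantize_per_tensor(torch.randn(4, 3, 3, 3), 0.1, 0, torch.qint8)
#     packed = torch.ops.quantized.conv2d_prepack(w_q, None, [1, 1], [0, 0], [1, 1], 1)
#     y_q = torch.ops.quantized.conv2d(x_q, packed, 0.2, 0)  # output scale, zero_point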
# TODO: merge this test with test_qconv2d when CUDNN runtime flags become available
"""Tests the correctness of quantized 2D convolution cudnn op."""
@given(batch_size=st.integers(1, 3),
# cudnn only supports multiples of 4, but we have explicitly added padding on the backend
input_channels_per_group=st.integers(1, 32),
height=st.integers(10, 16),
width=st.integers(7, 14),
# cudnn only supports multiples of 4, but we have explicitly added padding on the backend
output_channels_per_group=st.integers(1, 32),
groups=st.integers(1, 1), # currently padding only supports groups=1
kernel_h=st.integers(1, 7),
kernel_w=st.integers(1, 7),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
# result for dilation == 2 is not correct
# dilation=st.integers(1, 2),
# currently cudnn has only been verified to work for dilation = 1
# TODO: check backend works for dilation > 1
dilation=st.integers(1, 1),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.sampled_from([0]),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(0, 0), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.sampled_from([0]),
use_bias=st.booleans(),
use_relu=st.booleans(),
# TODO: enable channelwise
use_channelwise=st.sampled_from([False]))
@skipIfNoFBGEMM
@unittest.skipIf(not TEST_CUDNN, "cudnn is not enabled.")
@unittest.skip("Local only - currently the qconv2d_cudnn op is bulid "
"with USE_EXPERIMENTAL_CUDNN_V8_API, we can enable the test "
"after it is built by default")
def test_qconv2d_cudnn(
self,
batch_size,
input_channels_per_group,
height,
width,
output_channels_per_group,
groups,
kernel_h,
kernel_w,
stride_h,
stride_w,
pad_h,
pad_w,
dilation,
X_scale,
X_zero_point,
W_scale,
W_zero_point,
Y_scale,
Y_zero_point,
use_bias,
use_relu,
use_channelwise,
):
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
kernels = (kernel_h, kernel_w)
strides = (stride_h, stride_w)
pads = (pad_h, pad_w)
dilations = (dilation, dilation)
if use_relu:
qconv = torch.ops.quantized.conv2d_relu
else:
qconv = torch.ops.quantized.conv2d
conv_op = torch.nn.Conv2d(
input_channels,
output_channels,
kernels,
strides,
pads,
dilations,
groups,
).to(torch.device("cuda"))
self._test_qconv_impl(
qconv, torch.ops.quantized.conv2d_prepack, conv_op, batch_size,
input_channels_per_group, (height, width),
output_channels_per_group, groups, kernels, strides, pads, None,
dilations, X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, use_relu, use_channelwise, False,
device=torch.device("cuda"),
input_dtype=torch.qint8, weight_dtype=torch.qint8, output_dtype=torch.qint8)
@unittest.skip("used for local benchmarking, comment when we want to run it")
def test_benchmark(self):
batch_size = 16
in_channel = 64
out_channel = 64
kernel_size = 3
height = 256
width = 256
print(
"parameters:",
"batch_size:", batch_size,
"in_channel:", in_channel,
"out_channel:", out_channel,
"kernel_size:", kernel_size,
"height:", height,
"widht:", width
)
conv = torch.nn.Conv2d(in_channel, out_channel, kernel_size).cuda()
input = torch.randn((batch_size, in_channel, height, width), device='cuda')
weight = conv.weight.detach()
stride = (1, 1)
padding = (0, 0)
dilation = (1, 1)
groups = 1
conv_op = torch.nn.functional.conv2d
# profile
from torch.profiler import profile, ProfilerActivity
def trace_handler(p):
output = p.key_averages().table(sort_by="self_cpu_time_total", row_limit=10)
p.export_chrome_trace("/tmp/trace_" + str(p.step_num) + ".json")
my_schedule = torch.profiler.schedule(
wait=5,
warmup=5,
active=20)
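# With wait=5, warmup=5, active=20, the first 5 prof.step() calls are skipped,
# the next 5 run the profiler without recording, and the remaining 20
# iterations are recorded; trace_handler fires at the end of the active window.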
# fp32 benchmark
with profile(
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
schedule=my_schedule,
on_trace_ready=trace_handler) as prof:
for i in range(30):
conv_op(input, weight, None, stride, padding, dilation, groups)
prof.step()
print("fp32 benchmark result:")
print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=10))
# fp16 benchmark
input_fp16 = input.to(torch.float16)
weight_fp16 = weight.to(torch.float16)
with profile(
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
schedule=my_schedule,
on_trace_ready=trace_handler) as prof:
for i in range(30):
conv_op(input_fp16, weight_fp16, None, stride, padding, dilation, groups)
prof.step()
print("fp16 benchmark result:")
print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=10))
input_int8 = torch.quantize_per_tensor(input, 1, 0, torch.qint8).contiguous(memory_format=torch.channels_last)
weight_int8 = torch.quantize_per_tensor(weight, 1, 0, torch.qint8).contiguous(memory_format=torch.channels_last)
scale = 1.0
zero_point = 0
conv_op = torch.ops.quantized.conv2d
weight_prepacked = torch.ops.quantized.conv2d_prepack(weight_int8, None, stride, padding, dilation, groups)
with profile(
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
schedule=my_schedule,
on_trace_ready=trace_handler) as prof:
for i in range(30):
conv_op(input_int8, weight_prepacked, scale, zero_point)
prof.step()
print("int8 benchmark result:")
print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=10))
"""Tests the correctness of quantized convolution op."""
@given(batch_size=st.integers(1, 3),
input_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
width=st.integers(7, 14),
output_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
groups=st.integers(1, 3),
kernel=st.integers(1, 7),
stride=st.integers(1, 2),
pad=st.integers(0, 2),
o_pad=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans())
@override_qengines
def test_qconv_transpose1d(
self,
batch_size,
input_channels_per_group,
width,
output_channels_per_group,
groups,
kernel,
stride,
pad,
o_pad,
dilation,
X_scale,
X_zero_point,
W_scale,
W_zero_point,
Y_scale,
Y_zero_point,
use_bias):
if not qengine_is_qnnpack():
return  # Currently only QNNPACK is supported
if qengine_is_qnnpack() and (IS_PPC or TEST_WITH_UBSAN):
return # QNNPACK doesn't support these
assume(o_pad < stride and o_pad < dilation)
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
kernels = (kernel,)
strides = (stride,)
pads = (pad,)
o_pads = (o_pad,)
dilations = (dilation,)
qconv = torch.ops.quantized.conv_transpose1d
qconv_prepack = torch.ops.quantized.conv_transpose1d_prepack
conv_op = torch.nn.ConvTranspose1d(
in_channels=input_channels,
out_channels=output_channels,
kernel_size=kernels,
stride=strides,
padding=pads,
output_padding=o_pads,
groups=groups,
dilation=dilations,
bias=use_bias
)
act_qdtypes = [torch.quint8]
# Only the qnnpack qengine supports qint8
if qengine_is_qnnpack() and torch.backends.xnnpack.enabled:
act_qdtypes.append(torch.qint8)
for X_qdtype in act_qdtypes:
if X_qdtype == torch.qint8:
W_zero_point = [0 for i in range(len(W_zero_point))]
X_q, W_q, bias_float = self._test_qconv_impl(
qconv, qconv_prepack, conv_op, batch_size,
input_channels_per_group, (width, ),
output_channels_per_group, groups, kernels, strides, pads, o_pads,
dilations, X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, use_relu=False,
use_channelwise=False, use_transpose=True, input_dtype=X_qdtype, output_dtype=X_qdtype)
# check that this doesn't error
test_conv = torch.nn.quantized.ConvTranspose1d(input_channels, output_channels, 1)
test_conv.scale = Y_scale
test_conv(X_q)
# Test the module implementation
qconv_op = torch.nn.quantized.ConvTranspose1d(
in_channels=input_channels,
out_channels=output_channels,
kernel_size=kernels,
stride=strides,
padding=pads,
output_padding=o_pads,
groups=groups,
dilation=dilations,
bias=use_bias
)
qconv_op.scale = Y_scale
qconv_op.zero_point = Y_zero_point
qconv_op.set_weight_bias(W_q, bias_float)
Y_dq_ref = conv_op(X_q.dequantize())
Y_q_ref = torch.quantize_per_tensor(Y_dq_ref, scale=Y_scale,
zero_point=Y_zero_point,
dtype=X_qdtype)
Y_q = qconv_op(X_q)
self.assertEqual(Y_q_ref, Y_q)
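# Note: the o_pad < stride and o_pad < dilation assumption above mirrors the
# ConvTranspose requirement that output_padding be smaller than either stride
# or dilation; the 1D output length is
#     L_out = (L_in - 1) * stride - 2 * pad + dilation * (kernel - 1) + o_pad + 1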
"""Tests the correctness of quantized convolution op."""
@given(batch_size=st.integers(1, 3),
input_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
height=st.integers(10, 16),
width=st.integers(7, 14),
output_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
groups=st.integers(1, 3),
kernel_h=st.integers(1, 7),
kernel_w=st.integers(1, 7),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
o_pad_h=st.integers(0, 2),
o_pad_w=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans())
@override_qengines
@unittest.skip(
"this is broken without changes to any relevant code, "
"we need to remove hypothesis testing in CI")
def test_qconv_transpose2d(
self,
batch_size,
input_channels_per_group,
height,
width,
output_channels_per_group,
groups,
kernel_h,
kernel_w,
stride_h,
stride_w,
pad_h,
pad_w,
o_pad_h,
o_pad_w,
dilation,
X_scale,
X_zero_point,
W_scale,
W_zero_point,
Y_scale,
Y_zero_point,
use_bias):
if qengine_is_qnnpack() and (IS_PPC or TEST_WITH_UBSAN):
return # QNNPACK doesn't support these
# ONEDNN does not support output paddings
if qengine_is_onednn() and (o_pad_h, o_pad_w) != (0, 0):
return
assume(o_pad_h < stride_h and o_pad_h < dilation)
assume(o_pad_w < stride_w and o_pad_w < dilation)
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
kernels = (kernel_h, kernel_w)
strides = (stride_h, stride_w)
pads = (pad_h, pad_w)
o_pads = (o_pad_h, o_pad_w)
dilations = (dilation, dilation)
qconv = torch.ops.quantized.conv_transpose2d
qconv_prepack = torch.ops.quantized.conv_transpose2d_prepack
conv_op = torch.nn.ConvTranspose2d(
in_channels=input_channels,
out_channels=output_channels,
kernel_size=kernels,
stride=strides,
padding=pads,
output_padding=o_pads,
groups=groups,
dilation=dilations,
bias=use_bias
)
act_qdtypes = [torch.quint8]
# Only the qnnpack qengine supports qint8
if qengine_is_qnnpack() and torch.backends.xnnpack.enabled:
act_qdtypes.append(torch.qint8)
for X_qdtype in act_qdtypes:
if X_qdtype == torch.qint8:
W_zero_point = [0 for i in range(len(W_zero_point))]
X_q, W_q, bias_float = self._test_qconv_impl(
qconv, qconv_prepack, conv_op, batch_size,
input_channels_per_group, (height, width),
output_channels_per_group, groups, kernels, strides, pads, o_pads,
dilations, X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, use_relu=False,
use_channelwise=False, use_transpose=True, input_dtype=X_qdtype, output_dtype=X_qdtype)
# check that this doesn't error
test_conv = torch.nn.quantized.ConvTranspose2d(input_channels, output_channels, 1)
test_conv.scale = Y_scale
test_conv(X_q)
# Test the module implementation
qconv_op = torch.nn.quantized.ConvTranspose2d(
in_channels=input_channels,
out_channels=output_channels,
kernel_size=kernels,
stride=strides,
padding=pads,
output_padding=o_pads,
groups=groups,
dilation=dilations,
bias=use_bias
)
qconv_op.scale = Y_scale
qconv_op.zero_point = Y_zero_point
qconv_op.set_weight_bias(W_q, bias_float)
Y_dq_ref = conv_op(X_q.dequantize())
Y_q_ref = torch.quantize_per_tensor(Y_dq_ref, scale=Y_scale,
zero_point=Y_zero_point,
dtype=X_qdtype)
Y_q = qconv_op(X_q)
self.assertEqual(Y_q_ref, Y_q)
"""Tests the correctness of quantized convolution op."""
@given(batch_size=st.integers(1, 3),
input_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
time=st.integers(2, 5),
height=st.integers(10, 16),
width=st.integers(7, 14),
output_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
groups=st.integers(1, 3),
kernel_t=st.integers(1, 7),
kernel_h=st.integers(1, 7),
kernel_w=st.integers(1, 7),
stride_t=st.integers(1, 2),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_t=st.integers(0, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
o_pad_t=st.integers(0, 2),
o_pad_h=st.integers(0, 2),
o_pad_w=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans())
@override_qengines
@unittest.skip(
"this is broken without changes to any relevant code, "
"we need to remove hypothesis testing in CI")
def test_qconv_transpose3d(
self,
batch_size,
input_channels_per_group,
time,
height,
width,
output_channels_per_group,
groups,
kernel_t,
kernel_h,
kernel_w,
stride_t,
stride_h,
stride_w,
pad_t,
pad_h,
pad_w,
o_pad_t,
o_pad_h,
o_pad_w,
dilation,
X_scale,
X_zero_point,
W_scale,
W_zero_point,
Y_scale,
Y_zero_point,
use_bias):
if qengine_is_qnnpack():
return # QNNPACK doesn't support this
# ONEDNN doesn't support output paddings
if qengine_is_onednn() and (o_pad_t, o_pad_h, o_pad_w) != (0, 0, 0):
return
assume(o_pad_t < stride_t or o_pad_t < dilation)
assume(o_pad_h < stride_h or o_pad_h < dilation)
assume(o_pad_w < stride_w or o_pad_w < dilation)
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
kernels = (kernel_t, kernel_h, kernel_w)
strides = (stride_t, stride_h, stride_w)
pads = (pad_t, pad_h, pad_w)
o_pads = (o_pad_t, o_pad_h, o_pad_w)
dilations = (dilation, dilation, dilation)
qconv = torch.ops.quantized.conv_transpose3d
qconv_prepack = torch.ops.quantized.conv_transpose3d_prepack
conv_op = torch.nn.ConvTranspose3d(
in_channels=input_channels,
out_channels=output_channels,
kernel_size=kernels,
stride=strides,
padding=pads,
output_padding=o_pads,
groups=groups,
dilation=dilations,
bias=use_bias
)
X_q, W_q, bias_float = self._test_qconv_impl(
qconv, qconv_prepack, conv_op, batch_size,
input_channels_per_group, (time, height, width),
output_channels_per_group, groups, kernels, strides, pads, o_pads,
dilations, X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, use_relu=False,
use_channelwise=False, use_transpose=True)
# check that this doesn't error
test_conv = torch.nn.quantized.ConvTranspose3d(input_channels, output_channels, 1)
test_conv.scale = Y_scale
test_conv(X_q)
# Test the module implementation
qconv_op = torch.nn.quantized.ConvTranspose3d(
in_channels=input_channels,
out_channels=output_channels,
kernel_size=kernels,
stride=strides,
padding=pads,
output_padding=o_pads,
groups=groups,
dilation=dilations,
bias=use_bias
)
qconv_op.scale = Y_scale
qconv_op.zero_point = Y_zero_point
qconv_op.set_weight_bias(W_q, bias_float)
Y_dq_ref = conv_op(X_q.dequantize())
Y_q_ref = torch.quantize_per_tensor(Y_dq_ref, scale=Y_scale,
zero_point=Y_zero_point,
dtype=torch.quint8)
Y_q = qconv_op(X_q)
self.assertEqual(Y_q_ref, Y_q)
@given(
inputs=hu.tensor_conv(
spatial_dim=1, batch_size_range=(1, 3),
input_channels_per_group_range=(1, 4),
output_channels_per_group_range=(1, 4), feature_map_range=(4, 8),
kernel_range=(1, 4), max_groups=4,
can_be_transposed=False,
qparams=[hu.qparams(dtypes=torch.quint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint32,
zero_point_min=0,
zero_point_max=0)]),
stride=st.integers(1, 3),
pad=st.integers(1, 2),
o_pad=st.integers(1, 2),
channelwise=st.booleans())
@override_qengines
def test_qconv1d_unpack(self, inputs, stride, pad, o_pad, channelwise):
transposed = inputs[-1]
qengine = torch.backends.quantized.engine
if qengine not in supported_qengines:
return
if qengine == 'qnnpack':
assume(not channelwise) # QNNPACK doesn't support channelwise
else:
assume(not transposed) # Only QNNPACK supports transposed conv
if transposed:
qconv_prepack = torch.ops.quantized.conv_transpose1d_prepack
qconv_unpack = torch.ops.quantized.conv_transpose1d_unpack
else:
qconv_prepack = torch.ops.quantized.conv1d_prepack
qconv_unpack = torch.ops.quantized.conv1d_unpack
self._test_qconv_unpack_impl(
qconv_prepack, qconv_unpack, inputs, [stride],
[pad], [o_pad], channelwise)
@given(
inputs=hu.tensor_conv(
spatial_dim=2, batch_size_range=(1, 3),
input_channels_per_group_range=(1, 4),
output_channels_per_group_range=(1, 4), feature_map_range=(4, 8),
kernel_range=(1, 4), max_groups=4,
can_be_transposed=True,
qparams=[hu.qparams(dtypes=torch.quint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint32,
zero_point_min=0,
zero_point_max=0)]),
stride=st.integers(1, 3),
pad=st.integers(0, 2),
o_pad=st.integers(0, 2),
channelwise=st.booleans())
@override_qengines
def test_qconv2d_unpack(self, inputs, stride, pad, o_pad, channelwise):
transposed = inputs[-1]
qengine = torch.backends.quantized.engine
if qengine not in supported_qengines:
return
if qengine == 'qnnpack':
assume(not channelwise) # QNNPACK doesn't support channelwise
if transposed:
qconv_prepack = torch.ops.quantized.conv_transpose2d_prepack
qconv_unpack = torch.ops.quantized.conv_transpose2d_unpack
else:
qconv_prepack = torch.ops.quantized.conv2d_prepack
qconv_unpack = torch.ops.quantized.conv2d_unpack
self._test_qconv_unpack_impl(
qconv_prepack, qconv_unpack, inputs, [stride, stride],
[pad, pad], [o_pad, o_pad], channelwise)
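# The unpack tests delegate to _test_qconv_unpack_impl (defined earlier in this
# class, outside this excerpt); conceptually they check a round trip such as
#     packed = torch.ops.quantized.conv2d_prepack(W_q, bias, stride, pad, dilation, groups)
#     W_q_out, bias_out = torch.ops.quantized.conv2d_unpack(packed)
# and assert that the unpacked weight and bias match the originals.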
"""Tests the correctness of quantized 1D convolution op."""
@given(batch_size=st.integers(1, 6),
input_channels_per_group=st.sampled_from((2, 4, 5, 8, 16, 32)),
output_channels_per_group=st.sampled_from((2, 4, 5, 8, 16, 32)),
groups=st.integers(1, 3),
length=st.integers(4, 16),
kernel=st.integers(1, 7),
stride=st.integers(1, 2),
pad=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_relu=st.booleans(),
use_channelwise=st.booleans())
@override_qengines
def test_qconv1d(
self,
batch_size,
input_channels_per_group,
output_channels_per_group,
groups,
length,
kernel,
stride,
pad,
dilation,
X_scale,
X_zero_point,
W_scale,
W_zero_point,
Y_scale,
Y_zero_point,
use_bias,
use_relu,
use_channelwise,
):
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
if torch.backends.quantized.engine == 'qnnpack':
use_channelwise = False
conv1d = torch.nn.Conv1d(
input_channels,
output_channels,
kernel,
stride,
pad,
dilation,
groups,
)
qconv_prepack = torch.ops.quantized.conv1d_prepack
qconv = torch.ops.quantized.conv1d
if use_relu:
qconv = torch.ops.quantized.conv1d_relu
act_qdtypes = [torch.quint8]
# Only the qnnpack qengine supports qint8
if qengine_is_qnnpack() and torch.backends.xnnpack.enabled:
act_qdtypes.append(torch.qint8)
for X_qdtype in act_qdtypes:
if X_qdtype == torch.qint8:
W_zero_point = [0 for i in range(len(W_zero_point))]
self._test_qconv_impl(
qconv, qconv_prepack, conv1d, batch_size,
input_channels_per_group, (length, ),
output_channels_per_group, groups, (kernel, ), [stride], [pad], None,
[dilation], X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, use_relu, use_channelwise, False,
input_dtype=X_qdtype, output_dtype=X_qdtype)
# TODO: merge this test with test_qconv1d when CUDNN runtime flags become available
"""Tests the correctness of quantized 1D convolution cudnn op."""
@given(batch_size=st.integers(1, 6),
# cudnn only supports multiples of 4, but we have explicitly added padding on the backend
input_channels_per_group=st.integers(1, 32),
# cudnn only supports multiples of 4, but we have explicitly added padding on the backend
output_channels_per_group=st.integers(1, 32),
groups=st.integers(1, 1), # currently padding only supports groups=1
length=st.integers(4, 16),
kernel=st.integers(1, 7),
stride=st.integers(1, 2),
pad=st.integers(0, 2),
# currently cudnn has only been verified to work for dilation = 1
# TODO: check backend works for dilation > 1
dilation=st.integers(1, 1),
X_scale=st.floats(1.2, 1.6),
# currently conv cudnn backend is only implemented for int8 symmetric
X_zero_point=st.sampled_from([0]),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
# currently conv cudnn backend is only implemented for int8 symmetric
W_zero_point=st.lists(st.integers(0, 0), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
# currently conv cudnn backend is only implemented for int8 symmetric
Y_zero_point=st.sampled_from([0]),
use_bias=st.booleans(),
use_relu=st.booleans(),
# TODO: enable channelwise
use_channelwise=st.sampled_from([False]))
@skipIfNoFBGEMM
@unittest.skipIf(not TEST_CUDNN, "cudnn is not enabled.")
@unittest.skip("Local only - currently the qconv1d_cudnn op is bulid "
"with USE_EXPERIMENTAL_CUDNN_V8_API, we can enable the test "
"after it is built by default")
def test_qconv1d_cudnn(
self,
batch_size,
input_channels_per_group,
output_channels_per_group,
groups,
length,
kernel,
stride,
pad,
dilation,
X_scale,
X_zero_point,
W_scale,
W_zero_point,
Y_scale,
Y_zero_point,
use_bias,
use_relu,
use_channelwise,
):
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
conv1d = torch.nn.Conv1d(
input_channels,
output_channels,
kernel,
stride,
pad,
dilation,
groups,
).to(torch.device("cuda"))
qconv_prepack = torch.ops.quantized.conv1d_prepack
if use_relu:
qconv = torch.ops.quantized.conv1d_relu
else:
qconv = torch.ops.quantized.conv1d
self._test_qconv_impl(
qconv, qconv_prepack, conv1d, batch_size,
input_channels_per_group, (length, ),
output_channels_per_group, groups, (kernel, ), [stride], [pad], None,
[dilation], X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, use_relu, use_channelwise, False,
device=torch.device("cuda"),
input_dtype=torch.qint8, weight_dtype=torch.qint8, output_dtype=torch.qint8)
@given(batch_size=st.integers(1, 4),
input_channels_per_group=st.sampled_from([2, 4, 5, 8, 16]),
D=st.integers(4, 8),
H=st.integers(4, 8),
W=st.integers(4, 8),
output_channels_per_group=st.sampled_from([2, 4, 5, 8, 16]),
groups=st.integers(1, 3),
kernel_d=st.integers(1, 4),
kernel_h=st.integers(1, 4),
kernel_w=st.integers(1, 4),
stride_d=st.integers(1, 2),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_d=st.integers(0, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_relu=st.booleans(),
use_channelwise=st.booleans(),
qengine=st.sampled_from(("qnnpack", "fbgemm")))
def test_qconv3d(
self,
batch_size,
input_channels_per_group,
D,
H,
W,
output_channels_per_group,
groups,
kernel_d,
kernel_h,
kernel_w,
stride_d,
stride_h,
stride_w,
pad_d,
pad_h,
pad_w,
dilation,
X_scale,
X_zero_point,
W_scale,
W_zero_point,
Y_scale,
Y_zero_point,
use_bias,
use_relu,
use_channelwise,
qengine
):
if qengine not in supported_qengines:
return
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
kernels = (kernel_d, kernel_h, kernel_w)
strides = (stride_d, stride_h, stride_w)
pads = (pad_d, pad_h, pad_w)
dilations = (dilation, dilation, dilation)
with override_quantized_engine(qengine):
qconv = torch.ops.quantized.conv3d
if use_relu:
qconv = torch.ops.quantized.conv3d_relu
qconv_prepack = torch.ops.quantized.conv3d_prepack
conv_op = torch.nn.Conv3d(
input_channels,
output_channels,
kernels,
strides,
pads,
dilations,
groups,
)
self._test_qconv_impl(
qconv, qconv_prepack, conv_op, batch_size,
input_channels_per_group, (D, H, W), output_channels_per_group,
groups, kernels, strides, pads, None, dilations, X_scale,
X_zero_point, W_scale, W_zero_point, Y_scale, Y_zero_point,
use_bias, use_relu, use_channelwise, use_transpose=False)
"""Tests the correctness of the quantized::qconv3d_unpack op."""
@given(
inputs=hu.tensor_conv(
spatial_dim=3, batch_size_range=(1, 3),
input_channels_per_group_range=(1, 3),
output_channels_per_group_range=(1, 3), feature_map_range=(3, 6),
kernel_range=(1, 3), max_groups=3,
qparams=[hu.qparams(dtypes=torch.quint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint32,
zero_point_min=0,
zero_point_max=0)]),
stride_d=st.integers(1, 2), stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_d=st.integers(1, 2), pad_h=st.integers(1, 2),
pad_w=st.integers(1, 2),
o_pad=st.integers(0, 2),
channelwise=st.booleans())
@override_qengines
def test_qconv3d_unpack(
self, inputs, stride_d, stride_h, stride_w, pad_d, pad_h, pad_w, o_pad,
channelwise
):
if qengine_is_qnnpack():
return # QNNPACK doesn't support this
transposed = inputs[-1]
if transposed:
qconv_prepack = torch.ops.quantized.conv_transpose3d_prepack
qconv_unpack = torch.ops.quantized.conv_transpose3d_unpack
else:
qconv_prepack = torch.ops.quantized.conv3d_prepack
qconv_unpack = torch.ops.quantized.conv3d_unpack
self._test_qconv_unpack_impl(
qconv_prepack, qconv_unpack, inputs,
(stride_d, stride_h, stride_w), (pad_d, pad_h, pad_w), (o_pad, o_pad, o_pad),
channelwise)
class TestPadding(TestCase):
@given(batch_size=st.integers(1, 64),
channels=st.integers(1, 64),
width=st.integers(16, 128),
qtype=st.sampled_from(hu._ALL_QINT_TYPES))
def test_reflection_pad1d(self, batch_size, channels, width, qtype):
padding = width // 4
x = torch.arange(batch_size * channels * width).to(torch.float)
x = x.resize(batch_size, channels, width)
# Per-Tensor test
scale, zp = _calculate_dynamic_qparams(x, qtype)
qx = torch.quantize_per_tensor(x, scale, zp, qtype)
padding_op = torch.nn.ReflectionPad1d(padding)
y_ref = padding_op(x)
qy_ref = torch.quantize_per_tensor(y_ref, scale, zp, qtype)
qy_hat = padding_op(qx)
self.assertEqual(qy_ref, qy_hat)
# Out variant
qy_hat = torch._C._nn.reflection_pad1d(qx, padding, out=qy_hat)
self.assertEqual(qy_ref, qy_hat)
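# For example, ReflectionPad1d(1) maps [1, 2, 3, 4] to [2, 1, 2, 3, 4, 3]: the
# padding mirrors the input without repeating the edge element, so a quantized
# input only has its integer values rearranged and the quantized output should
# match the quantized reference exactly.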
@given(batch_size=st.integers(1, 64),
channels=st.integers(1, 64),
height=st.integers(16, 128),
width=st.integers(16, 128),
qtype=st.sampled_from(hu._ALL_QINT_TYPES))
def test_reflection_pad2d(self, batch_size, channels, height, width, qtype):
padding = (width // 4, width // 4, height // 4, height // 4)
x = torch.arange(batch_size * channels * height * width).to(torch.float)
x = x.resize(batch_size, channels, height, width)
# Per-Tensor test
scale, zp = _calculate_dynamic_qparams(x, qtype)
qx = torch.quantize_per_tensor(x, scale, zp, qtype)
padding_op = torch.nn.ReflectionPad2d(padding)
y_ref = padding_op(x)
qy_ref = torch.quantize_per_tensor(y_ref, scale, zp, qtype)
qy_hat = padding_op(qx)
self.assertEqual(qy_ref, qy_hat)
# Out variant
qy_hat = torch._C._nn.reflection_pad2d(qx, padding, out=qy_hat)
self.assertEqual(qy_ref, qy_hat)
@given(batch_size=st.integers(1, 64),
channels=st.integers(1, 64),
hwd=st.integers(1, 16), # For 3D, max input size would be 16x16x16
d=st.sampled_from([1, 2, 3]),
value=st.floats(-5, 5, allow_nan=False, allow_infinity=False),
qtype=st.sampled_from(hu._ALL_QINT_TYPES))
def test_constant_padNd(self, batch_size, channels, d, hwd, value, qtype):
padding = hwd // 4
shape = [batch_size, channels, hwd]
op = torch.nn.ConstantPad1d
if d >= 2:
shape.append(hwd)
op = torch.nn.ConstantPad2d
if d == 3:
shape.append(hwd)
op = torch.nn.ConstantPad3d
numel = np.prod(shape)
x = torch.arange(numel).to(torch.float)
x = x.resize(*shape)
# Per-Tensor test
scale, zp = _calculate_dynamic_qparams(x, qtype)
qx = torch.quantize_per_tensor(x, scale, zp, qtype)
padding_op = op(padding, value)
y_ref = padding_op(x)
qy_ref = torch.quantize_per_tensor(y_ref, scale, zp, qtype)
qy_hat = padding_op(qx)
self.assertEqual(qy_ref, qy_hat)
@unittest.skipUnless('qnnpack' in supported_qengines,
"This Pytorch Build has not been built with or does not support QNNPACK")
class TestQNNPackOps(TestCase):
"""Tests the correctness of the quantized::qnnpack_relu op."""
@given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams(dtypes=torch.quint8,
zero_point_min=0,
zero_point_max=0)))
def test_qnnpack_relu(self, X):
with override_quantized_engine('qnnpack'):
X, (scale, zero_point, torch_type) = X
relu = torch.nn.functional.relu
X = torch.from_numpy(X)
Y = X.clone()
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point, dtype=torch_type)
qY_hat = relu(qX)
Y[Y < 0] = 0
qY = torch.quantize_per_tensor(Y, scale=scale, zero_point=zero_point, dtype=torch_type)
self.assertEqual(qY, qY_hat)
"""Tests the correctness of the quantized::qnnpack_tanh op."""
@skipIfNoFBGEMM
def test_qnnpack_tanh(self):
# Note: In QNNPACK the output scale and zero_point can only be
# 2.0/256, 128 respectively, as it uses a LUT with 256 bins.
shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
memory_formats = (torch.channels_last, torch.contiguous_format)
test_cases = itertools.product(shapes, memory_formats)
for shape, memory_format in test_cases:
X, scale, zero_point, torch_type = torch.randn(*shape), 1.0, 0, torch.quint8
if memory_format == torch.channels_last and len(shape) != 4:
continue
X = X.to(memory_format=memory_format)
qX = torch.quantize_per_tensor(X, scale=scale,
zero_point=zero_point,
dtype=torch_type)
# Floating point reference
Y = torch.tanh(qX.dequantize())
qY = torch.quantize_per_tensor(Y, scale=1.0 / 128, zero_point=128,
dtype=torch.quint8)
with override_quantized_engine('fbgemm'):
qYserver = torch.tanh(qX)
with override_quantized_engine('qnnpack'):
qY_hat = torch.tanh(qX)
self.assertEqual(
qY, qY_hat,
msg="QNNPACK TanH failed (FP ref), memory_format {}".format(memory_format))
self.assertEqual(
qYserver, qY_hat,
msg="QNNPACK TanH failed (FBGEMM ref), memory_format {}".format(memory_format))
"""Tests the correctness of the quantized::qnnpack_sigmoid op."""
@skipIfNoFBGEMM
def test_qnnpack_sigmoid(self):
# Note: In QNNPACK the output scale and zero_point can only be
# 1.0/256, 0 respectively, as it uses a LUT with 256 bins.
shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
memory_formats = (torch.channels_last, torch.contiguous_format)
test_cases = itertools.product(shapes, memory_formats)
for shape, memory_format in test_cases:
X, scale, zero_point, torch_type = torch.randn(*shape), 1.0, 0, torch.quint8
if memory_format == torch.channels_last and len(shape) != 4:
continue
X = X.to(memory_format=memory_format)
qX = torch.quantize_per_tensor(X, scale=scale,
zero_point=zero_point,
dtype=torch_type)
# Floating point reference
Y = torch.sigmoid(qX.dequantize())
qY = torch.quantize_per_tensor(Y, scale=1.0 / 256, zero_point=0,
dtype=torch.quint8)
with override_quantized_engine('fbgemm'):
qYserver = torch.sigmoid(qX)
with override_quantized_engine('qnnpack'):
qY_hat = torch.sigmoid(qX)
self.assertEqual(
qY, qY_hat,
msg="QNNPACK Sigmoid failed (FP ref), memory_format {}".format(memory_format))
self.assertEqual(
qYserver, qY_hat,
msg="QNNPACK Sigmoid failed (FBGEMM ref), memory_format {}".format(memory_format))
@skipIfNoFBGEMM
def test_qnnpack_sigmoid_sweep(self):
# Input parameters
f_min = -4.0
f_max = 4.0
scale = (f_max - f_min) / 256.0
zero_point = 128
dtype = torch.quint8
step = scale / 2.0
x = np.arange(f_min, f_max + step, step)
X = torch.from_numpy(x).to(torch.float32)
qX = torch.quantize_per_tensor(X, scale=scale,
zero_point=zero_point,
dtype=dtype)
dqX = qX.dequantize()
# Floating point reference
Y = torch.sigmoid(dqX)
qY = torch.quantize_per_tensor(Y, scale=1.0 / 256, zero_point=0,
dtype=torch.quint8)
with override_quantized_engine('fbgemm'):
qYserver = torch.sigmoid(qX)
with override_quantized_engine('qnnpack'):
qY_hat = torch.sigmoid(qX)
self.assertEqual(qY, qY_hat,
msg="QNNPACK Sigmoid failed (FP ref)!")
self.assertEqual(qYserver, qY_hat,
msg="QNNPACK Sigmoid failed (FBGEMM ref)!")
"""Tests the correctness of the quantized::add (qnnpack) op."""
@settings(suppress_health_check=(HealthCheck.filter_too_much,))
@given(A=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
qparams=hu.qparams(dtypes=[torch.quint8, torch.qint8])),
zero_point=st.sampled_from([0, 2, 5, 15, 127]),
scale_A=st.sampled_from([0.001, 0.057, 0.889, 12.3]),
scale_B=st.sampled_from([0.008, 0.0821, 0.67, 7]),
scale_C=st.sampled_from([0.003, 0.07821, 0.457, 7.34]),)
def test_qnnpack_add(self, A, zero_point, scale_A, scale_B, scale_C):
with override_quantized_engine('qnnpack'):
A_temp = A
for channels_last in [True, False]:
if channels_last and len(A_temp[0].shape) != 4:
continue
A, (scale_a, zero_point_A, torch_type) = A_temp
B, (scale_b, zero_point_B, torch_type) = A_temp
A = torch.from_numpy(A)
B = torch.from_numpy(B)
if torch_type == torch.qint8 and not torch.backends.xnnpack.enabled:
continue
if channels_last:
A = A.to(memory_format=torch.channels_last)
B = B.to(memory_format=torch.channels_last)
assume(scale_A // scale_C >= 2**-14)
assume(scale_A // scale_C < 2**8)
assume(scale_B // scale_C >= 2**-14)
assume(scale_B // scale_C < 2**8)
zero_point_C = 127
np_dtype = np.uint8
if torch_type == torch.qint8:
zero_point_C = 0
np_dtype = np.int8
qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point,
dtype=torch_type)
qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point,
dtype=torch_type)
# Add ground truth
C = (qA.dequantize() + qB.dequantize()).numpy()
qC = _quantize(C, scale_C, zero_point_C, dtype=np_dtype)
qC_qnnp = torch.ops.quantized.add(qA, qB, scale_C, zero_point_C)
np.testing.assert_equal(qC, qC_qnnp.int_repr(),
"Quantized addition failed.")
Crelu = C.copy()
Crelu[C < 0] = 0
qCrelu = torch.quantize_per_tensor(torch.from_numpy(Crelu), scale_C,
zero_point_C, dtype=torch_type)
qCrelu_hat = torch.ops.quantized.add_relu(qA, qB, scale=scale_C, zero_point=zero_point_C)
np.testing.assert_equal(qCrelu.int_repr().numpy(), qCrelu_hat.int_repr(),
"Quantized addition with ReLU failed.")
"""Tests that quantized add works with broadcasting """
def test_qnnpack_add_broadcast(self):
def _run_test(A, B):
qA = torch.quantize_per_tensor(A, 0.02, 0, dtype)
qB = torch.quantize_per_tensor(B, 0.04, 2, dtype)
output_scale = 0.01
output_zp = 1
# ground truth
C = qA.dequantize() + qB.dequantize()
qC = torch.quantize_per_tensor(C, output_scale, output_zp, dtype)
# quantized
qC_hat_1 = torch.ops.quantized.add(qA, qB, output_scale, output_zp)
qC_hat_2 = torch.ops.quantized.add(qB, qA, output_scale, output_zp)
self.assertTrue(torch.allclose(qC.dequantize(), qC_hat_1.dequantize()))
self.assertTrue(torch.allclose(qC.dequantize(), qC_hat_2.dequantize()))
with override_quantized_engine("qnnpack"):
for dtype in (torch.qint8, torch.quint8):
if dtype == torch.qint8 and not torch.backends.xnnpack.enabled:
continue
for channels_last in [True, False]:
# 4d
A = torch.randn(1, 3, 4, 4)
B = torch.randn(1, 1, 1, 1)
if channels_last:
A = A.to(memory_format=torch.channels_last)
B = B.to(memory_format=torch.channels_last)
_run_test(A, B)
# 5d
C = torch.randn(1, 3, 4, 4, 4)
D = torch.randn(1, 1, 1, 1, 1)
if channels_last:
C = C.to(memory_format=torch.channels_last_3d)
D = D.to(memory_format=torch.channels_last_3d)
_run_test(C, D)
"""Tests the correctness of quantized::qnnpack_maxpool2d op."""
@given(A=hu.tensor(shapes=hu.array_shapes(4, 4, 3, 5),
qparams=hu.qparams(dtypes=torch.quint8)),
kernel=st.sampled_from([2, 4]),
stride=st.sampled_from([1, 2]),
padding=st.sampled_from([1, 2]))
def test_qnnpack_maxpool2d(self, A, kernel, stride, padding):
import torch.nn.functional as F
with override_quantized_engine('qnnpack'):
A, (scale, zero_point, torch_type) = A
X = torch.from_numpy(A)
np_type = np.uint8
dilation = 1
# Check constraints
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iH, iW = X.shape[-2:]
oH = pool_output_shape(iH, kernel, padding, stride, dilation)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, dilation)
assume(oW > 0)
k = (kernel, kernel)
s = (stride, stride)
d = (dilation, dilation)
p = (padding, padding)
q_max_pool = torch.ops.quantized.max_pool2d
a = scale * (X - zero_point).to(dtype=torch.float)
qa = torch.quantize_per_tensor(a, scale=scale, zero_point=zero_point,
dtype=torch_type)
a_ref = qa.dequantize()
a_pool = F.max_pool2d(a_ref, kernel_size=k, stride=s, padding=p,
dilation=d)
a_pool_nhwc = a_pool.permute([0, 2, 3, 1])
qa_pool = q_max_pool(qa, k, s, p, d, ceil_mode=False)
qa_pool_dq = qa_pool.dequantize()
np.testing.assert_equal(a_pool.numpy(), qa_pool_dq.numpy())
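# pool_output_shape follows the usual pooling arithmetic, roughly
#     out = floor((in + 2 * padding - dilation * (kernel - 1) - 1) / stride) + 1
# so the assume() calls above simply discard draws that would yield an empty
# output or padding that overhangs more than half the kernel.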
@given(batch_size=st.integers(1, 5),
channels=st.sampled_from([2, 4, 5, 8, 16, 32]),
height=st.integers(4, 10),
width=st.integers(4, 10),
kernel=st.integers(2, 5),
stride=st.integers(1, 2),
padding=st.integers(1, 2),
scale=st.floats(0.2, 1.6),
zero_point=st.integers(0, 25)
)
def test_avg_pool2d(
self,
batch_size,
channels,
height,
width,
kernel,
stride,
padding,
scale,
zero_point
):
with override_quantized_engine('qnnpack'):
import torch.nn.functional as F
X_init = torch.from_numpy(np.random.randint(
0, 50, (batch_size, channels, height, width)))
X = scale * (X_init - zero_point).to(dtype=torch.float)
# Check constraints
assume(kernel // 2 >= padding) # Kernel cannot be overhanging!
iH, iW = X.shape[-2:]
oH = pool_output_shape(iH, kernel, padding, stride, 1)
assume(oH > 0)
oW = pool_output_shape(iW, kernel, padding, stride, 1)
assume(oW > 0)
k = (kernel, kernel)
s = (stride, stride)
p = (padding, padding)
q_avg_pool = torch.nn.quantized.functional.avg_pool2d
x_q = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
a_pool = F.avg_pool2d(x_q.dequantize().to(torch.float), kernel_size=k, stride=s, padding=p)
qa_pool = q_avg_pool(x_q, k, s, p)
# Quantize Ref Output
a_pool_q = torch.quantize_per_tensor(a_pool, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
np.testing.assert_array_almost_equal(a_pool_q.int_repr().numpy(),
qa_pool.int_repr().numpy(), decimal=0)
@given(batch_size=st.integers(1, 5),
channels=st.sampled_from([2, 4, 5, 8, 16, 32]),
height=st.integers(4, 20),
width=st.integers(4, 20),
output_height=st.integers(2, 10),
output_width=st.integers(2, 10),
scale=st.floats(0.2, 1.6),
zero_point=st.integers(0, 25)
)
def test_adaptive_avg_pool2d(
self,
batch_size,
channels,
height,
width,
output_height,
output_width,
scale,
zero_point
):
with override_quantized_engine('qnnpack'):
# Check constraints
assume(height >= output_height)
assume(width >= output_width)
import torch.nn.functional as F
X_init = torch.from_numpy(np.random.randint(
0, 50, (batch_size, channels, height, width)))
X = scale * (X_init - zero_point).to(dtype=torch.float)
iH, iW = X.shape[-2:]
q_avg_pool = torch.nn.quantized.functional.adaptive_avg_pool2d
x_q = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
a_pool = F.adaptive_avg_pool2d(x_q.dequantize().to(torch.float), (output_height, output_width))
qa_pool = q_avg_pool(x_q, (output_height, output_width))
# Quantize Ref Output
a_pool_q = torch.quantize_per_tensor(a_pool, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
np.testing.assert_array_almost_equal(a_pool_q.int_repr().numpy(),
qa_pool.int_repr().numpy(), decimal=0)
@given(batch_size=st.integers(1, 5),
channels=st.sampled_from([2, 4, 5, 8, 16, 32]),
height=st.integers(4, 10),
width=st.integers(4, 10),
scale=st.floats(0.02, 2.6),
zero_point=st.integers(0, 25))
def test_mean(self, batch_size, channels, height, width, scale, zero_point):
with override_quantized_engine('qnnpack'):
dim = (2, 3)
X_init = torch.from_numpy(np.random.randint(
0, 50, (batch_size, channels, height, width)))
X = scale * (X_init - zero_point).to(dtype=torch.float)
qX = torch.quantize_per_tensor(X, scale, zero_point, torch.quint8)
Y = torch.mean(qX.dequantize(), dim)
Y = torch.quantize_per_tensor(Y, scale, zero_point, torch.quint8)
qY = torch.mean(qX, dim)
np.testing.assert_array_almost_equal(Y.int_repr().numpy(), qY.int_repr().numpy(), decimal=0)
"""Tests the correctness of the quantized::hardtanh op."""
def test_hardtanh(self):
if 'qnnpack' not in torch.backends.quantized.supported_engines:
return
with override_quantized_engine('qnnpack'):
shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
memory_formats = (torch.channels_last, torch.contiguous_format)
min_vals = (-0.5, -0.3, 0.5)
max_vals = (-0.3, 0.3, 0.7)
test_cases = itertools.product(shapes, memory_formats, min_vals, max_vals)
for shape, memory_format, min_val, max_val in test_cases:
X, scale, zero_point, torch_type = torch.randn(*shape), 1.0, 0, torch.quint8
if memory_format == torch.channels_last and len(shape) != 4:
continue
Y = X.clone()
Y[Y < min_val] = min_val
Y[Y > max_val] = max_val
qY = torch.quantize_per_tensor(Y, scale=scale,
zero_point=zero_point, dtype=torch_type)
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
dtype=torch_type)
qY_hat = torch.nn.quantized.functional.hardtanh(qX, min_val, max_val)
self.assertEqual(
qY, qY_hat,
msg="hardtanh failed:\nactual {}\nexpected {}\nmemory_format {}".format(qY_hat, qY, memory_format))
"""Tests the correctness of the tensor comparators."""
class TestComparatorOps(TestCase):
"""Tests the element-wise equality ops."""
@given(A=hu.tensor(shapes=((3, 4, 5),),
qparams=hu.qparams()),
B=hu.tensor(shapes=((5,), (1, 5), (1, 1, 5), (4, 5), (3, 4, 5)),
qparams=hu.qparams()))
def test_compare_tensor_tensor(self, A, B):
A, (scale_a, zero_point_a, dtype_a) = A
B, (scale_b, zero_point_b, dtype_b) = B
tA = torch.from_numpy(A)
tB = torch.from_numpy(B)
qA = torch.quantize_per_tensor(tA, scale=scale_a, zero_point=zero_point_a,
dtype=dtype_a)
qB = torch.quantize_per_tensor(tB, scale=scale_b, zero_point=zero_point_b,
dtype=dtype_b)
dqA = qA.dequantize()
dqB = qB.dequantize()
ops_under_test = ('__eq__', '__ne__', '__ge__', '__le__', '__gt__',
'__lt__', 'eq', 'ne', 'ge', 'le', 'gt', 'lt')
for op in ops_under_test:
result_ref = getattr(dqA, op)(dqB)
result = getattr(qA, op)(qB)
self.assertEqual(result_ref, result,
msg="'tensor.{}(tensor)'' failed".format(op))
# Reversed broadcasting.
result_ref = getattr(dqB, op)(dqA)
result = getattr(qB, op)(qA)
self.assertEqual(result_ref, result,
msg="'tensor.{}(tensor)'' failed".format(op))
@given(A=hu.tensor(shapes=((3, 4, 5),),
qparams=hu.qparams()),
b=hu.floats(allow_infinity=False, allow_nan=False))
def test_compare_tensor_scalar(self, A, b):
A, (scale_a, zero_point_a, dtype_a) = A
tA = torch.from_numpy(A)
qA = torch.quantize_per_tensor(tA, scale=scale_a, zero_point=zero_point_a,
dtype=dtype_a)
dqA = qA.dequantize()
ops_under_test_reversible = ('__eq__', '__ne__', '__ge__', '__le__',
'__gt__', '__lt__')
ops_under_test_nonreversible = ('eq', 'ne', 'ge', 'le', 'gt', 'lt')
for op in ops_under_test_reversible:
result_ref = getattr(dqA, op)(b)
result = getattr(qA, op)(b)
note("result_ref 1: {}".format(result_ref))
note("result 1: {}".format(result))
self.assertEqual(result_ref, result,
msg="'tensor.{}(scalar)'' failed".format(op))
# Reversed broadcasting.
result_ref = getattr(b, op)(dqA)
result = getattr(b, op)(qA)
note("result_ref 2: {}".format(result_ref))
note("result 2: {}".format(result))
self.assertEqual(result_ref, result,
msg="'scalar.{}(tensor)'' failed".format(op))
for op in ops_under_test_nonreversible:
result_ref = getattr(dqA, op)(b)
result = getattr(qA, op)(b)
note("result_ref 3: {}".format(result_ref))
note("result 3: {}".format(result))
self.assertEqual(result_ref, result,
msg="'tensor.{}(scalar)'' failed".format(op))
|
pytorch-master
|
test/quantization/core/test_quantized_op.py
|
# Owner(s): ["oncall: quantization"]
# Torch
import torch
import torch.nn.functional as F
import torch.nn.quantized.functional as qF
# Standard library
import numpy as np
# Testing utils
from hypothesis import assume, given
from hypothesis import strategies as st
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
_make_conv_test_input,
)
from torch.testing._internal.common_quantized import override_quantized_engine
from torch.testing._internal.common_utils import (
IS_PPC,
TEST_WITH_UBSAN,
)
class TestQuantizedFunctionalOps(QuantizationTestCase):
def test_relu_api(self):
X = torch.arange(-5, 5, dtype=torch.float)
scale = 2.0
zero_point = 1
qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point, dtype=torch.quint8)
qY = torch.relu(qX)
qY_hat = F.relu(qX)
self.assertEqual(qY, qY_hat)
def _test_conv_api_impl(
self, qconv_fn, conv_fn, batch_size, in_channels_per_group,
input_feature_map_size, out_channels_per_group, groups, kernel_size,
stride, padding, dilation, X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, use_channelwise,
):
for i in range(len(kernel_size)):
assume(input_feature_map_size[i] + 2 * padding[i]
>= dilation[i] * (kernel_size[i] - 1) + 1)
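# This keeps the convolution well-defined: it is equivalent to requiring at
# least one output element per spatial dim, i.e.
#     floor((size + 2*pad - dilation*(kernel-1) - 1) / stride) + 1 >= 1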
(X, X_q, W, W_q, b) = _make_conv_test_input(
batch_size, in_channels_per_group, input_feature_map_size,
out_channels_per_group, groups, kernel_size, X_scale,
X_zero_point, W_scale, W_zero_point, use_bias, use_channelwise)
Y_exp = conv_fn(X, W, b, stride, padding, dilation, groups)
Y_exp = torch.quantize_per_tensor(
Y_exp, scale=Y_scale, zero_point=Y_zero_point, dtype=torch.quint8)
Y_act = qconv_fn(
X_q, W_q, b, stride, padding, dilation, groups,
padding_mode="zeros", scale=Y_scale, zero_point=Y_zero_point)
# Make sure the results match
# assert_array_almost_equal compares using the following formula:
# abs(desired-actual) < 1.5 * 10**(-decimal)
# (https://docs.scipy.org/doc/numpy/reference/generated/numpy.testing.assert_almost_equal.html)
# We use decimal = 0 to ignore off-by-1 differences between reference
# and test. Off-by-1 differences arise due to the order of round and
# zero_point addition operation, i.e., if addition followed by round is
# used by reference and round followed by addition is used by test, the
# results may differ by 1.
# For example, the result of round(2.5) + 1 is 3 while round(2.5 + 1) is
# 4 assuming the rounding mode is round-to-nearest, ties-to-even.
np.testing.assert_array_almost_equal(
Y_exp.int_repr().numpy(), Y_act.int_repr().numpy(), decimal=0)
@given(batch_size=st.integers(1, 3),
in_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
L=st.integers(4, 16),
out_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
groups=st.integers(1, 4),
kernel=st.integers(1, 7),
stride=st.integers(1, 2),
pad=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_channelwise=st.booleans(),
qengine=st.sampled_from(("qnnpack", "fbgemm")))
def test_conv1d_api(
self, batch_size, in_channels_per_group, L, out_channels_per_group,
groups, kernel, stride, pad, dilation,
X_scale, X_zero_point, W_scale, W_zero_point, Y_scale, Y_zero_point,
use_bias, use_channelwise, qengine,
):
# Tests the correctness of the conv1d function.
if qengine not in torch.backends.quantized.supported_engines:
return
if qengine == 'qnnpack':
if IS_PPC or TEST_WITH_UBSAN:
return
use_channelwise = False
input_feature_map_size = (L, )
kernel_size = (kernel, )
stride = (stride, )
padding = (pad, )
dilation = (dilation, )
with override_quantized_engine(qengine):
qconv_fn = qF.conv1d
conv_fn = F.conv1d
self._test_conv_api_impl(
qconv_fn, conv_fn, batch_size, in_channels_per_group,
input_feature_map_size, out_channels_per_group, groups,
kernel_size, stride, padding, dilation, X_scale, X_zero_point,
W_scale, W_zero_point, Y_scale, Y_zero_point, use_bias,
use_channelwise)
@given(batch_size=st.integers(1, 3),
in_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
H=st.integers(4, 16),
W=st.integers(4, 16),
out_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
groups=st.integers(1, 4),
kernel_h=st.integers(1, 7),
kernel_w=st.integers(1, 7),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_channelwise=st.booleans(),
qengine=st.sampled_from(("qnnpack", "fbgemm")))
def test_conv2d_api(
self, batch_size, in_channels_per_group, H, W, out_channels_per_group,
groups, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation,
X_scale, X_zero_point, W_scale, W_zero_point, Y_scale, Y_zero_point,
use_bias, use_channelwise, qengine,
):
# Tests the correctness of the conv2d function.
if qengine not in torch.backends.quantized.supported_engines:
return
if qengine == 'qnnpack':
if IS_PPC or TEST_WITH_UBSAN:
return
input_feature_map_size = (H, W)
kernel_size = (kernel_h, kernel_w)
stride = (stride_h, stride_w)
padding = (pad_h, pad_w)
dilation = (dilation, dilation)
with override_quantized_engine(qengine):
qconv_fn = qF.conv2d
conv_fn = F.conv2d
self._test_conv_api_impl(
qconv_fn, conv_fn, batch_size, in_channels_per_group,
input_feature_map_size, out_channels_per_group, groups,
kernel_size, stride, padding, dilation, X_scale, X_zero_point,
W_scale, W_zero_point, Y_scale, Y_zero_point, use_bias,
use_channelwise)
@given(batch_size=st.integers(1, 3),
in_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
D=st.integers(4, 8),
H=st.integers(4, 8),
W=st.integers(4, 8),
out_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
groups=st.integers(1, 4),
kernel_d=st.integers(1, 4),
kernel_h=st.integers(1, 4),
kernel_w=st.integers(1, 4),
stride_d=st.integers(1, 2),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_d=st.integers(0, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_channelwise=st.booleans(),
qengine=st.sampled_from(("fbgemm",)))
def test_conv3d_api(
self, batch_size, in_channels_per_group, D, H, W,
out_channels_per_group, groups, kernel_d, kernel_h, kernel_w,
stride_d, stride_h, stride_w, pad_d, pad_h, pad_w, dilation, X_scale,
X_zero_point, W_scale, W_zero_point, Y_scale, Y_zero_point, use_bias,
use_channelwise, qengine,
):
# Tests the correctness of the conv3d function.
# Currently conv3d only supports FbGemm engine
if qengine not in torch.backends.quantized.supported_engines:
return
input_feature_map_size = (D, H, W)
kernel_size = (kernel_d, kernel_h, kernel_w)
stride = (stride_d, stride_h, stride_w)
padding = (pad_d, pad_h, pad_w)
dilation = (dilation, dilation, dilation)
with override_quantized_engine(qengine):
qconv_fn = qF.conv3d
conv_fn = F.conv3d
self._test_conv_api_impl(
qconv_fn, conv_fn, batch_size, in_channels_per_group,
input_feature_map_size, out_channels_per_group, groups,
kernel_size, stride, padding, dilation, X_scale, X_zero_point,
W_scale, W_zero_point, Y_scale, Y_zero_point, use_bias,
use_channelwise)
@given(N=st.integers(1, 10),
C=st.integers(1, 10),
H=st.integers(4, 8),
H_out=st.integers(4, 8),
W=st.integers(4, 8),
W_out=st.integers(4, 8),
scale=st.floats(.1, 2),
zero_point=st.integers(0, 4))
def test_grid_sample(self, N, C, H, H_out, W, W_out, scale, zero_point):
X = torch.rand(N, C, H, W)
X_q = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point, dtype=torch.quint8)
grid = torch.rand(N, H_out, W_out, 2)
out = F.grid_sample(X_q, grid)
out_exp = torch.quantize_per_tensor(F.grid_sample(X, grid), scale=scale, zero_point=zero_point, dtype=torch.quint8)
np.testing.assert_array_almost_equal(
out.int_repr().numpy(), out_exp.int_repr().numpy(), decimal=0)
|
pytorch-master
|
test/quantization/core/test_quantized_functional.py
|
# Owner(s): ["oncall: quantization"]
# Torch
import torch
from torch.ao.quantization import (
MinMaxObserver,
PerChannelMinMaxObserver,
MovingAverageMinMaxObserver,
MovingAveragePerChannelMinMaxObserver,
HistogramObserver,
RecordingObserver,
PlaceholderObserver,
NoopObserver,
FakeQuantize,
FixedQParamsObserver,
default_debug_qconfig,
default_observer,
default_histogram_observer,
default_per_channel_weight_observer,
get_observer_dict,
prepare,
prepare_qat,
convert,
QConfig,
FusedMovingAvgObsFakeQuantize,
get_embedding_qat_module_mappings,
get_embedding_static_quant_module_mappings,
)
import torch.nn as nn
# Standard library
import copy
import io
import itertools
import unittest
import math
import numpy as np
# Testing utils
from hypothesis import given, settings
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
AnnotatedSingleLayerLinearModel,
test_only_eval_fn,
SingleLayerLinearModel,
)
from torch.testing._internal.common_quantized import (
override_quantized_engine,
supported_qengines,
override_qengines,
_fake_quantize_per_channel_affine_reference,
_fake_quantize_per_channel_affine_grad_reference,
to_tensor,
)
from torch.testing._internal.common_quantization import (
DeFusedEmbeddingBagLinear,
)
NP_RANDOM_SEED = 19
tolerance = 1e-6
class TestObserver(QuantizationTestCase):
@given(qdtype=st.sampled_from((torch.qint8, torch.quint8, torch.qint32)),
qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)),
reduce_range=st.booleans())
def test_per_tensor_observers(self, qdtype, qscheme, reduce_range):
        # reduce_range cannot be true for symmetric quantization with uint8,
        # and is not applicable to qint32
if (qdtype == torch.quint8 and qscheme == torch.per_tensor_symmetric) or qdtype == torch.qint32:
reduce_range = False
ObserverList = [MinMaxObserver(dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range),
MovingAverageMinMaxObserver(averaging_constant=0.5,
dtype=qdtype,
qscheme=qscheme,
reduce_range=reduce_range)]
def _get_ref_params(reduce_range, qscheme, dtype, input_scale, min_val, max_val):
eps = torch.tensor([tolerance])
if dtype == torch.qint8:
if reduce_range:
quant_min, quant_max = -64, 63
else:
quant_min, quant_max = -128, 127
elif dtype == torch.quint8:
if reduce_range:
quant_min, quant_max = 0, 127
else:
quant_min, quant_max = 0, 255
elif dtype == torch.qint32:
quant_min, quant_max = -1 * (2 ** 31), (2 ** 31) - 1
min_val_neg = torch.tensor([0.])
max_val_pos = torch.tensor([input_scale * max_val]) if qdtype is torch.qint32 else torch.tensor([max_val])
scale, zero_point = 1.0, 0
if qscheme == torch.per_tensor_symmetric or qscheme == torch.per_channel_symmetric:
scale = torch.max(-min_val_neg, max_val_pos) / (float(quant_max - quant_min) / 2)
scale = torch.max(scale, eps)
if dtype == torch.quint8:
zero_point = 128
else:
scale = torch.max((max_val_pos - min_val_neg) / float(quant_max - quant_min), eps)
zero_point = quant_min - torch.round(min_val_neg / scale).to(torch.int)
zero_point = torch.clamp(zero_point, quant_min, quant_max)
return scale, zero_point
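        # For reference, illustrative arithmetic for the affine quint8 path of
        # _get_ref_params with the (min, max) = (1.0, 8.0) recorded below
        # (not executed as part of the test):
        #   min_val_neg = 0.0, max_val_pos = 8.0
        #   scale       = (8.0 - 0.0) / 255 ~= 0.0314   (8.0 / 127 ~= 0.0630 with reduce_range)
        #   zero_point  = 0 - round(0.0 / scale) = 0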
for myobs in ObserverList:
# Calculate Qparams should return with a warning for observers with no data
qparams = myobs.calculate_qparams()
input_scale = 2**16 if qdtype is torch.qint32 else 1
if type(myobs) == MinMaxObserver:
x = torch.tensor([1.0, 2.0, 2.0, 3.0, 4.0, 5.0, 6.0]) * input_scale
y = torch.tensor([4.0, 5.0, 5.0, 6.0, 7.0, 8.0]) * input_scale
else:
# Moving average of min/max for x and y matches that of
# extreme values for x/y used for minmax observer
x = torch.tensor([0.0, 2.0, 2.0, 3.0, 4.0, 5.0, 6.0]) * input_scale
y = torch.tensor([2.0, 5.0, 5.0, 6.0, 7.0, 10.0]) * input_scale
result = myobs(x)
result = myobs(y)
self.assertEqual(result, y)
self.assertEqual(myobs.min_val, 1.0 * input_scale)
self.assertEqual(myobs.max_val, 8.0 * input_scale)
qparams = myobs.calculate_qparams()
ref_scale, ref_zero_point = _get_ref_params(reduce_range, qscheme, qdtype, input_scale, 1.0, 8.0)
self.assertEqual(qparams[1].item(), ref_zero_point)
self.assertEqual(qparams[0].item(), ref_scale, atol=1e-5, rtol=0)
state_dict = myobs.state_dict()
b = io.BytesIO()
torch.save(state_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
for key in state_dict:
self.assertEqual(state_dict[key], loaded_dict[key])
loaded_obs = MinMaxObserver(dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)
loaded_obs.load_state_dict(loaded_dict)
loaded_qparams = loaded_obs.calculate_qparams()
self.assertEqual(myobs.min_val, loaded_obs.min_val)
self.assertEqual(myobs.max_val, loaded_obs.max_val)
self.assertEqual(myobs.calculate_qparams(), loaded_obs.calculate_qparams())
@given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),
qscheme=st.sampled_from((torch.per_channel_affine, torch.per_channel_symmetric, torch.per_channel_affine_float_qparams)),
ch_axis=st.sampled_from((0, 1, 2, 3)), reduce_range=st.booleans())
def test_per_channel_observers(self, qdtype, qscheme, ch_axis, reduce_range):
        # reduce_range is not used with float qparams, and cannot be true for
        # symmetric quantization with uint8
if qscheme == torch.per_channel_affine_float_qparams:
reduce_range = False
if qdtype == torch.quint8 and qscheme == torch.per_channel_symmetric:
reduce_range = False
ObserverList = [PerChannelMinMaxObserver(reduce_range=reduce_range,
ch_axis=ch_axis,
dtype=qdtype,
qscheme=qscheme),
MovingAveragePerChannelMinMaxObserver(averaging_constant=0.5,
reduce_range=reduce_range,
ch_axis=ch_axis,
dtype=qdtype,
qscheme=qscheme)]
for myobs in ObserverList:
# Calculate qparams should work for empty observers
qparams = myobs.calculate_qparams()
x = torch.tensor(
[
[[[1.0, 2.0], [2.0, 2.5]], [[3.0, 4.0], [4.5, 6.0]]],
[[[-4.0, -3.0], [5.0, 5.0]], [[6.0, 3.0], [7.0, 8.0]]],
]
)
if type(myobs) == MovingAveragePerChannelMinMaxObserver:
# Scaling the input tensor to model change in min/max values
# across batches
result = myobs(0.5 * x)
result = myobs(1.5 * x)
self.assertEqual(result, 1.5 * x)
else:
result = myobs(x)
self.assertEqual(result, x)
qparams = myobs.calculate_qparams()
ref_min_vals = [[1.0, -4.0], [-4.0, 3.0], [-4.0, 2.0], [-4.0, -3.0]]
ref_max_vals = [[6.0, 8.0], [5.0, 8.0], [6.0, 8.0], [7.0, 8.0]]
per_channel_symmetric_ref_scales = [
[0.04705882, 0.06274509],
[0.03921569, 0.0627451],
[0.04705882, 0.0627451],
[0.05490196, 0.0627451],
]
per_channel_affine_ref_scales = [
[0.02352941, 0.04705882],
[0.03529412, 0.03137255],
[0.03921569, 0.03137255],
[0.04313726, 0.04313726],
]
per_channel_affine_qint8_zp = [
[-128, -43],
[-15, -128],
[-26, -128],
[-35, -58],
]
per_channel_affine_float_qparams_ref_scales = [
[0.0196, 0.0471],
[0.0353, 0.0196],
[0.0392, 0.0235],
[0.0431, 0.0431],
]
per_channel_affine_quint8_zp = [[0, 85], [113, 0], [102, 0], [93, 70]]
self.assertEqual(myobs.min_val, ref_min_vals[ch_axis])
self.assertEqual(myobs.max_val, ref_max_vals[ch_axis])
if qscheme == torch.per_channel_symmetric:
ref_scales = per_channel_symmetric_ref_scales[ch_axis]
ref_zero_points = [0, 0] if qdtype is torch.qint8 else [128, 128]
elif qscheme == torch.per_channel_affine_float_qparams:
ref_scales = per_channel_affine_float_qparams_ref_scales[ch_axis]
ref_zero_points = [-1 * ref_min_vals[ch_axis][i] / ref_scales[i] for i in range(len(ref_scales))]
else:
ref_scales = per_channel_affine_ref_scales[ch_axis]
ref_zero_points = (
per_channel_affine_qint8_zp[ch_axis]
if qdtype is torch.qint8
else per_channel_affine_quint8_zp[ch_axis]
)
if reduce_range:
ref_scales = [s * 255 / 127 for s in ref_scales]
ref_zero_points = [math.floor(z / 2) for z in ref_zero_points]
self.assertEqual(qparams[0], torch.tensor(ref_scales, dtype=qparams[0].dtype), rtol=1e-5, atol=0.0001)
if qscheme == torch.per_channel_affine_float_qparams:
self.assertEqual(qparams[1], torch.tensor(ref_zero_points, dtype=qparams[1].dtype), rtol=1e-5, atol=1)
else:
self.assertEqual(qparams[1], torch.tensor(ref_zero_points, dtype=qparams[1].dtype))
# Test for serializability
state_dict = myobs.state_dict()
b = io.BytesIO()
torch.save(state_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
for key in state_dict:
self.assertEqual(state_dict[key], loaded_dict[key])
loaded_obs = PerChannelMinMaxObserver(reduce_range=reduce_range, ch_axis=ch_axis, dtype=qdtype, qscheme=qscheme)
loaded_obs.load_state_dict(loaded_dict)
loaded_qparams = loaded_obs.calculate_qparams()
self.assertEqual(myobs.min_val, loaded_obs.min_val)
self.assertEqual(myobs.max_val, loaded_obs.max_val)
self.assertEqual(myobs.calculate_qparams(), loaded_obs.calculate_qparams())
def test_observer_scriptable(self):
obs_list = [MinMaxObserver(), MovingAverageMinMaxObserver()]
for obs in obs_list:
scripted = torch.jit.script(obs)
x = torch.rand(3, 4)
obs(x)
scripted(x)
self.assertEqual(obs.calculate_qparams(), scripted.calculate_qparams())
buf = io.BytesIO()
torch.jit.save(scripted, buf)
buf.seek(0)
loaded = torch.jit.load(buf)
self.assertEqual(obs.calculate_qparams(), loaded.calculate_qparams())
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@override_qengines
def test_state_dict_respects_device_affinity(self):
"""
Tests that loading from a state dict loads buffers to the correct
device.
"""
device_cpu = torch.device('cpu')
device_cuda = torch.device('cuda:0')
test_cases = itertools.product(
[device_cpu, device_cuda],
[device_cpu, device_cuda],
[MinMaxObserver, MovingAverageMinMaxObserver,
PerChannelMinMaxObserver,
MovingAveragePerChannelMinMaxObserver,
# TODO: enable this (separate PR)
# HistogramObserver,
PlaceholderObserver, RecordingObserver, NoopObserver,
FakeQuantize])
for device_source, device_target, obs_cls in test_cases:
# calibrated source model
model = obs_cls()
model.to(device_source)
model(torch.randn(4, 1, 4, 4, device=device_source))
# target model
model2 = obs_cls()
model2.to(device_target)
model2.load_state_dict(model.state_dict())
# verify that buffers stayed on model2's device
model_devices = {p.device for p in model2.parameters()} | \
{p.device for p in model2.buffers()}
# some observers do not have any buffers, so lessEqual instead of
# Equal
self.assertLessEqual(len(model_devices), 1)
if len(model_devices) == 1:
model_device = next(iter(model_devices))
self.assertEqual(model_device, device_target)
def test_histogram_observer_consistent_buffer_shape(self):
"""
Ensures that the buffer shapes do not change from uninitialized to
initialized states for HistogramObserver.
"""
obs = HistogramObserver()
min_shape_before = obs.min_val.shape
max_shape_before = obs.max_val.shape
for _ in range(2):
obs(torch.randn(4, 4, 4, 4))
self.assertEqual(min_shape_before, obs.min_val.shape)
self.assertEqual(max_shape_before, obs.max_val.shape)
def test_histogram_observer_save_load_state_dict(self):
"""
Smoke test on saving/loading state_dict
"""
obs1 = HistogramObserver()
obs1(torch.randn(4, 4, 4, 4))
obs2 = HistogramObserver()
obs2.load_state_dict(obs1.state_dict())
self.assertEqual(obs2.min_val.shape, torch.Size([]))
self.assertEqual(obs2.max_val.shape, torch.Size([]))
def test_save_load_state_dict_script(self):
"""
Tests that we can save and load state_dict for observers that are scripted
in a quantized model.
"""
obs_list = [MinMaxObserver, MovingAverageMinMaxObserver, HistogramObserver]
for obs in obs_list:
model = SingleLayerLinearModel().eval()
qconfig = QConfig(activation=default_observer, weight=obs)
qconfig_dict = {'' : qconfig}
scripted = torch.jit.script(model)
scripted = torch.ao.quantization.prepare_jit(scripted, qconfig_dict)
x = torch.rand(5, 5)
scripted(x)
obs_dict = torch.ao.quantization.get_observer_state_dict(scripted)
# Load stats
scripted_2 = torch.jit.script(model)
scripted_2 = torch.ao.quantization.prepare_jit(scripted_2, qconfig_dict)
torch.ao.quantization.load_observer_state_dict(scripted_2, obs_dict)
# Verify that state_dict matches exactly with original one.
self.assertEqual(scripted.state_dict(), scripted_2.state_dict())
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_observer_qparams_respects_device_affinity(self):
"""
Ensure that the scale and zero_point returned by the observer
are on the same device as the input tensor.
"""
observerList = [MinMaxObserver(),
MovingAverageMinMaxObserver(),
PerChannelMinMaxObserver(),
MovingAveragePerChannelMinMaxObserver()]
for obs in observerList:
device = torch.device('cuda:1')
x = torch.randn(1, 2, device=device)
obs.to(device)
result = obs(x)
scale, zero_point = obs.calculate_qparams()
self.assertEqual(x.device, scale.device)
self.assertEqual(x.device, zero_point.device)
def test_zero_numel(self):
obs_list = [MinMaxObserver, MovingAverageMinMaxObserver,
PerChannelMinMaxObserver,
MovingAveragePerChannelMinMaxObserver, HistogramObserver,
FakeQuantize, FixedQParamsObserver]
for obs_cls in obs_list:
if obs_cls is FixedQParamsObserver:
obs = obs_cls(0.1, 0)
else:
obs = obs_cls()
x = torch.tensor([])
# verify no crash
x = obs(x)
def _test_memoryless(self, obs_class):
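        # With averaging_constant=1 the moving-average observers keep no history:
        # every forward() fully overwrites min_val/max_val, so qparams computed
        # after re-observing x must match the qparams computed from x alone.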
obs = obs_class(averaging_constant=1)
x = torch.randn((3, 3))
obs(x)
params = obs.calculate_qparams()
for _ in range(20):
obs(10 * torch.randn((3, 3)))
self.assertNotEqual(params, obs.calculate_qparams())
obs(x)
self.assertEqual(params, obs.calculate_qparams())
def test_memoryless_minmaxobserver(self):
self._test_memoryless(MovingAverageMinMaxObserver)
def test_memoryless_perchannelminmaxobserver(self):
self._test_memoryless(MovingAveragePerChannelMinMaxObserver)
# HistogramObserver that works like it does on master
class _ReferenceHistogramObserver(HistogramObserver):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@torch.jit.ignore
def _non_linear_param_search(self):
r"""Non-linear parameter search.
An approximation for L2 error minimization for selecting min/max.
By selecting new min/max, we filter out outliers in input distribution.
This follows the implementation of NormMinimization::NonlinearQuantizationParamsSearch in
caffe2/quantization/server/norm_minimization.cc
"""
def _get_norm(delta_begin, delta_end, density, norm_type):
r"""
            Compute the norm of the values uniformly distributed between
delta_begin and delta_end.
norm = density * (integral_{begin, end} x^2)
= density * (end^3 - begin^3) / 3
"""
assert norm_type == "L2", "Only L2 norms are currently supported"
norm = 0.0
if norm_type == "L2":
norm = (
delta_end * delta_end * delta_end
- delta_begin * delta_begin * delta_begin
) / 3
return density * norm
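        # Worked example of the formula above (illustrative only): with
        # delta_begin = -0.5, delta_end = 0.5 and density = 1.0,
        #   norm = (0.5**3 - (-0.5)**3) / 3 = 0.25 / 3 ~= 0.0833,
        # i.e. the L2 error of unit mass spread uniformly across a bin of width 1.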
def _compute_quantization_error(next_start_bin, next_end_bin, norm_type):
r"""
Compute the quantization error if we use start_bin to end_bin as the
min and max to do the quantization.
"""
bin_width = (self.max_val.item() - self.min_val.item()) / self.bins
norm = 0.0
dst_bin_width = bin_width * (next_end_bin - next_start_bin + 1) / self.dst_nbins
if dst_bin_width == 0.0:
return 0.0
for src_bin in range(self.bins):
# distances from the beginning of first dst_bin to the beginning and
# end of src_bin
src_bin_begin = (src_bin - next_start_bin) * bin_width
src_bin_end = src_bin_begin + bin_width
# which dst_bins the beginning and end of src_bin belong to?
dst_bin_of_begin = min(
self.dst_nbins - 1, max(0.0, math.floor(src_bin_begin / dst_bin_width))
)
dst_bin_of_end = min(
self.dst_nbins - 1, max(0.0, math.floor(src_bin_end / dst_bin_width))
)
dst_bin_of_begin_center = (
dst_bin_of_begin * dst_bin_width + dst_bin_width / 2
)
density = self.histogram[src_bin] / bin_width
if dst_bin_of_begin == dst_bin_of_end:
# if src_bin is entirely within 1 dst_bin
delta_begin = src_bin_begin - dst_bin_of_begin_center
delta_end = src_bin_end - dst_bin_of_begin_center
norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)
else:
delta_begin = src_bin_begin - dst_bin_of_begin_center
delta_end = dst_bin_width / 2
norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)
norm = norm + (dst_bin_of_end - dst_bin_of_begin - 1) * _get_norm(
-dst_bin_width / 2, dst_bin_width / 2, density, norm_type
)
dst_bin_of_end_center = (
dst_bin_of_end * dst_bin_width + dst_bin_width / 2
)
delta_begin = -dst_bin_width / 2
delta_end = src_bin_end - dst_bin_of_end_center
norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)
return norm
        assert self.histogram.size()[0] == self.bins, "bins mismatch"
bin_width = (self.max_val - self.min_val) / self.bins
# cumulative sum
total = sum(self.histogram)
cSum = torch.cumsum(self.histogram, dim=0)
stepsize = 1e-5 # granularity
alpha = 0.0 # lower bound
beta = 1.0 # upper bound
start_bin = 0
end_bin = self.bins - 1
norm_min = float("inf")
while alpha < beta:
# Find the next step
next_alpha = alpha + stepsize
next_beta = beta - stepsize
# find the left and right bins between the quantile bounds
l = start_bin
r = end_bin
while l < end_bin and cSum[l] < next_alpha * total:
l = l + 1
while r > start_bin and cSum[r] > next_beta * total:
r = r - 1
# decide the next move
next_start_bin = start_bin
next_end_bin = end_bin
if (l - start_bin) > (end_bin - r):
# move the start bin
next_start_bin = l
alpha = next_alpha
else:
# move the end bin
next_end_bin = r
beta = next_beta
if next_start_bin == start_bin and next_end_bin == end_bin:
continue
# calculate the quantization error using next_start_bin and next_end_bin
norm = _compute_quantization_error(next_start_bin, next_end_bin, "L2")
if norm > norm_min:
break
norm_min = norm
start_bin = next_start_bin
end_bin = next_end_bin
new_min = self.min_val + bin_width * start_bin
new_max = self.min_val + bin_width * (end_bin + 1)
return new_min, new_max
class TestRecordHistogramObserver(QuantizationTestCase):
# TODO: move this to quantize.py
def test_record_observer(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = AnnotatedSingleLayerLinearModel()
model.qconfig = default_debug_qconfig
model = prepare(model)
# run the evaluation and dump all tensors
test_only_eval_fn(model, self.calib_data)
test_only_eval_fn(model, self.calib_data)
observer_dict = {}
get_observer_dict(model, observer_dict)
self.assertTrue('fc1.module.activation_post_process' in observer_dict.keys(),
'observer is not recorded in the dict')
self.assertEqual(len(observer_dict['fc1.module.activation_post_process'].get_tensor_value()),
2 * len(self.calib_data))
self.assertEqual(observer_dict['fc1.module.activation_post_process'].get_tensor_value()[0],
model(self.calib_data[0][0]))
@given(qdtype=st.sampled_from((torch.qint8, torch.quint8)))
def test_observer_scriptable(self, qdtype):
obs = RecordingObserver(dtype=qdtype)
scripted = torch.jit.script(obs)
x = torch.rand(3, 4)
obs(x)
scripted(x)
self.assertTrue(torch.equal(obs.get_tensor_value()[0], scripted.get_tensor_value()[0]))
buf = io.BytesIO()
torch.jit.save(scripted, buf)
buf.seek(0)
loaded = torch.jit.load(buf)
self.assertTrue(torch.equal(obs.get_tensor_value()[0], loaded.get_tensor_value()[0]))
class TestHistogramObserver(QuantizationTestCase):
@given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),
qscheme=st.sampled_from(
(torch.per_tensor_affine, torch.per_tensor_symmetric))
)
def test_observer_scriptable(self, qdtype, qscheme):
ob_list = [
HistogramObserver(dtype=qdtype, qscheme=qscheme),
default_histogram_observer()
]
for obs in ob_list:
scripted = torch.jit.script(obs)
x = torch.rand(3, 4)
obs(x)
scripted(x)
self.assertTrue(torch.equal(obs.histogram, scripted.histogram))
buf = io.BytesIO()
torch.jit.save(scripted, buf)
buf.seek(0)
loaded = torch.jit.load(buf)
            self.assertTrue(torch.equal(obs.histogram, loaded.histogram))
@given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),
qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)),
reduce_range=st.booleans())
@settings(max_examples=10)
def test_histogram_observer(self, qdtype, qscheme, reduce_range):
myobs = HistogramObserver(bins=3, dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)
# Calculate qparams should work for empty observers
qparams = myobs.calculate_qparams()
x = torch.tensor([2.0, 3.0, 4.0, 5.0], requires_grad=True)
y = torch.tensor([5.0, 6.0, 7.0, 8.0])
out_x = myobs(x)
self.assertTrue(out_x.requires_grad)
myobs(y)
self.assertEqual(myobs.min_val, 2.0)
self.assertEqual(myobs.max_val, 8.0)
self.assertEqual(myobs.histogram, [2., 3., 3.])
qparams = myobs.calculate_qparams()
if reduce_range:
if qscheme == torch.per_tensor_symmetric:
ref_scale = 0.0470588 * 255 / 127
ref_zero_point = 0 if qdtype is torch.qint8 else 128
else:
ref_scale = 0.0235294 * 255 / 127
ref_zero_point = -64 if qdtype is torch.qint8 else 0
else:
if qscheme == torch.per_tensor_symmetric:
ref_scale = 0.0470588
ref_zero_point = 0 if qdtype is torch.qint8 else 128
else:
ref_scale = 0.0235294
ref_zero_point = -128 if qdtype is torch.qint8 else 0
self.assertEqual(qparams[1].item(), ref_zero_point)
self.assertEqual(qparams[0].item(), ref_scale, atol=1e-5, rtol=0)
# Test for serializability
state_dict = myobs.state_dict()
b = io.BytesIO()
torch.save(state_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
for key in state_dict:
self.assertEqual(state_dict[key], loaded_dict[key])
loaded_obs = HistogramObserver(bins=3, dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)
loaded_obs.load_state_dict(loaded_dict)
loaded_qparams = loaded_obs.calculate_qparams()
self.assertEqual(myobs.min_val, loaded_obs.min_val)
self.assertEqual(myobs.max_val, loaded_obs.max_val)
self.assertEqual(myobs.histogram, loaded_obs.histogram)
self.assertEqual(myobs.bins, loaded_obs.bins)
self.assertEqual(myobs.calculate_qparams(), loaded_obs.calculate_qparams())
def test_histogram_observer_one_sided(self):
myobs = HistogramObserver(bins=8, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=True)
x = torch.tensor([0.0, 0.3, 1.2, 1.7])
y = torch.tensor([0.1, 1.3, 2.0, 2.7])
myobs(x)
myobs(y)
self.assertEqual(myobs.min_val, 0)
qparams = myobs.calculate_qparams()
self.assertEqual(qparams[1].item(), 0)
def test_histogram_observer_same_inputs(self):
myobs = HistogramObserver(bins=3, dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, reduce_range=False)
w = torch.ones(4, requires_grad=True)
x = torch.zeros(4, requires_grad=True)
y = torch.tensor([2.0, 3.0, 4.0, 5.0], requires_grad=True)
z = torch.tensor([5.0, 6.0, 7.0, 8.0])
myobs(w)
myobs(x)
myobs(x)
myobs(y)
myobs(z)
qparams = myobs.calculate_qparams()
self.assertEqual(myobs.min_val, 2.0)
self.assertEqual(myobs.max_val, 8.0)
self.assertEqual(myobs.histogram, [2., 3., 3.])
@given(N=st.sampled_from([10, 1000]),
bins=st.sampled_from([256, 512, 1024, 2048]),
dtype=st.sampled_from([torch.qint8, torch.quint8]),
qscheme=st.sampled_from([torch.per_tensor_affine, torch.per_tensor_symmetric]),
reduce_range=st.booleans())
def test_histogram_observer_against_reference(self, N, bins, dtype, qscheme, reduce_range):
ref_obs = _ReferenceHistogramObserver(bins=bins, dtype=dtype, qscheme=qscheme, reduce_range=reduce_range)
my_obs = HistogramObserver(bins=bins, dtype=dtype, qscheme=qscheme, reduce_range=reduce_range)
for _ in range(10):
X = torch.randn(N)
my_obs(X)
ref_obs(X)
ref_qparams = ref_obs.calculate_qparams()
my_qparams = my_obs.calculate_qparams()
self.assertEqual(ref_qparams, my_qparams)
class TestFakeQuantize(TestCase):
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
X=hu.per_channel_tensor(shapes=hu.array_shapes(2, 5,),
qparams=hu.qparams(dtypes=torch.qint8)))
def test_fq_module_per_channel(self, device, X):
np.random.seed(NP_RANDOM_SEED)
X, (scale, zero_point, axis, torch_type) = X
quant_min = torch.iinfo(torch_type).min
quant_max = torch.iinfo(torch_type).max
X = to_tensor(X, device)
X.requires_grad_()
fq_module = FakeQuantize(default_per_channel_weight_observer, quant_min, quant_max, ch_axis=axis).to(device)
Y_prime = fq_module(X)
assert fq_module.scale is not None
assert fq_module.zero_point is not None
Y = _fake_quantize_per_channel_affine_reference(X, fq_module.scale,
fq_module.zero_point, axis, quant_min, quant_max)
np.testing.assert_allclose(Y.cpu().detach().numpy(), Y_prime.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
# Test backward
dout = torch.rand_like(X, dtype=torch.float, device=device)
Y_prime.backward(dout)
dX = _fake_quantize_per_channel_affine_grad_reference(dout, X, fq_module.scale,
fq_module.zero_point, axis, quant_min, quant_max)
np.testing.assert_allclose(dX.cpu().numpy(), X.grad.cpu().detach().numpy(), rtol=tolerance, atol=tolerance)
def test_fq_serializable_per_channel(self):
observer = default_per_channel_weight_observer
quant_min = -128
quant_max = 127
fq_module = FakeQuantize(observer, quant_min, quant_max)
X = torch.tensor([[-5, -3.5, -2, 0, 3, 5, 7], [1, 3, 2, 5, 6.5, 8, 10]], dtype=torch.float32)
y_ref = fq_module(X)
state_dict = fq_module.state_dict()
self.assertEqual(state_dict['scale'], [0.054902, 0.078431])
self.assertEqual(state_dict['zero_point'], [0, 0])
b = io.BytesIO()
torch.save(state_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
for key in state_dict:
self.assertEqual(state_dict[key], loaded_dict[key])
def test_quant_min_max_override(self):
observer = default_per_channel_weight_observer
# test no override
fq_module = FakeQuantize(observer)
self.assertEqual(fq_module.activation_post_process.quant_min, -128)
self.assertEqual(fq_module.activation_post_process.quant_max, 127)
# test quant_min/quant_max override
fq_module = FakeQuantize(observer, quant_min=0, quant_max=127)
self.assertEqual(fq_module.activation_post_process.quant_min, 0)
self.assertEqual(fq_module.activation_post_process.quant_max, 127)
def _get_buffer_ids(module):
"""
Object addresses stay constant if and only if all modifications are in-place
"""
return [id(v) for k, v in module._buffers.items()]
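# For reference (illustrative, not executed by the tests): in-place ops keep the
# same tensor object, while rebinding creates a new one, e.g.
#   t = torch.zeros(1); buf_id = id(t)
#   t.add_(1.0)   # id(t) == buf_id -> passes the in-place checks below
#   t = t + 1.0   # id(t) != buf_id -> would be flagged as not in-place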
class TestDistributed(QuantizationTestCase):
def test_observers_preserve_buffers(self):
"""
Tests that observers only modify buffers in place. Note: this is important
because nn.DataParallel depends on this assumption to work correctly.
However, DataParallel does not expose IDs of the replicas, so we test it
without DataParallel in order to easily access the object IDs.
"""
observer_types = [
torch.ao.quantization.MinMaxObserver.with_args(dtype=torch.qint8),
torch.ao.quantization.MovingAverageMinMaxObserver.with_args(dtype=torch.qint8),
torch.ao.quantization.PerChannelMinMaxObserver.with_args(dtype=torch.qint8),
torch.ao.quantization.MovingAveragePerChannelMinMaxObserver.with_args(dtype=torch.qint8),
torch.ao.quantization.HistogramObserver.with_args(dtype=torch.qint8),
torch.ao.quantization.RecordingObserver.with_args(dtype=torch.qint8),
torch.ao.quantization.PlaceholderObserver.with_args(dtype=torch.float16),
]
for observer_type in observer_types:
observer = observer_type()
buffer_ids_before = _get_buffer_ids(observer)
for _i in range(5):
inputs = torch.rand((4, 4, 4))
observer(inputs)
buffer_ids_after = _get_buffer_ids(observer)
self.assertEqual(
buffer_ids_before,
buffer_ids_after,
msg="{}: Buffers must be modified in place".format(str(observer)))
def test_fake_quant_preserves_buffers(self):
"""
Tests that fake quant only modifies buffers in place. Note: this is important
because nn.DataParallel depends on this assumption to work correctly.
However, DataParallel does not expose IDs of the replicas, so we test it
without DataParallel in order to easily access the object IDs.
"""
model = torch.ao.quantization.FakeQuantize()
buffer_ids_before = _get_buffer_ids(model)
for _i in range(5):
inputs = torch.rand((4, 4, 4))
model(inputs)
model.apply(torch.ao.quantization.enable_fake_quant)
model.apply(torch.ao.quantization.disable_fake_quant)
model.apply(torch.ao.quantization.enable_observer)
model.apply(torch.ao.quantization.disable_observer)
buffer_ids_after = _get_buffer_ids(model)
self.assertEqual(
buffer_ids_before,
buffer_ids_after,
msg="FakeQuant: Buffers must be modified in place")
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_qat_data_parallel(self):
"""
Tests that doing QAT in nn.DataParallel does not crash.
"""
if 'fbgemm' not in torch.backends.quantized.supported_engines:
return
with override_quantized_engine('fbgemm'):
device = torch.device('cuda')
model = nn.Sequential(
torch.ao.quantization.QuantStub(),
nn.Conv2d(3, 1, 1, bias=False),
nn.BatchNorm2d(1),
nn.ReLU(),
nn.Conv2d(1, 2, 3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(2),
nn.AvgPool2d(14),
nn.Sigmoid(),
torch.ao.quantization.DeQuantStub(),
)
torch.ao.quantization.fuse_modules_qat(model, [['1', '2', '3'], ['4', '5']], inplace=True)
model.qconfig = torch.ao.quantization.get_default_qat_qconfig('fbgemm')
torch.ao.quantization.prepare_qat(model, inplace=True)
model = nn.DataParallel(model, device_ids=[0, 1])
model.to(device)
model.train()
for epoch in range(3):
inputs = torch.rand(2, 3, 28, 28).to(device)
model(inputs)
if epoch >= 1:
model.apply(torch.ao.quantization.disable_observer)
if epoch >= 2:
model.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
quant_model = copy.deepcopy(model.module)
quant_model = torch.ao.quantization.convert(quant_model.eval().cpu(), inplace=False)
with torch.no_grad():
out = quant_model(torch.rand(1, 3, 28, 28))
def test_qat_convbn_fused_syncbn_replacement(self):
"""
Tests that SyncBatchNorm replacement works for fused ConvBN.
"""
if 'fbgemm' not in torch.backends.quantized.supported_engines:
return
with override_quantized_engine('fbgemm'):
# create conv-bn
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv = nn.Conv2d(4, 1, 3, padding=1)
self.bn = nn.BatchNorm2d(1)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
model = Model()
# fuse it
fused_model = torch.ao.quantization.fuse_modules_qat(
model,
[['conv', 'bn']],
)
# convert to QAT
fused_model.qconfig = torch.ao.quantization.get_default_qconfig('fbgemm')
torch.ao.quantization.prepare_qat(fused_model, inplace=True)
# replace with DDP
fused_model = nn.SyncBatchNorm.convert_sync_batchnorm(fused_model)
self.assertTrue(
isinstance(fused_model.conv.bn, nn.SyncBatchNorm),
"Expected BN to be converted to SyncBN")
def test_syncbn_preserves_qconfig(self):
"""
Makes sure that if a BatchNorm is not fused and a qconfig exists,
        converting the module to SyncBatchNorm preserves the qconfig.
"""
m = nn.Sequential(
nn.Conv2d(1, 1, 1),
nn.BatchNorm2d(1),
)
m[1].qconfig = torch.ao.quantization.default_qconfig
m = torch.nn.SyncBatchNorm.convert_sync_batchnorm(m)
self.assertTrue(
hasattr(m[1], "qconfig"),
"missing qconfig after SyncBatchNorm conversion")
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@override_qengines
def test_device_affinity(self):
"""
Tests that converting a model to QAT respects device affinity
"""
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv = nn.Conv2d(1, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
model = Model()
model.qconfig = torch.ao.quantization.get_default_qat_qconfig(torch.backends.quantized.engine)
device = torch.device('cuda:0')
model.to(device)
torch.ao.quantization.prepare_qat(model, inplace=True)
model_devices = {p.device for p in model.parameters()} | \
{p.device for p in model.buffers()}
self.assertEqual(len(model_devices), 1)
model_device = next(iter(model_devices))
self.assertEqual(model_device, device)
# ensure that running an input on CUDA works without any needed changes
input = torch.randn(4, 1, 4, 4, device=device)
model(input)
class TestFusedObsFakeQuantModule(TestCase):
@given(
device=st.sampled_from(
["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
)
)
@settings(deadline=None)
def test_fused_obs_fq_module(self, device):
# Set up the parameters
x = torch.randn(5, 5, device=device)
running_min_op = torch.tensor(float("inf"), device=device)
running_max_op = torch.tensor(float("-inf"), device=device)
avg_const = 0.01
scale = torch.tensor([1.0], device=device)
zero_point = torch.tensor([0], dtype=torch.int, device=device)
# Run the forward on the Module
mod = FusedMovingAvgObsFakeQuantize()
torch.ao.quantization.enable_fake_quant(mod)
torch.ao.quantization.enable_observer(mod)
mod.to(device)
out = mod(x)
# Run the operator directly
pt_op = torch.fused_moving_avg_obs_fake_quant
out_ref = pt_op(
x,
mod.observer_enabled,
mod.fake_quant_enabled,
running_min_op,
running_max_op,
scale,
zero_point,
avg_const,
0,
255,
0,
False,
)
# Compare params with reference
torch.testing.assert_allclose(out, out_ref)
torch.testing.assert_allclose(
running_min_op, mod.activation_post_process.min_val
)
torch.testing.assert_allclose(
running_max_op, mod.activation_post_process.max_val
)
@given(
device=st.sampled_from(
["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
)
)
@settings(deadline=None)
def test_fused_obs_fq_moving_avg_module(self, device):
# Set up the parameters
running_min_op = torch.tensor(float("inf"), device=device)
running_max_op = torch.tensor(float("-inf"), device=device)
avg_const = 0.001
scale = torch.tensor([1.0], device=device)
zero_point = torch.tensor([0], dtype=torch.int, device=device)
mod = FusedMovingAvgObsFakeQuantize(averaging_constant=0.001)
mod.to(device)
mod.observer_enabled[0] = 0
mod.fake_quant_enabled[0] = 0
for i in range(10):
x = torch.randn(5, 5, device=device)
if i > 2:
mod.observer_enabled[0] = 1
if i > 4:
mod.fake_quant_enabled[0] = 1
# Run the forward on the Module
out = mod(x)
# Run the operator directly
pt_op = torch.fused_moving_avg_obs_fake_quant
out_ref = pt_op(
x,
mod.observer_enabled,
mod.fake_quant_enabled,
running_min_op,
running_max_op,
scale,
zero_point,
avg_const,
0,
255,
0,
False,
)
# Compare params with reference
torch.testing.assert_allclose(out, out_ref)
torch.testing.assert_allclose(
running_min_op, mod.activation_post_process.min_val
)
torch.testing.assert_allclose(
running_max_op, mod.activation_post_process.max_val
)
@given(
device=st.sampled_from(
["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
)
)
@settings(deadline=None)
def test_compare_fused_obs_fq_oss_module(self, device):
mod = FusedMovingAvgObsFakeQuantize()
torch.ao.quantization.enable_fake_quant(mod)
torch.ao.quantization.enable_observer(mod)
mod.to(device)
mod_ref = FakeQuantize()
torch.ao.quantization.enable_fake_quant(mod_ref)
torch.ao.quantization.enable_observer(mod_ref)
mod_ref.to(device)
for i in range(10):
x = torch.randn(5, 5, device=device)
out = mod(x)
out_ref = mod_ref(x)
torch.testing.assert_allclose(out, out_ref)
torch.testing.assert_allclose(
mod_ref.activation_post_process.min_val,
mod.activation_post_process.min_val,
)
torch.testing.assert_allclose(
mod_ref.activation_post_process.max_val,
mod.activation_post_process.max_val,
)
def test_fused_mod_per_channel(self):
devices = ["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
m = 5
n = 10
for device in devices:
running_min_op = torch.empty(m, device=device).fill_(float("inf"))
running_max_op = torch.empty(m, device=device).fill_(float("-inf"))
avg_const = 0.001
scale = torch.empty(m, device=device).fill_(0.1)
zero_point = torch.empty(m, dtype=torch.int, device=device).fill_(0)
obs = FusedMovingAvgObsFakeQuantize.with_args(
averaging_constant=avg_const,
observer=MovingAveragePerChannelMinMaxObserver,
)
mod = obs()
mod = torch.jit.script(mod)
mod.to(device)
for i in range(10):
x = torch.randn(m, n, device=device)
if i > 2:
mod.observer_enabled[0] = 1
if i > 4:
mod.fake_quant_enabled[0] = 1
# Run the forward on the Module
out = mod(x)
# Run the operator directly
pt_op = torch.fused_moving_avg_obs_fake_quant
out_ref = pt_op(
x,
mod.observer_enabled,
mod.fake_quant_enabled,
running_min_op,
running_max_op,
scale,
zero_point,
avg_const,
0,
255,
0,
True,
False,
)
# Compare params with reference
torch.testing.assert_allclose(out, out_ref)
if mod.observer_enabled[0]:
torch.testing.assert_allclose(
running_min_op, mod.activation_post_process.min_val
)
torch.testing.assert_allclose(
running_max_op, mod.activation_post_process.max_val
)
if mod.fake_quant_enabled:
torch.testing.assert_allclose(scale, mod.scale)
torch.testing.assert_allclose(zero_point, mod.zero_point)
torch.testing.assert_allclose(mod.state_dict()['activation_post_process.min_val'], running_min_op)
torch.testing.assert_allclose(mod.state_dict()['activation_post_process.max_val'], running_max_op)
def test_fused_mod_reduce_range(self):
obs = FusedMovingAvgObsFakeQuantize(quant_min=0, quant_max=255, dtype=torch.quint8, reduce_range=True)
self.assertEqual(obs.activation_post_process.quant_min, 0)
self.assertEqual(obs.activation_post_process.quant_max, 127)
def test_embedding_bag_qat_config(self):
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.emb1 = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12,
include_last_offset=True, scale_grad_by_freq=False, mode='sum')
self.emb2 = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12,
include_last_offset=True, scale_grad_by_freq=False, mode='sum')
def forward(self, indices):
return torch.cat((self.emb1(indices), self.emb2(indices)))
qconfigs = [torch.ao.quantization.default_embedding_qat_qconfig,
torch.ao.quantization.default_embedding_qat_qconfig_4bit]
for qconfig in qconfigs:
model = Model().train()
indices = torch.randint(0, 10, (5, 12))
model.qconfig = qconfig
quant_model = prepare_qat(model,
mapping=get_embedding_qat_module_mappings())
count_fake_quant = 0
for name, mod in quant_model.named_modules():
if name.endswith('weight_fake_quant'):
count_fake_quant += 1
self.assertEqual(type(mod), FakeQuantize)
self.assertEqual(count_fake_quant, 2)
quant_model(indices)
# Ensure that EmbeddingBags have float zero_point values
self.assertEqual(quant_model.emb1.weight_fake_quant.zero_point.dtype, torch.float32)
self.assertEqual(quant_model.emb2.weight_fake_quant.zero_point.dtype, torch.float32)
inference_gm = convert(quant_model.eval().cpu(),
mapping=get_embedding_static_quant_module_mappings())
# Ensure that EmbeddingBags are now quantized with the appropriate bitwidth.
self.assertEqual(type(inference_gm.emb1), torch.nn.quantized.EmbeddingBag)
self.assertEqual(type(inference_gm.emb2), torch.nn.quantized.EmbeddingBag)
self.assertEqual(inference_gm.emb1.dtype, qconfig.weight().dtype)
self.assertEqual(inference_gm.emb2.dtype, qconfig.weight().dtype)
def test_embedding_qat_config(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = DeFusedEmbeddingBagLinear()
indices = torch.randint(0, 10, (5, 12))
quant_model = prepare_qat(model,
mapping=get_embedding_qat_module_mappings())
count_fake_quant = 0
count_activation_postproc = 0
for name, mod in quant_model.named_modules():
if name.endswith('weight_fake_quant'):
count_fake_quant += 1
if name.count('activation_post_process') == 1 and 'weight_fake_quant' not in name:
count_activation_postproc += 1
# One for embeddings, one for linear layer.
self.assertEqual(count_fake_quant, 2)
# One for embeddings (but it is a NoOp), One for quantize, one for linear layer.
self.assertEqual(count_activation_postproc, 3)
self.assertEqual(type(quant_model.emb.weight_fake_quant), FakeQuantize)
self.assertEqual(quant_model.emb.weight_fake_quant.zero_point.dtype, torch.float32)
self.assertEqual(type(quant_model.emb.activation_post_process), NoopObserver)
self.assertEqual(type(quant_model.linear.weight_fake_quant), FusedMovingAvgObsFakeQuantize)
self.assertEqual(type(quant_model.linear.activation_post_process), FusedMovingAvgObsFakeQuantize)
quant_model(indices)
inference_gm = convert(quant_model,
mapping=get_embedding_static_quant_module_mappings())
# Ensure that Embedding is now quantized
self.assertEqual(type(inference_gm.emb), torch.nn.quantized.Embedding)
# Ensure that Linear is now quantized
self.assertEqual(type(inference_gm.linear), torch.nn.quantized.Linear)
def test_default_fused_qat_config(self):
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = nn.Linear(2, 2)
self.relu = nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
for qengine in ["fbgemm", "qnnpack"]:
model = Model()
model.linear.weight = torch.nn.Parameter(torch.randn(2, 2))
sample_input = torch.randn(2, 2)
model.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine, version=1)
ref_model = torch.ao.quantization.QuantWrapper(model)
ref_model = torch.ao.quantization.prepare_qat(ref_model)
ref_model(sample_input)
count_fake_quant = 0
for name, mod in ref_model.named_modules():
if name.endswith('weight_fake_quant'):
count_fake_quant += 1
self.assertEqual(type(mod), FusedMovingAvgObsFakeQuantize)
if name.count('activation_post_process') == 1 and 'weight_fake_quant' not in name:
count_fake_quant += 1
self.assertEqual(type(mod), FusedMovingAvgObsFakeQuantize)
self.assertEqual(count_fake_quant, 3)
if qengine == "fbgemm":
lower_bnd = 0
upper_bnd = 127
obs2match = MovingAveragePerChannelMinMaxObserver
else:
lower_bnd = 0
upper_bnd = 255
obs2match = MovingAverageMinMaxObserver
self.assertEqual(ref_model.quant.activation_post_process.activation_post_process.quant_min, lower_bnd)
self.assertEqual(ref_model.quant.activation_post_process.activation_post_process.quant_max, upper_bnd)
self.assertEqual(type(ref_model.module.linear.weight_fake_quant.activation_post_process),
obs2match)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_quantization.py TESTNAME\n\n"
"instead.")
|
pytorch-master
|
test/quantization/core/test_workflow_module.py
|
# Owner(s): ["oncall: quantization"]
from torch.ao.quantization.experimental.observer import APoTObserver
import unittest
import torch
class TestNonUniformObserver(unittest.TestCase):
"""
Test case 1: calculate_qparams
Test that error is thrown when k == 0
"""
def test_calculate_qparams_invalid(self):
obs = APoTObserver(b=0, k=0)
obs.min_val = torch.tensor([0.0])
obs.max_val = torch.tensor([0.0])
with self.assertRaises(AssertionError):
alpha, gamma, quantization_levels, level_indices = obs.calculate_qparams(signed=False)
"""
Test case 2: calculate_qparams
APoT paper example: https://arxiv.org/pdf/1909.13144.pdf
Assume hardcoded parameters:
* b = 4 (total number of bits across all terms)
* k = 2 (base bitwidth, i.e. bitwidth of every term)
* n = 2 (number of additive terms)
* note: b = k * n
"""
def test_calculate_qparams_2terms(self):
obs = APoTObserver(b=4, k=2)
obs.min_val = torch.tensor([0.0])
obs.max_val = torch.tensor([1.0])
alpha, gamma, quantization_levels, level_indices = obs.calculate_qparams(signed=False)
alpha_test = torch.max(-obs.min_val, obs.max_val)
# check alpha value
self.assertEqual(alpha, alpha_test)
# calculate expected gamma value
gamma_test = 0
for i in range(2):
gamma_test += 2**(-i)
gamma_test = 1 / gamma_test
# check gamma value
self.assertEqual(gamma, gamma_test)
# check quantization levels size
quantlevels_size_test = int(len(quantization_levels))
quantlevels_size = 2**4
self.assertEqual(quantlevels_size_test, quantlevels_size)
# check level indices size
levelindices_size_test = int(len(level_indices))
self.assertEqual(levelindices_size_test, 16)
# check level indices unique values
level_indices_test_list = level_indices.tolist()
self.assertEqual(len(level_indices_test_list), len(set(level_indices_test_list)))
"""
Test case 3: calculate_qparams
Assume hardcoded parameters:
* b = 6 (total number of bits across all terms)
* k = 2 (base bitwidth, i.e. bitwidth of every term)
* n = 3 (number of additive terms)
"""
def test_calculate_qparams_3terms(self):
obs = APoTObserver(b=6, k=2)
obs.min_val = torch.tensor([0.0])
obs.max_val = torch.tensor([1.0])
alpha, gamma, quantization_levels, level_indices = obs.calculate_qparams(signed=False)
alpha_test = torch.max(-obs.min_val, obs.max_val)
# check alpha value
self.assertEqual(alpha, alpha_test)
# calculate expected gamma value
gamma_test = 0
for i in range(3):
gamma_test += 2**(-i)
gamma_test = 1 / gamma_test
# check gamma value
self.assertEqual(gamma, gamma_test)
# check quantization levels size
quantlevels_size_test = int(len(quantization_levels))
quantlevels_size = 2**6
self.assertEqual(quantlevels_size_test, quantlevels_size)
# check level indices size
levelindices_size_test = int(len(level_indices))
self.assertEqual(levelindices_size_test, 64)
# check level indices unique values
level_indices_test_list = level_indices.tolist()
self.assertEqual(len(level_indices_test_list), len(set(level_indices_test_list)))
"""
Test case 4: calculate_qparams
Same as test case 2 but with signed = True
Assume hardcoded parameters:
* b = 4 (total number of bits across all terms)
* k = 2 (base bitwidth, i.e. bitwidth of every term)
* n = 2 (number of additive terms)
* signed = True
"""
def test_calculate_qparams_signed(self):
obs = APoTObserver(b=4, k=2)
obs.min_val = torch.tensor([0.0])
obs.max_val = torch.tensor([1.0])
alpha, gamma, quantization_levels, level_indices = obs.calculate_qparams(signed=True)
alpha_test = torch.max(-obs.min_val, obs.max_val)
# check alpha value
self.assertEqual(alpha, alpha_test)
# calculate expected gamma value
gamma_test = 0
for i in range(2):
gamma_test += 2**(-i)
gamma_test = 1 / gamma_test
# check gamma value
self.assertEqual(gamma, gamma_test)
# check quantization levels size
quantlevels_size_test = int(len(quantization_levels))
self.assertEqual(quantlevels_size_test, 49)
# check negatives of each element contained
# in quantization levels
quantlevels_test_list = quantization_levels.tolist()
        negatives_contained = True
        for ele in quantlevels_test_list:
            if -ele not in quantlevels_test_list:
                negatives_contained = False
self.assertTrue(negatives_contained)
# check level indices size
levelindices_size_test = int(len(level_indices))
self.assertEqual(levelindices_size_test, 49)
# check level indices unique elements
level_indices_test_list = level_indices.tolist()
self.assertEqual(len(level_indices_test_list), len(set(level_indices_test_list)))
"""
Test case 5: calculate_qparams
Assume hardcoded parameters:
* b = 6 (total number of bits across all terms)
* k = 1 (base bitwidth, i.e. bitwidth of every term)
* n = 6 (number of additive terms)
"""
def test_calculate_qparams_k1(self):
obs = APoTObserver(b=6, k=1)
obs.min_val = torch.tensor([0.0])
obs.max_val = torch.tensor([1.0])
alpha, gamma, quantization_levels, level_indices = obs.calculate_qparams(signed=False)
# calculate expected gamma value
gamma_test = 0
for i in range(6):
gamma_test += 2**(-i)
gamma_test = 1 / gamma_test
# check gamma value
self.assertEqual(gamma, gamma_test)
# check quantization levels size
quantlevels_size_test = int(len(quantization_levels))
quantlevels_size = 2**6
self.assertEqual(quantlevels_size_test, quantlevels_size)
# check level indices size
levelindices_size_test = int(len(level_indices))
level_indices_size = 2**6
self.assertEqual(levelindices_size_test, level_indices_size)
# check level indices unique values
level_indices_test_list = level_indices.tolist()
self.assertEqual(len(level_indices_test_list), len(set(level_indices_test_list)))
"""
Test forward method on hard-coded tensor with arbitrary values.
Checks that alpha is max of abs value of max and min values in tensor.
"""
def test_forward(self):
obs = APoTObserver(b=4, k=2)
X = torch.tensor([0.0, -100.23, -37.18, 3.42, 8.93, 9.21, 87.92])
X = obs.forward(X)
alpha, gamma, quantization_levels, level_indices = obs.calculate_qparams(signed=True)
min_val = torch.min(X)
max_val = torch.max(X)
expected_alpha = torch.max(-min_val, max_val)
self.assertEqual(alpha, expected_alpha)
if __name__ == '__main__':
unittest.main()
|
pytorch-master
|
test/quantization/core/experimental/test_nonuniform_observer.py
|
# Owner(s): ["oncall: quantization"]
import torch
import unittest
from torch.ao.quantization.experimental.observer import APoTObserver
from torch.ao.quantization.experimental.quantizer import quantize_APoT
class TestQuantizedTensor(unittest.TestCase):
r""" Tests int_repr on APoTQuantizer with random tensor2quantize
and hard-coded values
"""
def test_int_repr(self):
        # tensor with hard-coded fp values
        tensor2quantize = torch.tensor([0, 0.0215, 0.1692, 0.385, 1, 0.0391])
observer = APoTObserver(b=4, k=2)
observer.forward(tensor2quantize)
qparams = observer.calculate_qparams(signed=False)
# get apot quantized tensor result
qtensor = quantize_APoT(tensor2quantize=tensor2quantize,
alpha=qparams[0],
gamma=qparams[1],
quantization_levels=qparams[2],
level_indices=qparams[3])
qtensor_data = qtensor.int_repr().int()
# expected qtensor values calculated based on
# corresponding level_indices to nearest quantization level
# for each fp value in tensor2quantize
# e.g.
# 0.0215 in tensor2quantize nearest 0.0208 in quantization_levels -> 3 in level_indices
expected_qtensor_data = torch.tensor([0, 3, 8, 13, 5, 12], dtype=torch.int32)
self.assertTrue(torch.equal(qtensor_data, expected_qtensor_data))
if __name__ == '__main__':
unittest.main()
|
pytorch-master
|
test/quantization/core/experimental/test_quantized_tensor.py
|
import torch
import torchvision
import torchvision.transforms.transforms as transforms
import os
import torch.quantization
from torchvision.models.quantization.resnet import resnet18
# Setup warnings
import warnings
warnings.filterwarnings(
action='ignore',
category=DeprecationWarning,
module=r'.*'
)
warnings.filterwarnings(
action='default',
module=r'torch.quantization'
)
"""
Define helper functions for APoT PTQ and QAT
"""
# Specify random seed for repeatable results
_ = torch.manual_seed(191009)
train_batch_size = 30
eval_batch_size = 50
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0.0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
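# For reference (illustrative only): starting from reset(), update(2.0, n=3) gives
# sum=6.0, count=3, avg=2.0; a further update(4.0) gives sum=10.0, count=4, avg=2.5,
# i.e. avg is the running sample-weighted mean.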
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
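# For reference (illustrative only): with topk=(1, 5), accuracy() returns a list of
# two one-element percentage tensors, e.g. [tensor([50.]), tensor([100.])] for a
# batch where the target is the top prediction in half the samples but always
# falls within the top 5.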
def evaluate(model, criterion, data_loader):
model.eval()
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
with torch.no_grad():
for image, target in data_loader:
output = model(image)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
top1.update(acc1[0], image.size(0))
top5.update(acc5[0], image.size(0))
print('')
return top1, top5
def load_model(model_file):
model = resnet18(pretrained=False)
state_dict = torch.load(model_file)
model.load_state_dict(state_dict)
model.to("cpu")
return model
def print_size_of_model(model):
if isinstance(model, torch.jit.RecursiveScriptModule):
torch.jit.save(model, "temp.p")
else:
torch.jit.save(torch.jit.script(model), "temp.p")
print("Size (MB):", os.path.getsize("temp.p") / 1e6)
os.remove("temp.p")
def prepare_data_loaders(data_path):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
dataset = torchvision.datasets.ImageNet(data_path,
split="train",
transform=transforms.Compose([transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize]))
dataset_test = torchvision.datasets.ImageNet(data_path,
split="val",
transform=transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize]))
train_sampler = torch.utils.data.RandomSampler(dataset)
test_sampler = torch.utils.data.SequentialSampler(dataset_test)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=train_batch_size,
sampler=train_sampler)
data_loader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=eval_batch_size,
sampler=test_sampler)
return data_loader, data_loader_test
def training_loop(model, criterion, data_loader):
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
train_loss, correct, total = 0, 0, 0
model.train()
for i in range(10):
for data, target in data_loader:
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = torch.max(output, 1)
total += target.size(0)
correct += (predicted == target).sum().item()
return train_loss, correct, total
|
pytorch-master
|
test/quantization/core/experimental/quantization_util.py
|
# Owner(s): ["oncall: quantization"]
import torch
import unittest
from torch.ao.quantization.experimental.observer import APoTObserver
from torch.ao.quantization.experimental.quantizer import quantize_APoT, dequantize_APoT
from torch.ao.quantization.experimental.fake_quantize import APoTFakeQuantize
from torch.ao.quantization.experimental.fake_quantize_function import fake_quantize_function
forward_helper = fake_quantize_function.forward
backward = fake_quantize_function.backward
from torch.autograd import gradcheck
class TestFakeQuantize(unittest.TestCase):
r""" Tests fake quantize calculate_qparams() method
by comparing with result from observer calculate_qparams.
Uses hard-coded values: alpha=1.0, b=4, k=2.
"""
def test_fake_calc_qparams(self):
apot_fake = APoTFakeQuantize(b=4, k=2)
apot_fake.activation_post_process.min_val = torch.tensor([0.0])
apot_fake.activation_post_process.max_val = torch.tensor([1.0])
alpha, gamma, quantization_levels, level_indices = apot_fake.calculate_qparams(signed=False)
observer = APoTObserver(b=4, k=2)
observer.min_val = torch.tensor([0.0])
observer.max_val = torch.tensor([1.0])
qparams_expected = observer.calculate_qparams(signed=False)
self.assertEqual(alpha, qparams_expected[0])
self.assertTrue(torch.equal(gamma, qparams_expected[1]))
self.assertTrue(torch.equal(quantization_levels, qparams_expected[2]))
self.assertTrue(torch.equal(level_indices, qparams_expected[3]))
r""" Tests fake quantize forward() method
by comparing result with expected
quant_dequant_APoT mapping of input tensor.
Uses input tensor with random values from 0 -> 1000
and APoT observer with hard-coded values b=4, k=2
"""
def test_forward(self):
# generate a tensor of size 20 with random values
# between 0 -> 1000 to quantize -> dequantize
X = 1000 * torch.rand(20)
observer = APoTObserver(b=4, k=2)
observer.forward(X)
alpha, gamma, quantization_levels, level_indices = observer.calculate_qparams(signed=False)
apot_fake = APoTFakeQuantize(b=4, k=2)
apot_fake.enable_observer()
apot_fake.enable_fake_quant()
X_reduced_precision_fp = apot_fake.forward(torch.clone(X), False)
# get X_expected by converting fp -> apot -> fp to simulate quantize -> dequantize
X_to_apot = quantize_APoT(X, alpha, gamma, quantization_levels, level_indices)
X_expected = dequantize_APoT(X_to_apot)
self.assertTrue(torch.equal(X_reduced_precision_fp, X_expected))
r""" Tests fake quantize forward() method
throws error when qparams are None
"""
def test_forward_exception(self):
# generate a tensor of size 20 with random values
# between 0 -> 1000 to quantize -> dequantize
X = 1000 * torch.rand(20)
apot_fake = APoTFakeQuantize(b=4, k=2)
# disable observer so qparams not set, qparams are all None
apot_fake.disable_observer()
apot_fake.enable_fake_quant()
with self.assertRaises(Exception):
apot_fake.forward(torch.clone(X), False)
r""" Tests fake quantize helper backward() method
using torch.autograd.gradcheck function.
"""
def test_backward(self):
input = torch.randn(20, dtype=torch.double, requires_grad=True)
observer = APoTObserver(b=4, k=2)
observer(input)
alpha, gamma, quantization_levels, level_indices = observer.calculate_qparams(signed=False)
test = gradcheck(fake_quantize_function.apply, (input, alpha, gamma, quantization_levels, level_indices), atol=1e-4)
if __name__ == '__main__':
unittest.main()
|
pytorch-master
|
test/quantization/core/experimental/test_fake_quantize.py
|
# Owner(s): ["oncall: quantization"]
import torch
from torch import quantize_per_tensor
from torch.ao.quantization.observer import MinMaxObserver
from torch.ao.quantization.experimental.observer import APoTObserver
from torch.ao.quantization.experimental.quantizer import APoTQuantizer, quantize_APoT, dequantize_APoT
import unittest
import random
class TestQuantizer(unittest.TestCase):
r""" Tests quantize_APoT result on random 1-dim tensor
and hardcoded values for b, k by comparing to uniform quantization
(non-uniform quantization reduces to uniform for k = 1)
quantized tensor (https://pytorch.org/docs/stable/generated/torch.quantize_per_tensor.html)
* tensor2quantize: Tensor
* b: 8
* k: 1
"""
def test_quantize_APoT_rand_k1(self):
# generate random size of tensor2quantize between 1 -> 20
size = random.randint(1, 20)
# generate tensor with random fp values between 0 -> 1000
tensor2quantize = 1000 * torch.rand(size, dtype=torch.float)
apot_observer = APoTObserver(b=8, k=1)
apot_observer(tensor2quantize)
alpha, gamma, quantization_levels, level_indices = apot_observer.calculate_qparams(signed=False)
# get apot quantized tensor result
qtensor = quantize_APoT(tensor2quantize=tensor2quantize,
alpha=alpha,
gamma=gamma,
quantization_levels=quantization_levels,
level_indices=level_indices)
# get uniform quantization quantized tensor result
uniform_observer = MinMaxObserver()
uniform_observer(tensor2quantize)
scale, zero_point = uniform_observer.calculate_qparams()
uniform_quantized = quantize_per_tensor(input=tensor2quantize,
scale=scale,
zero_point=zero_point,
dtype=torch.quint8).int_repr()
qtensor_data = qtensor.data.int()
uniform_quantized_tensor = uniform_quantized.data.int()
self.assertTrue(torch.equal(qtensor_data, uniform_quantized_tensor))
r""" Tests quantize_APoT for k != 1.
Tests quantize_APoT result on random 1-dim tensor and hardcoded values for
b=4, k=2 by comparing results to hand-calculated results from APoT paper
https://arxiv.org/pdf/1909.13144.pdf
* tensor2quantize: Tensor
* b: 4
* k: 2
"""
def test_quantize_APoT_k2(self):
r"""
given b = 4, k = 2, alpha = 1.0, we know:
(from APoT paper example: https://arxiv.org/pdf/1909.13144.pdf)
quantization_levels = tensor([0.0000, 0.0208, 0.0417, 0.0625, 0.0833, 0.1250, 0.1667,
0.1875, 0.2500, 0.3333, 0.3750, 0.5000, 0.6667, 0.6875, 0.7500, 1.0000])
level_indices = tensor([ 0, 3, 12, 15, 2, 14, 8, 11, 10, 1, 13, 9, 4, 7, 6, 5]))
"""
# generate tensor with random fp values
tensor2quantize = torch.tensor([0, 0.0215, 0.1692, 0.385, 1, 0.0391])
observer = APoTObserver(b=4, k=2)
observer.forward(tensor2quantize)
alpha, gamma, quantization_levels, level_indices = observer.calculate_qparams(signed=False)
# get apot quantized tensor result
qtensor = quantize_APoT(tensor2quantize=tensor2quantize,
alpha=alpha,
gamma=gamma,
quantization_levels=quantization_levels,
level_indices=level_indices)
qtensor_data = qtensor.data.int()
# expected qtensor values calculated based on
# corresponding level_indices to nearest quantization level
# for each fp value in tensor2quantize
# e.g.
# 0.0215 in tensor2quantize nearest 0.0208 in quantization_levels -> 3 in level_indices
expected_qtensor = torch.tensor([0, 3, 8, 13, 5, 12], dtype=torch.int32)
self.assertTrue(torch.equal(qtensor_data, expected_qtensor))
r""" Tests dequantize_apot result on random 1-dim tensor
and hardcoded values for b, k.
Dequant -> quant an input tensor and verify that
result is equivalent to input
* tensor2quantize: Tensor
* b: 4
* k: 2
"""
def test_dequantize_quantize_rand_b4(self):
# make observer
observer = APoTObserver(4, 2)
# generate random size of tensor2quantize between 1 -> 20
size = random.randint(1, 20)
# make tensor2quantize: random fp values between 0 -> 1000
tensor2quantize = 1000 * torch.rand(size, dtype=torch.float)
observer.forward(tensor2quantize)
alpha, gamma, quantization_levels, level_indices = observer.calculate_qparams(signed=False)
# make mock apot_tensor
original_apot = quantize_APoT(tensor2quantize=tensor2quantize,
alpha=alpha,
gamma=gamma,
quantization_levels=quantization_levels,
level_indices=level_indices)
original_input = torch.clone(original_apot.data).int()
# dequantize apot_tensor
dequantize_result = dequantize_APoT(apot_tensor=original_apot)
# quantize apot_tensor
final_apot = quantize_APoT(tensor2quantize=dequantize_result,
alpha=alpha,
gamma=gamma,
quantization_levels=quantization_levels,
level_indices=level_indices)
result = final_apot.data.int()
self.assertTrue(torch.equal(original_input, result))
r""" Tests dequantize_apot result on random 1-dim tensor
and hardcoded values for b, k.
Dequant -> quant an input tensor and verify that
result is equivalent to input
* tensor2quantize: Tensor
* b: 12
* k: 4
"""
def test_dequantize_quantize_rand_b6(self):
# make observer
observer = APoTObserver(12, 4)
# generate random size of tensor2quantize between 1 -> 20
size = random.randint(1, 20)
# make tensor2quantize: random fp values between 0 -> 1000
tensor2quantize = 1000 * torch.rand(size, dtype=torch.float)
observer.forward(tensor2quantize)
alpha, gamma, quantization_levels, level_indices = observer.calculate_qparams(signed=False)
# make mock apot_tensor
original_apot = quantize_APoT(tensor2quantize=tensor2quantize,
alpha=alpha,
gamma=gamma,
quantization_levels=quantization_levels,
level_indices=level_indices)
original_input = torch.clone(original_apot.data).int()
# dequantize apot_tensor
dequantize_result = dequantize_APoT(apot_tensor=original_apot)
# quantize apot_tensor
final_apot = quantize_APoT(tensor2quantize=dequantize_result,
alpha=alpha,
gamma=gamma,
quantization_levels=quantization_levels,
level_indices=level_indices)
result = final_apot.data.int()
self.assertTrue(torch.equal(original_input, result))
r""" Tests for correct dimensions in dequantize_apot result
on random 3-dim tensor with random dimension sizes
and hardcoded values for b, k.
Dequant an input tensor and verify that
dimensions are same as input.
* tensor2quantize: Tensor
* b: 4
* k: 2
"""
def test_dequantize_dim(self):
# make observer
observer = APoTObserver(4, 2)
# generate random size of tensor2quantize between 1 -> 20
size1 = random.randint(1, 20)
size2 = random.randint(1, 20)
size3 = random.randint(1, 20)
# make tensor2quantize: random fp values between 0 -> 1000
tensor2quantize = 1000 * torch.rand(size1, size2, size3, dtype=torch.float)
observer.forward(tensor2quantize)
alpha, gamma, quantization_levels, level_indices = observer.calculate_qparams(signed=False)
# make mock apot_tensor
original_apot = quantize_APoT(tensor2quantize=tensor2quantize,
alpha=alpha,
gamma=gamma,
quantization_levels=quantization_levels,
level_indices=level_indices)
# dequantize apot_tensor
dequantize_result = dequantize_APoT(apot_tensor=original_apot)
self.assertEqual(original_apot.data.size(), dequantize_result.size())
def test_q_apot_alpha(self):
with self.assertRaises(NotImplementedError):
APoTQuantizer.q_apot_alpha(self)
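# Illustrative sketch (not part of the original tests): the hand-computed values in
# test_quantize_APoT_k2 follow a nearest-level rule -- each fp value is snapped to
# the closest entry of quantization_levels and reported via the matching entry of
# level_indices. The helper below is ours (not a PyTorch API) and only documents
# that rule; e.g. with the b=4, k=2 levels quoted above, 0.0215 is closest to
# 0.0208, which maps to level index 3.
def _nearest_apot_index(value, quantization_levels, level_indices):
    pos = torch.argmin(torch.abs(quantization_levels - value))
    return int(level_indices[pos])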
if __name__ == '__main__':
unittest.main()
|
pytorch-master
|
test/quantization/core/experimental/test_quantizer.py
|
# Owner(s): ["oncall: quantization"]
import torch
from torch.ao.quantization.experimental.linear import LinearAPoT
from torch.nn.modules.linear import Linear
import unittest
class TestNonUniformObserver(unittest.TestCase):
"""
Test linear_APoT_fn by comparing to uniform linear
for 2d tensors with size (4,4) and k=1
"""
def test_linear_APoT_k1(self):
# weight: fp tensor
weight = 1000 * torch.rand(4, 4)
# activation: fp32 tensor with ~ integer values
activation = torch.randint(low=0, high=255, size=(4, 4), dtype=torch.float)
# calculate result from calling linear forward method
apot_linear = LinearAPoT(weight, 8, 1)
apot_linear_result = apot_linear(activation)
# calculate expected results
fp_linear = Linear(4, 4, bias=False)
# set weight for fp linear
apot_quantized_weight_float = apot_linear.weight.type(torch.FloatTensor)
fp_linear_weight = torch.nn.parameter.Parameter(data=apot_quantized_weight_float)
fp_linear.weight = fp_linear_weight
fp_linear_result = fp_linear(activation).data
self.assertTrue(torch.equal(apot_linear_result, fp_linear_result))
"""
Test linear_APoT_fn by comparing to uniform linear
for 2d tensors with size (5,3), (3, 5) and k=2
"""
def test_linear_APoT_k2(self):
# weight: fp tensor
weight = 1000 * torch.rand(5, 3)
# activation: fp32 tensor with ~ integer values
# note: transpose of activation matrix will have dimension (3, 5)
activation = torch.randint(low=0, high=255, size=(5, 3), dtype=torch.float)
# calculate result from calling linear forward method
apot_linear = LinearAPoT(weight, 8, 2)
apot_linear_result = apot_linear(activation)
# calculate expected results
fp_linear = Linear(4, 4, bias=False)
# set weight for fp linear
apot_quantized_weight_float = apot_linear.weight.type(torch.FloatTensor)
fp_linear_weight = torch.nn.parameter.Parameter(data=apot_quantized_weight_float)
fp_linear.weight = fp_linear_weight
fp_linear_result = fp_linear(activation).data
self.assertTrue(torch.equal(apot_linear_result, fp_linear_result))
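# Illustrative sketch (not part of the original tests): the comparisons above build
# their reference by copying LinearAPoT's already-quantized weight into a float
# nn.Linear so both paths multiply by the same matrix. The helper below is ours and
# assumes the weight is stored as (out_features, in_features), like nn.Linear.
def _make_reference_linear(apot_linear: LinearAPoT) -> Linear:
    out_features, in_features = apot_linear.weight.shape
    ref = Linear(in_features, out_features, bias=False)
    ref.weight = torch.nn.parameter.Parameter(apot_linear.weight.type(torch.FloatTensor))
    return ref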
if __name__ == '__main__':
unittest.main()
|
pytorch-master
|
test/quantization/core/experimental/test_linear.py
|
import torch
import torch.nn as nn
from torchvision.models.quantization.resnet import resnet18
from torch.ao.quantization.quantize_fx import prepare_fx
from torch.ao.quantization.experimental.quantization_helper import (
    evaluate,
    prepare_data_loaders,
    training_loop
)
from torch.ao.quantization.experimental.qconfig import (
    uniform_qconfig_8bit, uniform_qconfig_4bit,
    apot_weights_qconfig_8bit, apot_weights_qconfig_4bit,
    apot_qconfig_8bit, apot_qconfig_4bit
)
# training and validation dataset: full ImageNet dataset
data_path = '~/my_imagenet/'
train_batch_size = 30
eval_batch_size = 50
data_loader, data_loader_test = prepare_data_loaders(data_path)
criterion = nn.CrossEntropyLoss()
float_model = resnet18(pretrained=True)
float_model.eval()
# deepcopy the model since we need to keep the original model around
import copy
model_to_quantize = copy.deepcopy(float_model)
model_to_quantize.eval()
"""
Prepare model QAT for specified qconfig for torch.nn.Linear
"""
def prepare_qat_linear(qconfig):
qconfig_dict = {"object_type": [(torch.nn.Linear, qconfig)]}
prepared_model = prepare_fx(copy.deepcopy(float_model), qconfig_dict) # fuse modules and insert observers
training_loop(prepared_model, criterion, data_loader)
prepared_model.eval()
return prepared_model
"""
Prepare model with uniform activation, uniform weight
b=8, k=2
"""
prepared_model = prepare_qat_linear(uniform_qconfig_8bit)
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
print("Model #1 Evaluation accuracy on test dataset (b=8, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
"""
Prepare model with uniform activation, uniform weight
b=4, k=2
"""
prepared_model = prepare_qat_linear(uniform_qconfig_4bit)
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
print("Model #1 Evaluation accuracy on test dataset (b=4, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
"""
Prepare model with uniform activation, APoT weight
(b=8, k=2)
"""
prepared_model = prepare_qat_linear(apot_weights_qconfig_8bit)
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
print("Model #2 Evaluation accuracy on test dataset (b=8, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
"""
Prepare model with uniform activation, APoT weight
(b=4, k=2)
"""
prepared_model = prepare_qat_linear(apot_weights_qconfig_4bit)
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
print("Model #2 Evaluation accuracy on test dataset (b=4, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
"""
Prepare model with APoT activation and weight
(b=8, k=2)
"""
prepared_model = prepare_qat_linear(apot_qconfig_8bit)
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
print("Model #3 Evaluation accuracy on test dataset (b=8, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
"""
Prepare model with APoT activation and weight
(b=4, k=2)
"""
prepared_model = prepare_qat_linear(apot_qconfig_4bit)
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
print("Model #3 Evaluation accuracy on test dataset (b=4, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
|
pytorch-master
|
test/quantization/core/experimental/apot_fx_graph_mode_qat.py
|
import torch
import torch.nn as nn
import torch.quantization
from torchvision.models.quantization.resnet import resnet18
from torch.ao.quantization.experimental.quantization_helper import (
evaluate,
prepare_data_loaders
)
# validation dataset: full ImageNet dataset
data_path = '~/my_imagenet/'
data_loader, data_loader_test = prepare_data_loaders(data_path)
criterion = nn.CrossEntropyLoss()
float_model = resnet18(pretrained=True)
float_model.eval()
# deepcopy the model since we need to keep the original model around
import copy
model_to_quantize = copy.deepcopy(float_model)
model_to_quantize.eval()
"""
Prepare models
"""
# Note that this is temporary; we'll expose these functions in torch.quantization after the official release
from torch.quantization.quantize_fx import convert_fx, prepare_qat_fx
def calibrate(model, data_loader):
model.eval()
with torch.no_grad():
for image, target in data_loader:
model(image)
from torch.ao.quantization.experimental.qconfig import (
uniform_qconfig_8bit,
apot_weights_qconfig_8bit,
apot_qconfig_8bit,
uniform_qconfig_4bit,
apot_weights_qconfig_4bit,
apot_qconfig_4bit
)
"""
Prepare full precision model
"""
full_precision_model = float_model
top1, top5 = evaluate(full_precision_model, criterion, data_loader_test)
print("Model #0 Evaluation accuracy on test dataset: %2.2f, %2.2f" % (top1.avg, top5.avg))
"""
Prepare model PTQ for specified qconfig for torch.nn.Linear
"""
def prepare_ptq_linear(qconfig):
qconfig_dict = {"object_type": [(torch.nn.Linear, qconfig)]}
prepared_model = prepare_qat_fx(copy.deepcopy(float_model), qconfig_dict) # fuse modules and insert observers
calibrate(prepared_model, data_loader_test) # run calibration on sample data
return prepared_model
"""
Prepare model with uniform activation, uniform weight
b=8, k=2
"""
prepared_model = prepare_ptq_linear(uniform_qconfig_8bit)
quantized_model = convert_fx(prepared_model) # convert the calibrated model to a quantized model
top1, top5 = evaluate(quantized_model, criterion, data_loader_test)
print("Model #1 Evaluation accuracy on test dataset (b=8, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
"""
Prepare model with uniform activation, uniform weight
b=4, k=2
"""
prepared_model = prepare_ptq_linear(uniform_qconfig_4bit)
quantized_model = convert_fx(prepared_model) # convert the calibrated model to a quantized model
top1, top5 = evaluate(quantized_model, criterion, data_loader_test)
print("Model #1 Evaluation accuracy on test dataset (b=4, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
"""
Prepare model with uniform activation, APoT weight
(b=8, k=2)
"""
prepared_model = prepare_ptq_linear(apot_weights_qconfig_8bit)
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
print("Model #2 Evaluation accuracy on test dataset (b=8, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
"""
Prepare model with uniform activation, APoT weight
(b=4, k=2)
"""
prepared_model = prepare_ptq_linear(apot_weights_qconfig_4bit)
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
print("Model #2 Evaluation accuracy on test dataset (b=4, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
"""
Prepare model with APoT activation and weight
(b=8, k=2)
"""
prepared_model = prepare_ptq_linear(apot_qconfig_8bit)
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
print("Model #3 Evaluation accuracy on test dataset (b=8, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
"""
Prepare model with APoT activation and weight
(b=4, k=2)
"""
prepared_model = prepare_ptq_linear(apot_qconfig_4bit)
top1, top5 = evaluate(prepared_model, criterion, data_loader_test)
print("Model #3 Evaluation accuracy on test dataset (b=4, k=2): %2.2f, %2.2f" % (top1.avg, top5.avg))
"""
Prepare eager mode quantized model
"""
eager_quantized_model = resnet18(pretrained=True, quantize=True).eval()
top1, top5 = evaluate(eager_quantized_model, criterion, data_loader_test)
print("Eager mode quantized model evaluation accuracy on test dataset: %2.2f, %2.2f" % (top1.avg, top5.avg))
|
pytorch-master
|
test/quantization/core/experimental/apot_fx_graph_mode_ptq.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: quantization"]
import sys
import os
import unittest
from typing import Set
# torch
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
import torch.nn.intrinsic.quantized as nniq
from torch.fx import GraphModule
# Testing utils
from torch.testing._internal.common_utils import TestCase, IS_AVX512_VNNI_SUPPORTED
from torch.testing._internal.common_quantized import override_qengines, qengine_is_fbgemm
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.quantization_torch_package_models import LinearReluFunctional
from torch.ao.quantization import MinMaxObserver, PerChannelMinMaxObserver
import torch.ao.quantization.quantize_fx as quantize_fx
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
def get_filenames(self, subname):
# NB: we take __file__ from the module that defined the test
# class, so we place the expect directory where the test script
# lives, NOT where test/common_utils.py lives.
module_id = self.__class__.__module__
munged_id = remove_prefix(self.id(), module_id + ".")
test_file = os.path.realpath(sys.modules[module_id].__file__)
base_name = os.path.join(os.path.dirname(test_file),
"../serialized",
munged_id)
subname_output = ""
if subname:
base_name += "_" + subname
subname_output = " ({})".format(subname)
input_file = base_name + ".input.pt"
state_dict_file = base_name + ".state_dict.pt"
scripted_module_file = base_name + ".scripted.pt"
traced_module_file = base_name + ".traced.pt"
expected_file = base_name + ".expected.pt"
package_file = base_name + ".package.pt"
get_attr_targets_file = base_name + ".get_attr_targets.pt"
return input_file, state_dict_file, scripted_module_file, \
traced_module_file, expected_file, package_file, get_attr_targets_file
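# Illustrative note (hedged, the filenames below are examples only): for a test whose
# munged id is "TestSerialization.test_linear", get_filenames() resolves paths such
# as "../serialized/TestSerialization.test_linear.input.pt" and
# "../serialized/TestSerialization.test_linear.state_dict.pt", relative to the
# directory containing this test file.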
class TestSerialization(TestCase):
""" Test backward compatiblity for serialization and numerics
"""
# Copied and modified from TestCase.assertExpected
def _test_op(self, qmodule, subname=None, input_size=None, input_quantized=True,
generate=False, prec=None, new_zipfile_serialization=False):
r""" Test quantized modules serialized previously can be loaded
with current code, make sure we don't break backward compatibility for the
serialization of quantized modules
"""
input_file, state_dict_file, scripted_module_file, traced_module_file, \
expected_file, _package_file, _get_attr_targets_file = \
get_filenames(self, subname)
# only generate once.
if generate and qengine_is_fbgemm():
input_tensor = torch.rand(*input_size).float()
if input_quantized:
input_tensor = torch.quantize_per_tensor(input_tensor, 0.5, 2, torch.quint8)
torch.save(input_tensor, input_file)
# Temporary fix to use _use_new_zipfile_serialization until #38379 lands.
torch.save(qmodule.state_dict(), state_dict_file, _use_new_zipfile_serialization=new_zipfile_serialization)
torch.jit.save(torch.jit.script(qmodule), scripted_module_file)
torch.jit.save(torch.jit.trace(qmodule, input_tensor), traced_module_file)
torch.save(qmodule(input_tensor), expected_file)
input_tensor = torch.load(input_file)
qmodule.load_state_dict(torch.load(state_dict_file))
qmodule_scripted = torch.jit.load(scripted_module_file)
qmodule_traced = torch.jit.load(traced_module_file)
expected = torch.load(expected_file)
self.assertEqual(qmodule(input_tensor), expected, atol=prec)
self.assertEqual(qmodule_scripted(input_tensor), expected, atol=prec)
self.assertEqual(qmodule_traced(input_tensor), expected, atol=prec)
def _test_op_graph(self, qmodule, subname=None, input_size=None, input_quantized=True,
generate=False, prec=None, new_zipfile_serialization=False):
r"""
Input: a floating point module
If generate == True, traces and scripts the module and quantizes the results with
PTQ, and saves the results.
If generate == False, traces and scripts the module and quantizes the results with
PTQ, and compares to saved results.
"""
input_file, state_dict_file, scripted_module_file, traced_module_file, \
expected_file, _package_file, _get_attr_targets_file = \
get_filenames(self, subname)
# only generate once.
if generate and qengine_is_fbgemm():
input_tensor = torch.rand(*input_size).float()
torch.save(input_tensor, input_file)
# convert to TorchScript
scripted = torch.jit.script(qmodule)
traced = torch.jit.trace(qmodule, input_tensor)
# quantize
def _eval_fn(model, data):
model(data)
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
scripted_q = torch.ao.quantization.quantize_jit(
scripted, qconfig_dict, _eval_fn, [input_tensor])
traced_q = torch.ao.quantization.quantize_jit(
traced, qconfig_dict, _eval_fn, [input_tensor])
torch.jit.save(scripted_q, scripted_module_file)
torch.jit.save(traced_q, traced_module_file)
torch.save(scripted_q(input_tensor), expected_file)
input_tensor = torch.load(input_file)
qmodule_scripted = torch.jit.load(scripted_module_file)
qmodule_traced = torch.jit.load(traced_module_file)
expected = torch.load(expected_file)
self.assertEqual(qmodule_scripted(input_tensor), expected, atol=prec)
self.assertEqual(qmodule_traced(input_tensor), expected, atol=prec)
def _test_obs(self, obs, input_size, subname=None, generate=False, check_numerics=True):
"""
Test observer code can be loaded from state_dict.
"""
input_file, state_dict_file, _, traced_module_file, expected_file, \
_package_file, _get_attr_targets_file = get_filenames(self, None)
if generate:
input_tensor = torch.rand(*input_size).float()
torch.save(input_tensor, input_file)
torch.save(obs(input_tensor), expected_file)
torch.save(obs.state_dict(), state_dict_file)
input_tensor = torch.load(input_file)
obs.load_state_dict(torch.load(state_dict_file))
expected = torch.load(expected_file)
if check_numerics:
self.assertEqual(obs(input_tensor), expected)
def _test_package(self, fp32_module, input_size, generate=False):
"""
Verifies that files created in the past with torch.package
work on today's FX graph mode quantization transforms.
"""
input_file, state_dict_file, _scripted_module_file, _traced_module_file, \
expected_file, package_file, get_attr_targets_file = \
get_filenames(self, None)
package_name = 'test'
resource_name_model = 'test.pkl'
def _do_quant_transforms(
m: torch.nn.Module,
input_tensor: torch.Tensor,
) -> torch.nn.Module:
example_inputs = (input_tensor,)
# do the quantization transforms and save the result
qconfig = torch.quantization.get_default_qconfig('fbgemm')
mp = quantize_fx.prepare_fx(m, {'': qconfig}, example_inputs=example_inputs)
mp(input_tensor)
mq = quantize_fx.convert_fx(mp)
return mq
def _get_get_attr_target_strings(m: GraphModule) -> Set[str]:
results = set()
for node in m.graph.nodes:
if node.op == 'get_attr':
results.add(node.target)
return results
if generate and qengine_is_fbgemm():
input_tensor = torch.randn(*input_size)
torch.save(input_tensor, input_file)
# save the model with torch.package
with torch.package.PackageExporter(package_file) as exp:
exp.intern('torch.testing._internal.quantization_torch_package_models')
exp.save_pickle(package_name, resource_name_model, fp32_module)
# do the quantization transforms and save the result
mq = _do_quant_transforms(fp32_module, input_tensor)
get_attrs = _get_get_attr_target_strings(mq)
torch.save(get_attrs, get_attr_targets_file)
q_result = mq(input_tensor)
torch.save(q_result, expected_file)
# load input tensor
input_tensor = torch.load(input_file)
expected_output_tensor = torch.load(expected_file)
expected_get_attrs = torch.load(get_attr_targets_file)
# load model from package and verify output and get_attr targets match
imp = torch.package.PackageImporter(package_file)
m = imp.load_pickle(package_name, resource_name_model)
mq = _do_quant_transforms(m, input_tensor)
get_attrs = _get_get_attr_target_strings(mq)
self.assertTrue(
get_attrs == expected_get_attrs,
f'get_attrs: expected {expected_get_attrs}, got {get_attrs}')
output_tensor = mq(input_tensor)
self.assertTrue(torch.allclose(output_tensor, expected_output_tensor))
@override_qengines
def test_linear(self):
module = nnq.Linear(3, 1, bias_=True, dtype=torch.qint8)
self._test_op(module, input_size=[1, 3], generate=False)
@override_qengines
def test_linear_relu(self):
module = nniq.LinearReLU(3, 1, bias=True, dtype=torch.qint8)
self._test_op(module, input_size=[1, 3], generate=False)
@override_qengines
def test_linear_dynamic(self):
module_qint8 = nnqd.Linear(3, 1, bias_=True, dtype=torch.qint8)
self._test_op(module_qint8, "qint8", input_size=[1, 3], input_quantized=False, generate=False)
if qengine_is_fbgemm():
module_float16 = nnqd.Linear(3, 1, bias_=True, dtype=torch.float16)
self._test_op(module_float16, "float16", input_size=[1, 3], input_quantized=False, generate=False)
@override_qengines
def test_conv2d(self):
module = nnq.Conv2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
groups=1, bias=True, padding_mode="zeros")
self._test_op(module, input_size=[1, 3, 6, 6], generate=False)
@override_qengines
def test_conv2d_nobias(self):
module = nnq.Conv2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
groups=1, bias=False, padding_mode="zeros")
self._test_op(module, input_size=[1, 3, 6, 6], generate=False)
@override_qengines
def test_conv2d_graph(self):
module = nn.Sequential(
torch.ao.quantization.QuantStub(),
nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
groups=1, bias=True, padding_mode="zeros"),
)
self._test_op_graph(module, input_size=[1, 3, 6, 6], generate=False)
@override_qengines
def test_conv2d_nobias_graph(self):
module = nn.Sequential(
torch.ao.quantization.QuantStub(),
nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
groups=1, bias=False, padding_mode="zeros"),
)
self._test_op_graph(module, input_size=[1, 3, 6, 6], generate=False)
@override_qengines
def test_conv2d_graph_v2(self):
# tests the same thing as test_conv2d_graph, but for version 2 of
# ConvPackedParams{n}d
module = nn.Sequential(
torch.ao.quantization.QuantStub(),
nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
groups=1, bias=True, padding_mode="zeros"),
)
self._test_op_graph(module, input_size=[1, 3, 6, 6], generate=False)
@override_qengines
def test_conv2d_nobias_graph_v2(self):
# tests the same thing as test_conv2d_nobias_graph, but for version 2 of
# ConvPackedParams{n}d
module = nn.Sequential(
torch.ao.quantization.QuantStub(),
nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
groups=1, bias=False, padding_mode="zeros"),
)
self._test_op_graph(module, input_size=[1, 3, 6, 6], generate=False)
@override_qengines
def test_conv2d_graph_v3(self):
# tests the same thing as test_conv2d_graph, but for version 3 of
# ConvPackedParams{n}d
module = nn.Sequential(
torch.ao.quantization.QuantStub(),
nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
groups=1, bias=True, padding_mode="zeros"),
)
self._test_op_graph(module, input_size=[1, 3, 6, 6], generate=False)
@override_qengines
def test_conv2d_nobias_graph_v3(self):
# tests the same thing as test_conv2d_nobias_graph, but for version 3 of
# ConvPackedParams{n}d
module = nn.Sequential(
torch.ao.quantization.QuantStub(),
nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
groups=1, bias=False, padding_mode="zeros"),
)
self._test_op_graph(module, input_size=[1, 3, 6, 6], generate=False)
@override_qengines
def test_conv2d_relu(self):
module = nniq.ConvReLU2d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
groups=1, bias=True, padding_mode="zeros")
self._test_op(module, input_size=[1, 3, 6, 6], generate=False)
# TODO: graph mode quantized conv2d module
@override_qengines
def test_conv3d(self):
if qengine_is_fbgemm():
module = nnq.Conv3d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
groups=1, bias=True, padding_mode="zeros")
self._test_op(module, input_size=[1, 3, 6, 6, 6], generate=False)
# TODO: graph mode quantized conv3d module
@override_qengines
def test_conv3d_relu(self):
if qengine_is_fbgemm():
module = nniq.ConvReLU3d(3, 3, kernel_size=3, stride=1, padding=0, dilation=1,
groups=1, bias=True, padding_mode="zeros")
self._test_op(module, input_size=[1, 3, 6, 6, 6], generate=False)
# TODO: graph mode quantized conv3d module
@override_qengines
@unittest.skipIf(IS_AVX512_VNNI_SUPPORTED, "This test fails on machines with AVX512_VNNI support. Ref: GH Issue 59098")
def test_lstm(self):
class LSTMModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lstm = nnqd.LSTM(input_size=3, hidden_size=7, num_layers=1).to(dtype=torch.float)
def forward(self, x):
x = self.lstm(x)
return x
if qengine_is_fbgemm():
mod = LSTMModule()
self._test_op(mod, input_size=[4, 4, 3], input_quantized=False, generate=False, new_zipfile_serialization=True)
def test_per_channel_observer(self):
obs = PerChannelMinMaxObserver()
self._test_obs(obs, input_size=[5, 5], generate=False)
def test_per_tensor_observer(self):
obs = MinMaxObserver()
self._test_obs(obs, input_size=[5, 5], generate=False)
def test_default_qat_qconfig(self):
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = nn.Linear(5, 5)
self.relu = nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
model = Model()
model.linear.weight = torch.nn.Parameter(torch.randn(5, 5))
model.qconfig = torch.ao.quantization.get_default_qat_qconfig("fbgemm")
ref_model = torch.ao.quantization.QuantWrapper(model)
ref_model = torch.ao.quantization.prepare_qat(ref_model)
self._test_obs(ref_model, input_size=[5, 5], generate=False, check_numerics=False)
@skipIfNoFBGEMM
def test_linear_relu_package_quantization_transforms(self):
m = LinearReluFunctional(4).eval()
self._test_package(m, input_size=(1, 1, 4, 4), generate=False)
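# Hedged note (not part of the original tests): the serialized artifacts loaded above
# are regenerated by running the helpers with generate=True while the fbgemm qengine
# is active (see the `generate and qengine_is_fbgemm()` guards), which rewrites the
# files under the ../serialized directory referenced in get_filenames().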
|
pytorch-master
|
test/quantization/bc/test_backward_compatibility.py
|
pytorch-master
|
test/quantization/bc/__init__.py
|
|
# Owner(s): ["oncall: quantization"]
# Copied from pytorch/test/fx/test_subgraph_rewriter.py
import os
import sys
import torch
from torch.fx import symbolic_trace, subgraph_rewriter
from torch.fx.annotate import annotate
# Make the helper files in test/ importable
from torch.fx.experimental.rewriter import RewritingTracer
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_fx.py TESTNAME\n\n"
"instead.")
class TestSubgraphRewriter(JitTestCase):
def test_subgraph_rewriter_preserves_logic(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x) + torch.relu(x)
return torch.add(val, val)
def pattern(x):
return torch.neg(x) + torch.relu(x)
def comparison(x):
val = torch.neg(x) + torch.relu(x)
return torch.add(val, val)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.rand(1, 3)
# Replace `pattern` with the same pattern (shouldn't change
# the underlying logic)
subgraph_rewriter.replace_pattern(traced, pattern, pattern)
traced.graph.lint()
ref_output = comparison_fn(x)
test_output = traced.forward(x)
self.assertEqual(ref_output, test_output)
def test_subgraph_rewriter_with_oneliner_pattern(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x)
return torch.add(val, val)
def pattern(x):
return torch.neg(x)
def replacement(x):
return torch.relu(x)
def comparison(x):
val = torch.relu(x)
return torch.add(val, val)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.rand(1, 3)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_output = comparison_fn(x)
test_output = traced.forward(x)
self.assertEqual(ref_output, test_output)
def test_subgraph_rewriter_single_pattern_match(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x) + torch.relu(x)
return torch.add(val, val)
def pattern(x):
return torch.neg(x) + torch.relu(x)
def replacement(x):
return torch.relu(x)
def comparison(x):
val = torch.relu(x)
return torch.add(val, val)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.rand(1, 3)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_output = comparison_fn(x)
test_output = traced.forward(x)
self.assertEqual(ref_output, test_output)
def test_subgraph_rewriter_multiple_pattern_match(self):
class M(torch.nn.Module):
def forward(self, x, w1, w2):
m1 = torch.cat([w1, w2]).sum()
m2 = torch.cat([w1, w2]).sum()
return x + torch.max(m1) + torch.max(m2)
def pattern(w1, w2):
return torch.cat([w1, w2]).sum()
def replacement(w1, w2):
return torch.stack([w1, w2])
def comparison(x, w1, w2):
m1 = torch.stack([w1, w2])
m2 = torch.stack([w1, w2])
return x + torch.max(m1) + torch.max(m2)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.rand(1, 3)
w1 = torch.rand(1, 3)
w2 = torch.rand(1, 3)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x, w1, w2)
test_outs = traced.forward(x, w1, w2)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_graph_argument_order(self):
class M(torch.nn.Module):
def forward(self, x, y):
return torch.mm(x, y)
def pattern(x, y):
return torch.mm(x, y)
def comparison(x, y):
return torch.mm(x, y)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
y = torch.randn(4, 5)
subgraph_rewriter.replace_pattern(traced, pattern, pattern)
traced.graph.lint()
ref_outs = comparison_fn(x, y)
test_outs = traced.forward(x, y)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_correct_output_replacement(self):
class M(torch.nn.Module):
def forward(self, x, y):
val = torch.neg(y) + torch.relu(x)
return torch.add(val, val)
def pattern(x):
return torch.relu(x)
def replacement(x):
return torch.neg(x)
def comparison(x, y):
val = torch.neg(y) + torch.neg(x)
return torch.add(val, val)
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(4, 4)
y = torch.randn(4, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x, y)
test_outs = traced.forward(x, y)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_traced_as_callable(self):
class M(torch.nn.Module):
def forward(self, x):
val = torch.neg(x) + torch.relu(x)
return torch.add(val, val)
class Pattern(torch.nn.Module):
def forward(self, x):
return torch.neg(x) + torch.relu(x)
class Replacement(torch.nn.Module):
def forward(self, x):
return torch.sigmoid(x)
def comparison(x):
val = torch.sigmoid(x)
return torch.add(val, val)
traced = symbolic_trace(M())
traced_pattern = symbolic_trace(Pattern())
traced_replacement = symbolic_trace(Replacement())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, traced_pattern, traced_replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_pattern_is_entire_graph(self):
class M(torch.nn.Module):
def forward(self, x):
a = torch.neg(x)
return torch.add(a, a)
def pattern(x):
a = torch.neg(x)
return torch.add(a, a)
def replacement(x):
a = torch.sigmoid(x)
return torch.cat([a, a])
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(replacement)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_pattern_output_pattern_node_can_have_users_that_are_not_matched(self):
class M(torch.nn.Module):
def forward(self, x):
y = torch.relu(x)
return torch.neg(y) - y
def pattern(x):
return torch.relu(x)
def replacement(x):
return torch.sigmoid(x)
def comparison(x):
y = torch.sigmoid(x)
return torch.neg(y) - y
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_internal_pattern_nodes_cannot_have_users_that_are_not_matched(self):
class M(torch.nn.Module):
def forward(self, x, w1, w2, b1, b2):
m0 = torch.cat([w1, w2])
m1 = torch.cat([w1, w2])
m2 = torch.cat([x, b2])
t0 = torch.addmm(b1, m1, m2.t())
t1 = torch.sum(w1, 1)
t2 = torch.addmm(b1, m1, m2.t())
return torch.sum(t1), torch.sum(t2)
def pattern(x, w1, w2, b1, b2):
m1 = torch.cat([w1, w2])
m2 = torch.cat([x, b2])
return torch.addmm(b1, m1, m2.t())
def replacement(x, w1, w2, b1, b2):
return torch.cat([x, w1, w2])
traced = symbolic_trace(M())
# Result should be [] since no matches can be found
res = subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
self.assertEqual(res, [])
def test_subgraph_rewriter_placeholder_matching(self):
"""
This tests that a placeholder Node can be matched to a Node with
a different number of input Nodes. In the example below, the
original traced Module looks like this:
opcode target args kwargs
------------- ---------------------------------------------------------- ------------------------ --------
placeholder x () {}
call_function <built-in function add> (x, 3) {}
call_method dequantize (add,) {}
call_function <built-in method sigmoid of type object at 0x7f7c1f440fe0> (dequantize,) {}
call_method to (sigmoid, torch.float16) {}
output output (to,) {}
while the pattern we want to match looks like this:
opcode target args kwargs
------------- ---------------------------------------------------------- ------------------------ --------
placeholder x () {}
call_method dequantize (x,) {}
call_function <built-in method sigmoid of type object at 0x7f7c1f440fe0> (dequantize,) {}
call_method to (sigmoid, torch.float16) {}
output output (to,) {}
Here, we want to be able to match the original graph's
`call_function.add` Node with the pattern graph's
`placeholder.x` Node.
Credit to Jerry Zhang (GitHub: jerryzh168) for this test case
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.dtype = torch.float16
def forward(self, x):
x += 3
x = x.dequantize()
x = torch.sigmoid(x)
dtype = self.dtype
x = x.to(dtype)
return x
def pattern(x):
x = x.dequantize()
x = torch.sigmoid(x)
x = x.to(torch.float16)
return x
def replacement(x):
return x
def comparison(x):
return x + 3
traced = symbolic_trace(M())
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
def test_subgraph_rewriter_replaces_referenced_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.sigmoid = torch.nn.Sigmoid()
self.submod = torch.nn.ReLU()
def forward(self, x):
x = x + 1
return self.submod(self.sigmoid(x))
class Pattern(torch.nn.Module):
def __init__(self):
super().__init__()
self.sigmoid = torch.nn.Sigmoid()
self.submod = torch.nn.ReLU()
def forward(self, x):
return self.submod(self.sigmoid(x))
class Replacement(torch.nn.Module):
def __init__(self):
super().__init__()
self.id = torch.nn.Identity()
self.submod = torch.nn.ReLU()
def forward(self, x):
return self.submod(self.id(x))
class Comparison(torch.nn.Module):
def __init__(self):
super().__init__()
self.id = torch.nn.Identity()
self.submod = torch.nn.ReLU()
def forward(self, x):
x = x + 1
return self.submod(self.id(x))
traced = symbolic_trace(M())
comparison = Comparison()
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, Pattern(), Replacement())
traced.graph.lint()
ref_outs = comparison(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
traced.get_submodule("id")
with self.assertRaisesRegex(AttributeError, "has no attribute"):
traced.get_submodule("sigmoid")
submod = traced.get_submodule("submod")
self.assertEqual(type(submod), torch.nn.ReLU)
def test_subgraph_rewriter_annotations_int(self):
class M1(torch.nn.Module):
def forward(self, x):
y: int = x
return torch.add(x, y)
class M2(torch.nn.Module):
def forward(self, x):
y = annotate(x, int)
return torch.add(x, y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M1())
module = M2()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(module)
for n, m in zip(symbolic_traced.graph.nodes, graph.nodes):
if n.op == 'placeholder':
assert n.type == int
assert m.type == int
def test_subgraph_writer_replace_consecutive_submodules(self):
def f(x):
x = torch.sigmoid(x)
x = torch.sigmoid(x)
return torch.sigmoid(x)
def pattern(x):
return torch.sigmoid(x)
def replacement(x):
return torch.exp(x)
def comparison(x):
x = torch.exp(x)
x = torch.exp(x)
return torch.exp(x)
traced = symbolic_trace(f)
comparison_fn = symbolic_trace(comparison)
x = torch.randn(3, 4)
subgraph_rewriter.replace_pattern(traced, pattern, replacement)
traced.graph.lint()
ref_outs = comparison_fn(x)
test_outs = traced.forward(x)
self.assertEqual(ref_outs, test_outs)
|
pytorch-master
|
test/quantization/fx/test_subgraph_rewriter.py
|
pytorch-master
|
test/quantization/fx/__init__.py
|
|
# Owner(s): ["oncall: quantization"]
import copy
import math
import operator
import unittest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.ao.quantization import default_dynamic_qconfig
import torch.nn.quantized as nnq
toq = torch.ops.quantized
from torch.ao.quantization.quantize_fx import (
convert_fx,
convert_to_reference_fx,
prepare_fx,
prepare_qat_fx,
)
from torch.testing._internal.common_quantization import (
ConvBnModel,
ConvBnReLUModel,
ConvModel,
QuantizationTestCase,
skipIfNoFBGEMM,
SingleLayerLinearDynamicModel,
SingleLayerLinearModel,
LSTMwithHiddenDynamicModel,
SparseNNModel,
skip_if_no_torchvision,
)
from torch.ao.quantization.quantization_mappings import (
get_default_static_quant_module_mappings,
get_default_dynamic_quant_module_mappings,
get_default_float_to_quantized_operator_mappings,
)
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_quantization import NodeSpec as ns
from torch.ao.quantization.fx.pattern_utils import get_default_quant_patterns
import torch.ao.quantization.fx.quantization_patterns as qp
from torch.ao.ns.fx.pattern_utils import (
get_type_a_related_to_b,
)
from torch.ao.ns.fx.graph_matcher import (
get_matching_subgraph_pairs,
GraphMatchingException,
)
from torch.ao.ns.fx.utils import (
compute_sqnr,
compute_normalized_l2_error,
compute_cosine_similarity,
)
from torch.ao.ns.fx.mappings import (
get_node_type_to_io_type_map,
get_unmatchable_types_map,
get_base_name_to_sets_of_related_ops,
get_base_name_for_op,
add_op_to_sets_of_related_ops,
)
from torch.ao.ns.fx.weight_utils import (
get_op_to_type_to_weight_extraction_fn,
)
from torch.ao.ns._numeric_suite_fx import (
extract_weights,
_extract_weights_impl,
add_loggers,
_add_loggers_impl,
OutputLogger,
add_shadow_loggers,
_add_shadow_loggers_impl,
extract_logger_info,
extract_shadow_logger_info,
extend_logger_results_with_comparison,
)
from torch.ao.quantization.backend_config import get_native_backend_config
from torch.ao.quantization.fx.backend_config_utils import get_pattern_to_quantize_handlers
# Note: these models are not for use outside of this file. While it's good
# to reuse code, we also need to be able to iterate on tests
# quickly when debugging. If a test model has a large number of callsites
# across various different files, speed of debugging on individual test cases
# decreases.
class LinearReluFunctional(nn.Module):
def __init__(self):
super().__init__()
self.w1 = nn.Parameter(torch.empty(4, 4))
self.b1 = nn.Parameter(torch.zeros(4))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = F.linear(x, self.w1, self.b1)
x = F.relu(x)
return x
class LinearFunctional(nn.Module):
def __init__(self):
super().__init__()
self.w1 = nn.Parameter(torch.empty(4, 4))
self.b1 = nn.Parameter(torch.zeros(4))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = F.linear(x, self.w1, self.b1)
return x
class LinearReluLinearFunctional(nn.Module):
def __init__(self):
super().__init__()
self.w = nn.Parameter(torch.Tensor(4, 4))
self.b = nn.Parameter(torch.zeros(4))
torch.nn.init.kaiming_uniform_(self.w, a=math.sqrt(5))
def forward(self, x):
x = F.linear(x, self.w, self.b)
x = F.relu(x)
x = F.linear(x, self.w, self.b)
return x
class AddMulFunctional(nn.Module):
def forward(self, x, y):
x = x + 1.0
x = x * 1.0
x = 1.0 + x
x = 1.0 * x
x = x + y
x = x * y
return x
class AllConvAndLinearFusionModules(torch.nn.Module):
def __init__(self):
super().__init__()
# conv1d
self.conv1d_0 = nn.Conv1d(1, 1, 1)
# conv1d - relu
self.conv1d_1 = nn.Conv1d(1, 1, 1)
self.relu_0 = nn.ReLU()
# conv1d - bn (qat only)
self.conv1d_2 = nn.Conv1d(1, 1, 1)
self.bn1d_0 = nn.BatchNorm1d(1)
# conv1d - bn - relu (qat only)
self.conv1d_3 = nn.Conv1d(1, 1, 1)
self.bn1d_1 = nn.BatchNorm1d(1)
self.relu_4 = nn.ReLU()
# conv2d
self.conv2d_0 = nn.Conv2d(1, 1, 1)
# conv2d - relu
self.conv2d_1 = nn.Conv2d(1, 1, 1)
self.relu_1 = nn.ReLU()
# conv2d - bn (qat only)
self.conv2d_2 = nn.Conv2d(1, 1, 1)
self.bn2d_0 = nn.BatchNorm2d(1)
# conv2d - bn - relu (qat only)
self.conv2d_3 = nn.Conv2d(1, 1, 1)
self.bn2d_1 = nn.BatchNorm2d(1)
self.relu_5 = nn.ReLU()
# conv3d
self.conv3d_0 = nn.Conv3d(1, 1, 1)
# conv3d - relu
self.conv3d_1 = nn.Conv3d(1, 1, 1)
self.relu_2 = nn.ReLU()
# conv3d - bn (qat only)
self.conv3d_2 = nn.Conv3d(1, 1, 1)
self.bn3d_0 = nn.BatchNorm3d(1)
# conv3d - bn - relu (qat only)
self.conv3d_3 = nn.Conv3d(1, 1, 1)
self.bn3d_1 = nn.BatchNorm3d(1)
self.relu_6 = nn.ReLU()
# linear
self.linear_0 = nn.Linear(1, 1)
# linear - relu
self.linear_1 = nn.Linear(1, 1)
self.relu_3 = nn.ReLU()
def forward(self, x):
# conv1d
x = self.conv1d_0(x)
x = self.conv1d_1(x)
x = self.relu_0(x)
x = self.conv1d_2(x)
x = self.bn1d_0(x)
x = self.conv1d_3(x)
x = self.bn1d_1(x)
x = self.relu_4(x)
# conv2d
x = x.reshape(1, 1, 1, 1)
x = self.conv2d_0(x)
x = self.conv2d_1(x)
x = self.relu_1(x)
x = self.conv2d_2(x)
x = self.bn2d_0(x)
x = self.conv2d_3(x)
x = self.bn2d_1(x)
x = self.relu_5(x)
# conv3d
x = x.reshape(1, 1, 1, 1, 1)
x = self.conv3d_0(x)
x = self.conv3d_1(x)
x = self.relu_2(x)
x = self.conv3d_2(x)
x = self.bn3d_0(x)
x = self.conv3d_3(x)
x = self.bn3d_1(x)
x = self.relu_6(x)
# linear
x = x.reshape(1, 1)
x = self.linear_0(x)
x = self.linear_1(x)
x = self.relu_3(x)
return x
class AllConvFunctional(torch.nn.Module):
def __init__(self, weight1d, weight2d, weight3d, bias1d, bias2d, bias3d):
super().__init__()
self.weight1d = torch.nn.Parameter(weight1d)
self.weight2d = torch.nn.Parameter(weight2d)
self.weight3d = torch.nn.Parameter(weight3d)
self.bias1d = torch.nn.Parameter(bias1d)
self.bias2d = torch.nn.Parameter(bias2d)
self.bias3d = torch.nn.Parameter(bias3d)
self.stride1d = 1
self.padding1d = 0
self.dilation1d = 1
self.stride2d = (1, 1)
self.padding2d = (0, 0)
self.dilation2d = (1, 1)
self.groups = 1
self.stride3d = (1, 1, 1)
self.padding3d = (0, 0, 0)
self.dilation3d = (1, 1, 1)
def forward(self, x):
x = F.conv1d(
x, self.weight1d, self.bias1d, self.stride1d, self.padding1d,
self.dilation1d, self.groups)
x = F.conv1d(
x, self.weight1d, self.bias1d, self.stride1d, self.padding1d,
self.dilation1d, self.groups)
x = F.relu(x)
x = F.conv2d(
x, self.weight2d, self.bias2d, self.stride2d, self.padding2d,
self.dilation2d, self.groups)
x = F.conv2d(
x, self.weight2d, self.bias2d, self.stride2d, self.padding2d,
self.dilation2d, self.groups)
x = F.relu(x)
x = F.conv3d(
x, self.weight3d, self.bias3d, self.stride3d, self.padding3d,
self.dilation3d, self.groups)
x = F.conv3d(
x, self.weight3d, self.bias3d, self.stride3d, self.padding3d,
self.dilation3d, self.groups)
x = F.relu(x)
return x
@torch.fx.wrap
def _wrapped_hardswish(x):
return F.hardswish(x)
@torch.fx.wrap
def _wrapped_hardswish_fp16(x):
x = x.dequantize()
x = F.hardswish(x)
x = x.to(torch.float16)
return x
@torch.fx.wrap
def _wrapped_sigmoid(x):
return F.sigmoid(x)
@torch.fx.wrap
def _wrapped_linear(x, w, b):
return F.linear(x, w, b)
def get_all_quant_patterns():
""" we are in the process to migrate the frontend of fx graph mode quant
to use backend_config_dict, so some of the patterns are moved to backend_config_dict
this function will include these patterns so that we can still have all the patterns
"""
# TODO: we can remove this call, and get all patterns from backend_config_dict in
# the future when the frontend refactor is done in fx graph mode quantization
all_quant_patterns = get_default_quant_patterns()
# some of the patterns are moved to (native) backend_config_dict so we need to
# add them back here
for pattern, quantize_handler in get_pattern_to_quantize_handlers(get_native_backend_config()).items():
all_quant_patterns[pattern] = quantize_handler
return all_quant_patterns
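# Hedged note (illustrative, not in the original file): the tests below key their
# expected results by subgraph name, constructed as
#   'base_op_' + get_base_name_for_op(base_name_to_sets_of_related_ops, op) + '_<n>'
# where the trailing index distinguishes multiple matched subgraphs of the same base
# op; the literal base-name strings come from get_base_name_to_sets_of_related_ops().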
class TestFXGraphMatcher(QuantizationTestCase):
@skipIfNoFBGEMM
def test_simple_mod(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=(torch.randn(1, 1, 1, 1),))
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
conv_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, nn.Conv2d) + '_0'
expected_types = {
conv_name_0: ((nn.Conv2d, torch.ao.quantization.MinMaxObserver), (nnq.Conv2d, nnq.Conv2d)),
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
@skipIfNoFBGEMM
def test_simple_fun(self):
class M(nn.Module):
def __init__(self):
super().__init__()
self.w = nn.Parameter(torch.empty(1, 4))
self.b = nn.Parameter(torch.zeros(1))
torch.nn.init.kaiming_uniform_(self.w, a=math.sqrt(5))
def forward(self, x):
return F.linear(x, self.w, self.b)
m = M().eval()
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=(torch.randn(1, 1, 1, 1),))
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
linear_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, F.linear) + '_0'
expected_types = {
linear_name_0:
((F.linear, torch.ao.quantization.MinMaxObserver), (toq.linear, toq.linear))
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
@skipIfNoFBGEMM
def test_simple_fusion(self):
m = LinearReluFunctional().eval()
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=(torch.randn(4, 4),))
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
linear_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, F.linear) + '_0'
expected_types = {
linear_name_0:
((F.linear, torch.ao.quantization.MinMaxObserver), (toq.linear_relu, toq.linear_relu)),
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
@skipIfNoFBGEMM
def test_simple_mod_multi(self):
m = nn.Sequential(
nn.Sequential(
nn.Conv2d(1, 1, 1),
),
nn.Conv2d(1, 1, 1),
).eval()
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=(torch.randn(1, 1, 1, 1),))
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
# assume success if no exceptions
results = get_matching_subgraph_pairs(mp, mq)
@skipIfNoFBGEMM
def test_simple_tensor_ops(self):
class M(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
z = x + y
return z
m = M().eval()
example_inputs = (torch.randn(1), torch.randn(1))
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
# assume success if no exceptions
results = get_matching_subgraph_pairs(mp, mq)
@skipIfNoFBGEMM
def test_matching_failure_node_count(self):
# verify that matching graphs with matching node types but
# different counts of matchable nodes fails
m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
m2 = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Conv2d(1, 1, 1)).eval()
example_inputs = (torch.randn(1, 1, 1, 1),)
mp1 = prepare_fx(m1, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
mp2 = prepare_fx(m2, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
with self.assertRaises(GraphMatchingException) as ex:
results = get_matching_subgraph_pairs(mp1, mp2)
@skipIfNoFBGEMM
def test_matching_failure_node_type(self):
# verify that matching graphs with non-matching node types fails
m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
m2 = nn.Sequential(nn.Linear(1, 1)).eval()
example_inputs = (torch.randn(1, 1, 1, 1),)
mp1 = prepare_fx(m1, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
example_inputs = (torch.randn(1, 1),)
mp2 = prepare_fx(m2, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
with self.assertRaises(GraphMatchingException) as ex:
results = get_matching_subgraph_pairs(mp1, mp2)
@skipIfNoFBGEMM
def test_nodes_before_cat(self):
# verify that nodes before cat get matched
class M(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x0):
x1 = torch.add(x0, 1.0)
y1 = torch.add(x0, 1.0)
x2 = torch.cat([x1, y1])
return x2
m = M().eval()
example_inputs = (torch.randn(1),)
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
cat_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.cat) + '_0'
add_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.add) + '_0'
add_name_1 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.add) + '_1'
expected_types = {
cat_name_0: ((torch.cat, torch.cat), (torch.cat, torch.cat)),
add_name_0: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
add_name_1: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
@skipIfNoFBGEMM
def test_dict_return_type(self):
# verify that we can traverse up nodes which return dictionaries
class M(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x0):
x1 = torch.add(x0, 1.0)
y1 = torch.add(x0, 1.0)
z1 = torch.add(x0, 1.0)
a1 = {'x1': x1, 'y1': (y1,), 'z1': [{'key': (z1,)}]}
return a1
m = M().eval()
example_inputs = (torch.randn(1),)
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
add_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.add) + '_0'
add_name_1 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.add) + '_1'
add_name_2 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.add) + '_2'
expected_types = {
add_name_0: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
add_name_1: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
add_name_2: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
@skipIfNoFBGEMM
def test_nodes_with_equal_types_get_matched(self):
class M(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 1, 1)
self.conv2 = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = torch.mul(x, x)
x = torch.sigmoid(x)
x = F.relu(x)
return x
m = M().eval()
# prevent conv2 from getting quantized, so we can test
# modules with equal types
qconfig_mapping = torch.ao.quantization.get_default_qconfig_mapping().set_module_name("conv2", None)
example_inputs = (torch.randn(1, 1, 1, 1),)
mp = prepare_fx(m, qconfig_mapping, example_inputs=example_inputs)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
conv_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, nn.Conv2d) + '_0'
conv_name_1 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, nn.Conv2d) + '_1'
mul_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.mul) + '_0'
relu_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.relu) + '_0'
sigmoid_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.sigmoid) + '_0'
# all of these should be matched
expected_types = {
conv_name_1:
((nn.Conv2d, torch.ao.quantization.HistogramObserver), (nnq.Conv2d, nnq.Conv2d)),
conv_name_0:
((nn.Conv2d, torch.ao.quantization.HistogramObserver), (nn.Conv2d, nn.Conv2d)),
mul_name_0: ((torch.mul, torch.ao.quantization.HistogramObserver), (toq.mul, toq.mul)),
relu_name_0: ((F.relu, torch.ao.quantization.FixedQParamsObserver), (F.relu, F.relu)),
sigmoid_name_0:
((torch.sigmoid, torch.ao.quantization.FixedQParamsObserver), (torch.sigmoid, torch.sigmoid)),
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
def test_methods(self):
"""
Verify that graph matching works on methods
"""
class M(nn.Module):
def forward(self, x):
x = x.sigmoid()
return x
m1 = M().eval()
m2 = M().eval()
qconfig_mapping = torch.ao.quantization.get_default_qconfig_mapping()
example_inputs = (torch.randn(1),)
m1p = prepare_fx(m1, qconfig_mapping, example_inputs=example_inputs)
m2p = prepare_fx(m2, qconfig_mapping, example_inputs=example_inputs)
results = get_matching_subgraph_pairs(m1p, m2p)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
sigmoid_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.sigmoid) + '_0'
expected_types = {
sigmoid_name_0:
(('sigmoid', torch.ao.quantization.FixedQParamsObserver), ('sigmoid', torch.ao.quantization.FixedQParamsObserver)),
}
self.assert_types_for_matched_subgraph_pairs(
results, expected_types, m1p, m2p)
def test_op_relationship_mapping(self):
"""
Tests that the mapping of op relationships is complete.
"""
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
type_a_related_to_b = \
get_type_a_related_to_b(base_name_to_sets_of_related_ops)
# 1. check static quant module mappings
static_quant_mod_mappings = get_default_static_quant_module_mappings()
for fp32_type, int8_type in static_quant_mod_mappings.items():
            # skip quants and dequants, for the purposes of Numeric Suite
types_to_skip = (
torch.ao.quantization.QuantStub,
torch.ao.quantization.DeQuantStub,
nnq.FloatFunctional,
# the ConvTranspose3d swap is not implemented in FX Graph
# mode quantization yet
nn.ConvTranspose3d,
# the GroupNorm swap is not implemented in FX Graph
# mode quantization yet
nn.GroupNorm,
# nnq.ReLU6 is no longer swapped, because nn.ReLU6 can
# take quantized inputs
nn.ReLU6,
)
if fp32_type in types_to_skip:
continue
# verify relatedness
in_type_a_related_to_b = \
(fp32_type, int8_type) in type_a_related_to_b
self.assertTrue(
in_type_a_related_to_b,
f"{fp32_type} and {int8_type} need a relationship mapping")
# 2. check static quant op mappings
static_quant_fun_mappings = get_default_float_to_quantized_operator_mappings()
for fp32_type, int8_type in static_quant_fun_mappings.items():
# verify relatedness
in_type_a_related_to_b = \
(fp32_type, int8_type) in type_a_related_to_b
self.assertTrue(
in_type_a_related_to_b,
f"{fp32_type} and {int8_type} need a relationship mapping")
# 3. check dynamic quant mappings
dynamic_quant_mappings = get_default_dynamic_quant_module_mappings()
for fp32_type, int8_type in dynamic_quant_mappings.items():
# TODO(future PR): enable correct weight extraction for these
# and remove from this list.
types_to_skip = (
nn.GRUCell,
nn.GRU,
nn.LSTMCell,
nn.RNNCell,
)
if fp32_type in types_to_skip:
continue
# verify relatedness
in_type_a_related_to_b = \
(fp32_type, int8_type) in type_a_related_to_b
self.assertTrue(
in_type_a_related_to_b,
f"{fp32_type} and {int8_type} need a relationship mapping")
# 4. go through the ops mapped to each QuantizeHandler type, and verify
# correctness.
def _op_in_base_sets_of_related_ops(op):
for name, ops in base_name_to_sets_of_related_ops.items():
if op in ops:
return True
return False
unmatchable_types_map = get_unmatchable_types_map()
FUNS_UNMATCHABLE = unmatchable_types_map['funs_unmatchable']
MODS_UNMATCHABLE = unmatchable_types_map['mods_unmatchable']
METHS_UNMATCHABLE = unmatchable_types_map['meths_unmatchable']
def _op_is_unmatchable(op):
return (
op in FUNS_UNMATCHABLE or
op in MODS_UNMATCHABLE or
op in METHS_UNMATCHABLE
)
default_quant_patterns = get_all_quant_patterns()
for pattern, qhandler_cls in default_quant_patterns.items():
base_op = None
if isinstance(pattern, tuple):
base_op = pattern[-1]
elif isinstance(pattern, str):
base_op = pattern
else:
base_op = pattern
qhandler_cls_all_ops_quantizeable = [
qp.CatQuantizeHandler,
qp.ConvReluQuantizeHandler,
qp.LinearReLUQuantizeHandler,
qp.BatchNormQuantizeHandler,
qp.EmbeddingQuantizeHandler,
qp.RNNDynamicQuantizeHandler,
]
qhandler_cls_quant_op_same_signature = [
qp.FixedQParamsOpQuantizeHandler,
qp.CopyNodeQuantizeHandler,
qp.GeneralTensorShapeOpQuantizeHandler,
]
if qhandler_cls == qp.BinaryOpQuantizeHandler:
# these ops do not have quantized equivalents
ops_to_skip = [
torch.bmm,
torch.div,
torch.sub,
operator.truediv,
operator.sub
]
if base_op in ops_to_skip:
continue
self.assertTrue(
_op_in_base_sets_of_related_ops(base_op),
f"{base_op} not in sets of related ops")
elif qhandler_cls == qp.RNNDynamicQuantizeHandler:
# TODO(future PR): add support for all classes in
# RNNDynamicQuantizeHandler
pass
elif qhandler_cls == qp.DefaultNodeQuantizeHandler:
self.assertTrue(
_op_in_base_sets_of_related_ops(base_op),
f"{base_op} not in sets of related ops")
elif qhandler_cls in qhandler_cls_quant_op_same_signature:
# these ops use the same op signature for fp32 and quantized
# tensors
self.assertTrue(
_op_in_base_sets_of_related_ops(base_op) or
_op_is_unmatchable(base_op),
f"{base_op} not in sets of related ops or unmatchable")
elif qhandler_cls in qhandler_cls_all_ops_quantizeable:
self.assertTrue(
_op_in_base_sets_of_related_ops(base_op),
f"{base_op} not in sets of related ops")
else:
                # skip torch.sum, which does not have a quantized equivalent,
                # and the dynamic RNN module types
if base_op in [
torch.sum,
nn.GRUCell,
nn.GRU,
nn.LSTMCell,
nn.RNNCell,
]:
continue
if isinstance(base_op, tuple):
# skip fusion patterns
continue
                # no explicit QuantizeHandler case matched above, so check directly
                # whether the operator is in a related op set or is unmatchable
if not (_op_in_base_sets_of_related_ops(base_op) or _op_is_unmatchable(base_op)):
raise AssertionError(
f"handling for {qhandler_cls} for op {base_op} not implemented")
@skipIfNoFBGEMM
def test_user_defined_function(self):
"""
Verify that graph matching works on user defined functions
"""
class M1(nn.Module):
def forward(self, x):
x = F.hardswish(x)
return x
class M2(nn.Module):
def forward(self, x):
x = _wrapped_hardswish(x)
return x
qconfig_mapping = torch.ao.quantization.get_default_qconfig_mapping()
example_inputs = (torch.randn(1, 1, 1, 1),)
m1 = prepare_fx(M1().eval(), qconfig_mapping, example_inputs=example_inputs)
m2 = prepare_fx(M2().eval(), qconfig_mapping, example_inputs=example_inputs)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
add_op_to_sets_of_related_ops(
base_name_to_sets_of_related_ops, _wrapped_hardswish, F.hardswish)
results = get_matching_subgraph_pairs(
m1, m2,
base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops)
hardswish_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, F.hardswish) + '_0'
expected_types = {
hardswish_name_0:
((F.hardswish, torch.ao.quantization.HistogramObserver), (_wrapped_hardswish, _wrapped_hardswish)),
}
self.assert_types_for_matched_subgraph_pairs(
results, expected_types, m1, m2)
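    # The test below checks that matched subgraph pairs are returned in
    # execution order, keyed by the name of each subgraph's start node.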
@skipIfNoFBGEMM
def test_results_order(self):
m = nn.Sequential(
nn.Conv2d(1, 1, 1),
nn.Linear(1, 1),
).eval()
example_inputs = (torch.randn(1, 1, 1, 1),)
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
self.assertTrue(len(results) == 2)
results_iter = iter(results.items())
_, (subgraph_a_0, subgraph_b_0) = next(results_iter)
self.assertTrue(subgraph_a_0.start_node.name == '_0' and
subgraph_b_0.start_node.name == '_0')
_, (subgraph_a_1, subgraph_b_1) = next(results_iter)
self.assertTrue(subgraph_a_1.start_node.name == '_1' and
subgraph_b_1.start_node.name == '_1')
class TestFXGraphMatcherModels(QuantizationTestCase):
@skipIfNoFBGEMM
@skip_if_no_torchvision
def test_mobilenet_v2(self):
# verify that mobilenetv2 graph is able to be matched
import torchvision
m = torchvision.models.__dict__['mobilenet_v2'](pretrained=False).eval().float()
example_inputs = (torch.randn(1, 3, 224, 224),)
mp = prepare_fx(copy.deepcopy(m), {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
# assume success if no exceptions
results_m_mp = get_matching_subgraph_pairs(torch.fx.symbolic_trace(m), mp)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
# assume success if no exceptions
results_mp_mq = get_matching_subgraph_pairs(mp, mq)
@skipIfNoFBGEMM
@skip_if_no_torchvision
def test_mobilenet_v2_qat(self):
# verify that mobilenetv2 graph is able to be matched
import torchvision
m = torchvision.models.__dict__['mobilenet_v2'](pretrained=False).float()
example_inputs = (torch.randn(1, 3, 224, 224),)
mp = prepare_qat_fx(
copy.deepcopy(m),
{'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')},
example_inputs=example_inputs)
# assume success if no exceptions
results_m_mp = get_matching_subgraph_pairs(torch.fx.symbolic_trace(m), mp)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
# assume success if no exceptions
results_mp_mq = get_matching_subgraph_pairs(mp, mq)
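# Shared helpers for the Numeric Suite core API tests below: weight extraction,
# unshadowed activation matching, and shadowed activation matching. Each helper
# prepares and converts a copy of the model, runs the corresponding NS API,
# validates the result dict, and extends it with sqnr / l2_error /
# cosine_similarity comparisons.
#
# A minimal sketch of the unshadowed-activation workflow exercised here, using
# the same names as in this file:
#
#   mp = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
#   mq = convert_fx(copy.deepcopy(mp))
#   mp_ns, mq_ns = add_loggers('a', mp, 'b', mq, OutputLogger)
#   mp_ns(*example_inputs)
#   mq_ns(*example_inputs)
#   results = extract_logger_info(mp_ns, mq_ns, OutputLogger, 'b')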
class FXNumericSuiteQuantizationTestCase(QuantizationTestCase):
def _test_extract_weights(
self, m, example_inputs, results_len=0, qconfig_dict=None, prepare_fn=prepare_fx
):
m = torch.fx.symbolic_trace(m)
if qconfig_dict is None:
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = prepare_fn(copy.deepcopy(m), qconfig_dict, example_inputs=example_inputs)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
# test both the public API as well as the internal GraphModule API
for extract_weights_fun in (extract_weights, _extract_weights_impl):
# test both m vs mp and mp vs mq
for m1, m2 in ((m, mp), (mp, mq)):
results = extract_weights_fun('a', m1, 'b', m2)
self.assertTrue(
len(results) == results_len,
f"expected len {results_len}, got len {len(results)}")
self.assert_ns_compare_dict_valid(results)
extend_logger_results_with_comparison(
results, 'a', 'b', compute_sqnr, 'sqnr')
extend_logger_results_with_comparison(
results, 'a', 'b', compute_normalized_l2_error, 'l2_error')
extend_logger_results_with_comparison(
results, 'a', 'b', compute_cosine_similarity,
'cosine_similarity')
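    # The helper below adds OutputLogger modules to (fp32, prepared) and
    # (prepared, quantized) model pairs, optionally scripts them, calibrates
    # with the provided data, and checks the extracted logger results.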
def _test_match_activations(
self, m, data, prepared_expected_node_occurrence=None, results_len=0,
should_log_inputs=False,
qconfig_dict=None,
skip_scripting=False,
prepare_fn=prepare_fx,
):
if qconfig_dict is None:
qconfig_dict = torch.ao.quantization.get_default_qconfig_mapping()
if prepare_fn == prepare_fx:
m.eval()
else:
m.train()
mp = prepare_fn(copy.deepcopy(m), qconfig_dict, example_inputs=data)
mp(*data)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
m_ns, mp_ns2 = add_loggers(
'a', m, 'b', copy.deepcopy(mp), OutputLogger,
should_log_inputs=should_log_inputs)
mp_ns, mq_ns = add_loggers(
'a', mp, 'b', mq, OutputLogger,
should_log_inputs=should_log_inputs)
if prepared_expected_node_occurrence:
self.checkGraphModuleNodes(
m_ns, expected_node_occurrence=prepared_expected_node_occurrence)
self.checkGraphModuleNodes(
mp_ns2, expected_node_occurrence=prepared_expected_node_occurrence)
self.checkGraphModuleNodes(
mp_ns, expected_node_occurrence=prepared_expected_node_occurrence)
self.checkGraphModuleNodes(
mq_ns, expected_node_occurrence=prepared_expected_node_occurrence)
if not skip_scripting:
m_ns = torch.jit.script(m_ns)
mp_ns = torch.jit.script(mp_ns)
mq_ns = torch.jit.script(mq_ns)
# calibrate
m_ns(*data)
mp_ns2(*data)
mp_ns(*data)
mq_ns(*data)
# check activation result correctness
results = []
for m1, m2 in ((m_ns, mp_ns2), (mp_ns, mq_ns)):
act_compare_dict = extract_logger_info(
m1, m2, OutputLogger, 'b')
self.assertTrue(
len(act_compare_dict) == results_len,
f"expected len {results_len}, got len {len(act_compare_dict)}")
self.assert_ns_compare_dict_valid(act_compare_dict)
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_sqnr, 'sqnr')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_normalized_l2_error, 'l2_error')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_cosine_similarity,
'cosine_similarity')
results.append(act_compare_dict)
return results
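    # The helper below is the shadow variant: add_shadow_loggers combines the
    # two models into a single model so corresponding subgraphs see the same
    # inputs, and results are read back with extract_shadow_logger_info.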
def _test_match_shadow_activations(
self, m, data, prepared_expected_node_occurrence=None, results_len=None,
should_log_inputs=False, qconfig_dict=None, skip_scripting=False,
prepare_fn=prepare_fx, compare_fp32_vs_fp32_prepared=True,
):
if qconfig_dict is None:
qconfig_dict = torch.ao.quantization.get_default_qconfig_mapping()
if prepare_fn == prepare_fx:
m.eval()
else:
m.train()
mp = prepare_fn(copy.deepcopy(m), qconfig_dict, example_inputs=data)
mp(*data)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
if compare_fp32_vs_fp32_prepared:
m_shadows_mp = add_shadow_loggers(
'a', copy.deepcopy(m), 'b', copy.deepcopy(mp),
OutputLogger, should_log_inputs=should_log_inputs)
mp_shadows_mq = add_shadow_loggers(
'a', mp, 'b', mq, OutputLogger,
should_log_inputs=should_log_inputs)
if prepared_expected_node_occurrence:
if compare_fp32_vs_fp32_prepared:
self.checkGraphModuleNodes(
m_shadows_mp, expected_node_occurrence=prepared_expected_node_occurrence)
self.checkGraphModuleNodes(
mp_shadows_mq, expected_node_occurrence=prepared_expected_node_occurrence)
if not skip_scripting:
if compare_fp32_vs_fp32_prepared:
m_shadows_mp = torch.jit.script(m_shadows_mp)
mp_shadows_mq = torch.jit.script(mp_shadows_mq)
# calibrate
if compare_fp32_vs_fp32_prepared:
m_shadows_mp(*data)
mp_shadows_mq(*data)
# check activation result correctness
results = []
models = (m_shadows_mp, mp_shadows_mq) if \
compare_fp32_vs_fp32_prepared else (mp_shadows_mq,)
for model in models:
act_compare_dict = extract_shadow_logger_info(
model, OutputLogger, 'b')
if results_len is not None:
self.assertTrue(
len(act_compare_dict) == results_len,
f"expected len {results_len}, got len {len(act_compare_dict)}")
self.assert_ns_compare_dict_valid(act_compare_dict)
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_sqnr, 'sqnr')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_normalized_l2_error, 'l2_error')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_cosine_similarity,
'cosine_similarity')
results.append(act_compare_dict)
return results
class TestFXNumericSuiteCoreAPIs(FXNumericSuiteQuantizationTestCase):
@skipIfNoFBGEMM
def test_extract_weights_mod_ptq(self):
m = AllConvAndLinearFusionModules().eval()
example_inputs = (torch.randn(1, 1, 1, 1),)
self._test_extract_weights(m, example_inputs, results_len=14)
@skipIfNoFBGEMM
def test_extract_weights_mod_qat(self):
m = AllConvAndLinearFusionModules().train()
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
example_inputs = (torch.randn(1, 1, 1, 1),)
self._test_extract_weights(
m, example_inputs, results_len=14, qconfig_dict=qconfig_dict, prepare_fn=prepare_qat_fx)
@skipIfNoFBGEMM
def test_extract_weights_linear_fun_ptq(self):
m = LinearReluLinearFunctional().eval()
example_inputs = (torch.randn(1, 4),)
self._test_extract_weights(m, example_inputs, results_len=2)
@skipIfNoFBGEMM
def test_extract_weights_linear_fun_qat(self):
m = LinearReluLinearFunctional().train()
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
example_inputs = (torch.randn(1, 4),)
self._test_extract_weights(
m, example_inputs, results_len=2, qconfig_dict=qconfig_dict, prepare_fn=prepare_qat_fx)
@skipIfNoFBGEMM
def test_extract_weights_conv_fun_ptq(self):
w1d = torch.randn(1, 1, 1)
w2d = torch.randn(1, 1, 1, 1)
w3d = torch.randn(1, 1, 1, 1, 1)
b1d = torch.randn(1)
b2d = torch.randn(1)
b3d = torch.randn(1)
m = AllConvFunctional(w1d, w2d, w3d, b1d, b2d, b3d).eval()
example_inputs = (torch.randn(1, 1, 1, 1),)
self._test_extract_weights(m, example_inputs, results_len=6)
@skipIfNoFBGEMM
def test_extract_weights_conv_fun_qat(self):
w1d = torch.randn(1, 1, 1)
w2d = torch.randn(1, 1, 1, 1)
w3d = torch.randn(1, 1, 1, 1, 1)
b1d = torch.randn(1)
b2d = torch.randn(1)
b3d = torch.randn(1)
m = AllConvFunctional(w1d, w2d, w3d, b1d, b2d, b3d).train()
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
example_inputs = (torch.randn(1, 1, 1, 1),)
self._test_extract_weights(
m, example_inputs, results_len=6, qconfig_dict=qconfig_dict, prepare_fn=prepare_qat_fx)
@skipIfNoFBGEMM
def test_extract_weights_dynamic(self):
# TODO(future PR): add Linear-ReLU, after #55393 is fixed.
m = nn.Sequential(nn.Linear(1, 1)).eval()
qconfig_dict = {
'object_type': [
(nn.Linear, default_dynamic_qconfig),
],
}
example_inputs = (torch.randn(1, 1),)
self._test_extract_weights(m, example_inputs, results_len=1, qconfig_dict=qconfig_dict)
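    # The test below checks that the fully qualified names recorded in the
    # weight extraction results ('0.0' for the nested conv, '1' for the outer
    # conv) match between the two models.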
@skipIfNoFBGEMM
def test_extract_weights_fqn(self):
m = nn.Sequential(
nn.Sequential(nn.Conv2d(1, 1, 1)),
nn.Conv2d(1, 1, 1),
).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
example_inputs = (torch.randn(1, 1, 1, 1),)
mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
mq = convert_fx(copy.deepcopy(mp))
results = extract_weights('a', mp, 'b', mq)
fqn_a_0 = results['_0_0']['weight']['a'][0]['fqn']
fqn_b_0 = results['_0_0']['weight']['b'][0]['fqn']
self.assertTrue(fqn_a_0 == '0.0' and fqn_a_0 == fqn_b_0)
fqn_a_1 = results['_1']['weight']['a'][0]['fqn']
fqn_b_1 = results['_1']['weight']['b'][0]['fqn']
self.assertTrue(fqn_a_1 == '1' and fqn_a_1 == fqn_b_1)
def _test_match_activations_mod_impl(self, prepare_fn=prepare_fx):
m = nn.Sequential(
torch.ao.quantization.QuantStub(),
nn.Conv2d(1, 1, 1),
nn.Conv2d(1, 1, 1),
).eval()
qconfig_dict = None
if prepare_fn == prepare_qat_fx:
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
expected_occurrence = {
ns.call_module(OutputLogger): 2,
}
self._test_match_activations(
m, (torch.randn(2, 1, 2, 2),),
prepared_expected_node_occurrence=expected_occurrence,
results_len=2, qconfig_dict=qconfig_dict, prepare_fn=prepare_fn)
@skipIfNoFBGEMM
def test_match_activations_mod_ptq(self):
self._test_match_activations_mod_impl(prepare_fn=prepare_fx)
@skipIfNoFBGEMM
def test_match_activations_mod_qat(self):
self._test_match_activations_mod_impl(prepare_fn=prepare_qat_fx)
def _test_match_activations_fun_impl(self, prepare_fn=prepare_fx):
m = LinearReluLinearFunctional().eval()
qconfig_dict = None
if prepare_fn == prepare_qat_fx:
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
expected_occurrence = {
ns.call_module(OutputLogger): 2,
}
self._test_match_activations(
m, (torch.randn(4, 4),),
prepared_expected_node_occurrence=expected_occurrence,
results_len=2, prepare_fn=prepare_fn, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_match_activations_fun_ptq(self):
self._test_match_activations_fun_impl(prepare_fn=prepare_fx)
@skipIfNoFBGEMM
def test_match_activations_fun_qat(self):
self._test_match_activations_fun_impl(prepare_fn=prepare_qat_fx)
@skipIfNoFBGEMM
def test_match_activations_meth_ptq(self):
"""
Verify that add_loggers works on methods
"""
class M(nn.Module):
def forward(self, x):
x = x.sigmoid()
return x
m = M().eval()
res = self._test_match_activations(
m, (torch.randn(4, 4),),
results_len=1)
@skipIfNoFBGEMM
def test_match_activations_fqn(self):
m = nn.Sequential(
nn.Sequential(nn.Conv2d(1, 1, 1)),
nn.Conv2d(1, 1, 1),
).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
example_inputs = (torch.randn(1, 1, 1, 1),)
mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
mq = convert_fx(copy.deepcopy(mp))
mp_ns, mq_ns = add_loggers('a', mp, 'b', mq, OutputLogger)
datum = torch.randn(1, 1, 1, 1)
mp_ns(datum)
mq_ns(datum)
results = extract_logger_info(mp_ns, mq_ns, OutputLogger, 'b')
fqn_a_0 = results['_0_0']['node_output']['a'][0]['fqn']
fqn_b_0 = results['_0_0']['node_output']['b'][0]['fqn']
self.assertTrue(fqn_a_0 == '0.0' and fqn_a_0 == fqn_b_0)
fqn_a_1 = results['_1']['node_output']['a'][0]['fqn']
fqn_b_1 = results['_1']['node_output']['b'][0]['fqn']
self.assertTrue(fqn_a_1 == '1' and fqn_a_1 == fqn_b_1)
def _test_add_shadow_loggers_mod_impl(self, prepare_fn=prepare_fx):
m = nn.Sequential(
nn.Conv2d(1, 1, 1),
nn.Conv2d(1, 1, 1),
).eval()
qconfig_dict = None
if prepare_fn == prepare_qat_fx:
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
res = self._test_match_shadow_activations(
m, (torch.randn(1, 1, 4, 4),), results_len=2,
prepare_fn=prepare_fn, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_add_shadow_loggers_mod_ptq(self):
self._test_add_shadow_loggers_mod_impl(prepare_fn=prepare_fx)
@skipIfNoFBGEMM
def test_add_shadow_loggers_mod_qat(self):
self._test_add_shadow_loggers_mod_impl(prepare_fn=prepare_qat_fx)
def _test_add_shadow_loggers_fun_impl(self, prepare_fn=prepare_fx):
m = LinearReluLinearFunctional()
qconfig_dict = None
if prepare_fn == prepare_qat_fx:
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
res = self._test_match_shadow_activations(
m, (torch.randn(4, 4),), results_len=2, prepare_fn=prepare_fn,
qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_add_shadow_loggers_fun_ptq(self):
self._test_add_shadow_loggers_fun_impl(prepare_fn=prepare_fx)
@skipIfNoFBGEMM
def test_add_shadow_loggers_fun_qat(self):
self._test_add_shadow_loggers_fun_impl(prepare_fn=prepare_qat_fx)
@skipIfNoFBGEMM
def test_add_shadow_loggers_meth_ptq(self):
"""
        Verify that add_shadow_loggers works on methods
"""
class M(nn.Module):
def forward(self, x):
x = x.sigmoid()
return x
m = M().eval()
res = self._test_match_shadow_activations(
m, (torch.randn(4, 4),),
# For now, sigmoid is not supported for shadowing because the dtype
# inference for it is not implemented yet. So, this is just testing
# that shadowing models with method calls does not crash.
results_len=0)
@skipIfNoFBGEMM
def test_shadow_activations_fqn(self):
m = nn.Sequential(
nn.Sequential(nn.Conv2d(1, 1, 1)),
nn.Conv2d(1, 1, 1),
).eval()
qconfig_mapping = torch.ao.quantization.get_default_qconfig_mapping()
example_inputs = (torch.randn(1, 1, 1, 1),)
mp = prepare_fx(m, qconfig_mapping, example_inputs=example_inputs)
mq = convert_fx(copy.deepcopy(mp))
mp_shadows_mq = add_shadow_loggers('a', mp, 'b', mq, OutputLogger)
datum = torch.randn(1, 1, 1, 1)
mp_shadows_mq(datum)
results = extract_shadow_logger_info(mp_shadows_mq, OutputLogger, 'b')
fqn_a_0 = results['_0_0']['node_output']['a'][0]['fqn']
fqn_b_0 = results['_0_0']['node_output']['b'][0]['fqn']
self.assertTrue(fqn_a_0 == '0.0' and fqn_a_0 == fqn_b_0)
fqn_a_1 = results['_1']['node_output']['a'][0]['fqn']
fqn_b_1 = results['_1']['node_output']['b'][0]['fqn']
self.assertTrue(fqn_a_1 == '1' and fqn_a_1 == fqn_b_1)
@skipIfNoFBGEMM
def test_logging_inputs(self):
"""
Verifies that logging inputs works correctly
"""
class M(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv(x)
x = torch.cat([x, x], dim=0)
return x
m = M().eval()
self._test_match_shadow_activations(
m, (torch.randn(1, 1, 4, 4),),
results_len=1,
should_log_inputs=True)
@skipIfNoFBGEMM
def test_ops_with_same_fp32_and_int8_signature(self):
"""
Verifies that we can match pairs of ops which have the same aten
signature for fp32 and int8 tensors.
"""
class M(nn.Module):
def __init__(self):
super().__init__()
self.max_pool_2d = nn.MaxPool2d(2)
def forward(self, x):
x = self.max_pool_2d(x)
x = F.relu(x)
return x
m = M().eval()
self._test_match_activations(
m, (torch.randn(1, 1, 2, 2),),
results_len=2)
@skipIfNoFBGEMM
def test_add_mul_inputs_activations(self):
m = AddMulFunctional().eval()
res = self._test_match_activations(
m, (torch.randn(2, 2), torch.randn(2, 2)),
results_len=6, should_log_inputs=True)
@skipIfNoFBGEMM
def test_linear_fp16_weights(self):
qconfig_dict = {'': torch.ao.quantization.float16_static_qconfig}
m = LinearReluFunctional().eval()
example_inputs = (torch.randn(1, 4),)
self._test_extract_weights(m, example_inputs, results_len=1, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_linear_fp16_activations(self):
for should_log_inputs in (True, False):
qconfig_dict = {'': torch.ao.quantization.float16_static_qconfig}
m = LinearReluFunctional().eval()
num_loggers = 2 if should_log_inputs else 1
expected_occurrence = {
ns.call_module(OutputLogger): num_loggers,
}
res = self._test_match_activations(
m, (torch.randn(4, 4),),
prepared_expected_node_occurrence=expected_occurrence,
results_len=1,
qconfig_dict=qconfig_dict,
should_log_inputs=should_log_inputs)
@skipIfNoFBGEMM
def test_linear_fp16_shadow_activations(self):
for should_log_inputs in (True, False):
qconfig_dict = {'': torch.ao.quantization.float16_static_qconfig}
m = LinearReluFunctional().eval()
num_loggers = 4 if should_log_inputs else 2
expected_occurrence = {
ns.call_module(OutputLogger): num_loggers,
}
res2 = self._test_match_shadow_activations(
m, (torch.randn(4, 4),),
prepared_expected_node_occurrence=expected_occurrence,
results_len=1,
qconfig_dict=qconfig_dict,
should_log_inputs=should_log_inputs)
@skipIfNoFBGEMM
def test_linear_fp16_vs_linear_fp16_shadow_activations(self):
m = LinearFunctional().eval()
qconfig_dict = {'': torch.ao.quantization.float16_static_qconfig}
example_inputs = (torch.randn(1, 4),)
mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
mq1 = convert_fx(copy.deepcopy(mp))
mq2 = convert_fx(copy.deepcopy(mp))
mq1_shadows_mq2 = _add_shadow_loggers_impl(
'a', mq1, 'b', mq2, OutputLogger, should_log_inputs=False)
mq1_shadows_mq2(torch.randn(4, 4))
act_compare_dict = extract_shadow_logger_info(
mq1_shadows_mq2, OutputLogger, 'b')
self.assertTrue(len(act_compare_dict) == 1)
self.assert_ns_compare_dict_valid(act_compare_dict)
@skipIfNoFBGEMM
def test_op_with_either_fp32_or_int8_input(self):
"""
Verify that shadowing works with ops which accept either fp32 or
int8 inputs.
"""
class M(nn.Module):
def __init__(self):
super().__init__()
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(x)
x = F.relu(x)
return x
m = M()
res = self._test_match_shadow_activations(
m, (torch.randn(4, 4),),
# Note: shadowing relu by itself is currently not supported,
# this test is just testing that it does not crash
results_len=0)
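    # The helper below converts the same prepared model twice and shadows one
    # int8 copy with the other, verifying that int8-vs-int8 shadowing produces
    # a single valid comparison entry.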
def _test_int8_shadows_int8_impl(self, m):
"""
Verify that shadowing works where both modules are int8
"""
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
example_inputs = (torch.randn(4, 1, 4, 4),)
mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
mp(*example_inputs)
mq1 = convert_fx(copy.deepcopy(mp))
mq2 = convert_fx(mp)
mq1_shadows_mq2 = add_shadow_loggers('a', mq1, 'b', mq2, OutputLogger)
mq1_shadows_mq2(torch.randn(4, 1, 4, 4))
act_compare_dict = extract_shadow_logger_info(
mq1_shadows_mq2, OutputLogger, 'b')
self.assertTrue(len(act_compare_dict) == 1)
self.assert_ns_compare_dict_valid(act_compare_dict)
@skipIfNoFBGEMM
def test_int8_shadows_int8_mod(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
self._test_int8_shadows_int8_impl(m)
@skipIfNoFBGEMM
def test_int8_shadows_int8_fun(self):
m = LinearFunctional().eval()
self._test_int8_shadows_int8_impl(m)
@skipIfNoFBGEMM
def test_user_module_scriptable(self):
        # Logging of the output of this class is not supported, because it is
        # neither a tensor nor an RNN return type.
class M1(nn.Module):
def forward(self, x):
x1 = x * 2
x2 = x * 4
return (x1, x2)
class M2(nn.Module):
def __init__(self):
super().__init__()
self.m1 = M1()
def forward(self, x):
x1, x2 = self.m1(x)
return x1, x2
m = M2().eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
prepare_custom_config_dict = {
'non_traceable_module_class': [M1],
}
example_inputs = (torch.randn(1),)
mp1 = prepare_fx(
m,
qconfig_dict,
example_inputs=example_inputs,
prepare_custom_config=prepare_custom_config_dict)
mp2 = copy.deepcopy(mp1)
unmatchable_types_map = get_unmatchable_types_map()
unmatchable_types_map['mods_unmatchable'].add(M1)
mp1_ns, mp2_ns = _add_loggers_impl(
'a', mp1, 'b', mp2, OutputLogger, should_log_inputs=False,
unmatchable_types_map=unmatchable_types_map)
# Scripting a model with loggers should succeed. If it fails because of
# incorrect dtypes, we can blocklist the associated types from being instrumented.
mp1_ns_scripted = torch.jit.script(mp1_ns)
mp2_ns_scripted = torch.jit.script(mp2_ns)
@skipIfNoFBGEMM
def test_user_module(self):
"""
For user defined modules,
1. weight extraction should not crash
2. unshadowed activations should only have loggers for known types
3. shadowed activations should only have loggers for known types with
known dtypes
"""
class UserModule(nn.Module):
def forward(self, x):
return x
class M(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(1, 1)
self.user_module = UserModule()
def forward(self, x):
x = self.linear(x)
x = self.user_module(x)
return x
m = M().eval()
# quantize without tracing through UserModule
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
prepare_custom_config_dict = {'non_traceable_module_name': ['user_module']}
example_inputs = (torch.randn(1, 1, 1),)
mp = prepare_fx(
m,
qconfig_dict,
example_inputs=example_inputs,
prepare_custom_config=prepare_custom_config_dict)
mp(*example_inputs)
mq = convert_fx(copy.deepcopy(mp))
# weight extraction should not crash
weights = _extract_weights_impl('fp32_prepared', mp, 'int8', mq)
# unshadowed activations should have loggers
# add loggers, without retracing
# note: converting again because we cannot copy a quantized linear
mp_ns, mq_ns = _add_loggers_impl(
'fp32_prepared', copy.deepcopy(mp), 'int8',
convert_fx(copy.deepcopy(mp)), OutputLogger,
should_log_inputs=True)
# both fp32 and int8 models should have 2 loggers each, 2 for I/O
# of linear, and 0 for I/O of user_module
unshadowed_expected_occurrence = {
ns.call_module(OutputLogger): 2,
}
self.checkGraphModuleNodes(
mp_ns, expected_node_occurrence=unshadowed_expected_occurrence)
self.checkGraphModuleNodes(
mq_ns, expected_node_occurrence=unshadowed_expected_occurrence)
# shadowed activations should only have loggers for nodes where
# the types are known and we can do a dtype cast
# add shadow loggers, without retracing
mp_shadows_mq_ns = _add_shadow_loggers_impl(
'fp32_prepared', mp, 'int8', mq, OutputLogger,
should_log_inputs=True)
# 4 loggers for I/O of linear, 0 loggers for I/O of user_module
shadowed_expected_occurrence = {
ns.call_module(OutputLogger): 4,
}
self.checkGraphModuleNodes(
mp_shadows_mq_ns, expected_node_occurrence=shadowed_expected_occurrence)
def test_op_io_dtype_coverage(self):
"""
Tests that all the ops quantization cares about have input and output
dtypes defined.
"""
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
type_a_related_to_b = \
get_type_a_related_to_b(base_name_to_sets_of_related_ops)
# TODO(future PR): clean this up
node_type_to_io_type_map = get_node_type_to_io_type_map()
FUNS_IO_TYPE_FP32 = node_type_to_io_type_map['funs_io_type_fp32']
FUNS_IO_TYPE_INT8 = node_type_to_io_type_map['funs_io_type_int8']
FUNS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map['funs_io_type_fp32_or_int8']
MODS_IO_TYPE_FP32 = node_type_to_io_type_map['mods_io_type_fp32']
MODS_IO_TYPE_INT8 = node_type_to_io_type_map['mods_io_type_int8']
MODS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map['mods_io_type_fp32_or_int8']
METHS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map['meths_io_type_fp32_or_int8']
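        # the *_FP32_OR_INT8 sets hold ops whose signature is the same for
        # fp32 and quantized tensors, so they can accept either dtype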
unmatchable_types_map = get_unmatchable_types_map()
FUNS_UNMATCHABLE = unmatchable_types_map['funs_unmatchable']
MODS_UNMATCHABLE = unmatchable_types_map['mods_unmatchable']
METHS_UNMATCHABLE = unmatchable_types_map['meths_unmatchable']
# 1. check static quant module mappings
static_quant_mod_mappings = get_default_static_quant_module_mappings()
for fp32_type, int8_type in static_quant_mod_mappings.items():
types_to_skip = (
torch.ao.quantization.QuantStub,
torch.ao.quantization.DeQuantStub,
nnq.FloatFunctional,
# TODO(future PR): look into whether shadowing embeddings
# makes sense
nn.Embedding,
nn.EmbeddingBag,
# the ConvTranspose3d swap is not implemented in FX Graph
# mode quantization yet
nn.ConvTranspose3d,
# the GroupNorm swap is not implemented in FX Graph
# mode quantization yet
nn.GroupNorm,
# nnq.ReLU6 is no longer swapped, because nn.ReLU6 can
# take quantized inputs
nn.ReLU6,
)
if fp32_type in types_to_skip:
continue
            self.assertTrue(
                fp32_type in MODS_IO_TYPE_FP32,
                f"missing IO type handling for {fp32_type}")
            self.assertTrue(
                int8_type in MODS_IO_TYPE_INT8,
                f"missing IO type handling for {int8_type}")
# 2. check static quant op mappings
static_quant_fun_mappings = get_default_float_to_quantized_operator_mappings()
for fp32_type, int8_type in static_quant_fun_mappings.items():
            self.assertTrue(
                fp32_type in FUNS_IO_TYPE_FP32,
                f"missing IO type handling for {fp32_type}")
            self.assertTrue(
                int8_type in FUNS_IO_TYPE_INT8,
                f"missing IO type handling for {int8_type}")
# 3. check dynamic quant mappings
dynamic_quant_mappings = get_default_dynamic_quant_module_mappings()
for fp32_type1, fp32_type2 in dynamic_quant_mappings.items():
# TODO(future PR): verify correct I/O for these and remove from
# this list.
types_to_skip = (
nn.GRUCell,
nn.GRU,
nn.LSTMCell,
nn.RNNCell,
# TODO(future PR): look into whether shadowing embeddings
# makes sense
nn.Embedding,
nn.EmbeddingBag,
)
if fp32_type1 in types_to_skip:
continue
            self.assertTrue(
                fp32_type1 in MODS_IO_TYPE_FP32,
                f"missing IO type handling for {fp32_type1}")
            self.assertTrue(
                fp32_type2 in MODS_IO_TYPE_FP32,
                f"missing IO type handling for {fp32_type2}")
# 4. go through the ops mapped to each QuantizeHandler type, and verify
# correctness.
default_quant_patterns = get_all_quant_patterns()
for pattern, qhandler_cls in default_quant_patterns.items():
base_op = None
if isinstance(pattern, tuple):
base_op = pattern[-1]
elif isinstance(pattern, str):
base_op = pattern
else:
base_op = pattern
if (
qhandler_cls in (
qp.BinaryOpQuantizeHandler,
qp.RNNDynamicQuantizeHandler,
)
):
# TODO(future PR): implement shadowing for binary ops
# TODO(future PR): implement shadowing for RNN ops
continue
elif qhandler_cls == qp.CatQuantizeHandler:
self.assertTrue(
base_op in FUNS_IO_TYPE_FP32_OR_INT8,
f"missing IO type handling for {base_op}")
elif (
qhandler_cls in (
qp.ConvReluQuantizeHandler,
qp.LinearReLUQuantizeHandler,
qp.BatchNormQuantizeHandler,
qp.DefaultNodeQuantizeHandler,
)
):
self.assertTrue(
(base_op in FUNS_IO_TYPE_FP32) or (base_op in MODS_IO_TYPE_FP32),
f"missing IO type handling for {base_op}")
elif (
qhandler_cls in (
qp.FixedQParamsOpQuantizeHandler,
qp.CopyNodeQuantizeHandler,
qp.GeneralTensorShapeOpQuantizeHandler,
)
):
if (
base_op in FUNS_UNMATCHABLE or
base_op in MODS_UNMATCHABLE or
base_op in METHS_UNMATCHABLE
):
continue
self.assertTrue(
(base_op in FUNS_IO_TYPE_FP32_OR_INT8) or
(base_op in MODS_IO_TYPE_FP32_OR_INT8) or
(base_op in METHS_IO_TYPE_FP32_OR_INT8) or
# Softmax has a different signature for the quantized
# version, so it does not fit into the cases above.
(base_op is torch.nn.Softmax),
f"missing IO type handling for {base_op}")
elif qhandler_cls == qp.EmbeddingQuantizeHandler:
# embedding shadowing is not implemented, for now
continue
else:
if (
base_op in FUNS_UNMATCHABLE or
base_op in MODS_UNMATCHABLE or
base_op in METHS_UNMATCHABLE
):
continue
if qhandler_cls(None, {}).is_general_tensor_value_op():
self.assertTrue(
(base_op in FUNS_IO_TYPE_FP32_OR_INT8) or
(base_op in MODS_IO_TYPE_FP32_OR_INT8) or
(base_op in METHS_IO_TYPE_FP32_OR_INT8),
f"missing IO type handling for {base_op} using {qhandler_cls}")
else:
                    self.assertTrue(
                        (base_op in FUNS_IO_TYPE_FP32_OR_INT8) or
                        (base_op in MODS_IO_TYPE_FP32_OR_INT8) or
                        (base_op in METHS_IO_TYPE_FP32_OR_INT8) or
                        (base_op in FUNS_IO_TYPE_FP32) or
                        (base_op in MODS_IO_TYPE_FP32),
                        f"missing IO type handling for {base_op} using {qhandler_cls}")
@skipIfNoFBGEMM
def test_user_defined_function(self):
"""
Verify that NS APIs work on user defined functions
"""
class M1(nn.Module):
def __init__(self):
super().__init__()
self.w1 = nn.Parameter(torch.empty(1, 1))
self.b1 = nn.Parameter(torch.zeros(1))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = F.hardswish(x)
x = x.sigmoid()
x = F.linear(x, self.w1, self.b1)
return x
class M2(nn.Module):
def __init__(self):
super().__init__()
self.w1 = nn.Parameter(torch.empty(1, 1))
self.b1 = nn.Parameter(torch.zeros(1))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = _wrapped_hardswish(x)
x = _wrapped_sigmoid(x)
x = _wrapped_linear(x, self.w1, self.b1)
return x
qconfig_mapping = torch.ao.quantization.get_default_qconfig_mapping()
example_inputs = (torch.randn(1, 1),)
m1 = prepare_fx(M1().eval(), qconfig_mapping, example_inputs=example_inputs)
m2 = prepare_fx(M2().eval(), qconfig_mapping, example_inputs=example_inputs)
data = torch.randn(1, 1)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
add_op_to_sets_of_related_ops(
base_name_to_sets_of_related_ops, _wrapped_hardswish, F.hardswish)
add_op_to_sets_of_related_ops(
base_name_to_sets_of_related_ops, _wrapped_sigmoid, F.sigmoid)
add_op_to_sets_of_related_ops(
base_name_to_sets_of_related_ops, _wrapped_linear, F.linear)
op_to_type_to_weight_extraction_fn = \
get_op_to_type_to_weight_extraction_fn()
op_to_type_to_weight_extraction_fn['call_function'][_wrapped_linear] = \
torch.ao.ns.fx.weight_utils.get_linear_fun_weight
# test compare weights
results = extract_weights(
'a', m1, 'b', m2,
base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops,
op_to_type_to_weight_extraction_fn=op_to_type_to_weight_extraction_fn)
self.assertTrue(len(results) == 1)
self.assertTrue(len(results['_wrapped_linear']['weight']) == 2)
# test unshadowed activations
m1_ns, m2_ns = _add_loggers_impl(
'a', copy.deepcopy(m1), 'b', copy.deepcopy(m2), OutputLogger,
should_log_inputs=False,
base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops)
# calibrate
m1_ns(data)
m2_ns(data)
# check activation result correctness
act_compare_dict = extract_logger_info(m1_ns, m2_ns, OutputLogger, 'b')
self.assertTrue(len(act_compare_dict) == 3)
self.assert_ns_compare_dict_valid(act_compare_dict)
# test shadowed activations
node_type_to_io_type_map = get_node_type_to_io_type_map()
node_type_to_io_type_map['funs_io_type_fp32'].add(_wrapped_hardswish)
node_type_to_io_type_map['funs_io_type_fp32'].add(_wrapped_sigmoid)
m2_shadows_m1_ns = _add_shadow_loggers_impl(
'a', m2, 'b', m1, OutputLogger,
should_log_inputs=False,
base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops,
node_type_to_io_type_map=node_type_to_io_type_map)
# calibrate
m2_shadows_m1_ns(data)
# check activation result correctness
act_compare_dict = extract_shadow_logger_info(
m2_shadows_m1_ns, OutputLogger, 'b')
self.assertTrue(len(act_compare_dict) == 2)
self.assert_ns_compare_dict_valid(act_compare_dict)
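    # The test below verifies that every key in the extracted results
    # corresponds to a node name in the relevant (quantized or shadow) graph.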
@skipIfNoFBGEMM
def test_layer_names(self):
m = nn.Sequential(
nn.Conv2d(1, 1, 1),
nn.Conv2d(1, 1, 1),
nn.Sigmoid(),
).eval()
qconfig_mapping = torch.ao.quantization.get_default_qconfig_mapping("fbgemm")
example_inputs = (torch.randn(1, 1, 1, 1),)
mp = torch.ao.quantization.quantize_fx.prepare_fx(m, qconfig_mapping, example_inputs=example_inputs)
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
# extract weights
results = extract_weights('fp32', mp, 'int8', mq)
mq_node_names = [node.name for node in mq.graph.nodes]
for layer_name in results.keys():
self.assertTrue(layer_name in mq_node_names)
# match activations
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mp_ns, mq_ns = add_loggers(
'fp32', copy.deepcopy(mp), 'int8', mq, OutputLogger)
data = torch.randn(1, 1, 1, 1)
mp_ns(data)
mq_ns(data)
results = extract_logger_info(mp_ns, mq_ns, OutputLogger, 'int8')
mq_node_names = [node.name for node in mq_ns.graph.nodes]
for layer_name in results.keys():
self.assertTrue(layer_name in mq_node_names)
# match shadow activations
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mp_shadows_mq = add_shadow_loggers(
'fp32', mp, 'int8', mq, OutputLogger)
mp_shadows_mq(data)
results = extract_shadow_logger_info(
mp_shadows_mq, OutputLogger, 'int8')
mq_node_names = [node.name for node in mp_shadows_mq.graph.nodes]
for layer_name in results.keys():
self.assertTrue(layer_name in mq_node_names)
@skipIfNoFBGEMM
def test_extend_logger_results_with_comparison(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Conv2d(1, 1, 1)).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
example_inputs = (torch.randn(1, 1, 1, 1),)
mp = torch.ao.quantization.quantize_fx.prepare_fx(
m, qconfig_dict, example_inputs=example_inputs)
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
# extract weights
results = extract_weights('fp32', mp, 'int8', mq)
extend_logger_results_with_comparison(
results, 'fp32', 'int8', compute_sqnr, 'sqnr_int8_vs_fp32')
extend_logger_results_with_comparison(
results, 'fp32', 'int8', compute_normalized_l2_error, 'l2_error_int8_vs_fp32')
extend_logger_results_with_comparison(
results, 'fp32', 'int8', compute_cosine_similarity,
'cosine_similarity_int8_vs_fp32')
for layer_name, layer_results in results.items():
assert 'sqnr_int8_vs_fp32' in \
layer_results['weight']['int8'][0].keys()
assert 'l2_error_int8_vs_fp32' in \
layer_results['weight']['int8'][0].keys()
assert 'cosine_similarity_int8_vs_fp32' in \
layer_results['weight']['int8'][0].keys()
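    # The test below checks that the input scale and zero_point attributes
    # copied into the shadow model match the reference converted model, and
    # that the qparams feeding the second op equal the output qparams of the
    # first op.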
@skipIfNoFBGEMM
def test_int8_shadows_fp32_simple(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Conv2d(1, 1, 1), nn.ReLU()).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
example_inputs = (torch.randn(1, 1, 1, 1),)
mp = torch.ao.quantization.quantize_fx.prepare_fx(
m, qconfig_dict, example_inputs=example_inputs)
mp(torch.randn(1, 1, 1, 1))
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mq_ref = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mp_shadows_mq = add_shadow_loggers(
'int8', mq, 'fp32', mp, OutputLogger)
# verify that scale and zp were extracted correctly
# for the first op, the scale+zp live as attributes on the module
scale_0 = mp_shadows_mq._0_input_scale_0
scale_0_ref = getattr(mq_ref, '0_input_scale_0')
self.assertEqual(scale_0, scale_0_ref)
zp_0 = mp_shadows_mq._0_input_zero_point_0
zp_0_ref = getattr(mq_ref, '0_input_zero_point_0')
self.assertEqual(zp_0, zp_0_ref)
# for the second op, the scale and zp of input to second op
# must equal to scale and zp of output of first op
scale_1 = mp_shadows_mq._1_input_scale_0
scale_1_ref = getattr(mq_ref, '0').scale
self.assertEqual(scale_1, scale_1_ref)
zp_1 = mp_shadows_mq._1_input_zero_point_0
zp_1_ref = getattr(mq_ref, '0').zero_point
self.assertEqual(zp_1, zp_1_ref)
# verify running data works
mp_shadows_mq(torch.randn(1, 1, 1, 1))
act_compare_dict = extract_shadow_logger_info(
mp_shadows_mq, OutputLogger, 'fp32')
self.assertTrue(len(act_compare_dict) == 2)
self.assert_ns_compare_dict_valid(act_compare_dict)
@skipIfNoFBGEMM
def test_int8_shadows_fp32_coverage(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.adaptive_avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.adaptive_avg_pool(x)
# input qparams of conv will be input qparams of adaptive_avg_pool
x = self.conv(x)
x = torch.mul(x, x)
x = self.conv(x)
x = torch.add(x, x)
x = F.relu(x)
x = self.conv(x)
return x
m = M().eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
example_inputs = (torch.randn(1, 1, 1, 1),)
mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
mp(*example_inputs)
mq = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mq_ref = torch.ao.quantization.quantize_fx.convert_fx(copy.deepcopy(mp))
mp_shadows_mq = add_shadow_loggers(
'int8', mq, 'fp32', mp, OutputLogger)
mp_shadows_mq(torch.randn(1, 1, 1, 1))
act_compare_dict = extract_shadow_logger_info(
mp_shadows_mq, OutputLogger, 'fp32')
self.assertTrue(len(act_compare_dict) == 3)
self.assert_ns_compare_dict_valid(act_compare_dict)
@skipIfNoFBGEMM
def test_loggers_preserve_qat_numerics(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Conv2d(1, 1, 1))
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
example_inputs = (torch.randn(1, 1, 1, 1),)
mp = prepare_qat_fx(m, qconfig_dict, example_inputs=example_inputs)
mp(*example_inputs)
mc = convert_fx(copy.deepcopy(mp))
mp.apply(torch.ao.quantization.disable_observer)
ref_fp32 = mp(*example_inputs)
ref_int8 = mc(*example_inputs)
mp_ns, mc_ns = add_loggers('fp32', mp, 'int8', mc, OutputLogger)
ref_fp32_ns = mp_ns(*example_inputs)
ref_int8_ns = mc_ns(*example_inputs)
self.assertEqual(ref_fp32, ref_fp32_ns)
self.assertEqual(ref_int8, ref_int8_ns)
@skipIfNoFBGEMM
def test_shadow_loggers_preserve_qat_numerics(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Conv2d(1, 1, 1))
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
example_inputs = (torch.randn(1, 1, 1, 1),)
mp = prepare_qat_fx(m, qconfig_dict, example_inputs=example_inputs)
mp(*example_inputs)
mc = convert_fx(copy.deepcopy(mp))
mp.apply(torch.ao.quantization.disable_observer)
ref_fp32 = mp(*example_inputs)
ref_int8 = mc(*example_inputs)
mc_shadows_mp = add_shadow_loggers('int8', mc, 'fp32', mp, OutputLogger)
ref_shadow = mc_shadows_mp(*example_inputs)
self.assertEqual(ref_fp32, ref_shadow)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_extract_weights_cuda(self):
# Note: this is not using quantization because quantized kernels do not
# work on cuda yet.
m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
m2 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
results = extract_weights('a', m1, 'b', m2)
extend_logger_results_with_comparison(
results, 'a', 'b', compute_sqnr, 'sqnr')
self.assert_ns_compare_dict_valid(results)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_add_loggers_cuda(self):
# Note: this is not using quantization because quantized kernels do not
# work on cuda yet.
m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
m2 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
m1_ns, m2_ns = add_loggers('a', m1, 'b', m2, OutputLogger)
datum = torch.randn(1, 1, 1, 1)
datum = datum.cuda()
m1_ns(datum)
m2_ns(datum)
act_compare_dict = extract_logger_info(m1_ns, m2_ns, OutputLogger, 'b')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_sqnr, 'sqnr')
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_add_shadow_loggers_cuda(self):
# Note: this is not using quantization because quantized kernels do not
# work on cuda yet.
m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
m2 = nn.Sequential(nn.Conv2d(1, 1, 1)).cuda()
m1_shadows_m2 = add_shadow_loggers('a', m1, 'b', m2, OutputLogger)
datum = torch.randn(1, 1, 1, 1)
datum = datum.cuda()
m1_shadows_m2(datum)
act_compare_dict = extract_shadow_logger_info(m1_shadows_m2, OutputLogger, 'b')
extend_logger_results_with_comparison(
act_compare_dict, 'a', 'b', compute_sqnr, 'sqnr')
def test_fp16_shadows_fp32(self):
m = LinearReluFunctional().eval()
example_inputs = (torch.randn(1, 4),)
qconfig_dict = {"": torch.ao.quantization.float16_static_qconfig}
mp = prepare_fx(copy.deepcopy(m), qconfig_dict, example_inputs=example_inputs)
mq = convert_to_reference_fx(mp)
mq_shadows_m = add_shadow_loggers('a', mq, 'b', m, OutputLogger)
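    # test_fp16_shadows_fp32 above only checks that constructing the shadow
    # model does not raise. The tests below verify that currently unsupported
    # patterns (binary ops, kwargs-only calls, call_function copies) are
    # skipped gracefully with zero results instead of crashing.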
def test_mul_add_cat_stack_skips_shadowing(self):
class M(nn.Module):
def forward(self, x):
x = x * x
x = torch.mul(x, x)
x = x + x
x = torch.add(x, x)
x = torch.cat([x])
x = torch.stack([x])
return x
m = M().eval()
self._test_match_shadow_activations(
m, (torch.randn(1, 1, 4, 4),),
results_len=0)
def test_op_with_only_kwargs_skips_shadowing(self):
class M(nn.Module):
def forward(self, x):
x = torch.cat(tensors=[x])
x = torch.stack(tensors=[x])
return x
m = M().eval()
self._test_match_shadow_activations(
m, (torch.randn(1, 1, 4, 4),),
results_len=0)
def test_unsupported_op_copy_skips_shadowing(self):
"""
        Copying a `call_function` node is not implemented; test that this
        does not crash shadowing but instead skips the node.
"""
class M(nn.Module):
def forward(self, x):
# the second argument leads to attempting to copy a
# call_function node
x = F.layer_norm(x, x.shape[1:])
return x
m = M().eval()
self._test_match_shadow_activations(
m, (torch.randn(1, 1, 4, 4),),
results_len=0)
def test_linear_kwargs_shadow(self):
class M(nn.Module):
def __init__(self):
super().__init__()
self.w1 = nn.Parameter(torch.empty(4, 4))
self.b1 = nn.Parameter(torch.zeros(4))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = F.linear(input=x, weight=self.w1, bias=self.b1)
return x
# note: FX graph mode quantization does not have good support
# for kwargs-only right now, so we pass in two unquantized
# models
m = M().eval()
mt = torch.fx.symbolic_trace(m)
mt_copy = copy.deepcopy(mt)
mt_shadows_mt_copy = add_shadow_loggers(
'a', mt, 'b', mt_copy, OutputLogger)
mt_shadows_mt_copy(torch.randn(4, 4))
act_compare_dict = extract_shadow_logger_info(
mt_shadows_mt_copy, OutputLogger, 'b')
self.assertTrue(len(act_compare_dict) == 1)
class TestFXNumericSuiteCoreAPIsModels(FXNumericSuiteQuantizationTestCase):
"""
Tests numeric suite core APIs on non-toy models.
"""
@skipIfNoFBGEMM
def test_compare_weights_conv(self):
test_cases = (
(ConvModel(),),
(ConvBnModel(),),
(ConvBnReLUModel(),),
)
for m, in test_cases:
m.eval()
example_inputs = (torch.randn(1, 3, 5, 5),)
self._test_extract_weights(m, example_inputs, results_len=1)
@skipIfNoFBGEMM
def test_compare_weights_linear(self):
test_cases = (
(SingleLayerLinearModel(), None),
(
SingleLayerLinearDynamicModel(),
{"object_type": [(nn.Linear, default_dynamic_qconfig)]},
),
)
for m, qconfig_dict in test_cases:
m.eval()
example_inputs = (torch.randn(1, 3, 5, 5),)
res = self._test_extract_weights(
m, example_inputs, results_len=1, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_compare_weights_lstm_dynamic(self):
qconfig_dict = {"object_type": [(nn.LSTM, default_dynamic_qconfig)]}
lstm_input = torch.rand((1, 1, 2))
lstm_hidden = (torch.rand(1, 1, 2), torch.rand(1, 1, 2))
example_inputs = (lstm_input, lstm_hidden)
m = LSTMwithHiddenDynamicModel().eval()
res = self._test_extract_weights(
m, example_inputs, results_len=1, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_compare_activations_conv(self):
test_cases = (
(ConvModel(),),
(ConvBnModel(),),
(ConvBnReLUModel(),),
)
for m, in test_cases:
m.eval()
res = self._test_match_activations(
m, (torch.randn(1, 3, 4, 4),), results_len=1)
@skipIfNoFBGEMM
def test_compare_activations_linear(self):
test_cases = (
(SingleLayerLinearModel(), None),
(
SingleLayerLinearDynamicModel(),
{"object_type": [(nn.Linear, default_dynamic_qconfig)]},
),
)
for m, qconfig_dict in test_cases:
m.eval()
res = self._test_match_activations(
m, (torch.randn(5, 5),), results_len=1, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_compare_activations_lstm_dynamic(self):
qconfig_dict = {"object_type": [(nn.LSTM, default_dynamic_qconfig)]}
m = LSTMwithHiddenDynamicModel().eval()
lstm_input = torch.rand((1, 1, 2))
lstm_hidden = (torch.rand(1, 1, 2), torch.rand(1, 1, 2))
# TODO(future PR): enable scripting (quant prepared LSTM not scriptable)
res = self._test_match_activations(
m, (lstm_input, lstm_hidden), results_len=1, qconfig_dict=qconfig_dict,
skip_scripting=True)
@skipIfNoFBGEMM
def test_compare_shadow_activations_conv(self):
test_cases = (
(ConvModel(),),
(ConvBnModel(),),
(ConvBnReLUModel(),),
)
for m, in test_cases:
m.eval()
res = self._test_match_shadow_activations(
m, (torch.randn(1, 3, 4, 4),), results_len=1)
@skipIfNoFBGEMM
def test_compare_shadow_activations_linear(self):
test_cases = (
(SingleLayerLinearModel(), None),
(
SingleLayerLinearDynamicModel(),
{"object_type": [(nn.Linear, default_dynamic_qconfig)]},
),
)
for m, qconfig_dict in test_cases:
m.eval()
res = self._test_match_shadow_activations(
m, (torch.randn(5, 5),), results_len=1, qconfig_dict=qconfig_dict)
@skipIfNoFBGEMM
def test_compare_shadow_activations_lstm_dynamic(self):
qconfig_dict = {"object_type": [(nn.LSTM, default_dynamic_qconfig)]}
m = LSTMwithHiddenDynamicModel().eval()
lstm_input = torch.rand((1, 1, 2))
lstm_hidden = (torch.rand(1, 1, 2), torch.rand(1, 1, 2))
# TODO(future PR): enable scripting (quant prepared LSTM not scriptable)
res = self._test_match_shadow_activations(
m, (lstm_input, lstm_hidden), results_len=1, qconfig_dict=qconfig_dict,
skip_scripting=True)
@skipIfNoFBGEMM
def test_sparsenn_compare_activations(self):
for should_log_inputs in (True, False):
sparse_nn = SparseNNModel().eval()
idx = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
x = torch.randn(2, 4)
self._test_match_activations(
sparse_nn, (idx, offsets, x),
results_len=5,
should_log_inputs=should_log_inputs)
@skipIfNoFBGEMM
def test_sparsenn_shadow(self):
for should_log_inputs in (True, False):
sparse_nn = SparseNNModel().eval()
idx = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
x = torch.randn(2, 4)
self._test_match_shadow_activations(
sparse_nn, (idx, offsets, x),
results_len=3,
should_log_inputs=should_log_inputs)
@skip_if_no_torchvision
@skipIfNoFBGEMM
def test_resnet18(self):
import torchvision
m = torchvision.models.quantization.resnet18(pretrained=False, quantize=False).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
self._test_match_shadow_activations(
m, (torch.randn(1, 3, 224, 224),),
qconfig_dict=qconfig_dict,
should_log_inputs=False)
@skip_if_no_torchvision
@skipIfNoFBGEMM
def test_mobilenet_v2(self):
import torchvision
m = torchvision.models.quantization.mobilenet_v2(pretrained=False, quantize=False).eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
self._test_match_shadow_activations(
m, (torch.randn(1, 3, 224, 224),),
qconfig_dict=qconfig_dict,
should_log_inputs=False)
|
pytorch-master
|
test/quantization/fx/test_numeric_suite_fx.py
|
# Owner(s): ["oncall: quantization"]
from collections import OrderedDict
import os
import contextlib
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.quantized._reference as nnqr
import torch.nn.quantized.dynamic as nnqd
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.quantized.dynamic as nniqd
import torch.multiprocessing as mp
from torch.ao.quantization import is_activation_post_process
# graph mode quantization based on fx
from torch.ao.quantization.quantize_fx import (
prepare_fx,
convert_fx,
convert_to_reference_fx,
prepare_qat_fx,
fuse_fx,
)
from torch.ao.quantization.fx.quantization_patterns import DefaultNodeQuantizeHandler
from torch.ao.quantization.fx.common_quantization_patterns import CommonQuantizeHandler
from torch.ao.quantization.fx.match_utils import (
is_match,
MatchAllNode,
)
from torch.ao.quantization import (
QuantType,
quant_type_to_str,
)
from torch.ao.quantization import (
QuantStub,
DeQuantStub,
QuantWrapper,
default_qconfig,
default_dynamic_qconfig,
default_qat_qconfig,
default_reuse_input_qconfig,
per_channel_dynamic_qconfig,
float16_dynamic_qconfig,
float16_static_qconfig,
float_qparams_weight_only_qconfig,
float_qparams_weight_only_qconfig_4bit,
get_default_qconfig,
get_default_qat_qconfig,
get_default_qconfig_mapping,
get_default_qat_qconfig_mapping,
fuse_modules,
fuse_modules_qat,
prepare,
prepare_qat,
convert,
quantize_dynamic,
default_placeholder_observer,
default_weight_observer,
PerChannelMinMaxObserver,
FixedQParamsFakeQuantize,
FixedQParamsObserver,
FusedMovingAvgObsFakeQuantize,
FakeQuantize,
MovingAverageMinMaxObserver,
HistogramObserver,
QConfig,
default_embedding_qat_qconfig,
)
from torch.ao.quantization.backend_config import (
BackendConfig,
BackendPatternConfig,
)
from torch.ao.quantization.backend_config.native import (
get_test_only_legacy_native_backend_config,
)
from torch.ao.quantization.qconfig_mapping import (
GLOBAL_DICT_KEY,
MODULE_NAME_DICT_KEY,
MODULE_NAME_OBJECT_TYPE_ORDER_DICT_KEY,
MODULE_NAME_REGEX_DICT_KEY,
OBJECT_TYPE_DICT_KEY,
QConfigMapping,
)
from torch.ao.quantization.qconfig_mapping_utils import (
get_object_type_qconfig,
get_module_name_qconfig,
get_module_name_regex_qconfig,
)
from torch.ao.quantization.fx.pattern_utils import (
DEFAULT_FUSION_PATTERNS,
DEFAULT_QUANTIZATION_PATTERNS,
DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP,
DEFAULT_OUTPUT_OBSERVER_MAP,
register_fusion_pattern,
register_quant_pattern,
get_default_output_activation_post_process_map
)
from torch.ao.quantization.fx.custom_config import (
STANDALONE_MODULE_NAME_DICT_KEY,
STANDALONE_MODULE_CLASS_DICT_KEY,
FLOAT_TO_OBSERVED_DICT_KEY,
OBSERVED_TO_QUANTIZED_DICT_KEY,
NON_TRACEABLE_MODULE_NAME_DICT_KEY,
NON_TRACEABLE_MODULE_CLASS_DICT_KEY,
INPUT_QUANTIZED_INDEXES_DICT_KEY,
OUTPUT_QUANTIZED_INDEXES_DICT_KEY,
PRESERVED_ATTRIBUTES_DICT_KEY,
FuseCustomConfig,
ConvertCustomConfig,
PrepareCustomConfig,
StandaloneModuleConfigEntry,
)
from torch.ao.quantization.fx.qconfig_utils import (
maybe_adjust_qconfig_for_module_name_object_type_order,
)
from torch.ao.quantization.fx.utils import NodeInfo
from torch.ao.quantization.fake_quantize import (
default_fixed_qparams_range_0to1_fake_quant,
default_fixed_qparams_range_neg1to1_fake_quant,
)
from torch.ao.quantization.observer import (
default_fixed_qparams_range_0to1_observer,
default_fixed_qparams_range_neg1to1_observer,
)
# test utils
from hypothesis import given, settings
from hypothesis import strategies as st
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from torch.testing._internal.common_quantization import (
LinearReluLinearModel,
LinearReluModel,
QuantizationTestCase,
skipIfNoFBGEMM,
skip_if_no_torchvision,
train_one_epoch,
run_ddp,
test_only_eval_fn,
test_only_train_fn,
ModelForConvTransposeBNFusion,
)
from torch.testing._internal.common_quantization import (
LinearModelWithSubmodule,
ResNetBase,
RNNDynamicModel,
RNNCellDynamicModel,
)
from torch.testing._internal.common_quantized import (
supported_qengines,
override_qengines,
override_quantized_engine,
)
from torch.testing._internal.common_utils import TemporaryFileName, IS_ARM64
from torch.testing._internal.common_quantization import NodeSpec as ns
from torch.testing import FileCheck
import copy
import itertools
import operator
import unittest
import io
from typing import Callable, Optional, List
TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'
def get_supported_device_types():
return ['cpu', 'cuda'] if torch.cuda.is_available() and not TEST_WITH_ROCM else ['cpu']
class BinaryOp(torch.nn.Module):
def __init__(self, binary_op, ibinary_op, is_inplace, is_scalar):
""" ibinary_op means inplace binary op
"""
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1).float()
self.conv2 = torch.nn.Conv2d(1, 1, 1).float()
self.is_scalar = is_scalar
self.op = ibinary_op if ibinary_op and is_inplace else binary_op
def forward(self, x, y):
x = self.conv1(x)
y = 3 if self.is_scalar else self.conv2(y)
# x = x + y
x = self.op(x, y)
# x = y + x
x = self.op(y, x)
return x
class BinaryOpNonQuantizedInput(torch.nn.Module):
def __init__(self, binary_op, ibinary_op, is_inplace, is_scalar):
""" ibinary_op means inplace binary op
"""
super().__init__()
self.is_scalar = is_scalar
self.op = ibinary_op if ibinary_op and is_inplace else binary_op
def forward(self, x, y):
y = 3 if self.is_scalar else y
x = self.op(x, y)
return x
class BinaryOpRelu(torch.nn.Module):
def __init__(self, binary_op, ibinary_op, is_inplace, relu_callable,
is_scalar):
""" ibinary_op means inplace binary op
"""
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1).float()
self.conv2 = torch.nn.Conv2d(1, 1, 1).float()
self.op = ibinary_op if ibinary_op and is_inplace else binary_op
self.relu_callable = relu_callable
self.is_scalar = is_scalar
if relu_callable is torch.nn.ReLU:
self.relu = torch.nn.ReLU()
else:
self.relu = relu_callable
def forward(self, x, y):
x = self.conv1(x)
y = 3 if self.is_scalar else self.conv2(y)
x = self.op(x, y)
x = self.relu(x)
x = self.op(y, x)
x = self.relu(x)
return x
@torch.fx.wrap
def _user_func_with_complex_return_type(x):
return list(torch.split(x, 1, 1))
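# Note: the @torch.fx.wrap decorator above keeps _user_func_with_complex_return_type as a
# single call_function node during symbolic tracing instead of tracing into its body, so the
# quantization passes treat it as an opaque user function.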
class TestFuseFx(QuantizationTestCase):
def test_fuse_conv_bn_relu(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1d = nn.Conv1d(1, 1, 1)
self.conv2d = nn.Conv2d(1, 1, 1)
self.conv3d = nn.Conv3d(1, 1, 1)
self.bn1d = nn.BatchNorm1d(1)
self.bn2d = nn.BatchNorm2d(1)
self.bn3d = nn.BatchNorm3d(1)
self.conv1d2 = nn.Conv1d(1, 1, 1)
self.conv2d2 = nn.Conv2d(1, 1, 1)
self.conv3d2 = nn.Conv3d(1, 1, 1)
self.bn1d2 = nn.BatchNorm1d(1)
self.bn2d2 = nn.BatchNorm2d(1)
self.bn3d2 = nn.BatchNorm3d(1)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv1d(x)
x = self.bn1d(x)
x = self.conv2d(x)
x = self.bn2d(x)
x = self.conv3d(x)
x = self.bn3d(x)
x = self.conv1d2(x)
x = self.bn1d2(x)
x = self.relu(x)
x = self.conv2d2(x)
x = self.bn2d2(x)
x = self.relu(x)
x = self.conv3d2(x)
x = self.bn3d2(x)
x = self.relu(x)
return x
# test train mode
m = M().train()
        # currently we don't check whether the modules are configured with a qconfig before fusion
# TODO: if we decide to do that in the future, this test needs to
# be updated
# train mode fuse_fx is called in prepare_qat_fx
m = prepare_qat_fx(m, {}, example_inputs=(torch.randn(1, 1, 1, 1),))
expected_nodes = [
ns.call_module(nni.ConvBn1d),
ns.call_module(nni.ConvBn2d),
ns.call_module(nni.ConvBn3d),
ns.call_module(nni.ConvBnReLU1d),
ns.call_module(nni.ConvBnReLU2d),
ns.call_module(nni.ConvBnReLU3d),
]
expected_occurrence = {
ns.call_module(nn.ReLU): 0
}
self.checkGraphModuleNodes(
m,
expected_node_list=expected_nodes,
expected_node_occurrence=expected_occurrence)
# test eval mode
m = M().eval()
# fuse_fx is a top level api and only supports eval mode
m = fuse_fx(m)
expected_nodes = [
ns.call_module(nn.Conv1d),
ns.call_module(nn.Conv2d),
ns.call_module(nn.Conv3d),
ns.call_module(nni.ConvReLU1d),
ns.call_module(nni.ConvReLU2d),
ns.call_module(nni.ConvReLU3d),
]
# ConvBnRelu1d is not fused
expected_occurrence = {
ns.call_module(nn.ReLU): 0
}
self.checkGraphModuleNodes(
m,
expected_node_list=expected_nodes,
expected_node_occurrence=expected_occurrence)
def test_fuse_linear_bn_eval(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(1, 1)
self.bn1d = nn.BatchNorm1d(1)
def forward(self, x):
x = self.linear(x)
x = self.bn1d(x)
return x
# test eval mode
m = M().eval()
# fuse_fx is a top level api and only supports eval mode
m = fuse_fx(m)
expected_nodes = [
ns.call_module(nn.Linear),
]
expected_occurrence = {
ns.call_module(nn.BatchNorm1d): 0,
}
self.checkGraphModuleNodes(
m,
expected_node_list=expected_nodes,
expected_node_occurrence=expected_occurrence)
def test_fuse_convtranspose_bn_eval(self):
m = ModelForConvTransposeBNFusion().eval()
m = fuse_fx(m)
expected_nodes = [
ns.call_module(nn.ConvTranspose1d),
ns.call_module(nn.ConvTranspose2d),
ns.call_module(nn.ConvTranspose3d),
]
expected_occurrence = {
ns.call_module(nn.BatchNorm1d): 0,
ns.call_module(nn.BatchNorm2d): 0,
ns.call_module(nn.BatchNorm3d): 0,
}
self.checkGraphModuleNodes(
m,
expected_node_list=expected_nodes,
expected_node_occurrence=expected_occurrence)
def test_fuse_module_relu(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1d = nn.Conv1d(1, 1, 1)
self.conv2d = nn.Conv2d(1, 1, 1)
self.conv3d = nn.Conv3d(1, 1, 1)
self.bn1d = nn.BatchNorm1d(1)
self.bn2d = nn.BatchNorm2d(1)
self.bn3d = nn.BatchNorm3d(1)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv1d(x)
x = self.relu(x)
x = self.conv2d(x)
x = self.relu(x)
x = self.conv3d(x)
x = self.relu(x)
x = self.bn1d(x)
x = self.relu(x)
x = self.bn2d(x)
x = self.relu(x)
x = self.bn3d(x)
x = self.relu(x)
return x
m = M().eval()
m = fuse_fx(m)
expected_nodes = [
ns.call_module(nni.ConvReLU1d),
ns.call_module(nni.ConvReLU2d),
ns.call_module(nni.ConvReLU3d),
ns.call_module(nni.BNReLU2d),
ns.call_module(nni.BNReLU3d),
]
self.checkGraphModuleNodes(m, expected_node_list=expected_nodes)
@skipIfNoFBGEMM
def test_qconfig_fused_module(self):
""" TODO: add test for all fused modules
"""
qconfig_dict = {
"": None,
"object_type": [(nn.Linear, default_qconfig),
(nn.ReLU, default_qconfig),
(F.relu, default_qconfig)]
}
linearRelu_node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nniq.LinearReLU),
ns.call_method('dequantize')
]
linearReluLinear_node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nniq.LinearReLU),
ns.call_module(nnq.Linear),
ns.call_method('dequantize')
]
tests = [(LinearReluModel, linearRelu_node_list),
(LinearReluLinearModel, linearReluLinear_node_list)]
for M, node_list in tests:
m = M().eval()
example_inputs = (torch.rand(5, 5),)
prepared = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
prepared(*example_inputs)
quantized = convert_fx(prepared)
self.checkGraphModuleNodes(quantized, expected_node_list=node_list)
def test_problematic_fuse_example(self):
class LinearRelu(nn.Sequential):
def __init__(self):
super().__init__(
nn.Linear(5, 5),
nn.ReLU(),
)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin_relu = LinearRelu()
self.linear = nn.Linear(5, 5)
def forward(self, x):
x = self.lin_relu(x)
x = self.linear(x)
return x
model = M().eval()
# these qconfigs somehow fail equality where default_qconfig does not
qconfig_dict = {
"": None,
"object_type": [
(torch.nn.Linear, get_default_qconfig('fbgemm')),
(torch.nn.ReLU, get_default_qconfig('fbgemm')),
],
}
m = prepare_fx(model, qconfig_dict, example_inputs=(torch.randn(1, 5),))
self.checkGraphModuleNodes(m, expected_node=ns.call_module(torch.nn.intrinsic.modules.fused.LinearReLU))
@unittest.skip("Temprorarily skipping the test case, will enable after the simple"
"pattern format is supported")
    def test_fuse_additional_fuser_method(self):
class MyConvReLU(torch.nn.Module):
pass
def my_conv_relu_fuser(conv, relu):
return MyConvReLU()
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 3)
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(self.conv(x))
m = M().eval()
m = fuse_fx(m, fuse_custom_config={
"additional_fuser_method_mapping": {
(torch.nn.Conv2d, torch.nn.ReLU): my_conv_relu_fuser
}
})
self.checkGraphModuleNodes(m, expected_node=ns.call_module(MyConvReLU))
def test_fuse_custom_pattern(self):
class M(torch.nn.Module):
def __init__(self, use_torch_add=True):
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 3)
self.bn = torch.nn.BatchNorm2d(3)
self.relu = torch.nn.ReLU()
self.maxpool = torch.nn.MaxPool2d(3)
if use_torch_add:
self.add = torch.add
else:
self.add = operator.add
def forward(self, x):
y = x
y = self.maxpool(x)
x = self.conv(x)
x = self.bn(x)
x = self.add(y, x)
x = self.relu(x)
return x
for use_torch_add in [True, False]:
m = M(use_torch_add).eval()
def fuse_conv_bn_relu(is_qat, relu, add_pattern):
_, _, bn_pattern = add_pattern
bn, conv = bn_pattern
return conv
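            # The patterns below are written in the reversed, nested-tuple format used by
            # BackendPatternConfig here: the last op in execution order comes first, so
            # (nn.ReLU, (torch.add, MatchAllNode, (nn.BatchNorm2d, nn.Conv2d))) matches
            # Conv2d -> BatchNorm2d -> add(<any node>, ...) -> ReLU.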
conv_bn_res_relu_config1 = BackendPatternConfig((nn.ReLU, (torch.add, MatchAllNode, (nn.BatchNorm2d, nn.Conv2d)))) \
.set_fuser_method(fuse_conv_bn_relu)
conv_bn_res_relu_config2 = BackendPatternConfig((nn.ReLU, (operator.add, MatchAllNode, (nn.BatchNorm2d, nn.Conv2d)))) \
.set_fuser_method(fuse_conv_bn_relu)
backend_config = BackendConfig() \
.set_backend_pattern_config(conv_bn_res_relu_config1) \
.set_backend_pattern_config(conv_bn_res_relu_config2)
m = fuse_fx(m, backend_config=backend_config)
self.assertEqual(type(m.conv), torch.nn.Conv2d)
            # check that bn and relu are gone since we replaced the whole pattern with conv
self.assertFalse(hasattr(m, "bn"))
self.assertFalse(hasattr(m, "relu"))
def test_fusion_pattern_with_multiple_inputs(self):
""" This test tests two keys in backend_config: root_node_getter and
extra_inputs_getter,
root_node_getter is used to identify a "root" module in the node pattern,
the node that we'll keep after fusion.
extra_inputs_getter will return a list of node that needs to be added to the
fused node as extra inputs.
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 3)
self.bn = torch.nn.BatchNorm2d(3)
self.relu = torch.nn.ReLU()
self.maxpool = torch.nn.MaxPool2d(3)
def forward(self, x):
y = x
y = self.maxpool(x)
x = self.conv(x)
x = self.bn(x)
x = torch.add(x, y)
x = self.relu(x)
return x
m = M().eval()
def fuse_conv_bn_relu(is_qat, relu, add_pattern):
_, bn_pattern, _ = add_pattern
bn, conv = bn_pattern
return conv
def conv_bn_res_relu_root_node_getter(pattern):
relu, add_pattern = pattern
_, bn_pattern, _ = add_pattern
bn, conv = bn_pattern
return conv
def conv_bn_res_relu_extra_inputs_getter(pattern):
""" get inputs pattern for extra inputs, inputs for root node
are assumed to be copied over from root node to the fused node
"""
relu, add_pattern = pattern
_, bn_pattern, extra_input = add_pattern
bn, conv = bn_pattern
return [extra_input]
conv_bn_res_relu_config = BackendPatternConfig((nn.ReLU, (torch.add, (nn.BatchNorm2d, nn.Conv2d), MatchAllNode))) \
.set_fuser_method(fuse_conv_bn_relu) \
._set_root_node_getter(conv_bn_res_relu_root_node_getter) \
._set_extra_inputs_getter(conv_bn_res_relu_extra_inputs_getter)
backend_config = BackendConfig().set_backend_pattern_config(conv_bn_res_relu_config)
m = fuse_fx(m, backend_config=backend_config)
self.assertEqual(type(m.conv), torch.nn.Conv2d)
        # check that bn and relu are gone since we replaced the whole pattern with conv
self.assertFalse(hasattr(m, "bn"))
self.assertFalse(hasattr(m, "relu"))
# check conv module has two inputs
named_modules = dict(m.named_modules())
for node in m.graph.nodes:
if node.op == "call_module" and type(named_modules[node.target]) == torch.nn.Conv2d:
                self.assertTrue(len(node.args) == 2, "Expecting the fused op to have two arguments")
def test_fusion_pattern_with_matchallnode(self):
"""This test tests that the node matched by MatchAllNode will be regared as an input
instead of a module to be fused. For instance, we have two patterns:
(nn.ReLU, (torch.add, MatchAllNode, nn.Conv2d))
(nn.ReLU, nn.Conv2d)
And we wanna fuse the following model
Conv2d -> ReLU +
Conv2d ------ Add -> ReLU
ReLU in the first row is matched as MatchAllNode in the residual pattern. But it won't be
fused as part of that pattnern. It needs to be properly fused with the upstream Conv2d.
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(3, 3, 3)
self.relu1 = torch.nn.ReLU()
self.conv2 = torch.nn.Conv2d(3, 3, 3)
self.relu2 = torch.nn.ReLU()
def forward(self, x):
y = self.conv1(x)
y = self.relu1(y)
x = self.conv2(x)
x = torch.add(x, y)
x = self.relu2(x)
return x
m = M().eval()
def fuse_conv_relu(is_qat, relu, conv):
return conv
def fuse_conv_res_relu(is_qat, relu, add_pattern):
_, conv, _ = add_pattern
return conv
def conv_res_relu_root_node_getter(pattern):
relu, (_, conv, _) = pattern
return conv
def conv_res_relu_extra_inputs_getter(pattern):
relu, (_, _, extra_input) = pattern
return [extra_input]
conv_relu_config = BackendPatternConfig((nn.ReLU, nn.Conv2d)) \
.set_fuser_method(fuse_conv_relu)
conv_res_relu_config = BackendPatternConfig((nn.ReLU, (torch.add, nn.Conv2d, MatchAllNode))) \
.set_fuser_method(fuse_conv_res_relu) \
._set_root_node_getter(conv_res_relu_root_node_getter) \
._set_extra_inputs_getter(conv_res_relu_extra_inputs_getter)
backend_config = BackendConfig() \
.set_backend_pattern_config(conv_relu_config) \
.set_backend_pattern_config(conv_res_relu_config)
m = fuse_fx(m, backend_config=backend_config)
self.assertEqual(type(m.conv1), torch.nn.Conv2d)
self.assertEqual(type(m.conv2), torch.nn.Conv2d)
        # check that the relus are gone since we replaced both patterns with conv
self.assertFalse(hasattr(m, "relu1"))
self.assertFalse(hasattr(m, "relu2"))
@skipIfNoFBGEMM
class TestQuantizeFx(QuantizationTestCase):
def test_pattern_match(self):
""" test MatchAllNode with
conv - bn - add - relu pattern
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.relu = nn.ReLU()
def forward(self, x, y):
x = self.conv(x)
x = self.bn(x)
x = x + y
x = self.relu(x)
return x
pattern = (nn.ReLU, (operator.add, (nn.BatchNorm2d, nn.Conv2d), MatchAllNode))
m = torch.fx.symbolic_trace(M())
modules = dict(m.named_modules())
for n in m.graph.nodes:
if n.op == 'call_module' and type(modules[n.target]) == nn.ReLU:
self.assertTrue(is_match(modules, n, pattern))
def test_fused_module_qat_swap(self):
class Tmp(torch.nn.Module):
def __init__(self):
super().__init__()
self.tmp = torch.nn.Linear(5, 5)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.tmp(x)
return self.relu(x)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.mods1 = torch.nn.Sequential(Tmp(), torch.nn.Linear(5, 5))
self.mods2 = torch.nn.Linear(5, 5)
def forward(self, x):
a = self.mods1(x)
x = torch.add(x, 5)
x = self.mods2(x)
x = torch.add(x, 5)
return a, x
model = M().train()
qconfig_dict = {
"": None,
"object_type": [
(torch.nn.Linear, default_qat_qconfig),
(torch.nn.ReLU, default_qat_qconfig),
],
}
prepared = prepare_qat_fx(model, qconfig_dict, example_inputs=(torch.randn(1, 5),))
self.assertTrue(isinstance(getattr(prepared.mods1, "0").tmp, torch.nn.intrinsic.qat.LinearReLU))
def _get_conv_linear_test_cases(self, is_reference):
""" Returns a list of test cases, with format:
is_dynamic, ModuleClass, module_constructor_inputs,
inputs, quantized_node, weight_prepack_op
"""
class FunctionalConv1d(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = torch.nn.Parameter(weight)
self.stride = 1
self.padding = 0
self.dilation = 1
self.groups = 1
def forward(self, x):
return F.conv1d(x, self.weight, None, self.stride, self.padding, self.dilation, self.groups)
class Conv1d(torch.nn.Module):
def __init__(self, *args):
super().__init__()
self.conv = torch.nn.Conv1d(*args)
def forward(self, x):
return self.conv(x)
conv1d_input = torch.rand(1, 3, 224)
conv1d_weight = torch.rand(3, 3, 3)
conv1d_module_args = (3, 3, 3)
class FunctionalConv2d(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = torch.nn.Parameter(weight)
self.stride = (1, 1)
self.padding = (0, 0)
self.dilation = (1, 1)
self.groups = 1
def forward(self, x):
return F.conv2d(x, self.weight, None, self.stride, self.padding, self.dilation, self.groups)
class Conv2d(torch.nn.Module):
def __init__(self, *args):
super().__init__()
self.conv = torch.nn.Conv2d(*args)
def forward(self, x):
return self.conv(x)
conv2d_input = torch.rand(1, 3, 224, 224)
conv2d_weight = torch.rand(3, 3, 3, 3)
conv2d_module_args = (3, 3, 3)
class FunctionalConv3d(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = torch.nn.Parameter(weight)
self.stride = (1, 1, 1)
self.padding = (0, 0, 0)
self.dilation = (1, 1, 1)
self.groups = 1
def forward(self, x):
return F.conv3d(
x,
self.weight,
None,
self.stride,
self.padding,
self.dilation,
self.groups,
)
class Conv3d(torch.nn.Module):
def __init__(self, *args):
super().__init__()
self.conv = torch.nn.Conv3d(*args)
def forward(self, x):
return self.conv(x)
conv3d_input = torch.rand(1, 3, 32, 224, 224)
conv3d_weight = torch.rand(3, 3, 3, 3, 3)
conv3d_module_args = (3, 3, 3)
class Linear(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = torch.nn.Parameter(weight)
def forward(self, x):
return F.linear(x, self.weight)
linear_input = torch.rand(8, 5)
linear_weight = torch.rand(10, 5)
class LinearModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(5, 10)
def forward(self, x):
return self.linear(x)
linear_module_input = torch.rand(8, 5)
# is_dynamic, ModuleClass, module_constructor_inputs,
# inputs, quantized_node, weight_prepack_node
tests = [
(
False,
FunctionalConv1d,
(conv1d_weight,),
(conv1d_input,),
                ns.call_function(torch.nn.functional.conv1d if is_reference else torch.ops.quantized.conv1d),
ns.call_function(torch.ops.quantized.conv1d_prepack),
),
(
False,
FunctionalConv2d,
(conv2d_weight,),
(conv2d_input,),
ns.call_function(torch.nn.functional.conv2d if is_reference else torch.ops.quantized.conv2d),
ns.call_function(torch.ops.quantized.conv2d_prepack),
),
(
False,
FunctionalConv3d,
(conv3d_weight,),
(conv3d_input,),
ns.call_function(torch.nn.functional.conv3d if is_reference else torch.ops.quantized.conv3d),
ns.call_function(torch.ops.quantized.conv3d_prepack),
),
(
False,
Conv1d,
conv1d_module_args,
(conv1d_input,),
ns.call_module(nnqr.Conv1d if is_reference else nnq.Conv1d),
None
),
(
False,
Conv2d,
conv2d_module_args,
(conv2d_input,),
ns.call_module(nnqr.Conv2d if is_reference else nnq.Conv2d),
None
),
(
False,
Conv3d,
conv3d_module_args,
(conv3d_input,),
ns.call_module(nnqr.Conv3d if is_reference else nnq.Conv3d),
None
),
(
True,
Linear,
(linear_weight,),
(linear_input,),
None if is_reference else ns.call_function(torch.ops.quantized.linear_dynamic),
ns.call_function(torch.ops.quantized.linear_prepack),
),
(
False,
Linear,
(linear_weight,),
(linear_input,),
ns.call_function(torch.nn.functional.linear if is_reference else torch.ops.quantized.linear),
ns.call_function(torch.ops.quantized.linear_prepack),
),
(
True,
LinearModule,
(),
(linear_module_input,),
ns.call_module(nnqr.Linear) if is_reference else ns.call_module(nnqd.Linear),
None,
),
(
False,
LinearModule,
(),
(linear_module_input,),
ns.call_module(nnqr.Linear if is_reference else nnq.Linear),
None,
),
]
return tests
@skipIfNoFBGEMM
def test_conv_linear_not_reference(self):
""" Test quantizing conv and linear
"""
tests = self._get_conv_linear_test_cases(is_reference=False)
for (is_dynamic, ModuleClass, module_constructor_inputs,
inputs, quantized_node, weight_prepack_node) in tests:
quant_type = QuantType.DYNAMIC if is_dynamic else QuantType.STATIC
node_occurrence = dict()
if weight_prepack_node:
node_occurrence[weight_prepack_node] = 0
self.checkGraphModeFxOp(
ModuleClass(*module_constructor_inputs),
inputs, quant_type,
expected_node=quantized_node,
expected_node_occurrence=node_occurrence,
is_reference=False)
@skipIfNoFBGEMM
def test_conv_linear_reference(self):
""" Test quantizing functional conv and linear with reference option
"""
tests = self._get_conv_linear_test_cases(is_reference=True)
def _get_keys(prefix, is_dynamic):
all_keys = [prefix + "." + k for k in ["weight_qscheme", "weight_dtype"]]
if not is_dynamic:
all_keys.extend([prefix + "." + k for k in ["weight_scale", "weight_zero_point"]])
return all_keys
for (is_dynamic, ModuleClass, module_constructor_inputs,
inputs, quantized_node, weight_prepack_node) in tests:
quant_type = QuantType.DYNAMIC if is_dynamic else QuantType.STATIC
node_occurrence = dict()
if weight_prepack_node:
node_occurrence[weight_prepack_node] = 0
result_dict = self.checkGraphModeFxOp(
ModuleClass(*module_constructor_inputs),
inputs, quant_type,
expected_node=quantized_node,
expected_node_occurrence=node_occurrence,
is_reference=True)
qr = result_dict["quantized_reference"]
def checkWeightQParams(model):
for module_name in ("linear", "conv"):
if hasattr(model, module_name):
self.assertTrue(hasattr(qr.get_submodule(module_name), "weight_qscheme"))
self.assertTrue(hasattr(qr.get_submodule(module_name), "weight_scale"))
self.assertTrue(hasattr(qr.get_submodule(module_name), "weight_zero_point"))
self.assertTrue("Reference" in qr.get_submodule(module_name)._get_name())
def checkSerDeser(model, is_dynamic):
for module_name in ("linear", "conv"):
if hasattr(model, module_name):
                        # make sure serialization works
state_dict = copy.deepcopy(model.state_dict())
all_keys = _get_keys(module_name, is_dynamic)
for key in all_keys:
self.assertTrue(key in state_dict)
# check load_state_dict restores states
module = getattr(model, module_name)
prev_scale = module.weight_scale
module.weight_scale = None
model.load_state_dict(state_dict)
module = getattr(model, module_name)
self.assertTrue(torch.equal(prev_scale, module.weight_scale))
checkWeightQParams(qr)
qr = copy.deepcopy(qr)
# make sure the qparams are preserved after copy
checkWeightQParams(qr)
checkSerDeser(qr, is_dynamic)
@skipIfNoFBGEMM
def test_dynamic_quant_weight_observer(self):
''' Test that weight observer is run in convert step
'''
class M(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = torch.nn.Parameter(weight)
def forward(self, x):
return F.linear(x, self.weight)
m = M(torch.rand(1, 1)).eval()
qconfig = default_dynamic_qconfig
qconfig_dict = {'': qconfig}
example_inputs = (torch.rand(1, 1),)
prepared = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
quantized = convert_to_reference_fx(prepared)
qparams = (quantized._scale_0, quantized._zero_point_0)
weight_obs = qconfig.weight()
weight_obs(quantized.weight)
# Get the actual value to avoid tensor size mismatch error, torch.Size([]) vs torch.Size([1])
ref_qparams = (weight_obs.calculate_qparams()[0].item(), weight_obs.calculate_qparams()[1].item())
self.assertEqual(qparams, ref_qparams)
def test_conv_bn_relu(self):
""" Tests fusion and quantization for "Conv - Bn" and "Conv - Bn - ReLU"
"""
convs = {
1: nn.Conv1d,
2: nn.Conv2d,
3: nn.Conv3d,
}
bns = {
1: nn.BatchNorm1d,
2: nn.BatchNorm2d,
3: nn.BatchNorm3d,
}
quantized_convs = {
1: nnq.Conv1d,
2: nnq.Conv2d,
3: nnq.Conv3d,
}
quantized_conv_relus = {
1: nniq.ConvReLU1d,
2: nniq.ConvReLU2d,
3: nniq.ConvReLU3d,
}
class M(torch.nn.Module):
def __init__(self, dim, has_relu):
super().__init__()
self.conv = convs[dim](3, 3, 3)
self.bn = bns[dim](3)
self.relu = nn.ReLU() if has_relu else nn.Identity()
self.has_relu = has_relu
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.bn(x)
if self.has_relu:
x = self.relu(x)
x = self.dequant(x)
return x
options = itertools.product([1, 2, 3], [True, False], self.static_quant_types)
for dim, has_relu, quant_type in options:
expected_node = ns.call_module(
quantized_conv_relus[dim] if has_relu
else quantized_convs[dim])
m = M(dim, has_relu)
m_eager = copy.deepcopy(m)
result_dict = self.checkGraphModeFxOp(
m,
self.img_data_dict[dim],
quant_type,
expected_node=expected_node,
)
result = result_dict["quantized_output"]
# check numerics
qengine = torch.backends.quantized.engine
if quant_type == QuantType.STATIC:
m_eager.eval()
qconfig = get_default_qconfig(qengine)
prepare_fn = prepare
is_qat = False
else:
m_eager.train()
qconfig = get_default_qat_qconfig(qengine)
prepare_fn = prepare_qat
is_qat = True
fuse_list = ["conv", "bn"]
if has_relu:
fuse_list.append("relu")
if is_qat:
fuse_modules_qat(m_eager, fuse_list, inplace=True)
else:
fuse_modules(m_eager, fuse_list, inplace=True)
m_eager.qconfig = qconfig
m_eager = prepare_fn(m_eager)
prepared_fx = result_dict["prepared"]
m_eager(*self.img_data_dict[dim][0])
m_eager = convert(m_eager)
result_eager = m_eager(*self.img_data_dict[dim][0])
self.assertEqual(result, result_eager)
def test_linear_bn(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(4, 4)
self.bn = nn.BatchNorm1d(4)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.linear(x)
x = self.bn(x)
x = self.dequant(x)
return x
data = (torch.randn(4, 4),)
for quant_type in self.static_quant_types:
expected_node = ns.call_module(nnq.Linear)
m = M()
m_eager = copy.deepcopy(m)
result_dict = self.checkGraphModeFxOp(m, data, quant_type, expected_node=expected_node)
result = result_dict["quantized_output"]
# check numerics vs eager mode
fuse_list = ["linear", "bn"]
qengine = torch.backends.quantized.engine
if quant_type == QuantType.STATIC:
m_eager.eval()
qconfig = get_default_qconfig(qengine)
prepare_fn = prepare
fuse_modules(m_eager, fuse_list, inplace=True)
else:
m_eager.train()
qconfig = get_default_qat_qconfig(qengine)
prepare_fn = prepare_qat
fuse_modules_qat(m_eager, fuse_list, inplace=True)
m_eager.qconfig = qconfig
m_eager = prepare_fn(m_eager)
m_eager(*data)
m_eager = convert(m_eager)
result_eager = m_eager(*data)
self.assertEqual(result, result_eager)
@skipIfNoFBGEMM
def test_dynamic_quant_fp16(self):
with override_quantized_engine('fbgemm'):
class Linear(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = torch.nn.Parameter(weight)
def forward(self, x):
return F.linear(x, self.weight)
linear_input = torch.rand(8, 5)
linear_weight = torch.rand(10, 5)
class LinearModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(5, 10)
def forward(self, x):
return self.linear(x)
linear_module_input = torch.rand(8, 5)
tests = [
(Linear, (linear_weight,), (linear_input,),
ns.call_function(torch.ops.quantized.linear_dynamic_fp16),
ns.call_function(torch.ops.quantized.linear_prepack_fp16)),
(LinearModule, (), (linear_module_input,),
ns.call_module(nnqd.Linear),
None),
]
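            # float16_dynamic_qconfig (used below) keeps activations in fp32 and stores the
            # weight in fp16, which is why the expected ops above are the *_fp16 variants of
            # the dynamic linear ops.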
for (ModuleClass, module_constructor_inputs,
inputs, quantized_node, weight_prepack_node) in tests:
for is_reference in [True, False]:
node_occurrence = dict()
if weight_prepack_node:
node_occurrence[weight_prepack_node] = 0
m = ModuleClass(*module_constructor_inputs).eval()
qconfig_dict = {"": float16_dynamic_qconfig}
m = prepare_fx(m, qconfig_dict, example_inputs=inputs)
convert_fn = convert_to_reference_fx if is_reference else convert_fx
m = convert_fn(m)
self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@override_qengines
def test_qat_prepare_device_affinity(self):
"""
Tests that FX QAT prepare pass respects device affinity
"""
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv = nn.Conv2d(1, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
model = Model()
qengine = torch.backends.quantized.engine
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig(qengine)}
device = torch.device('cuda:0')
model.to(device)
example_inputs = (torch.randn(4, 1, 4, 4, device=device),)
# QAT prepare
model = prepare_qat_fx(model, qconfig_dict, example_inputs=example_inputs)
# ensure that running an input on CUDA works without any needed changes
model(*example_inputs)
# ensure all buffers and parameters are on the device we expect
model_devices = {p.device for p in model.parameters()} | \
{p.device for p in model.buffers()}
self.assertEqual(len(model_devices), 1)
model_device = next(iter(model_devices))
self.assertEqual(model_device, device)
@skipIfNoFBGEMM
def test_dict_output(self):
""" Make sure quantization runs for models with dictionary output
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
return {"output": self.conv(x["input"])}
example_inputs = ({"input": torch.randn(1, 1, 1, 1)},)
m = M().eval()
qconfig_dict = {"": default_qconfig}
m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
m(*example_inputs)
m = convert_fx(m)
m(*example_inputs)
@override_qengines
def test_attention(self):
""" Make sure quantization runs for a corner case in attention module
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv(x)
q, k, v = x.chunk(3, dim=0)
q = q.contiguous().view(-1, 1).transpose(0, 1)
k = k.contiguous().view(-1, 1).transpose(0, 1)
v = v.contiguous().view(-1, 1).transpose(0, 1)
torch._assert(
k.size(1) == 1, "key size should be equal to 1"
)
r = torch.mm(k, v)
return q * k + r
example_inputs = (torch.randn(3, 1, 1, 1),)
m = M().eval()
qconfig_dict = {
"": None,
"object_type": [
(nn.Conv2d, default_qconfig),
]
}
# make sure it runs
m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
m(*example_inputs)
m = convert_fx(m)
m(*example_inputs)
def _test_standalone_module(
self,
interface_config,
prepare_count_check,
standalone_prepare_count_check,
convert_count_check,
standalone_convert_count_check):
""" Test standalone module with different quantized input/quantized output
configurations
"""
class StandaloneModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
return self.conv(x)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.standalone = StandaloneModule()
def forward(self, x):
x = self.conv(x)
x = self.standalone(x)
return x
class RefM(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
example_inputs = (torch.randn(1, 1, 1, 1),)
# instantiate M and RefM and align the parameters
original_m = M().eval()
original_ref_m = RefM().eval()
original_ref_m.conv1.weight = torch.nn.Parameter(original_m.conv.weight.detach())
original_ref_m.conv1.bias = torch.nn.Parameter(original_m.conv.bias.detach())
original_ref_m.conv2.weight = torch.nn.Parameter(original_m.standalone.conv.weight.detach())
original_ref_m.conv2.bias = torch.nn.Parameter(original_m.standalone.conv.bias.detach())
for is_name in [True, False]:
sm_example_inputs = example_inputs
if is_name:
prepare_config = {
"standalone_module_name": [("standalone", None, sm_example_inputs, interface_config, None)]
}
else:
prepare_config = {
"standalone_module_class": [(StandaloneModule, None, sm_example_inputs, interface_config, None)]
}
original_m_copy = copy.deepcopy(original_m)
original_ref_m_copy = copy.deepcopy(original_ref_m)
qconfig_dict = {"": default_qconfig}
# check prepared model
m = prepare_fx(
original_m_copy,
qconfig_dict,
example_inputs=example_inputs,
prepare_custom_config=prepare_config)
# calibration
m(*example_inputs)
self.checkGraphModuleNodes(m, expected_node_occurrence=prepare_count_check)
self.checkGraphModuleNodes(m.standalone, expected_node_occurrence=standalone_prepare_count_check)
# check converted/quantized model
m = convert_fx(m)
self.checkGraphModuleNodes(m, expected_node_occurrence=convert_count_check)
self.checkGraphModuleNodes(m.standalone, expected_node_occurrence=standalone_convert_count_check)
res = m(*example_inputs)
# quantize the reference model
ref_m = prepare_fx(
original_ref_m_copy,
qconfig_dict,
example_inputs=example_inputs,
)
ref_m(*example_inputs)
ref_m = convert_fx(ref_m)
ref_res = ref_m(*example_inputs)
self.assertEqual(res, ref_res)
def test_standalone_module_float_interface(self):
float_interface_config = {
"input_quantized_idxs": [], # float input
"output_quantized_idxs": [], # float output
}
interface_config = float_interface_config
        # observers for the input and output of the first conv; the observers for the
        # standalone module will be inserted in the standalone module itself
prepare_count_check = {
ns.call_module(torch.ao.quantization.MinMaxObserver): 2
}
# for input and output of conv in the standalone module
standalone_prepare_count_check = {
ns.call_module(torch.ao.quantization.MinMaxObserver): 2
}
convert_count_check = {
ns.call_function(torch.quantize_per_tensor) : 1,
ns.call_module(nnq.Conv2d) : 1,
ns.call_method("dequantize") : 1,
}
standalone_convert_count_check = {
# standalone module will take float as input and output
            # so we'll see quantize and dequantize in the module
ns.call_function(torch.quantize_per_tensor) : 1,
ns.call_module(nnq.Conv2d): 1,
ns.call_method("dequantize") : 1,
}
self._test_standalone_module(
interface_config,
prepare_count_check,
standalone_prepare_count_check,
convert_count_check,
standalone_convert_count_check)
def test_standalone_module_quantized_interface(self):
quantized_interface_config = {
"input_quantized_idxs": [0], # quantized input
"output_quantized_idxs": [0], # quantized output
}
interface_config = quantized_interface_config
# observer for input and output of first conv
prepare_count_check = {
ns.call_module(torch.ao.quantization.MinMaxObserver): 2
}
# for output of conv in the standalone module
standalone_prepare_count_check = {
ns.call_module(torch.ao.quantization.MinMaxObserver): 1
}
convert_count_check = {
# quantizing input for conv
ns.call_function(torch.quantize_per_tensor) : 1,
ns.call_module(nnq.Conv2d) : 1,
# dequantizing output of standalone module
ns.call_method("dequantize") : 1,
}
standalone_convert_count_check = {
# quantization of input happens in parent module
# quantization of output happens in the quantized conv module
ns.call_function(torch.quantize_per_tensor) : 0,
ns.call_module(nnq.Conv2d): 1,
# dequantization for output happens in parent module
ns.call_method("dequantize") : 0,
}
self._test_standalone_module(
interface_config,
prepare_count_check,
standalone_prepare_count_check,
convert_count_check,
standalone_convert_count_check)
@skipIfNoFBGEMM
def test_qconfig_none(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv1 = nn.Conv2d(1, 1, 1)
self.conv2 = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
m = M().eval()
qconfig_dict = {"": default_qconfig,
"module_name": [("conv2", None)]}
example_inputs = (torch.randn(1, 1, 1, 1),)
m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
m(*example_inputs)
m = convert_fx(m)
m(*example_inputs)
# first conv is quantized, second conv is not quantized
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Conv2d),
ns.call_method("dequantize"),
ns.call_module(nn.Conv2d),
]
self.checkGraphModuleNodes(m, expected_node_list=node_list)
def test_qconfig_module_type(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv1 = nn.Conv2d(1, 1, 1)
self.conv2 = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
m = M().eval()
qconfig_dict = {"object_type": [(torch.nn.Conv2d, default_qconfig)]}
example_inputs = (torch.randn(1, 1, 1, 1),)
m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
m(*example_inputs)
m = convert_fx(m)
m(*example_inputs)
        # both convs are quantized since nn.Conv2d is configured through object_type
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Conv2d),
ns.call_module(nnq.Conv2d),
ns.call_method("dequantize"),
]
self.checkGraphModuleNodes(m, expected_node_list=node_list)
def test_qconfig_qat_module_type(self):
class LinearRelu(nn.Sequential):
def __init__(self):
super().__init__(
nn.Linear(5, 5),
nn.ReLU(),
)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin_relu = LinearRelu()
self.linear = nn.Linear(5, 5)
def forward(self, x):
x = self.lin_relu(x)
x = self.linear(x)
return x
model = M().train()
qconfig_dict = {
"": None,
"object_type": [
(torch.nn.Linear, default_qat_qconfig),
(torch.nn.ReLU, default_qat_qconfig),
],
}
example_inputs = (torch.rand(5, 5),)
m = prepare_qat_fx(model, qconfig_dict, example_inputs=example_inputs)
m(*example_inputs)
m = convert_fx(m)
m(*example_inputs)
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nniq.LinearReLU),
ns.call_module(nnq.Linear),
ns.call_method("dequantize"),
]
self.checkGraphModuleNodes(m, expected_node_list=node_list)
def test_qconfig_function(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, x, y):
return x + y
m = M().eval()
qconfig_dict = {"object_type": [(operator.add, default_qconfig)]}
data = torch.randn(1, 1, 1, 1)
example_inputs = (data, data)
m = prepare_fx(m, qconfig_dict, example_inputs)
m(*example_inputs)
m = convert_fx(m)
m(*example_inputs)
        # add is quantized since operator.add is configured through object_type
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.add),
ns.call_method("dequantize"),
]
self.checkGraphModuleNodes(m, expected_node_list=node_list)
def test_qconfig_module_name_regex(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv1 = nn.Conv2d(1, 1, 1)
self.conv2 = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
m = M().eval()
qconfig_dict = {"module_name_regex": [("conv*", default_qconfig)]}
example_inputs = (torch.randn(1, 1, 1, 1),)
m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
m(*example_inputs)
m = convert_fx(m)
m(*example_inputs)
        # both convs are quantized since the module_name_regex matches conv1 and conv2
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Conv2d),
ns.call_module(nnq.Conv2d),
ns.call_method("dequantize"),
]
self.checkGraphModuleNodes(m, expected_node_list=node_list)
def test_qconfig_precedence(self):
for device in get_supported_device_types():
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.linear = nn.Linear(1, 1)
self.conv = nn.Conv2d(1, 1, 1)
self.module_conv1 = nn.Conv2d(1, 1, 1)
self.module_conv2 = nn.Conv2d(1, 1, 1)
def forward(self, x):
# global
x = self.linear(x)
# global + object_type --> object_type
x = self.conv(x)
# global + object_type + module_name_regex --> module_name_regex
x = self.module_conv1(x)
# global + object_type + module_name_regex + module_name --> module_name
x = self.module_conv2(x)
return x
m = M().to(device).eval()
global_qconfig = default_qconfig
object_type_qconfig = default_dynamic_qconfig
module_name_regex_qconfig = float16_dynamic_qconfig
module_name_qconfig = default_qat_qconfig
qconfig_dict = {
"": global_qconfig,
"object_type": [(nn.Conv2d, object_type_qconfig)],
"module_name_regex": [("module_conv*", module_name_regex_qconfig)],
"module_name": [("module_conv2", module_name_qconfig)]}
m_prep = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 1),))
self.assertEqual(m_prep.linear.qconfig.activation.p.func, global_qconfig.activation.p.func)
self.assertEqual(m_prep.linear.qconfig.weight.p.func, global_qconfig.weight.p.func)
self.assertEqual(m_prep.conv.qconfig.activation.p.func, object_type_qconfig.activation.p.func)
self.assertEqual(m_prep.conv.qconfig.weight.p.func, object_type_qconfig.weight.p.func)
self.assertEqual(m_prep.module_conv1.qconfig.activation.p.func, module_name_regex_qconfig.activation.p.func)
self.assertEqual(m_prep.module_conv1.qconfig.weight.p.func, module_name_regex_qconfig.weight.p.func)
self.assertEqual(m_prep.module_conv2.qconfig.activation.p.func, module_name_qconfig.activation.p.func)
self.assertEqual(m_prep.module_conv2.qconfig.weight.p.func, module_name_qconfig.weight.p.func)
def test_qconfig_module_name_object_type_order(self):
class M1(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(1, 1)
self.fc2 = nn.Linear(1, 1)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
x = torch.add(x, x)
x = torch.add(x, x)
return x
class M2(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(1, 1)
self.fc2 = nn.Linear(1, 1)
self.m1 = M1()
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
x = torch.add(x, x)
x = torch.add(x, x)
x = self.m1(x)
return x
class M3(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(1, 1)
self.fc2 = nn.Linear(1, 1)
self.m2 = M2()
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
x = torch.add(x, x)
x = torch.add(x, x)
x = self.m2(x)
return x
m = M3().eval()
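        # In "module_name_object_type_order" entries the integer is a 0-based index selecting
        # which occurrence of the given object type within the named module's forward gets the
        # qconfig, e.g. ("m2", nn.Linear, 1, ...) targets the second nn.Linear call inside m2.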
qconfig_dict = {
"module_name_object_type_order": [
# test various FQNs: global, single child, multiple children
("", nn.Linear, 0, torch.ao.quantization.default_qconfig),
("", torch.add, 0, torch.ao.quantization.default_qconfig),
("m2", nn.Linear, 1, torch.ao.quantization.default_qconfig),
("m2", torch.add, 1, torch.ao.quantization.default_qconfig),
("m2.m1", nn.Linear, 0, torch.ao.quantization.default_qconfig),
("m2.m1", torch.add, 0, torch.ao.quantization.default_qconfig),
],
}
example_inputs = (torch.randn(1, 1, 1, 1),)
m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
m(*example_inputs)
m = convert_fx(m)
m(*example_inputs)
node_list = [
# m3
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Linear),
ns.call_method("dequantize"),
ns.call_module(nn.Linear),
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.add),
ns.call_method("dequantize"),
ns.call_function(torch.add),
# m2
ns.call_module(nn.Linear),
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Linear),
ns.call_method("dequantize"),
ns.call_function(torch.add),
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.add),
# m1
ns.call_module(nnq.Linear),
ns.call_method("dequantize"),
ns.call_module(nn.Linear),
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.add),
ns.call_method("dequantize"),
ns.call_function(torch.add),
]
self.checkGraphModuleNodes(m, expected_node_list=node_list)
# test that function order overrides global qconfig
class M4(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(1, 1)
self.fc2 = nn.Linear(1, 1)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
x = torch.add(x, x)
x = torch.add(x, x)
return x
m = M4().eval()
qconfig_dict = {
"": torch.ao.quantization.default_qconfig,
"module_name_object_type_order": [
("", nn.Linear, 1, None),
("", torch.add, 1, None),
],
}
example_inputs = (torch.randn(1, 1, 1, 1),)
m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
m(*example_inputs)
m = convert_fx(m)
m(*example_inputs)
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Linear),
ns.call_method("dequantize"),
ns.call_module(nn.Linear),
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.add),
ns.call_method("dequantize"),
ns.call_function(torch.add),
]
self.checkGraphModuleNodes(m, expected_node_list=node_list)
def test_qconfig_dict_with_fused_modules(self):
class LinearReLUModel(torch.nn.Module):
def __init__(self, relu):
super(LinearReLUModel, self).__init__()
self.linear = torch.nn.Linear(3, 3)
self.relu = relu
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class ConvReLUModel(torch.nn.Module):
def __init__(self, relu):
super(ConvReLUModel, self).__init__()
self.conv = torch.nn.Conv1d(3, 3, 3)
self.relu = relu
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
return x
class ConvBnReLUModel(torch.nn.Module):
def __init__(self, relu):
super(ConvBnReLUModel, self).__init__()
self.conv = torch.nn.Conv1d(3, 3, 3)
self.bn = torch.nn.BatchNorm1d(3)
self.relu = relu
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
for model in [LinearReLUModel, ConvReLUModel, ConvBnReLUModel]:
for relu in [torch.nn.ReLU(), torch.nn.functional.relu, torch.relu]:
m = model(relu).eval()
qconfig_dict = torch.ao.quantization.get_default_qconfig_mapping("fbgemm")
# should not crash as in https://github.com/pytorch/pytorch/issues/75825
prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 3, 3, 3),))
# TODO: move QConfigMapping tests to test/quantization/core
def test_qconfig_mapping_set_global(self):
qconfig = get_default_qconfig()
qconfig_mapping = QConfigMapping()
self.assertEqual(qconfig_mapping.global_qconfig, None)
qconfig_mapping.set_global(qconfig)
self.assertEqual(qconfig_mapping.global_qconfig, qconfig)
def test_qconfig_mapping_set_object_type(self):
qconfig1 = get_default_qconfig()
qconfig2 = get_default_qconfig()
qconfig3 = get_default_qconfig()
self.assertNotEqual(qconfig1, qconfig2)
self.assertNotEqual(qconfig1, qconfig3)
qconfig_mapping = QConfigMapping()
self.assertEqual(len(qconfig_mapping.object_type_qconfigs), 0)
# Insert some entries
qconfig_mapping.set_object_type(torch.nn.Linear, qconfig1)
qconfig_mapping.set_object_type(torch.nn.ReLU, qconfig2)
self.assertEqual(len(qconfig_mapping.object_type_qconfigs), 2)
self.assertEqual(qconfig_mapping.object_type_qconfigs[torch.nn.Linear], qconfig1)
self.assertEqual(qconfig_mapping.object_type_qconfigs[torch.nn.ReLU], qconfig2)
# Override existing key
qconfig_mapping.set_object_type(torch.nn.Linear, qconfig3)
self.assertEqual(qconfig_mapping.object_type_qconfigs[torch.nn.Linear], qconfig3)
self.assertEqual(qconfig_mapping.object_type_qconfigs[torch.nn.ReLU], qconfig2)
self.assertEqual(get_object_type_qconfig(qconfig_mapping, torch.nn.Linear, None), qconfig3)
self.assertEqual(get_object_type_qconfig(qconfig_mapping, torch.nn.ReLU, None), qconfig2)
self.assertEqual(get_object_type_qconfig(qconfig_mapping, "nomatch", None), None)
def test_qconfig_mapping_set_module_name_regex(self):
qconfig1 = get_default_qconfig()
qconfig2 = get_default_qconfig()
qconfig3 = get_default_qconfig()
self.assertNotEqual(qconfig1, qconfig2)
self.assertNotEqual(qconfig1, qconfig3)
qconfig_mapping = QConfigMapping()
self.assertEqual(len(qconfig_mapping.module_name_regex_qconfigs), 0)
# Insert some entries
qconfig_mapping.set_module_name_regex("foo.*bar", qconfig1)
qconfig_mapping.set_module_name_regex("foo.*", qconfig2)
self.assertEqual(len(qconfig_mapping.module_name_regex_qconfigs), 2)
self.assertEqual(qconfig_mapping.module_name_regex_qconfigs["foo.*bar"], qconfig1)
self.assertEqual(qconfig_mapping.module_name_regex_qconfigs["foo.*"], qconfig2)
# Override existing key
qconfig_mapping.set_module_name_regex("foo.*bar", qconfig3)
self.assertEqual(qconfig_mapping.module_name_regex_qconfigs["foo.*bar"], qconfig3)
self.assertEqual(qconfig_mapping.module_name_regex_qconfigs["foo.*"], qconfig2)
self.assertEqual(get_module_name_regex_qconfig(qconfig_mapping, "foo123bar", None), qconfig3)
self.assertEqual(get_module_name_regex_qconfig(qconfig_mapping, "foobar", None), qconfig3)
self.assertEqual(get_module_name_regex_qconfig(qconfig_mapping, "foobaz", None), qconfig2)
self.assertEqual(get_module_name_regex_qconfig(qconfig_mapping, "foo", None), qconfig2)
self.assertEqual(get_module_name_regex_qconfig(qconfig_mapping, "nomatch", None), None)
def test_qconfig_mapping_set_module_name(self):
qconfig1 = get_default_qconfig()
qconfig2 = get_default_qconfig()
qconfig3 = get_default_qconfig()
self.assertNotEqual(qconfig1, qconfig2)
self.assertNotEqual(qconfig1, qconfig3)
qconfig_mapping = QConfigMapping()
self.assertEqual(len(qconfig_mapping.module_name_qconfigs), 0)
# Insert some entries
qconfig_mapping.set_module_name("mod1", qconfig1)
qconfig_mapping.set_module_name("mod2", qconfig2)
self.assertEqual(len(qconfig_mapping.module_name_qconfigs), 2)
self.assertEqual(qconfig_mapping.module_name_qconfigs["mod1"], qconfig1)
self.assertEqual(qconfig_mapping.module_name_qconfigs["mod2"], qconfig2)
# Override existing key
qconfig_mapping.set_module_name("mod1", qconfig3)
self.assertEqual(qconfig_mapping.module_name_qconfigs["mod1"], qconfig3)
self.assertEqual(qconfig_mapping.module_name_qconfigs["mod2"], qconfig2)
self.assertEqual(get_module_name_qconfig(qconfig_mapping, "mod1", None), qconfig3)
self.assertEqual(get_module_name_qconfig(qconfig_mapping, "mod2", None), qconfig2)
self.assertEqual(get_module_name_qconfig(qconfig_mapping, "nomatch", None), None)
def test_qconfig_mapping_set_module_name_object_type_order(self):
qconfig1 = get_default_qconfig()
qconfig2 = get_default_qconfig()
qconfig3 = get_default_qconfig()
self.assertNotEqual(qconfig1, qconfig2)
self.assertNotEqual(qconfig1, qconfig3)
qconfig_mapping = QConfigMapping()
self.assertEqual(len(qconfig_mapping.module_name_object_type_order_qconfigs), 0)
# Insert some entries
qconfig_mapping.set_module_name_object_type_order("mod1", torch.nn.Linear, 0, qconfig1)
qconfig_mapping.set_module_name_object_type_order("mod2", torch.nn.ReLU, 1, qconfig2)
self.assertEqual(len(qconfig_mapping.module_name_object_type_order_qconfigs), 2)
key1 = ("mod1", torch.nn.Linear, 0)
key2 = ("mod2", torch.nn.ReLU, 1)
self.assertEqual(list(qconfig_mapping.module_name_object_type_order_qconfigs)[0], key1)
self.assertEqual(list(qconfig_mapping.module_name_object_type_order_qconfigs)[1], key2)
self.assertEqual(qconfig_mapping.module_name_object_type_order_qconfigs[key1], qconfig1)
self.assertEqual(qconfig_mapping.module_name_object_type_order_qconfigs[key2], qconfig2)
self.assertEqual(maybe_adjust_qconfig_for_module_name_object_type_order(
qconfig_mapping, "mod1", torch.nn.Linear, 0, None), qconfig1)
self.assertEqual(maybe_adjust_qconfig_for_module_name_object_type_order(
qconfig_mapping, "mod2", torch.nn.ReLU, 1, None), qconfig2)
# Override existing key
qconfig_mapping.set_module_name_object_type_order("mod1", torch.nn.Linear, 0, qconfig3)
self.assertEqual(len(qconfig_mapping.module_name_object_type_order_qconfigs), 2)
self.assertEqual(list(qconfig_mapping.module_name_object_type_order_qconfigs)[0], key1)
self.assertEqual(list(qconfig_mapping.module_name_object_type_order_qconfigs)[1], key2)
self.assertEqual(qconfig_mapping.module_name_object_type_order_qconfigs[key1], qconfig3)
self.assertEqual(qconfig_mapping.module_name_object_type_order_qconfigs[key2], qconfig2)
self.assertEqual(maybe_adjust_qconfig_for_module_name_object_type_order(
qconfig_mapping, "mod1", torch.nn.Linear, 0, None), qconfig3)
self.assertEqual(maybe_adjust_qconfig_for_module_name_object_type_order(
qconfig_mapping, "mod2", torch.nn.ReLU, 1, None), qconfig2)
# No match
self.assertEqual(maybe_adjust_qconfig_for_module_name_object_type_order(
qconfig_mapping, "mod123", torch.nn.Linear, 0, None), None)
self.assertEqual(maybe_adjust_qconfig_for_module_name_object_type_order(
qconfig_mapping, "mod1", torch.nn.Linear, 35, None), None)
self.assertEqual(maybe_adjust_qconfig_for_module_name_object_type_order(
qconfig_mapping, "mod2", torch.nn.Conv2d, 1, None), None)
def _get_qconfig_dict_for_qconfig_mapping_test(self, global_qconfig, qconfig1, qconfig2):
"""
Return a dummy qconfig_dict to test QConfigMapping's to_dict and from_dict methods.
"""
return {
GLOBAL_DICT_KEY: global_qconfig,
OBJECT_TYPE_DICT_KEY: [
(torch.nn.Linear, qconfig1),
(torch.nn.ReLU, qconfig2),
],
MODULE_NAME_REGEX_DICT_KEY: [
("foo.*bar", qconfig1),
("foo.*", qconfig2),
],
MODULE_NAME_DICT_KEY: [
("bazbaz", qconfig1),
("borbor", qconfig2),
],
MODULE_NAME_OBJECT_TYPE_ORDER_DICT_KEY: [
("bazbaz", torch.nn.Linear, 0, qconfig1),
("foofoo", torch.nn.ReLU, 1, qconfig2),
],
}
    def test_qconfig_dict_validity(self):
        """ Verifies that passing an unsupported key (e.g. the typo 'object_typo') in
        qconfig_dict raises a ValueError listing the supported keys.
        """
        # NOTE: minimal, hypothetical setup; only the assertRaises block and the assertions
        # below come from the original test body.
        m = nn.Sequential(nn.Conv2d(3, 3, 3)).eval()
        qconfig_dict = {"object_typo": [(torch.nn.Conv2d, default_qconfig)]}
        with self.assertRaises(ValueError) as context:
            m = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 3, 3, 3),))
        self.assertTrue(
            'Expected qconfig_dict to have the following keys:' in str(context.exception)
        )
        self.assertTrue('But found \'object_typo\' instead.' in str(context.exception))
def test_qconfig_mapping_from_dict(self):
global_qconfig = QConfig(123, "global")
qconfig1 = QConfig(1, "one")
qconfig2 = QConfig(2, "two")
qconfig_dict = self._get_qconfig_dict_for_qconfig_mapping_test(global_qconfig, qconfig1, qconfig2)
qconfig_dict["undefined_dict_key"] = [(123, qconfig1), (234, qconfig2)]
qconfig_mapping = QConfigMapping.from_dict(qconfig_dict)
self.assertEqual(qconfig_mapping.global_qconfig, global_qconfig)
self.assertEqual(qconfig_mapping.object_type_qconfigs, OrderedDict({
torch.nn.Linear: qconfig1,
torch.nn.ReLU: qconfig2,
}))
self.assertEqual(qconfig_mapping.module_name_regex_qconfigs, OrderedDict({
"foo.*bar": qconfig1,
"foo.*": qconfig2,
}))
self.assertEqual(qconfig_mapping.module_name_qconfigs, OrderedDict({
"bazbaz": qconfig1,
"borbor": qconfig2,
}))
self.assertEqual(qconfig_mapping.module_name_object_type_order_qconfigs, OrderedDict({
("bazbaz", torch.nn.Linear, 0): qconfig1,
("foofoo", torch.nn.ReLU, 1): qconfig2,
}))
def test_qconfig_mapping_to_dict(self):
global_qconfig = QConfig(123, "global")
qconfig1 = QConfig(1, "one")
qconfig2 = QConfig(2, "two")
qconfig_mapping = QConfigMapping().set_global(global_qconfig) \
.set_object_type(torch.nn.Linear, qconfig1) \
.set_object_type(torch.nn.ReLU, qconfig2) \
.set_module_name_regex("foo.*bar", qconfig1) \
.set_module_name_regex("foo.*", qconfig2) \
.set_module_name("bazbaz", qconfig1) \
.set_module_name("borbor", qconfig2) \
.set_module_name_object_type_order("bazbaz", torch.nn.Linear, 0, qconfig1) \
.set_module_name_object_type_order("foofoo", torch.nn.ReLU, 1, qconfig2)
qconfig_dict = self._get_qconfig_dict_for_qconfig_mapping_test(global_qconfig, qconfig1, qconfig2)
self.assertEqual(qconfig_mapping.to_dict(), qconfig_dict)
# Dummy classes for PrepareCustomConfig testing
class _DummyStandaloneModule:
pass
class _DummyFloatModule:
pass
class _DummyObservedModule:
pass
class _DummyQuantizedModule:
pass
class _DummyNonTraceableModule1:
pass
class _DummyNonTraceableModule2:
pass
def test_prepare_custom_config_set_standalone_module_name(self):
qconfig_mapping = QConfigMapping()
example_inputs = (torch.randn(3),)
child_prepare_custom_config = PrepareCustomConfig()
backend_config = BackendConfig("my_backend")
config_entry = StandaloneModuleConfigEntry(
qconfig_mapping, example_inputs, child_prepare_custom_config, backend_config)
prepare_custom_config = PrepareCustomConfig()
self.assertEqual(len(prepare_custom_config.standalone_module_names), 0)
prepare_custom_config.set_standalone_module_name(
"module1", qconfig_mapping, example_inputs, child_prepare_custom_config, backend_config)
self.assertEqual(list(prepare_custom_config.standalone_module_names.keys()), ["module1"])
self.assertEqual(prepare_custom_config.standalone_module_names["module1"], config_entry)
def test_prepare_custom_config_set_standalone_module_class(self):
qconfig_mapping = QConfigMapping()
example_inputs = (torch.randn(3),)
child_prepare_custom_config = PrepareCustomConfig()
backend_config = BackendConfig("my_backend")
config_entry = StandaloneModuleConfigEntry(
qconfig_mapping, example_inputs, child_prepare_custom_config, backend_config)
prepare_custom_config = PrepareCustomConfig()
self.assertEqual(len(prepare_custom_config.standalone_module_classes), 0)
prepare_custom_config.set_standalone_module_class(
self._DummyStandaloneModule, qconfig_mapping, example_inputs, child_prepare_custom_config, backend_config)
self.assertEqual(len(prepare_custom_config.standalone_module_classes), 1)
self.assertTrue(self._DummyStandaloneModule in prepare_custom_config.standalone_module_classes)
self.assertEqual(prepare_custom_config.standalone_module_classes[self._DummyStandaloneModule], config_entry)
def test_prepare_custom_config_set_float_to_observed_mapping(self):
prepare_custom_config = PrepareCustomConfig()
self.assertEqual(len(prepare_custom_config.float_to_observed_mapping), 0)
prepare_custom_config.set_float_to_observed_mapping(self._DummyFloatModule, self._DummyObservedModule, QuantType.STATIC)
self.assertEqual(len(prepare_custom_config.float_to_observed_mapping), 1)
self.assertEqual(list(prepare_custom_config.float_to_observed_mapping.keys()), [QuantType.STATIC])
self.assertEqual(len(prepare_custom_config.float_to_observed_mapping[QuantType.STATIC]), 1)
self.assertTrue(self._DummyFloatModule in prepare_custom_config.float_to_observed_mapping[QuantType.STATIC])
self.assertEqual(prepare_custom_config.float_to_observed_mapping[QuantType.STATIC][self._DummyFloatModule],
self._DummyObservedModule)
def test_prepare_custom_config_set_non_traceable_module_names(self):
prepare_custom_config = PrepareCustomConfig()
self.assertEqual(len(prepare_custom_config.non_traceable_module_names), 0)
prepare_custom_config.set_non_traceable_module_names(["module1", "module2"])
self.assertEqual(prepare_custom_config.non_traceable_module_names, ["module1", "module2"])
def test_prepare_custom_config_set_non_traceable_module_classes(self):
prepare_custom_config = PrepareCustomConfig()
self.assertEqual(len(prepare_custom_config.non_traceable_module_classes), 0)
prepare_custom_config.set_non_traceable_module_classes([self._DummyNonTraceableModule1, self._DummyNonTraceableModule2])
self.assertEqual(prepare_custom_config.non_traceable_module_classes,
[self._DummyNonTraceableModule1, self._DummyNonTraceableModule2])
def test_prepare_custom_config_set_input_quantized_indexes(self):
prepare_custom_config = PrepareCustomConfig()
self.assertEqual(len(prepare_custom_config.input_quantized_indexes), 0)
prepare_custom_config.set_input_quantized_indexes([0, 1])
self.assertEqual(prepare_custom_config.input_quantized_indexes, [0, 1])
def test_prepare_custom_config_set_output_quantized_indexes(self):
prepare_custom_config = PrepareCustomConfig()
self.assertEqual(len(prepare_custom_config.output_quantized_indexes), 0)
prepare_custom_config.set_output_quantized_indexes([0, 1])
self.assertEqual(prepare_custom_config.output_quantized_indexes, [0, 1])
def test_prepare_custom_config_set_preserved_attributes(self):
prepare_custom_config = PrepareCustomConfig()
self.assertEqual(len(prepare_custom_config.preserved_attributes), 0)
prepare_custom_config.set_preserved_attributes(["attr1", "attr2"])
self.assertEqual(prepare_custom_config.preserved_attributes, ["attr1", "attr2"])
def _get_dummy_prepare_custom_config_dict(self):
"""
Return a dummy prepare_custom_config_dict to test PrepareCustomConfig's to_dict and from_dict methods.
"""
return {
STANDALONE_MODULE_NAME_DICT_KEY: [(
"module1",
QConfigMapping(),
(torch.randn(3),),
PrepareCustomConfig(),
BackendConfig("my_backend"),
)],
STANDALONE_MODULE_CLASS_DICT_KEY: [(
self._DummyStandaloneModule,
QConfigMapping(),
(torch.randn(10),),
PrepareCustomConfig(),
BackendConfig("my_backend"),
)],
FLOAT_TO_OBSERVED_DICT_KEY: {
"static": {
self._DummyFloatModule: self._DummyObservedModule
},
},
NON_TRACEABLE_MODULE_NAME_DICT_KEY: ["module2", "module3"],
NON_TRACEABLE_MODULE_CLASS_DICT_KEY: [self._DummyNonTraceableModule1, self._DummyNonTraceableModule2],
INPUT_QUANTIZED_INDEXES_DICT_KEY: [0, 1],
OUTPUT_QUANTIZED_INDEXES_DICT_KEY: [0, 1],
PRESERVED_ATTRIBUTES_DICT_KEY: ["attr1", "attr2"]
}
def test_prepare_custom_config_from_dict(self):
prepare_custom_config_dict = self._get_dummy_prepare_custom_config_dict()
(sm_name, qm1, ei1, pcc1, bcd1) = prepare_custom_config_dict[STANDALONE_MODULE_NAME_DICT_KEY][0]
(sm_class, qm2, ei2, pcc2, bcd2) = prepare_custom_config_dict[STANDALONE_MODULE_CLASS_DICT_KEY][0]
sm_config_entry1 = StandaloneModuleConfigEntry(qm1, ei1, pcc1, bcd1)
sm_config_entry2 = StandaloneModuleConfigEntry(qm2, ei2, pcc2, bcd2)
prepare_custom_config = PrepareCustomConfig.from_dict(prepare_custom_config_dict)
# Standalone modules
self.assertEqual(len(prepare_custom_config.standalone_module_names), 1)
self.assertTrue(sm_name in prepare_custom_config.standalone_module_names)
self.assertEqual(prepare_custom_config.standalone_module_names[sm_name], sm_config_entry1)
self.assertEqual(len(prepare_custom_config.standalone_module_classes), 1)
self.assertTrue(sm_class in prepare_custom_config.standalone_module_classes)
self.assertEqual(prepare_custom_config.standalone_module_classes[sm_class], sm_config_entry2)
# Float to observed mapping
self.assertEqual(len(prepare_custom_config.float_to_observed_mapping), 1)
self.assertEqual(list(prepare_custom_config.float_to_observed_mapping.keys()), [QuantType.STATIC])
self.assertEqual(len(prepare_custom_config.float_to_observed_mapping[QuantType.STATIC]), 1)
self.assertTrue(self._DummyFloatModule in prepare_custom_config.float_to_observed_mapping[QuantType.STATIC])
self.assertEqual(prepare_custom_config.float_to_observed_mapping[QuantType.STATIC][self._DummyFloatModule],
self._DummyObservedModule)
# Other
self.assertEqual(prepare_custom_config.non_traceable_module_names, ["module2", "module3"])
self.assertEqual(prepare_custom_config.non_traceable_module_classes,
[self._DummyNonTraceableModule1, self._DummyNonTraceableModule2])
self.assertEqual(prepare_custom_config.input_quantized_indexes, [0, 1])
self.assertEqual(prepare_custom_config.output_quantized_indexes, [0, 1])
self.assertEqual(prepare_custom_config.preserved_attributes, ["attr1", "attr2"])
def test_prepare_custom_config_to_dict(self):
prepare_custom_config_dict = self._get_dummy_prepare_custom_config_dict()
(sm_name, qm1, ei1, pcc1, bcd1) = prepare_custom_config_dict[STANDALONE_MODULE_NAME_DICT_KEY][0]
(sm_class, qm2, ei2, pcc2, bcd2) = prepare_custom_config_dict[STANDALONE_MODULE_CLASS_DICT_KEY][0]
prepare_custom_config = PrepareCustomConfig() \
.set_standalone_module_name(sm_name, qm1, ei1, pcc1, bcd1) \
.set_standalone_module_class(sm_class, qm2, ei2, pcc2, bcd2) \
.set_float_to_observed_mapping(self._DummyFloatModule, self._DummyObservedModule) \
.set_non_traceable_module_names(["module2", "module3"]) \
.set_non_traceable_module_classes([self._DummyNonTraceableModule1, self._DummyNonTraceableModule2]) \
.set_input_quantized_indexes([0, 1]) \
.set_output_quantized_indexes([0, 1]) \
.set_preserved_attributes(["attr1", "attr2"])
# PrepareCustomConfig.to_dict also converts internal QConfigMappings and PrepareCustomConfigs to dicts
prepare_custom_config_dict[STANDALONE_MODULE_NAME_DICT_KEY][0] = (sm_name, qm1.to_dict(), ei1, pcc1.to_dict(), bcd1)
prepare_custom_config_dict[STANDALONE_MODULE_CLASS_DICT_KEY][0] = (sm_class, qm2.to_dict(), ei2, pcc2.to_dict(), bcd2)
self.assertEqual(prepare_custom_config.to_dict(), prepare_custom_config_dict)
def test_convert_custom_config_set_observed_to_quantized_mapping(self):
convert_custom_config = ConvertCustomConfig()
self.assertEqual(len(convert_custom_config.observed_to_quantized_mapping), 0)
convert_custom_config.set_observed_to_quantized_mapping(
self._DummyObservedModule, self._DummyQuantizedModule, QuantType.STATIC)
self.assertEqual(len(convert_custom_config.observed_to_quantized_mapping), 1)
self.assertEqual(list(convert_custom_config.observed_to_quantized_mapping.keys()), [QuantType.STATIC])
self.assertTrue(self._DummyObservedModule in convert_custom_config.observed_to_quantized_mapping[QuantType.STATIC])
self.assertEqual(convert_custom_config.observed_to_quantized_mapping[QuantType.STATIC][self._DummyObservedModule],
self._DummyQuantizedModule)
def test_convert_custom_config_set_preserved_attributes(self):
convert_custom_config = ConvertCustomConfig()
self.assertEqual(len(convert_custom_config.preserved_attributes), 0)
convert_custom_config.set_preserved_attributes(["attr1", "attr2"])
self.assertEqual(convert_custom_config.preserved_attributes, ["attr1", "attr2"])
def _get_dummy_convert_custom_config_dict(self):
"""
Return a dummy convert_custom_config_dict to test ConvertCustomConfig's to_dict and from_dict methods.
"""
return {
OBSERVED_TO_QUANTIZED_DICT_KEY: {
"static": {
self._DummyObservedModule: self._DummyQuantizedModule
},
},
PRESERVED_ATTRIBUTES_DICT_KEY: ["attr1", "attr2"]
}
def test_convert_custom_config_from_dict(self):
convert_custom_config_dict = self._get_dummy_convert_custom_config_dict()
convert_custom_config = ConvertCustomConfig.from_dict(convert_custom_config_dict)
self.assertEqual(len(convert_custom_config.observed_to_quantized_mapping), 1)
self.assertEqual(list(convert_custom_config.observed_to_quantized_mapping.keys()), [QuantType.STATIC])
self.assertEqual(len(convert_custom_config.observed_to_quantized_mapping[QuantType.STATIC]), 1)
self.assertTrue(self._DummyObservedModule in convert_custom_config.observed_to_quantized_mapping[QuantType.STATIC])
self.assertEqual(convert_custom_config.observed_to_quantized_mapping[QuantType.STATIC][self._DummyObservedModule],
self._DummyQuantizedModule)
self.assertEqual(convert_custom_config.preserved_attributes, ["attr1", "attr2"])
def test_convert_custom_config_to_dict(self):
convert_custom_config = ConvertCustomConfig() \
.set_observed_to_quantized_mapping(self._DummyObservedModule, self._DummyQuantizedModule) \
.set_preserved_attributes(["attr1", "attr2"])
self.assertEqual(convert_custom_config.to_dict(), self._get_dummy_convert_custom_config_dict())
def test_fuse_custom_config_set_preserved_attributes(self):
fuse_custom_config = FuseCustomConfig()
self.assertEqual(len(fuse_custom_config.preserved_attributes), 0)
fuse_custom_config.set_preserved_attributes(["attr1", "attr2"])
self.assertEqual(fuse_custom_config.preserved_attributes, ["attr1", "attr2"])
def test_fuse_custom_config_from_dict(self):
fuse_custom_config_dict = {PRESERVED_ATTRIBUTES_DICT_KEY: ["attr1", "attr2"]}
fuse_custom_config = FuseCustomConfig.from_dict(fuse_custom_config_dict)
self.assertEqual(fuse_custom_config.preserved_attributes, ["attr1", "attr2"])
def test_fuse_custom_config_to_dict(self):
fuse_custom_config_dict = {PRESERVED_ATTRIBUTES_DICT_KEY: ["attr1", "attr2"]}
fuse_custom_config = FuseCustomConfig().set_preserved_attributes(["attr1", "attr2"])
self.assertEqual(fuse_custom_config.to_dict(), fuse_custom_config_dict)
def test_remove_qconfig(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.avg_pool = torch.nn.AvgPool2d(1)
def forward(self, x):
return self.avg_pool(x)
m = M().eval()
qconfig_dict = {'': default_qconfig}
example_inputs = (torch.randn(1, 1, 1, 1),)
m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
m(*example_inputs)
m = convert_fx(m)
m(*example_inputs)
for name, module in m.named_modules():
self.assertFalse(hasattr(module, 'qconfig'),
'qconfig is not removed for ' + name)
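# Note: most of the tests below follow the same prepare/calibrate/convert flow, roughly:
#     prepared = prepare_fx(model, qconfig_dict_or_mapping, example_inputs=example_inputs)
#     prepared(*example_inputs)   # calibration
#     quantized = convert_fx(prepared)
# The names above simply mirror the locals used in the individual tests.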
def test_return_none(self):
class M(torch.nn.Module):
def forward(self, x):
pass
m = M().eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
m = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1),))
m = convert_fx(m)
def test_default_quant_after_none_qconfig(self):
""" Make sure default quant is inserted properly"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = x.transpose(1, 2)
x = self.conv2(x)
return x
m = M().eval()
qconfig_dict = {
"": default_qconfig,
"module_name": [
("conv1", None)
]
}
m = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 1, 1, 1),))
m = convert_fx(m)
def test_qconfig_for_call_method(self):
class Sub(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = x.transpose(2, 3)
x = self.conv(x)
return x.transpose(2, 3)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.sub = Sub()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = self.sub(x)
x = self.conv2(x)
return x.transpose(2, 3)
qconfig_dict1 = {"": default_qconfig, "module_name": [("sub", None)]}
# since sub is configured to have qconfig None, we should dequantize the output
# of self.conv1 and quantize the input of self.conv2
# the dequantize after conv2 should happen after the transpose, since the
# transpose is also covered by the global default_qconfig
# nodes in the Sub module instance are not quantized
node_list1 = [
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Conv2d),
ns.call_method("dequantize"),
ns.call_method("transpose"),
ns.call_module(nn.Conv2d),
ns.call_method("transpose"),
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Conv2d),
ns.call_method("transpose"),
ns.call_method("dequantize")
]
qconfig_dict2 = {"": None, "module_name": [("sub", default_qconfig)]}
# Only nodes in the Sub module instance are quantized
# the first transpose is not quantized because the input is not quantized
node_list2 = [
ns.call_module(nn.Conv2d),
ns.call_function(torch.quantize_per_tensor),
ns.call_method("transpose"),
ns.call_module(nnq.Conv2d),
ns.call_method("transpose"),
ns.call_method("dequantize"),
ns.call_module(nn.Conv2d),
ns.call_method("transpose"),
]
for qconfig_dict, node_list in [
(qconfig_dict1, node_list1),
(qconfig_dict2, node_list2)
]:
example_inputs = (torch.randn(2, 1, 3, 3),)
m = M().eval()
m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
m(torch.randn(2, 1, 3, 3))
m = convert_fx(m)
self.checkGraphModuleNodes(m, expected_node_list=node_list)
# make sure it runs
m(*example_inputs)
def test_qconfig_for_call_func(self):
class Linear(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.ones(5, 5)
self.b = torch.zeros(5)
def forward(self, x):
return torch.nn.functional.linear(x, self.w, self.b)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.mods1 = torch.nn.Sequential(
Linear(),
Linear()
)
self.mods2 = Linear()
def forward(self, x):
x = self.mods1(x)
x = self.mods2(x)
return x
model = M().eval()
example_inputs = (torch.rand(5, 5),)
qconfig_dict = {"": default_qconfig, "module_name": [("mods2", None)]}
m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
m(*example_inputs)
m = convert_fx(m)
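# mods1 is quantized (two quantized functional linears), while mods2 has qconfig None,
# so we expect a dequantize before the remaining fp32 functional linear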
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.linear),
ns.call_function(torch.ops.quantized.linear),
ns.call_method('dequantize'),
ns.call_function(torch.nn.functional.linear)
]
self.checkGraphModuleNodes(m, expected_node_list=node_list)
m(torch.rand(5, 5))
def test_preserve_attributes(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
return self.conv(x)
m = M()
m.eval()
m.preserved_attr = 3
prepare_custom_config_dict = {
"preserved_attributes": ["preserved_attr"]
}
example_inputs = (torch.randn(1, 1, 1, 1),)
m = prepare_fx(
m,
{"": default_qconfig},
example_inputs=example_inputs,
prepare_custom_config=prepare_custom_config_dict)
def assertAttrPreserved(m):
self.assertTrue(hasattr(m, "preserved_attr"))
self.assertEqual(m.preserved_attr, 3)
assertAttrPreserved(m)
convert_custom_config_dict = {
"preserved_attributes": ["preserved_attr"]
}
m = convert_fx(m, convert_custom_config=convert_custom_config_dict)
assertAttrPreserved(m)
@skipIfNoFBGEMM
def test_qat_and_script(self):
model = LinearModelWithSubmodule().train()
qengine = torch.backends.quantized.engine
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig(qengine)}
x = torch.randn(5, 5)
example_inputs = (x,)
model = prepare_qat_fx(model, qconfig_dict, example_inputs=example_inputs)
# ensure scripting works
scripted = torch.jit.script(model)
# run one round to make sure model runs
scripted(x)
FileCheck().check_count('FakeQuantize = prim::GetAttr[name="', 4, exactly=True) \
.run(scripted.graph)
# disable fake_quant and observer
for epoch in range(3):
if epoch == 1:
scripted.apply(torch.ao.quantization.disable_observer)
if epoch == 2:
scripted.apply(torch.ao.quantization.disable_fake_quant)
# ensure the fake_quant and observer have been disabled.
matches = ['.fake_quant_enabled', '.observer_enabled']
for key, v in scripted.state_dict().items():
if any(x in key for x in matches):
self.assertEqual(v, torch.tensor([0], dtype=torch.int64))
# enable them back
scripted.apply(torch.ao.quantization.enable_fake_quant)
scripted.apply(torch.ao.quantization.enable_observer)
for key, v in scripted.state_dict().items():
if any(x in key for x in matches):
self.assertEqual(v, torch.tensor([1], dtype=torch.int64))
@skipIfNoFBGEMM
def test_save_observer_state_dict(self):
orig = LinearModelWithSubmodule().eval()
model = orig
qconfig_dict = {'': torch.ao.quantization.get_default_qconfig('fbgemm')}
x = torch.randn(5, 5)
model = prepare_fx(model, qconfig_dict, example_inputs=(x,))
# run it through input
model(x)
quant = convert_fx(model)
# save state_dict of model
obs_dict = torch.ao.quantization.get_observer_state_dict(model)
b = io.BytesIO()
torch.save(obs_dict, b)
b.seek(0)
# Load the stats into new model
model_2 = orig
model_2 = prepare_fx(model_2, qconfig_dict, example_inputs=(x,))
loaded_dict = torch.load(b)
torch.ao.quantization.load_observer_state_dict(model_2, loaded_dict)
quant_2 = convert_fx(model_2)
# Verify that loaded state dict produces same results.
self.assertEqual(quant(x), quant_2(x))
@skipIfNoFBGEMM
def test_custom_module_class(self):
class CustomModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 3)
def forward(self, x):
return self.linear(x)
class ObservedCustomModule(torch.nn.Module):
def __init__(self, linear):
super().__init__()
self.linear = linear
def forward(self, x):
return self.linear(x)
@classmethod
def from_float(cls, float_module):
assert hasattr(float_module, 'qconfig')
observed = cls(float_module.linear)
observed.qconfig = float_module.qconfig
return observed
class StaticQuantCustomModule(torch.nn.Module):
def __init__(self, linear):
super().__init__()
self.linear = linear
def forward(self, x):
return self.linear(x)
@classmethod
def from_observed(cls, observed_module):
assert hasattr(observed_module, 'qconfig')
assert hasattr(observed_module, 'activation_post_process')
observed_module.linear.activation_post_process = \
observed_module.activation_post_process
quantized = cls(nnq.Linear.from_float(observed_module.linear))
return quantized
class DynamicQuantCustomModule(torch.nn.Module):
def __init__(self, linear):
super().__init__()
self.linear = linear
def forward(self, x):
return self.linear(x)
@classmethod
def from_observed(cls, observed_module):
assert hasattr(observed_module, 'qconfig')
quantized = cls(nnqd.Linear.from_float(observed_module.linear))
return quantized
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 3)
self.custom = CustomModule()
def forward(self, x):
x = self.linear(x)
x = self.custom(x)
return x
class RefM(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = torch.nn.Linear(3, 3)
self.linear2 = torch.nn.Linear(3, 3)
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
return x
# instantiate M and RefM and align the parameters
original_m = M().eval()
original_ref_m = RefM().eval()
original_ref_m.linear1.weight = torch.nn.Parameter(original_m.linear.weight.detach())
original_ref_m.linear1.bias = torch.nn.Parameter(original_m.linear.bias.detach())
original_ref_m.linear2.weight = torch.nn.Parameter(original_m.custom.linear.weight.detach())
original_ref_m.linear2.bias = torch.nn.Parameter(original_m.custom.linear.bias.detach())
test_configs = {
"static": (default_qconfig, StaticQuantCustomModule, 3),
"dynamic": (default_dynamic_qconfig, DynamicQuantCustomModule, 0)
}
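# For the static case the custom module is swapped via the float->observed and
# observed->quantized custom module class mappings below; for the dynamic case it is
# marked non-traceable at prepare time and swapped only at convert time.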
for quant_type in [QuantType.STATIC, QuantType.DYNAMIC]:
key = quant_type_to_str(quant_type)
qconfig, quantized_module_class, num_observers = test_configs[key]
qconfig_dict = {"": qconfig}
if key == "static":
prepare_custom_config_dict = {
"float_to_observed_custom_module_class": {
"static": {
CustomModule: ObservedCustomModule
}
}
}
convert_custom_config_dict = {
"observed_to_quantized_custom_module_class": {
"static": {
ObservedCustomModule: quantized_module_class
}
}
}
else:
prepare_custom_config_dict = {
"non_traceable_module_class": [
CustomModule
]
}
convert_custom_config_dict = {
"observed_to_quantized_custom_module_class": {
"dynamic": {
CustomModule: quantized_module_class
}
}
}
example_inputs = (torch.randn(3, 3),)
# check prepared model
m = prepare_fx(
original_m,
qconfig_dict,
example_inputs=example_inputs,
prepare_custom_config=prepare_custom_config_dict)
# calibration
m(*example_inputs)
# all activation observers are inserted in the top level module
count_check = {
ns.call_module(torch.ao.quantization.MinMaxObserver): num_observers
}
self.checkGraphModuleNodes(m, expected_node_occurrence=count_check)
# check converted/quantized model
m = convert_fx(
m,
convert_custom_config=convert_custom_config_dict)
if quant_type == QuantType.STATIC:
count_check = {
ns.call_function(torch.quantize_per_tensor) : 1,
ns.call_module(nnq.Linear) : 1,
ns.call_method('dequantize') : 1,
}
self.checkGraphModuleNodes(m, expected_node_occurrence=count_check)
self.assertEqual(type(m.custom), quantized_module_class)
res = m(*example_inputs)
# quantize the reference model
ref_m = prepare_fx(original_ref_m, qconfig_dict, example_inputs=example_inputs)
ref_m(*example_inputs)
ref_m = convert_fx(ref_m)
ref_res = ref_m(*example_inputs)
self.assertEqual(res, ref_res)
@skipIfNoFBGEMM
def test_custom_module_class_input_has_multiple_users(self):
""" Tests that the flow still works when the input of custom module
has multiple users
"""
class CustomModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 3)
def forward(self, x):
return self.linear(x)
class ObservedCustomModule(torch.nn.Module):
def __init__(self, linear):
super().__init__()
self.linear = linear
def forward(self, x):
return self.linear(x)
@classmethod
def from_float(cls, float_module):
assert hasattr(float_module, 'qconfig')
observed = cls(float_module.linear)
observed.qconfig = float_module.qconfig
return observed
class StaticQuantCustomModule(torch.nn.Module):
def __init__(self, linear):
super().__init__()
self.linear = linear
def forward(self, x):
return self.linear(x)
@classmethod
def from_observed(cls, observed_module):
assert hasattr(observed_module, 'qconfig')
assert hasattr(observed_module, 'activation_post_process')
observed_module.linear.activation_post_process = \
observed_module.activation_post_process
quantized = cls(nnq.Linear.from_float(observed_module.linear))
return quantized
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 3)
self.custom = CustomModule()
def forward(self, x0):
x1 = self.custom(x0)
x2 = self.linear(x0)
return x1 + x2
prepare_custom_config_dict = {
"float_to_observed_custom_module_class": {
"static": {
CustomModule: ObservedCustomModule
}
}
}
convert_custom_config_dict = {
"observed_to_quantized_custom_module_class": {
"static": {
ObservedCustomModule: StaticQuantCustomModule
}
}
}
m = M().eval()
example_inputs = (torch.randn(3, 3),)
m = prepare_fx(
m,
{"": default_qconfig},
example_inputs=example_inputs,
prepare_custom_config=prepare_custom_config_dict)
# make sure it works
m = convert_fx(
m,
convert_custom_config=convert_custom_config_dict)
# make sure it runs
m(*example_inputs)
@skipIfNoFBGEMM
def test_non_traceable_module(self):
class NonTraceable(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
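# iterating over the keys of a dict input is not symbolically traceable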
for k in x.keys():
print(x[k])
return x
class NonTraceable2(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
# data dependent control flow is not traceable
for i in x:
print(i)
return x
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.m1 = NonTraceable()
self.m2 = NonTraceable2()
def forward(self, x):
x = self.m1(x)
x = self.m2(x)
return x
m = M().eval()
qconfig_dict = {"": default_qconfig}
prepare_custom_config_dict = {
"non_traceable_module_name": [
"m1"
],
"non_traceable_module_class": [
NonTraceable2
]
}
m = prepare_fx(
m, qconfig_dict,
example_inputs=({"key": torch.randn(1)},),
prepare_custom_config=prepare_custom_config_dict)
node_occurrence = {
ns.call_module(NonTraceable) : 1,
ns.call_module(NonTraceable2) : 1,
}
# make sure these modules are not traced
self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
def test_prepared_model_deepcopy(self):
"""Ensures that copy.deepcopy works correctly on a prepared model.
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
self._foobar = 'foobar'
self.foobar2 = 'foobar2'
def forward(self, x):
x = self.conv(x)
return x
m = M()
m.eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
example_inputs = (torch.randn(4, 1, 4, 4),)
prepared = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
# calibrate
prepared(*example_inputs)
# copy
prepared_copy = copy.deepcopy(prepared)
# quantize, should run with no errors
quantized = convert_fx(prepared_copy)
def test_quantized_model_type(self):
""" Test state_dict and deepcopy works properly in the quantized model
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
return self.linear(x)
example_inputs = (torch.rand(8, 5),)
m = M().eval()
m = prepare_fx(m, {"": default_qconfig}, example_inputs=example_inputs)
m = convert_fx(m)
# test deepcopy
m_copy = copy.deepcopy(m)
self.assertEqual(m_copy(*example_inputs), m(*example_inputs))
# test state_dict
state_dict = m.state_dict()
m_new = M().eval()
m_new = prepare_fx(m_new, {"": default_qconfig}, example_inputs=example_inputs)
m_new = convert_fx(m_new)
m_new.load_state_dict(state_dict)
self.assertEqual(m_new(*example_inputs), m(*example_inputs))
def test_dequantize(self):
r""" Test to make sure dequantize node are placed before
non-quantizable node
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.act = torch.nn.GELU()
def forward(self, x):
x = self.conv(x)
return self.act(x)
data = torch.rand(5, 1, 3, 3, dtype=torch.float)
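# GELU is not quantized here, so a dequantize is expected between the
# quantized conv and the fp32 GELU module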
for quant_type in self.static_quant_types:
node_list = [
ns.call_module(nnq.Conv2d),
ns.call_method("dequantize"),
ns.call_module(nn.GELU),
]
self.checkGraphModeFxOp(
M().eval(), (data,), quant_type, expected_node_list=node_list)
def test_sequential(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.convs = torch.nn.Sequential(
torch.nn.Conv2d(1, 1, 1),
torch.nn.Conv2d(1, 1, 1)
)
def forward(self, x):
x = self.convs(x)
return x
data = torch.rand(5, 1, 3, 3, dtype=torch.float)
for quant_type in self.static_quant_types:
node_list = [
ns.call_module(nnq.Conv2d),
ns.call_module(nnq.Conv2d),
]
self.checkGraphModeFxOp(
M().eval(), (data,), quant_type, expected_node_list=node_list)
def _test_quantized_inputs_outputs(
self, prepare_custom_config_dict, prepare_count_check,
convert_count_check):
"""
Test the option to have inputs and outputs of the graph quantized
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
# quantized input, quantized output
m = M()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
example_inputs = (torch.randn(1, 1, 4, 4),)
m.eval()
mp = torch.ao.quantization.quantize_fx.prepare_fx(
m, qconfig_dict,
example_inputs=example_inputs,
prepare_custom_config=prepare_custom_config_dict)
self.checkGraphModuleNodes(mp, expected_node_occurrence=prepare_count_check)
mp(*example_inputs)
mq = torch.ao.quantization.quantize_fx.convert_fx(mp)
self.checkGraphModuleNodes(mq, expected_node_occurrence=convert_count_check)
def test_quantized_input_quantized_output(self):
prepare_custom_config_dict = {
'input_quantized_idxs': [0], 'output_quantized_idxs': [0]}
prepare_count_check = {
ns.call_module(torch.ao.quantization.MinMaxObserver): 2,
}
convert_count_check = {
ns.call_function(torch.quantize_per_tensor): 0,
ns.call_method('dequantize'): 0,
}
self._test_quantized_inputs_outputs(
prepare_custom_config_dict, prepare_count_check, convert_count_check)
def test_fp32_input_quantized_output(self):
prepare_custom_config_dict = {
'output_quantized_idxs': [0]}
prepare_count_check = {
ns.call_module(torch.ao.quantization.MinMaxObserver): 3,
}
convert_count_check = {
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_method('dequantize'): 0,
}
self._test_quantized_inputs_outputs(
prepare_custom_config_dict, prepare_count_check, convert_count_check)
def test_quantized_input_fp32_output(self):
prepare_custom_config_dict = {
'input_quantized_idxs': [0]}
prepare_count_check = {
ns.call_module(torch.ao.quantization.MinMaxObserver): 2,
}
convert_count_check = {
ns.call_function(torch.quantize_per_tensor): 0,
ns.call_method('dequantize'): 1,
}
self._test_quantized_inputs_outputs(
prepare_custom_config_dict, prepare_count_check, convert_count_check)
def test_fp32_input_fp32_output(self):
prepare_custom_config_dict = {}
prepare_count_check = {
ns.call_module(torch.ao.quantization.MinMaxObserver): 3,
}
convert_count_check = {
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_method('dequantize'): 1,
}
self._test_quantized_inputs_outputs(
prepare_custom_config_dict, prepare_count_check, convert_count_check)
@skipIfNoFBGEMM
def test_convtranspose_per_channel_fails_early(self):
r"""
Verifies that attempting to quantize a ConvTranspose module with per-channel
weight observers fails in the prepare step, as opposed to the convert step.
"""
m = torch.nn.Sequential(torch.nn.ConvTranspose2d(1, 1, 1))
m.eval()
qconfig_dict = {'': torch.ao.quantization.get_default_qconfig('fbgemm')}
with self.assertRaises(AssertionError) as context:
mp = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 1, 1, 1),))
self.assertTrue(
str(context.exception) ==
'Per channel weight observer is not supported yet for ConvTranspose{n}d.')
@skipIfNoFBGEMM
def test_qparams_buffers(self):
class Linear(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.ones(5, 5)
self.b = torch.zeros(5)
def forward(self, x):
return torch.nn.functional.linear(x, self.w, self.b)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.mods1 = torch.nn.Sequential(
Linear(),
Linear()
)
self.mods2 = Linear()
def forward(self, x):
x = self.mods1(x)
x = self.mods2(x)
return x
model = M().eval()
qconfig_dict = {"": default_qconfig}
example_inputs = (torch.rand(5, 5),)
m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
m(*example_inputs)
m = convert_fx(m)
keys = m.state_dict().keys()
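# count the quantization parameter buffers in the state_dict:
# input_scale/input_zero_point come from quantizing the input of the first
# functional linear, scale/zero_point from the outputs of the quantized linears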
quant_scale_count = quant_zero_point = scale_count = zero_point_count = 0
for k in keys:
if 'input_scale' in k:
quant_scale_count = quant_scale_count + 1
elif 'input_zero_point' in k:
quant_zero_point = quant_zero_point + 1
elif 'scale' in k:
scale_count = scale_count + 1
elif 'zero_point' in k:
zero_point_count = zero_point_count + 1
# Expect each quantized linear op to have a scale and zero point
self.assertTrue(scale_count == 3, "Expect each quantized linear op to have a scale in state_dict")
self.assertTrue(zero_point_count == 3, "Expect each quantized linear op to have a zero_point in state_dict")
# ensure it runs
m(*example_inputs)
# ensure it is scriptable
scripted = torch.jit.script(m)
scripted_keys = scripted.state_dict().keys()
scripted.mods1_0_packed_weight_0 = m.state_dict()["mods1_0_packed_weight_0"]
non_packed_weight_keys = [key for key in keys if "_packed_weight" not in key]
self.assertTrue(
set(scripted_keys) == set(non_packed_weight_keys),
"Expected the scripted model to preserve the state_dict for non-packed weight attributes")
# TODO: probably don't want to hardcode the attribute names, since they are generated
for attr_name in [
"mods1_0_input_scale_0", "mods1_0_input_zero_point_0",
"mods1_0_scale_1", "mods1_0_zero_point_1",
"mods1_1_scale_1", "mods1_1_zero_point_1",
"mods2_scale_1", "mods2_zero_point_1"]:
self.assertTrue(hasattr(m, attr_name), attr_name + " not found.")
@skipIfNoFBGEMM
def test_packed_weight_fused_op(self):
class Linear(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.ones(5, 5)
self.b = torch.zeros(5)
def forward(self, x):
return F.linear(x, self.w, self.b)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.mods1 = torch.nn.Sequential(
Linear(),
Linear()
)
self.mods2 = Linear()
self.relu = F.relu
def forward(self, x):
x = self.mods1(x)
x = self.mods2(x)
x = self.relu(x)
return x
model = M().eval()
example_inputs = (torch.rand(5, 5),)
qconfig_dict = {"": default_qconfig}
m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
m(*example_inputs)
m = convert_fx(m)
assert hasattr(m, "mods1_0_packed_weight_0")
assert hasattr(m, "mods1_1_packed_weight_0")
assert hasattr(m, "mods2_packed_weight_0")
@skipIfNoFBGEMM
def test_mul_add_fp16_config(self):
with override_quantized_engine('fbgemm'):
class Linear(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.ones(5, 5)
self.b = torch.zeros(5)
def forward(self, x):
return torch.nn.functional.linear(x, self.w, self.b)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.mods1 = torch.nn.Sequential(
Linear(),
Linear()
)
self.mods2 = Linear()
def forward(self, x):
x = x * 5
x = x + 5
x = self.mods1(x)
x = self.mods2(x)
return x
model = M().eval()
qconfig_dict = {"": float16_dynamic_qconfig}
example_inputs = (torch.rand(5, 5),)
m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
m = convert_fx(m)
# make sure it runs
m(*example_inputs)
def test_getattr_with_nontensor_result(self):
"""
Verifies that binary ops get quantized correctly if some
of the args are nodes but not Tensors, such as an `x.ndim`
pattern.
"""
class M1(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
dims = x.ndim
dims_sub = dims - 1
dims_sub2 = dims_sub - 1
x = torch.add(x, dims_sub2)
return x
class M2(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
dims = x.ndim
dims_sub = dims - 2
mul = [1] * dims_sub
dims_list = [-1, x.size(1)] + mul
x = x.view(dims_list)
return x
class M3(torch.nn.Module):
def forward(self, x):
shape = x.shape
x = x.view(shape)
return x
for cls in (M1, M2, M3):
m = cls().eval()
example_inputs = (torch.rand(4, 4, 4, 4),)
m(*example_inputs)
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
mp(torch.rand(4, 4, 4, 4))
mc = convert_fx(mp)
class _NonReferenceTestModel(nn.Module):
def __init__(self, func, lin_in, lin_out):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.lin = nn.Linear(lin_in, lin_out)
self.func = func
def forward(self, x, y, z):
x = self.pool(F.relu(self.conv1(x)))
x = torch.flatten(x, 1)
x = self.func(x, y, z)
x = self.lin(x)
return x
# This function looks at the nodes specified by the NodeInfo keys of
# node_info_to_non_tensor_args and checks that the args at the specified indices
# are not observed (since they are non-tensors). If the args at those indices
# are a tuple/list (which do not show up as nodes), the function checks the
# individual elements of the tuple/list recursively.
def _check_not_observed(self, model, node_info_to_non_tensor_args):
# this is a helper function (for easier recursion) that checks whether
# arg_node is observed
def _check_node_not_observed(model, arg_node, node):
if isinstance(arg_node, tuple) or isinstance(arg_node, list):
for new_node in arg_node:
_check_node_not_observed(model, new_node, node)
elif arg_node.op == "call_module":
self.assertTrue(
not is_activation_post_process(getattr(model, arg_node.target)),
"Arg: {0} of node: {1} is observed but is not a float tensor".format(
arg_node, node
),
)
for node in model.graph.nodes:
indices = node_info_to_non_tensor_args.get(
NodeInfo(node.op, node.target), []
)
for index in indices:
if index < len(node.args):
arg_node = node.args[index]
_check_node_not_observed(model, arg_node, node)
# This test checks that the model gets prepared correctly, does not have observers
# on specific ops (see _check_not_observed), and that the prepared model runs
def _test_dtype_propagation(self, model, node_info_to_non_tensor_args, *args):
model.eval()
qconfig_dict = {"": torch.ao.quantization.get_default_qconfig("fbgemm")}
prepared_model = prepare_fx(model, qconfig_dict, example_inputs=tuple(args))
self._check_not_observed(prepared_model, node_info_to_non_tensor_args)
prepared_model(*args)
def test_masked_fill_nontensor_args_not_observed(self):
def func(x, y, z):
return x.masked_fill(y, z)
model = self._NonReferenceTestModel(func, 1176, 1)
args = [torch.randn(5, 3, 32, 32), torch.randn(1176) > 0, 0.1]
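# args[1] is a bool mask and args[2] a float scalar; neither is a float activation,
# so neither should be observed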
node_info_to_non_tensor_args = {NodeInfo("call_method", "masked_fill"): [1, 2]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_permute_nontensor_args_not_observed(self):
def func(x, y, z):
return x.permute(y, z)
model = self._NonReferenceTestModel(func, 1176, 1)
args = [torch.randn(5, 3, 32, 32), 0, 1]
node_info_to_non_tensor_args = {NodeInfo("call_method", "permute"): [1, 2]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_repeat_nontensor_args_not_observed(self):
def func(x, y, z):
return x.repeat(y, z)
model = self._NonReferenceTestModel(func, 1176, 1)
args = [torch.randn(5, 3, 32, 32), 2, 1]
node_info_to_non_tensor_args = {NodeInfo("call_method", "repeat"): [1, 2]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_reshape_nontensor_args_not_observed(self):
def func(x, y, z):
return x.reshape(-1, y)
model = self._NonReferenceTestModel(func, 5, 1)
args = [torch.randn(5, 3, 32, 32), 5, None]
node_info_to_non_tensor_args = {NodeInfo("call_method", "reshape"): [2]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_size_nontensor_args_not_observed(self):
def func(x, y, z):
return x.reshape((-1, x.size(y)))
model = self._NonReferenceTestModel(func, 5, 1)
args = [torch.randn(5, 3, 32, 32), 0, None]
node_info_to_non_tensor_args = {NodeInfo("call_method", "size"): [1]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_transpose_nontensor_args_not_observed(self):
def func(x, y, z):
return x.transpose(y, z)
model = self._NonReferenceTestModel(func, 5, 1)
args = [torch.randn(5, 3, 32, 32), 0, 1]
node_info_to_non_tensor_args = {NodeInfo("call_method", "transpose"): [1, 2]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_torch_transpose_nontensor_args_not_observed(self):
# TODO: make torch.transpose traceable by fx when using
# variable nontensor arguments
# func = lambda x, y, z: torch.transpose(x, y, z) # error
def func(x, y, z):
return torch.transpose(x, 0, 1)
model = self._NonReferenceTestModel(func, 5, 1)
node_info_to_non_tensor_args = {
NodeInfo("call_method", torch.transpose): [1, 2]
}
args = [torch.randn(5, 3, 32, 32), 0, 1]
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_unsqueeze_nontensor_args_not_observed(self):
def func(x, y, z):
return x.unsqueeze(y)
model = self._NonReferenceTestModel(func, 1176, 1)
args = [torch.randn(5, 3, 32, 32), 1, None]
node_info_to_non_tensor_args = {NodeInfo("call_method", "unsqueeze"): [1]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_unsqueeze__nontensor_args_not_observed(self):
def func(x, y, z):
return x.unsqueeze_(y)
model = self._NonReferenceTestModel(func, 1176, 1)
args = [torch.randn(5, 3, 32, 32), 1, None]
node_info_to_non_tensor_args = {NodeInfo("call_method", "unsqueeze_"): [1]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_torch_unsqueeze_nontensor_args_not_observed(self):
# TODO: make torch.unsqueeze traceable by fx when using
# variable nontensor arguments
# func = lambda x, y, z: torch.unsqueeze(x, y) # error
def func(x, y, z):
return torch.unsqueeze(x, 1)
model = self._NonReferenceTestModel(func, 1176, 1)
args = [torch.randn(5, 3, 32, 32), 1, None]
node_info_to_non_tensor_args = {NodeInfo("call_method", torch.unsqueeze): [1]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_view_nontensor_args_not_observed(self):
def func(x, y, z):
return x.view(-1, y)
model = self._NonReferenceTestModel(func, 5, 1)
args = [torch.randn(5, 3, 32, 32), 5, None]
node_info_to_non_tensor_args = {NodeInfo("call_method", "view"): [2]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_propagate_dtypes_for_known_nodes_list_args(self):
def func(x, y, z):
return x.reshape(y)
model = self._NonReferenceTestModel(func, 5, 1)
args = [torch.randn(5, 3, 32, 32), [-1, 5], None]
node_info_to_non_tensor_args = {NodeInfo("call_method", "reshape"): [1]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_propagate_dtypes_for_known_nodes_split_list_args(self):
def func(x, y, z):
return x.reshape([y, z])
model = self._NonReferenceTestModel(func, 5, 1)
args = [torch.randn(5, 3, 32, 32), -1, 5]
node_info_to_non_tensor_args = {NodeInfo("call_method", "reshape"): [1]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_propagate_dtypes_for_known_nodes_tuple_args(self):
def func(x, y, z):
return x.reshape(y)
model = self._NonReferenceTestModel(func, 5, 1)
args = [torch.randn(5, 3, 32, 32), (-1, 5), None]
node_info_to_non_tensor_args = {NodeInfo("call_method", "reshape"): [1]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_propagate_dtypes_for_known_nodes_split_tuple_args(self):
def func(x, y, z):
return x.reshape((y, z))
model = self._NonReferenceTestModel(func, 5, 1)
args = [torch.randn(5, 3, 32, 32), -1, 5]
node_info_to_non_tensor_args = {NodeInfo("call_method", "reshape"): [1]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_propagate_dtypes_for_known_nodes_dict_args(self):
def func(x, y, z):
return x.transpose(y["first"], y["second"])
model = self._NonReferenceTestModel(func, 5, 1)
args = [torch.randn(5, 3, 32, 32), {"first": 0, "second": 1}, None]
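# the transpose dims come from dict lookups (non-tensor values), so
# args 1 and 2 of the transpose call should not be observed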
node_info_to_non_tensor_args = {NodeInfo("call_method", "transpose"): [1, 2]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_propagate_dtypes_for_known_nodes_dict_tuple_args(self):
class reshape_module(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y, z):
return x.reshape(y["shape"])
model = self._NonReferenceTestModel(reshape_module(), 5, 1)
args = [torch.randn(5, 3, 32, 32), {"shape": (-1, 5)}, None]
node_info_to_non_tensor_args = {NodeInfo("call_method", "reshape"): [1]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_propagate_dtypes_for_known_nodes_dict_split_tuple_args(self):
def func(x, y, z):
return x.reshape((y["first"], y["second"]))
model = self._NonReferenceTestModel(func, 5, 1)
args = [torch.randn(5, 3, 32, 32), {"first": -1, "second": 5}, None]
node_info_to_non_tensor_args = {NodeInfo("call_method", "reshape"): [1]}
self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_assert_on_size_after_quant_layer(self):
"""
Verifies that calculating the size of a quantized tensor works
correctly in quantization passes.
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
torch._assert(x.size(1) == 1, 'foobar')
return x
m = M().eval()
example_inputs = (torch.rand(4, 1, 4, 4),)
m(*example_inputs)
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
mp(*example_inputs)
mc = convert_fx(mp)
mc(*example_inputs)
def test_fp32_sum(self):
"""
Verifies that fp32 sum works correctly if it's before or after
quantized layers.
"""
class M1(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = torch.stack([x])
x = torch.sum(x)
return x
class M2(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 1, 1)
self.conv2 = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
x1 = torch.stack([x])
x1 = torch.sum(x1, dim=0)
x2 = self.conv2(x1)
return x2
for cls in (M1, M2):
m = cls().eval()
example_inputs = (torch.rand(4, 1, 4, 4),)
m(*example_inputs)
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
mp(*example_inputs)
mc = convert_fx(mp)
mc(*example_inputs)
def test_fusion_pattern_unquantized(self):
"""
Ensure that leaving a possible fusion pattern of multiple nodes
unquantized runs through the APIs without errors.
"""
class Child(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = nn.ReLU()
def forward(self, x):
x = torch.add(x, 1.0)
x = torch.nn.functional.relu(x)
return x
class Parent(torch.nn.Module):
def __init__(self):
super().__init__()
self.child = Child()
self.conv = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.child(x)
x = self.conv(x)
return x
m = Parent().eval()
qconfig_dict = {
'': torch.ao.quantization.default_qconfig,
'module_name': [
('child', None),
],
}
example_inputs = (torch.rand(1, 1, 1, 1),)
mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
mp(*example_inputs)
mc = convert_fx(mp)
def test_state_dict(self):
""" Make sure packed params appear in state_dict
"""
# test linear packed weight
class M1(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.rand(4, 30)
self.b = torch.rand(4)
def forward(self, x):
return F.linear(x, self.w, self.b)
m = M1().eval()
qconfig_dict = {"": default_qconfig}
m = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 30),))
m = convert_fx(m)
state_dict = m.state_dict()
self.assertTrue("_packed_weight_0" in state_dict)
# test conv packed weight
class M2(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.rand(3, 3, 3, 3)
self.b = torch.rand(3)
self.stride = (1, 1)
self.padding = (0, 0)
self.dilation = (1, 1)
self.groups = 1
def forward(self, x):
return F.conv2d(x, self.w, self.b, self.stride, self.padding, self.dilation, self.groups)
m = M2().eval()
qconfig_dict = {"": default_qconfig}
m = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 3, 3, 3),))
m = convert_fx(m)
state_dict = m.state_dict()
self.assertTrue("_packed_weight_0" in state_dict)
# test load
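# unpack the saved packed weight so we have reference weight/bias to compare against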
ref_weight, ref_bias = torch.ops.quantized.conv2d_unpack(state_dict["_packed_weight_0"])
data = torch.rand(1, 3, 5, 5)
ref_res = m(data)
m = M2().eval()
m = prepare_fx(m, qconfig_dict, (data,))
m = convert_fx(m)
res = m(data)
weight, bias = m._packed_weight_0.unpack()
# check that random model weight/bias does not match ref weight/bias
self.assertNotEqual(weight, ref_weight)
self.assertNotEqual(bias, ref_bias)
self.assertNotEqual(res, ref_res)
m.load_state_dict(state_dict)
def checkModel(m, data, ref_weight, ref_bias, ref_res):
res = m(data)
weight, bias = m._packed_weight_0.unpack()
# check that weight/bias matches after loading the state_dict
self.assertEqual(weight, ref_weight)
self.assertEqual(bias, ref_bias)
self.assertEqual(res, ref_res)
checkModel(m, data, ref_weight, ref_bias, ref_res)
# Test save to disk and load back
m = M2().eval()
m = prepare_fx(m, qconfig_dict, example_inputs=(data,))
m = convert_fx(m)
m.load_state_dict(state_dict)
with TemporaryFileName() as fname:
torch.save(m.state_dict(), fname)
m.load_state_dict(torch.load(fname))
checkModel(m, data, ref_weight, ref_bias, ref_res)
@skipIfNoFBGEMM
def test_preserve_qconfig(self):
"""
Test to make sure the temporary config option to preserve qconfig attributes
in the model works
"""
with override_quantized_engine('fbgemm'):
class Linear(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.ones(5, 5)
self.b = torch.zeros(5)
def forward(self, x):
return torch.nn.functional.linear(x, self.w, self.b)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.mods1 = torch.nn.Sequential(
Linear(),
Linear()
)
self.mods2 = torch.nn.Sigmoid()
def forward(self, x):
x = self.mods1(x)
x = self.mods2(x)
return x
model = M().eval()
qconfig_dict = {
"object_type": [
(torch.nn.functional.linear, float16_dynamic_qconfig),
],
}
example_inputs = (torch.rand(5, 5),)
m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
m(*example_inputs)
m = convert_fx(m, _remove_qconfig=False)
self.assertTrue(hasattr(m.mods2, 'qconfig'))
def test_not_used(self):
""" Test quantizing a not used value"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
x = x + x
x.sigmoid_()
return x
m = M().eval()
qconfig_mapping = get_default_qconfig_mapping().set_global(float16_static_qconfig)
# make sure quantization runs
m = prepare_fx(m, qconfig_mapping, example_inputs=(torch.randn(1),))
m = convert_fx(m)
def test_qparams_fqn(self):
""" Test that the FQN of input_scale/zero_point is set
to that of first linear use. """
class Linear(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.ones(5, 5)
self.b = torch.zeros(5)
def forward(self, x):
return torch.nn.functional.linear(x, self.w, self.b)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.mods1 = torch.nn.Sequential(
Linear(),
Linear()
)
def forward(self, x):
x = torch.cat((x,), 1)
tmp = x.size()
x = self.mods1(x)
y = x * tmp[0]
return y
model = M().eval()
qconfig_dict = {
"": None,
"object_type": [
(torch.nn.functional.linear, default_qconfig),
(torch.nn.functional.relu, default_qconfig),
],
}
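# the global qconfig is None, so only the functional linear calls get quantized;
# the shared input quantization parameters should take the FQN of the first linear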
example_inputs = (torch.rand(5, 5),)
m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
m(*example_inputs)
m = convert_fx(m)
keys = m.state_dict().keys()
m(torch.randn(5, 5))
# TODO: probably don't want to hardcode the attribute names, since they are generated
for attr_name in [
"mods1_0_input_scale_0", "mods1_0_input_zero_point_0",
"mods1_0_scale_0", "mods1_0_zero_point_0",
"mods1_1_scale_0", "mods1_1_zero_point_0"]:
self.assertTrue(hasattr(m, attr_name), attr_name + " not found.")
def test_no_obs_between_unmatched_node_and_copy_node(self):
"""
Verifies that an observer is not inserted between an unmatched
node and a node matched to CopyNodeQuantizeHandler. This is done
because observers require activations to be Tensors, and there is
no guarantee that an output of an unmatched node is a Tensor.
"""
class M(nn.Module):
def __init__(self):
super().__init__()
self.relu = nn.ReLU()
def forward(self, x):
x = _user_func_with_complex_return_type(x)
x1 = x[0] + 1
return x1, x[1]
m = M().eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
example_inputs = (torch.randn(4, 4, 4, 4),)
mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
# if an observer is inserted after _user_func_with_complex_return_type,
# the following call will fail
mp(*example_inputs)
mc = convert_fx(mp)
mc(*example_inputs)
def test_fold_quant_dequant(self):
""" Test that the sequence of quant-dequant nodes in the
graph, get folded and we erase the extra dequant nodes.
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.ones(5, 5)
self.b = torch.zeros(5)
def forward(self, x):
x = torch.cat((x,), 1)
tmp = x.size()
x = torch.nn.functional.linear(x, self.w, self.b)
y = x * tmp[0]
return y
model = M().eval()
qconfig_dict = {
"": None,
"object_type": [
(torch.nn.functional.linear, default_qconfig),
],
}
example_inputs = (torch.rand(5, 5),)
m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
m(*example_inputs)
m = convert_fx(m)
keys = m.state_dict().keys()
m(*example_inputs)
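# count the quantize_per_tensor and dequantize nodes remaining after convert;
# folding should leave exactly one of each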
dequant = 0
quant = 0
for n in m.graph.nodes:
if n.op == "call_method" and n.target == "dequantize":
dequant = dequant + 1
if n.op == "call_function" and n.target == torch.quantize_per_tensor:
quant = quant + 1
self.assertEqual(dequant, 1)
self.assertEqual(quant, 1)
def test_quant_output_always_observed(self):
"""
If the output is hardcoded to be quantized, ensure that
there is always an observer, even if the last non-output node is not
quantizeable.
"""
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
prepare_custom_config_dict = {'output_quantized_idxs': [0]}
example_inputs = (torch.randn(4, 1, 4, 4),)
# non-quantizeable node, quantized output
class M1(torch.nn.Module):
def __init__(self):
super().__init__()
self.identity = torch.nn.Identity()
def forward(self, x):
x = self.identity(x)
return x
m1 = M1()
self.checkGraphModeFxOp(
m1, example_inputs, QuantType.QAT,
prepare_expected_node_occurrence={
ns.call_module(torch.ao.quantization.FusedMovingAvgObsFakeQuantize): 2,
},
expected_node_occurrence={
ns.call_function(torch.quantize_per_tensor): 1,
},
prepare_custom_config=prepare_custom_config_dict)
# quantizeable node, quantized output
class M2(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv(x)
return x
m2 = M2()
self.checkGraphModeFxOp(
m2, example_inputs, QuantType.QAT,
prepare_expected_node_occurrence={
# one for weights, one for activations
ns.call_module(torch.ao.quantization.FusedMovingAvgObsFakeQuantize): 2,
},
expected_node_occurrence={
ns.call_function(torch.quantize_per_tensor): 1,
},
prepare_custom_config=prepare_custom_config_dict)
# quantizeable node, quantized dictionary output
class M3(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv(x)
return {"output": x}
m3 = M3()
self.checkGraphModeFxOp(
m3, example_inputs, QuantType.QAT,
prepare_expected_node_occurrence={
# one for weights, one for activations
ns.call_module(torch.ao.quantization.FusedMovingAvgObsFakeQuantize): 2,
},
expected_node_occurrence={
ns.call_function(torch.quantize_per_tensor): 1,
},
prepare_custom_config=prepare_custom_config_dict)
def test_deepcopy_preserve_attributes(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = 3
def forward(self, x):
return x
m = M().eval()
m = prepare_fx(
m,
{"": default_qconfig},
example_inputs=(torch.randn(1),),
prepare_custom_config={"preserved_attributes": ["attr"]})
self.assertTrue(hasattr(m, "attr"))
m2 = copy.deepcopy(m)
self.assertTrue(hasattr(m2, "attr"))
m = convert_fx(m, convert_custom_config={"preserved_attributes": ["attr"]})
self.assertTrue(hasattr(m, "attr"))
m2 = copy.deepcopy(m)
self.assertTrue(hasattr(m2, "attr"))
def test_output_lists_and_dicts(self):
"""Verify that specifying complicated output types does not crash.
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv(x)
return {'foo': [x]}, [{'foo': [[x]]}]
m = M().eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
mp = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 1, 1, 1),))
mc = convert_fx(mp)
def test_shape_followed_by_quantized_op(self):
""" Make sure that shape does not dequantize
the Tensor before the next operator
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2)
self.conv2 = torch.nn.Conv2d(2, 2, 2)
def forward(self, x):
x = self.conv1(x)
s = x.shape
torch._assert(s == x.shape, "")
x = self.conv2(x)
return x
# make sure quantization runs
m = M().eval()
example_inputs = (torch.randn(2, 2, 4, 4),)
m = prepare_fx(m, {"": default_qconfig}, example_inputs=example_inputs)
m = convert_fx(m)
m(*example_inputs)
node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_method("dequantize"): 1
}
self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
def test_trace_quantize_per_tensor(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv(x)
return x
m = M().eval()
m = prepare_fx(m, {"": default_qconfig}, example_inputs=(torch.randn(1, 1, 3, 3),))
m = convert_fx(m)
# Make sure this runs without error
m = torch.fx.Transformer(m).transform()
def test_copy_node_has_shared_actpp_instance(self):
""" Test the output of CopyNode to have the same
observer/fake_quant instance as the input
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.avgpool2d = torch.nn.AvgPool2d(kernel_size=3)
def forward(self, x):
x = self.avgpool2d(x)
return x
for quant_type in self.static_quant_types:
m = M()
# Checks that we have an observer for both input and output
occurrence_map = {
QuantType.STATIC: {
ns.call_module(torch.ao.quantization.MinMaxObserver): 2
},
QuantType.QAT: {
ns.call_module(torch.ao.quantization.FakeQuantize): 2
}
}
if quant_type == QuantType.QAT:
m.train()
prepare = prepare_qat_fx
qconfig = default_qat_qconfig
actpp_module_class = torch.ao.quantization.FakeQuantize
else:
m.eval()
prepare = prepare_fx
qconfig = default_qconfig
actpp_module_class = torch.ao.quantization.MinMaxObserver
example_inputs = (torch.randn(1, 3, 3, 3),)
m = prepare(m, {"": qconfig}, example_inputs=example_inputs)
# check that there is a duplicated observer instance
actpp_module_count = 0
for name, module in m.named_modules(remove_duplicate=False):
if isinstance(module, actpp_module_class):
actpp_module_count += 1
self.assertEqual(actpp_module_count, 2)
actpp_module_count = 0
for name, module in m.named_modules():
if isinstance(module, actpp_module_class):
actpp_module_count += 1
self.assertEqual(actpp_module_count, 1)
m_copy = copy.deepcopy(m)
m = convert_fx(m)
m_reference = convert_to_reference_fx(m_copy)
# checks for non-reference quantized model
node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_method("dequantize"): 1
}
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_module(torch.nn.AvgPool2d),
ns.call_method("dequantize"),
]
self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence, expected_node_list=node_list)
# checks for reference quantized model, for copy nodes we'll have
# dequant - copy_node - quant patterns which will be fused later
# in the backend lowering step
node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 2,
ns.call_method("dequantize"): 2
}
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_method("dequantize"),
ns.call_module(torch.nn.AvgPool2d),
ns.call_function(torch.quantize_per_tensor),
ns.call_method("dequantize"),
]
self.checkGraphModuleNodes(m_reference, expected_node_occurrence=node_occurrence, expected_node_list=node_list)
def test_linear_qint8_activation(self):
"""Test support for qint8 activation in reference pattern
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 2, 2, 2)
self.linear = torch.nn.Linear(8, 5)
def forward(self, x):
x = self.conv(x)
x = torch.flatten(x, 1)
x = self.linear(x)
return x
m = M().eval()
example_inputs = (torch.rand(2, 1, 5, 5),)
m = prepare_fx(
m,
{"": torch.ao.quantization.QConfig(
activation=torch.ao.quantization.HistogramObserver.with_args(
qscheme=torch.per_tensor_symmetric, dtype=torch.qint8
), weight=torch.ao.quantization.default_per_channel_weight_observer)},
example_inputs=example_inputs)
m = convert_to_reference_fx(m)
m(*example_inputs)
def test_preserve_tuple(self):
""" Test tuple input type is preserved
"""
class LSTM(nn.Module):
def __init__(self):
super().__init__()
self.lstm = nn.LSTM(50, 50, 1)
def forward(self, inputs: torch.Tensor, state: List[torch.Tensor]):
h = state[0]
c = state[1]
return self.lstm(inputs, (h, c))
m = LSTM().eval()
example_inputs = (torch.randn(5, 3, 50), torch.randn(2, 3, 50), torch.randn(2, 3, 50))
m = prepare_fx(m, {"": default_qconfig}, example_inputs=example_inputs)
        # make sure arg[1] of the lstm module is still a tuple
for n in m.graph.nodes:
if n.target == "lstm":
self.assertEqual(type(n.args[1]), tuple)
def test_relu_lowering(self):
class M(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.relu(x)
m = M().eval()
m = prepare_fx(m, {"": default_qconfig}, example_inputs=(torch.randn(1),))
m_copy = copy.deepcopy(m)
m = convert_fx(m)
m_ref = convert_to_reference_fx(m_copy)
node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_method("dequantize"): 1
}
node_occurrence_ref = {
ns.call_function(torch.quantize_per_tensor): 2,
ns.call_method("dequantize"): 2
}
self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
self.checkGraphModuleNodes(m_ref, expected_node_occurrence=node_occurrence_ref)
@skipIfNoFBGEMM
def test_dynamic_with_fusion(self):
"""
Tests that dynamic quantization APIs work with Linear + Relu fusion
"""
with override_quantized_engine('fbgemm'):
class LinearRelu(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(5, 5)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
return self.relu(x)
class Linear(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.ones(5, 5)
self.b = torch.zeros(5)
def forward(self, x):
return torch.nn.functional.linear(x, self.w, self.b)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.mods1 = torch.nn.Sequential(LinearRelu(), LinearRelu())
self.mods2 = Linear()
self.relu = F.relu
def forward(self, x):
x = self.mods1(x)
x = self.mods2(x)
x = self.relu(x)
return x
dynamic_quantized_ops = {
float16_dynamic_qconfig: torch.ops.quantized.linear_relu_dynamic_fp16,
default_dynamic_qconfig: torch.ops.quantized.linear_relu_dynamic
}
for qconfig in [float16_dynamic_qconfig, default_dynamic_qconfig]:
model = M().eval()
qconfig_dict = {
"": qconfig
}
example_inputs = (torch.rand(5, 5),)
m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
m = convert_fx(m)
m(*example_inputs)
node_list = [
ns.call_module(nniqd.LinearReLU),
ns.call_module(nniqd.LinearReLU),
ns.call_function(dynamic_quantized_ops[qconfig]),
]
self.checkGraphModuleNodes(m, expected_node_list=node_list)
@skipIfNoFBGEMM
def test_dynamic_with_fusion_multiple_uses(self):
"""
Tests that dynamic quantization APIs work with Linear + Relu fusion
"""
with override_quantized_engine('fbgemm'):
class LinearRelu(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(5, 5)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
return self.relu(x)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear_relu = LinearRelu()
def forward(self, x):
x = self.linear_relu(x)
x = self.linear_relu(x)
return x
for qconfig in [float16_dynamic_qconfig, default_dynamic_qconfig]:
model = M().eval()
qconfig_dict = {
"": qconfig
}
example_inputs = (torch.randn(5, 5),)
m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
m = convert_fx(m)
m(*example_inputs)
node_list = [
ns.call_module(nniqd.LinearReLU),
ns.call_module(nniqd.LinearReLU),
]
self.checkGraphModuleNodes(m, expected_node_list=node_list)
@skipIfNoFBGEMM
def test_dynamic_linear_input_multiple_use(self):
"""
Tests input for dynamic linear being used by multiple ops
"""
with override_quantized_engine('fbgemm'):
class LinearRelu(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(5, 5)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
return self.relu(x)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.mod1 = LinearRelu()
self.mod2 = LinearRelu()
def forward(self, x):
y1 = self.mod1(x)
y2 = self.mod2(x)
return y1 + y2
for qconfig in [float16_dynamic_qconfig, default_dynamic_qconfig]:
model = M().eval()
qconfig_dict = {
"": qconfig
}
example_inputs = (torch.rand(5, 5, 5),)
m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
m = convert_fx(m)
m(*example_inputs)
node_list = [
ns.call_module(nniqd.LinearReLU),
ns.call_module(nniqd.LinearReLU),
]
self.checkGraphModuleNodes(m, expected_node_list=node_list)
def test_ref_linear_module(self):
""" Make sure the numerics for models with ref linear module
matches models with fbgemm/qnnpack module
"""
class M1(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 5)
def forward(self, x):
return self.linear(x)
class M2(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 5)
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(self.linear(x))
for M in [M1, M2]:
m = M().eval()
example_inputs = (torch.randn(5, 10),)
m = prepare_fx(m, {"": default_qconfig}, example_inputs=example_inputs)
m_copy = copy.deepcopy(m)
m = convert_fx(m)
m_ref = convert_to_reference_fx(m_copy)
result = m(*example_inputs)
result_ref = m_ref(*example_inputs)
self.assertTrue(torch.equal(result, result_ref))
def test_ref_conv_module(self):
""" Make sure the numerics for models with ref conv module
matches models with fbgemm/qnnpack module
"""
convs = {
1: nn.Conv1d,
2: nn.Conv2d,
3: nn.Conv3d,
}
class M1(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.conv = convs[dim](3, 3, 3)
def forward(self, x):
return self.conv(x)
class M2(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.conv = convs[dim](3, 3, 3)
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(self.conv(x))
for dim, M in itertools.product([1, 2, 3], [M1, M2]):
m = M(dim).eval()
data = self.img_data_dict[dim][0][0]
m = prepare_fx(m, {"": default_qconfig}, example_inputs=(data,))
m_copy = copy.deepcopy(m)
m = convert_fx(m)
m_ref = convert_to_reference_fx(m_copy)
result = m(data)
result_ref = m_ref(data)
self.assertTrue(torch.equal(result, result_ref))
def test_sub_scalar(self):
class M(torch.nn.Module):
def forward(self, x):
x = x + 1
x = x - 1
x = x + 3
x = x - 4
return x
m = M().eval()
m = prepare_fx(m, {"": default_qconfig}, example_inputs=(torch.rand(3),))
m = convert_fx(m)
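        # The 2 quantize / 2 dequantize pairs expected below most likely come from the
        # scalar subtractions: scalar add has a quantized lowering but scalar sub does
        # not, so the graph drops back to fp32 around each subtraction.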
occurrence = {
ns.call_function(torch.quantize_per_tensor): 2,
ns.call_method("dequantize"): 2
}
self.checkGraphModuleNodes(m, expected_node_occurrence=occurrence)
def test_observer_fqn(self):
"""
Test to make sure the observer FQN is based on the quantizable op/module that it is observing
        and uses the module's FQN to determine the observer name.
"""
class Linear(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.ones(5, 5)
self.b = torch.zeros(5)
def forward(self, x):
return torch.nn.functional.linear(x, self.w, self.b)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.mods1 = torch.nn.Sequential(
Linear(),
Linear()
)
self.mods2 = Linear()
self.mods3 = torch.nn.Linear(5, 5)
def forward(self, x):
x = self.mods1(x)
x = torch.add(x, 4)
x = self.mods2(x)
y = torch.add(x, 2)
z = torch.mul(x, 5)
a = self.mods3(y)
return a, z
model = M().eval()
        prepared = prepare_fx(model, {"": default_qconfig}, example_inputs=(torch.randn(1, 5),))
name_list = []
for name, mod in prepared.named_modules():
if isinstance(mod, torch.ao.quantization.observer.MinMaxObserver):
name_list.append(name)
expected_name_list = ['activation_post_process_0',
'activation_post_process_1',
'activation_post_process_2',
'activation_post_process_3',
'activation_post_process_4',
'activation_post_process_6',
'activation_post_process_7',
'activation_post_process_10']
assert name_list == expected_name_list
def test_conv_lowering(self):
convs = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
qconvs = {1: nn.quantized.Conv1d, 2: nn.quantized.Conv2d, 3: nn.quantized.Conv3d}
class M(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.conv = convs[dim](3, 3, 3)
def forward(self, x):
return self.conv(x)
for dim in range(1, len(convs) + 1):
m = M(dim).eval()
data = self.img_data_dict[dim][0][0]
m = prepare_fx(m, {"": default_qconfig}, example_inputs=(data,))
m_ref = copy.deepcopy(m)
m_ref = convert_to_reference_fx(m_ref)
m = convert_fx(m)
out_ref = m_ref(data)
out = m(data)
# check that reference pattern for quantized conv module is fused
expected_node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_module(qconvs[dim]): 1,
ns.call_method("dequantize"): 1
}
self.checkGraphModuleNodes(m, expected_node_occurrence=expected_node_occurrence)
# checking result match
self.assertTrue(torch.equal(out_ref, out))
def test_convert_qconfig_mapping(self):
class Linear(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.ones(5, 5)
self.b = torch.zeros(5)
def forward(self, x):
return torch.nn.functional.linear(x, self.w, self.b)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.mods1 = torch.nn.Sequential(
Linear(),
Linear()
)
self.mods3 = torch.nn.Linear(5, 5)
def forward(self, x):
x = self.mods1(x)
x = torch.add(x, 4)
z = torch.mul(x, 5)
x = self.mods3(z)
return x
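        # convert_fx takes its own qconfig_mapping: entries set to None there mark
        # ops/modules that were observed during prepare but should be skipped at
        # convert time, so they stay as float ops in the converted graph (this is
        # what node_occurrence / order_check verify below).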
model = M().train()
for check in ["module_name", "object_type"]:
qconfig_dict = {"": None,
"object_type": [
(nn.functional.linear, get_default_qat_qconfig("fbgemm")),
(torch.add, get_default_qat_qconfig("fbgemm")),
(nn.Linear, get_default_qat_qconfig("fbgemm")),
],
}
example_inputs = (torch.rand(5, 5),)
prepared = prepare_qat_fx(model, qconfig_dict, example_inputs=example_inputs)
prepared(*example_inputs)
if check == "module_name":
convert_qconfig_dict = {"": None,
"object_type": [
(nn.functional.linear, get_default_qat_qconfig("fbgemm")),
(torch.add, get_default_qat_qconfig("fbgemm")),
(nn.Linear, get_default_qat_qconfig("fbgemm")),
],
"module_name": [("mods1.0", None)]}
node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 2,
ns.call_function(torch.nn.functional.linear): 1,
ns.call_function(torch.ops.quantized.linear): 1,
ns.call_function(torch.ops.quantized.add): 1,
ns.call_method("dequantize"): 2
}
order_check = [
ns.call_function(torch.nn.functional.linear),
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.linear),
ns.call_function(torch.ops.quantized.add),
ns.call_method("dequantize"),
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Linear),
ns.call_method("dequantize"),
]
elif check == "object_type":
convert_qconfig_dict = {"": None,
"object_type": [
(nn.functional.linear, get_default_qat_qconfig("fbgemm")),
(torch.add, get_default_qat_qconfig("fbgemm")),
(nn.Linear, None),
]}
node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_function(torch.ops.quantized.linear): 2,
ns.call_function(torch.ops.quantized.add): 1,
ns.call_function(torch.mul): 1,
ns.call_method("dequantize"): 1
}
order_check = [
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.linear),
ns.call_function(torch.ops.quantized.linear),
ns.call_function(torch.ops.quantized.add),
ns.call_method("dequantize"),
ns.call_function(torch.mul),
ns.call_module(nn.Linear),
]
converted = convert_fx(prepared, qconfig_mapping=convert_qconfig_dict)
converted(torch.rand(5, 5))
self.checkGraphModuleNodes(
converted,
expected_node_occurrence=node_occurrence,
expected_node_list=order_check)
def _assertFixedQParamsFakeQuantizeEqual(self, fq1, fq2):
self.assertEqual(fq1()._observer_ctr, fq2()._observer_ctr)
def test_register_patterns(self):
@register_fusion_pattern("dummy_fusion")
class DummyFusion():
pass
@register_quant_pattern("dummy_quant")
class DummyQuant():
pass
@register_quant_pattern("dummy_quant2", default_fixed_qparams_range_0to1_observer)
class DummyQuant2():
pass
@register_quant_pattern("dummy_quant3", default_fixed_qparams_range_neg1to1_observer)
class DummyQuant3():
pass
self.assertEqual(DEFAULT_FUSION_PATTERNS["dummy_fusion"], DummyFusion)
self.assertEqual(DEFAULT_QUANTIZATION_PATTERNS["dummy_quant"], DummyQuant)
self.assertEqual(DEFAULT_QUANTIZATION_PATTERNS["dummy_quant2"], DummyQuant2)
self.assertEqual(DEFAULT_QUANTIZATION_PATTERNS["dummy_quant3"], DummyQuant3)
self.assertEqual(DEFAULT_OUTPUT_OBSERVER_MAP["dummy_quant2"], default_fixed_qparams_range_0to1_observer)
self.assertEqual(DEFAULT_OUTPUT_OBSERVER_MAP["dummy_quant3"], default_fixed_qparams_range_neg1to1_observer)
self._assertFixedQParamsFakeQuantizeEqual(DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP["dummy_quant2"],
default_fixed_qparams_range_0to1_fake_quant)
self._assertFixedQParamsFakeQuantizeEqual(DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP["dummy_quant3"],
default_fixed_qparams_range_neg1to1_fake_quant)
output_fake_quantize_map = get_default_output_activation_post_process_map(is_training=True)
output_observer_map = get_default_output_activation_post_process_map(is_training=False)
self.assertEqual(output_observer_map.get("dummy_quant3"), default_fixed_qparams_range_neg1to1_observer)
self._assertFixedQParamsFakeQuantizeEqual(output_fake_quantize_map.get("dummy_quant3"),
default_fixed_qparams_range_neg1to1_fake_quant)
def test_reuse_input_qconfig(self):
class M1(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 3)
def forward(self, x):
x = self.conv(x)
x = x.reshape()
return x
class M2(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
x = x.reshape()
return x
options = itertools.product([M1, M2], [True, False])
for M, is_qat in options:
m = M1().eval()
example_inputs = (torch.randn(1, 3, 3, 3),)
m = prepare_fx(m, get_default_qconfig_mapping(), example_inputs=example_inputs)
m = convert_fx(m)
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Conv2d),
ns.call_method("reshape"),
ns.call_method("dequantize"),
]
self.checkGraphModuleNodes(
m,
expected_node_list=node_list)
m = M2().eval()
m = prepare_fx(m, get_default_qconfig_mapping(), example_inputs=example_inputs)
m = convert_fx(m)
node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 0,
ns.call_method("dequnatize"): 0,
}
node_list = [
ns.call_method("reshape"),
]
self.checkGraphModuleNodes(
m,
expected_node_occurrence=node_occurrence,
expected_node_list=node_list)
def test_stack_trace_preserved_linear(self):
class M(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(1, 1)
def forward(self, x):
x = self.linear(x)
return x
m = M().eval()
mp = prepare_fx(m, get_default_qconfig_mapping(), example_inputs=(torch.randn(1, 1),))
found_stack_trace = False
for n in mp.graph.nodes:
if n.op == 'call_module' and n.target == 'linear':
found_stack_trace = n.stack_trace is not None
break
self.assertTrue(found_stack_trace)
# test reference model
mq = convert_to_reference_fx(copy.deepcopy(mp))
found_stack_trace = False
for n in mq.graph.nodes:
if n.op == 'call_module' and n.target == 'linear':
found_stack_trace = n.stack_trace is not None
break
self.assertTrue(found_stack_trace, f"stack trace not found, node: {n.format_node()}, is_reference: True")
# test quantized model
mq = convert_fx(mp)
found_stack_trace = False
for n in mq.graph.nodes:
if n.op == 'call_module' and n.target == 'linear':
found_stack_trace = n.stack_trace is not None
break
self.assertTrue(found_stack_trace, f"stack trace not found, node: {n.format_node()}, is_reference: False")
def test_qat_skip_untraced(self):
class UnTraceableModuleClass(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 2)
def forward(self, x):
return self.linear(x)
class UnTraceableModuleName(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 2)
def forward(self, x):
return self.linear(x)
class M(nn.Module):
def __init__(self):
super().__init__()
self.untraceable_module_class = UnTraceableModuleClass()
self.untraceable_module_name = UnTraceableModuleClass()
def forward(self, x):
x = self.untraceable_module_class(x)
x = self.untraceable_module_name(x)
return x
mod = M()
qconfig_dict = {"": torch.quantization.get_default_qat_qconfig()}
prepare_custom_config_dict = {
"non_traceable_module_class": [UnTraceableModuleClass],
"non_traceable_module_name": ["untraceable_module_name"],
}
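        # Modules matched by non_traceable_module_class / non_traceable_module_name
        # are treated as leaf modules during symbolic tracing, so prepare_qat_fx must
        # leave their children (the nn.Linear layers here) untouched instead of
        # swapping them for QAT modules; the assertions below verify exactly that.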
example_inputs = (torch.randn(2, 2),)
mod_prep = torch.ao.quantization.quantize_fx.prepare_qat_fx(
mod.train(), qconfig_dict, example_inputs=example_inputs,
prepare_custom_config=prepare_custom_config_dict
)
self.assertTrue(
isinstance(mod_prep.untraceable_module_class.linear, torch.nn.Linear)
)
self.assertTrue(
isinstance(mod_prep.untraceable_module_name.linear, torch.nn.Linear)
)
self.assertTrue(
type(mod_prep.untraceable_module_class.linear)
is not torch.nn.qat.modules.linear.Linear,
"prepare_qat_fx shold not convert anything inside untraced module classes",
)
self.assertTrue(
type(mod_prep.untraceable_module_name.linear)
is not torch.nn.qat.modules.linear.Linear,
"prepare_qat_fx shold not convert anything inside modules named in untraced_module_names",
)
def test_qconfig_dict_setup(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.Conv1d = torch.nn.Conv1d(1, 1, 1)
self.Conv2d = torch.nn.Conv2d(1, 1, 1)
self.Conv3d = torch.nn.Conv3d(1, 1, 1)
self.ConvTranspose1d = torch.nn.ConvTranspose1d(1, 1, 1)
self.ConvTranspose2d = torch.nn.ConvTranspose2d(1, 1, 1)
self.ConvTranspose3d = torch.nn.ConvTranspose3d(1, 1, 1)
self.Linear = torch.nn.Linear(1, 1, 1)
def forward(self, x):
x = self.Conv1d(x)
x = self.Conv2d(x)
x = self.Conv3d(x)
x = self.ConvTranspose1d(x)
x = self.ConvTranspose2d(x)
x = self.ConvTranspose3d(x)
x = self.Linear(x)
x = torch.nn.functional.conv1d(x, torch.rand(2, 2))
x = torch.nn.functional.conv2d(x, torch.rand(2, 2))
x = torch.nn.functional.conv3d(x, torch.rand(2, 2))
x = torch.nn.functional.linear(x, torch.rand(2, 2))
return x
backends = ["qnnpack", "fbgemm"]
for func in [get_default_qconfig_mapping, get_default_qat_qconfig_mapping]:
for backend in backends:
m = M().eval()
qconfig_dict = func(backend)
                m = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 1, 1, 1),))
for name, mod in m.named_modules():
if is_activation_post_process(mod) and mod.dtype == torch.quint8:
if backend == "fbgemm":
lower_bnd = 0
upper_bnd = 127
else:
lower_bnd = 0
upper_bnd = 255
if issubclass(type(mod), FakeQuantize):
self.assertEqual(mod.activation_post_process.quant_min, lower_bnd)
self.assertEqual(mod.activation_post_process.quant_max, upper_bnd)
else:
self.assertEqual(mod.quant_min, lower_bnd)
self.assertEqual(mod.quant_max, upper_bnd)
def test_prepare_mode(self):
class LinearModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(5, 10)
def forward(self, x):
return self.linear(x)
def _test(prepare_fn, qconfig_dict):
m = LinearModel()
m1 = copy.deepcopy(m)
m1.train()
example_inputs = (torch.randn(1, 5),)
prepare_fn(m1, qconfig_dict, example_inputs=example_inputs)
m2 = copy.deepcopy(m)
m2.eval()
prepare_fn(m2, qconfig_dict, example_inputs=example_inputs)
# Ensure prepare_fx and prepare_qat_fx work in both training and eval modes
_test(prepare_fx, get_default_qconfig_mapping())
_test(prepare_qat_fx, get_default_qat_qconfig_mapping())
@skipIfNoFBGEMM
class TestQuantizeFxOps(QuantizationTestCase):
def setUp(self):
super().setUp()
self.custom_qconfig = torch.ao.quantization.QConfig(
activation=torch.ao.quantization.observer.HistogramObserver.with_args(
qscheme=torch.per_tensor_symmetric, dtype=torch.qint8
),
weight=torch.ao.quantization.default_per_channel_weight_observer
)
self.common_quant_patterns = {
torch.nn.ConvTranspose1d: CommonQuantizeHandler,
torch.nn.ConvTranspose2d: CommonQuantizeHandler,
torch.nn.ELU: CommonQuantizeHandler,
torch.nn.LeakyReLU: CommonQuantizeHandler,
torch.nn.Hardswish: CommonQuantizeHandler,
torch.nn.InstanceNorm1d: CommonQuantizeHandler,
torch.nn.InstanceNorm2d: CommonQuantizeHandler,
torch.nn.InstanceNorm3d: CommonQuantizeHandler,
torch.nn.LayerNorm: CommonQuantizeHandler,
torch.nn.SiLU: CommonQuantizeHandler,
torch.nn.Mish: CommonQuantizeHandler,
torch.nn.GELU: CommonQuantizeHandler,
torch.nn.Softmax: CommonQuantizeHandler,
torch.nn.functional.elu: CommonQuantizeHandler,
torch.nn.functional.hardswish: CommonQuantizeHandler,
torch.nn.functional.instance_norm: CommonQuantizeHandler,
torch.nn.functional.layer_norm: CommonQuantizeHandler,
torch.nn.functional.leaky_relu: CommonQuantizeHandler,
torch.nn.functional.silu: CommonQuantizeHandler,
torch.nn.functional.mish: CommonQuantizeHandler,
torch.nn.functional.gelu: CommonQuantizeHandler,
torch.nn.functional.softmax: CommonQuantizeHandler,
torch.sum: CommonQuantizeHandler
}
"""Unit tests for individual ops
"""
@skipIfNoFBGEMM
def test_linear_module(self):
with override_quantized_engine('fbgemm'):
class LinearModel(torch.nn.Module):
def __init__(self):
super(LinearModel, self).__init__()
self.linear = torch.nn.Linear(30, 4).float()
def forward(self, x):
return self.linear(x)
class LinearReLUModel(torch.nn.Module):
def __init__(self, f_relu=False):
super(LinearReLUModel, self).__init__()
self.linear = torch.nn.Linear(30, 4).float()
if f_relu:
self.relu = F.relu
else:
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class LinearBnModel(torch.nn.Module):
def __init__(self):
super(LinearBnModel, self).__init__()
self.linear = torch.nn.Linear(4, 4).float()
self.bn = torch.nn.BatchNorm1d(4)
def forward(self, x):
x = self.linear(x)
x = self.bn(x)
return x
# Test linear
data = (torch.rand((1, 30), dtype=torch.float),)
for quant_type in self.all_quant_types:
model = LinearModel()
quantized_module = nnqd.Linear if quant_type == QuantType.DYNAMIC else nnq.Linear
quantized_node = ns.call_module(quantized_module)
result_dict = self.checkGraphModeFxOp(model, data, quant_type, quantized_node)
if quant_type in self.static_quant_types:
self.assertEqual(result_dict["quantized_output"], result_dict["quantized_reference_output"])
# TODO: enable test for dynamic quant
# Test linear-relu
for f_relu, quant_type in itertools.product([True, False], [QuantType.STATIC, QuantType.QAT]):
model = LinearReLUModel(f_relu)
quantized_node = ns.call_module(nniq.LinearReLU)
result_dict = self.checkGraphModeFxOp(model, data, quant_type, quantized_node)
self.assertEqual(result_dict["quantized_output"], result_dict["quantized_reference_output"])
# Test linear-bn
data = (torch.rand((4, 4), dtype=torch.float),)
for quant_type in self.static_quant_types:
model = LinearBnModel()
quantized_node = ns.call_module(nnq.Linear)
result_dict = self.checkGraphModeFxOp(model, data, quant_type, quantized_node)
self.assertEqual(result_dict["quantized_output"], result_dict["quantized_reference_output"])
@skipIfNoFBGEMM
def test_functional_linear(self):
with override_quantized_engine('fbgemm'):
class FuncLinear(torch.nn.Module):
def __init__(self, use_bias, has_relu, f_relu):
super(FuncLinear, self).__init__()
self.w = torch.randn(4, 30)
self.b = torch.randn(4)
self.use_bias = use_bias
if has_relu:
if f_relu:
self.relu_or_id = F.relu
else:
self.relu_or_id = torch.nn.ReLU()
else:
self.relu_or_id = torch.nn.Identity()
def forward(self, x):
if self.use_bias:
x = F.linear(x, self.w, self.b)
else:
x = F.linear(x, self.w)
x = self.relu_or_id(x)
return x
data = (torch.rand((1, 30), dtype=torch.float),)
quant_type_to_qlinear_fun = {
QuantType.DYNAMIC: ns.call_function(torch.ops.quantized.linear_dynamic),
QuantType.STATIC: ns.call_function(torch.ops.quantized.linear),
QuantType.QAT: ns.call_function(torch.ops.quantized.linear),
}
quant_type_to_qlinear_relu_fun = {
                # dynamic linear + relu lowers to the fused linear_relu_dynamic op
QuantType.DYNAMIC: ns.call_function(torch.ops.quantized.linear_relu_dynamic),
QuantType.STATIC: ns.call_function(torch.ops.quantized.linear_relu),
QuantType.QAT: ns.call_function(torch.ops.quantized.linear_relu),
}
options = itertools.product(
self.all_quant_types,
(True, False), # use_bias
(True, False), # has_relu
(True, False), # functional relu
)
for quant_type, use_bias, has_relu, f_relu in options:
                # when has_relu is False we use an nn.Identity, which is a copy node,
                # so an extra observer/fake_quant is inserted for its output
quant_type_to_prepare_expected_node_occurrence = {
QuantType.DYNAMIC: {
ns.call_module(torch.ao.quantization.PlaceholderObserver): 1,
ns.call_module(torch.ao.quantization.MinMaxObserver): 1,
},
# There should be 3 observers: after input, weight and activation.
# one more observer for torch.nn.Identity when there is no relu
QuantType.STATIC: {
ns.call_module(torch.ao.quantization.HistogramObserver): 2 if has_relu else 3,
ns.call_module(torch.ao.quantization.PerChannelMinMaxObserver): 1,
},
# There should be 3 observers: after input, weight and activation.
QuantType.QAT: {
ns.call_module(torch.ao.quantization.FusedMovingAvgObsFakeQuantize): 3 if has_relu else 4,
},
}
model = FuncLinear(use_bias, has_relu, f_relu)
if has_relu:
qlinear_fun = quant_type_to_qlinear_relu_fun[quant_type]
else:
qlinear_fun = quant_type_to_qlinear_fun[quant_type]
if quant_type != QuantType.DYNAMIC:
num_dequantize = 1
else:
# we will have an extra quantize_per_tensor_dynamic + dequantize for
# nn.Identity right now, but it will be fixed after we use
# backend_config to configure the default pt backend
num_dequantize = int(not has_relu)
convert_node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 1 if quant_type != QuantType.DYNAMIC else 0,
qlinear_fun: 1,
ns.call_method("dequantize"): num_dequantize if quant_type != QuantType.DYNAMIC else 0,
}
prepare_expected_node_occurrence = \
quant_type_to_prepare_expected_node_occurrence[quant_type]
result_dict = self.checkGraphModeFxOp(
model, data, quant_type, qlinear_fun,
prepare_expected_node_occurrence=prepare_expected_node_occurrence,
expected_node_occurrence=convert_node_occurrence)
if quant_type != QuantType.DYNAMIC:
self.assertEqual(result_dict["quantized_output"], result_dict["quantized_reference_output"])
# Ensure packed weights in lowered models are folded
self.assertIn("_packed_weight_0", result_dict["quantized"].state_dict().keys())
@skipIfNoFBGEMM
def test_linear_dynamic_fp16(self):
with override_quantized_engine('fbgemm'):
class FuncLinear(torch.nn.Module):
def __init__(self, use_bias, has_relu, f_relu):
super(FuncLinear, self).__init__()
self.w = torch.randn(4, 30)
self.b = torch.randn(4)
self.use_bias = use_bias
if has_relu:
if f_relu:
self.relu = F.relu
else:
self.relu = torch.nn.ReLU()
else:
self.relu = torch.nn.Identity()
def forward(self, x):
if self.use_bias:
x = F.linear(x, self.w, self.b)
else:
x = F.linear(x, self.w)
x = self.relu(x)
return x
data = (torch.rand((1, 30), dtype=torch.float),)
options = itertools.product(
(True, False), # use_bias
(True, False), # has_relu
(True, False), # functional relu
(True, False), # is_reference
)
for use_bias, has_relu, f_relu, is_reference in options:
model = FuncLinear(use_bias, has_relu, f_relu)
if is_reference:
qlinear_fun = ns.call_function(torch.nn.functional.linear)
else:
if has_relu:
qlinear_fun = ns.call_function(torch.ops.quantized.linear_relu_dynamic_fp16)
else:
qlinear_fun = ns.call_function(torch.ops.quantized.linear_dynamic_fp16)
prepare_node_occurrence = {
# activation and weight
ns.call_module(torch.ao.quantization.PlaceholderObserver): 2
}
convert_node_occurrence = {
qlinear_fun: 1,
# weight
ns.call_method("to"): 1 if is_reference else 0
}
self.checkGraphModeFxOp(
model, data, QuantType.DYNAMIC, qlinear_fun,
is_reference=is_reference,
custom_qconfig_dict={"": float16_dynamic_qconfig},
prepare_expected_node_occurrence=prepare_node_occurrence,
expected_node_occurrence=convert_node_occurrence)
def test_linear_static_fp16(self):
class FuncLinear(torch.nn.Module):
def __init__(self, use_bias, has_relu, f_relu):
super(FuncLinear, self).__init__()
self.w = torch.randn(4, 30)
self.b = torch.randn(4)
self.use_bias = use_bias
if has_relu:
if f_relu:
self.relu = F.relu
else:
self.relu = torch.nn.ReLU()
else:
self.relu = torch.nn.Identity()
def forward(self, x):
if self.use_bias:
x = F.linear(x, self.w, self.b)
else:
x = F.linear(x, self.w)
x = self.relu(x)
return x
data = (torch.rand((1, 30), dtype=torch.float),)
options = itertools.product(
(True, False), # use_bias
(True, False), # has_relu
(True, False), # functional relu
(True, False), # is_reference
)
backend_config = get_test_only_legacy_native_backend_config()
for use_bias, has_relu, f_relu, is_reference in options:
model = FuncLinear(use_bias, has_relu, f_relu)
linear_fun = ns.call_function(torch.nn.functional.linear)
            # when has_relu is False we use an nn.Identity, which is a copy node,
            # so an extra observer/fake_quant is inserted for its output
prepare_node_occurrence = {
# activation, weight, bias and output
ns.call_module(torch.ao.quantization.PlaceholderObserver): 3 + int(use_bias) + int(not has_relu),
}
            # when is_reference is True and has_relu is False, the nn.Identity in the
            # model is a CopyNode, so an extra quant - dequant ("to" / "dequantize")
            # pair is added for it in the reference pattern
convert_node_occurrence = {
# we don't support static fp16 ops, so the linear function
# is unfused
linear_fun: 1,
# activation, weight, bias and output
ns.call_method("to"): 3 + int(use_bias) + int(not has_relu and is_reference),
ns.call_method("dequantize"): 3 + int(use_bias) + int(not has_relu and is_reference)
}
self.checkGraphModeFxOp(
model, data, QuantType.DYNAMIC, linear_fun,
is_reference=is_reference,
custom_qconfig_dict={"": float16_static_qconfig},
prepare_expected_node_occurrence=prepare_node_occurrence,
expected_node_occurrence=convert_node_occurrence,
backend_config=backend_config)
@skipIfNoFBGEMM
def test_conv_module(self):
conv_module = {1 : torch.nn.Conv1d, 2 : torch.nn.Conv2d, 3 : torch.nn.Conv3d}
class ConvWrapper(torch.nn.Module):
def __init__(self, dim):
super(ConvWrapper, self).__init__()
self.conv = conv_module[dim](3, 3, 3).float()
def forward(self, x):
return self.conv(x)
options = itertools.product([1, 2, 3], self.static_quant_types)
quantized_nodes = {
# dim
1: ns.call_module(nnq.Conv1d),
2: ns.call_module(nnq.Conv2d),
3: ns.call_module(nnq.Conv3d),
}
for dim, quant_type in options:
self.checkGraphModeFxOp(
ConvWrapper(dim), self.img_data_dict[dim], quant_type,
quantized_nodes[dim])
@skipIfNoFBGEMM
def test_functional_conv(self):
with override_quantized_engine('fbgemm'):
""" Test for function conv and functional conv + relu
"""
convs = {
1: torch.nn.functional.conv1d,
2: torch.nn.functional.conv2d,
3: torch.nn.functional.conv3d,
}
class FuncConv(torch.nn.Module):
def __init__(self, dim, use_bias, has_relu, f_relu):
super().__init__()
self.dim = dim
self.w = torch.randn(tuple([3] * (dim + 2)))
self.b = torch.randn(3) if use_bias else None
self.stride = tuple([1] * dim)
self.padding = tuple([0] * dim)
self.dilation = tuple([1] * dim)
self.groups = 1
self.use_bias = use_bias
if has_relu:
if f_relu:
self.relu = F.relu
else:
self.relu = torch.nn.ReLU()
else:
self.relu = torch.nn.Identity()
def forward(self, x):
x = convs[self.dim](x, self.w, self.b, self.stride, self.padding, self.dilation, self.groups)
x = self.relu(x)
return x
quant_type_to_qconv_fun = {
QuantType.STATIC: {
1: ns.call_function(torch.ops.quantized.conv1d),
2: ns.call_function(torch.ops.quantized.conv2d),
3: ns.call_function(torch.ops.quantized.conv3d)
},
QuantType.QAT: {
1: ns.call_function(torch.ops.quantized.conv1d),
2: ns.call_function(torch.ops.quantized.conv2d),
3: ns.call_function(torch.ops.quantized.conv3d)
},
}
quant_type_to_qconv_relu_fun = {
QuantType.STATIC: {
1: ns.call_function(torch.ops.quantized.conv1d_relu),
2: ns.call_function(torch.ops.quantized.conv2d_relu),
3: ns.call_function(torch.ops.quantized.conv3d_relu)
},
QuantType.QAT: {
1: ns.call_function(torch.ops.quantized.conv1d_relu),
2: ns.call_function(torch.ops.quantized.conv2d_relu),
3: ns.call_function(torch.ops.quantized.conv3d_relu)
},
}
options = itertools.product(
[1, 2, 3], # dims
self.static_quant_types,
(True, False), # use_bias
(True, False), # has_relu
(True, False), # functional relu
)
for dim, quant_type, use_bias, has_relu, f_relu in options:
                # when has_relu is False we use an nn.Identity, which is a copy node,
                # so an extra observer/fake_quant is inserted for its output
quant_type_to_prepare_expected_node_occurrence = {
QuantType.DYNAMIC: {},
# There should be 3 observers: after input, weight and activation.
QuantType.STATIC: {
ns.call_module(torch.ao.quantization.HistogramObserver): 2 if has_relu else 3,
ns.call_module(torch.ao.quantization.PerChannelMinMaxObserver): 1,
},
# There should be 3 observers: after input, weight and activation.
QuantType.QAT: {
ns.call_module(torch.ao.quantization.FusedMovingAvgObsFakeQuantize): 3 if has_relu else 4,
},
}
data_dims = [2, 3] + [4] * dim
data = (torch.randn(tuple(data_dims), dtype=torch.float),)
model = FuncConv(dim, use_bias, has_relu, f_relu)
if has_relu:
qconv_fun = quant_type_to_qconv_relu_fun[quant_type][dim]
else:
qconv_fun = quant_type_to_qconv_fun[quant_type][dim]
convert_node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 1,
qconv_fun: 1,
ns.call_method("dequantize"): 1
}
prepare_expected_node_occurrence = \
quant_type_to_prepare_expected_node_occurrence[quant_type]
result_dict = self.checkGraphModeFxOp(
model, data, quant_type, qconv_fun,
prepare_expected_node_occurrence=prepare_expected_node_occurrence,
expected_node_occurrence=convert_node_occurrence)
if quant_type != QuantType.DYNAMIC:
self.assertEqual(result_dict["quantized_output"], result_dict["quantized_reference_output"])
# Ensure packed weights in lowered models are folded
self.assertIn("_packed_weight_0", result_dict["quantized"].state_dict().keys())
@skipIfNoFBGEMM
def test_quantized_conv_relu(self):
"""tests for conv1d_relu/conv2d_relu/conv3d_relu"""
conv_module = {1 : torch.nn.Conv1d, 2 : torch.nn.Conv2d, 3 : torch.nn.Conv3d}
class ConvNdRelu(torch.nn.Module):
def __init__(self, dim, inplace):
super(ConvNdRelu, self).__init__()
self.conv = conv_module[dim](3, 3, 3).float()
self.relu = torch.nn.ReLU(inplace)
def forward(self, x):
return self.relu(self.conv(x))
class ConvNdFunctionalRelu(torch.nn.Module):
def __init__(self, dim):
super(ConvNdFunctionalRelu, self).__init__()
self.conv = conv_module[dim](3, 3, 3).float()
def forward(self, x):
return F.relu(self.conv(x))
class ConvNdInplaceFunctionalRelu(torch.nn.Module):
def __init__(self, dim):
super(ConvNdInplaceFunctionalRelu, self).__init__()
self.conv = conv_module[dim](3, 3, 3).float()
def forward(self, x):
return F.relu(self.conv(x), True)
options = itertools.product([1, 2, 3], self.static_quant_types)
quantized_nodes = {
# dim
1: ns.call_module(nniq.ConvReLU1d),
2: ns.call_module(nniq.ConvReLU2d),
3: ns.call_module(nniq.ConvReLU3d),
}
for dim, quant_type in options:
for m in [ConvNdRelu(dim, True),
ConvNdRelu(dim, False),
ConvNdFunctionalRelu(dim),
ConvNdInplaceFunctionalRelu(dim)]:
self.checkGraphModeFxOp(
m, self.img_data_dict[dim], quant_type,
quantized_nodes[dim])
def _test_binary_op_int8_impl(self, binary_op, ibinary_op, quantized_op):
data = (torch.randn(1, 1, 1, 1, dtype=torch.float),
torch.randn(1, 1, 1, 1, dtype=torch.float))
options = itertools.product([True, False], [True, False], [True, False])
quant_type = QuantType.STATIC
# testing for default int8 static quant
for is_inplace, is_scalar, is_reference in options:
if is_reference:
node_list = [
ns.call_method("dequantize"),
ns.call_function(binary_op),
ns.call_function(torch.quantize_per_tensor)
]
quantized_node = None
else:
node_list = None
quantized_node = ns.call_function(quantized_op)
self.checkGraphModeFxOp(
BinaryOp(binary_op, ibinary_op, is_inplace, is_scalar), data, quant_type,
quantized_node, expected_node_list=node_list, is_reference=is_reference)
            # this tests that the binary op is still quantized even when it is not
            # fed a quantized input
self.checkGraphModeFxOp(
BinaryOpNonQuantizedInput(binary_op, ibinary_op, is_inplace, is_scalar),
data, quant_type, quantized_node,
expected_node_list=node_list, is_reference=is_reference)
def _test_binary_op_float16_impl(self, binary_op, ibinary_op):
data = (torch.randn(1, 1, 1, 1, dtype=torch.float),
torch.randn(1, 1, 1, 1, dtype=torch.float))
quant_type = QuantType.STATIC
# testing for fp16 static quant
# we are producing fp16 patterns
options = itertools.product([True, False], [True, False])
custom_qconfig_dict = {
"object_type": [(binary_op, float16_static_qconfig)]
}
backend_config = get_test_only_legacy_native_backend_config()
for is_inplace, is_scalar in options:
node_occurrence = {
# output_conv1, output_add1, output_add2 for scalar
# output_conv1, output_conv2, output_add1, output_add2 for non-scalar
ns.call_method("to"): 3 if is_scalar else 4
}
self.checkGraphModeFxOp(
BinaryOp(binary_op, ibinary_op, is_inplace, is_scalar), data, quant_type,
expected_node_occurrence=node_occurrence,
custom_qconfig_dict=custom_qconfig_dict,
backend_config=backend_config)
node_occurrence = {
# input_add, output_add for scalar
# input_add1, input_add2, output_add for non-scalar
ns.call_method("to"): 2 if is_scalar else 3
}
self.checkGraphModeFxOp(
BinaryOpNonQuantizedInput(binary_op, ibinary_op, is_inplace, is_scalar), data, quant_type,
expected_node_occurrence=node_occurrence,
custom_qconfig_dict=custom_qconfig_dict,
backend_config=backend_config)
def _test_binary_op_relu_int8_impl(self, binary_op, ibinary_op, quantized_op):
data = (torch.rand((1, 1, 1, 1), dtype=torch.float),
torch.rand((1, 1, 1, 1), dtype=torch.float))
quant_type = QuantType.STATIC
quantized_node = ns.call_function(quantized_op)
options = itertools.product(
[True, False], [nn.ReLU, F.relu, torch.relu], [True, False])
for is_inplace_op, relu_callable, is_scalar in options:
model = BinaryOpRelu(
binary_op, ibinary_op, is_inplace_op, relu_callable, is_scalar)
self.checkGraphModeFxOp(
model, data, quant_type, quantized_node)
def _test_binary_op_relu_float16_impl(self, binary_op, ibinary_op):
data = (torch.rand((1, 1, 1, 1), dtype=torch.float),
torch.rand((1, 1, 1, 1), dtype=torch.float))
quant_type = QuantType.STATIC
options = itertools.product(
[True, False], [nn.ReLU, F.relu, torch.relu], [True, False])
custom_qconfig_dict = {
"": float16_static_qconfig,
"object_type": [(torch.nn.Conv2d, None)]
}
backend_config = get_test_only_legacy_native_backend_config()
for is_inplace_op, is_functional_relu, is_scalar in options:
node_occurrence = {
ns.call_method("to"): 3 if is_scalar else 4
}
model = BinaryOpRelu(
binary_op, ibinary_op, is_inplace_op, is_functional_relu, is_scalar)
self.checkGraphModeFxOp(
model, data, quant_type, custom_qconfig_dict=custom_qconfig_dict,
expected_node_occurrence=node_occurrence,
backend_config=backend_config)
@skipIfNoFBGEMM
def test_add(self):
self._test_binary_op_int8_impl(
operator.add, operator.iadd, torch.ops.quantized.add)
self._test_binary_op_float16_impl(
operator.add, operator.iadd)
@unittest.skip("This is no longer needed right now, can enable later with new api")
def test_sub(self):
self._test_binary_op_float16_impl(operator.sub, operator.isub)
self._test_binary_op_float16_impl(torch.sub, None)
@unittest.skip("This is no longer needed right now, can enable later with new api")
def test_div(self):
self._test_binary_op_float16_impl(operator.truediv, operator.itruediv)
self._test_binary_op_float16_impl(torch.div, None)
@skipIfNoFBGEMM
def test_mul(self):
self._test_binary_op_int8_impl(
operator.mul, operator.imul, torch.ops.quantized.mul)
self._test_binary_op_float16_impl(operator.mul, operator.imul)
@unittest.skip("This is no longer needed right now, can enable later with new api")
def test_sum(self):
class Sum(torch.nn.Module):
def forward(self, x):
x = torch.sum(x, [1], keepdim=True)
x = torch.sum(x, [1])
return x
data = torch.randn(1, 2, 3, 4, dtype=torch.float)
quant_type = QuantType.STATIC
# testing for fp16 static quant
# we are producing fp16 patterns
custom_qconfig_dict = {
"object_type": [(torch.sum, float16_static_qconfig)]
}
node_occurrence = {
# input_sum1, output_sum1, output_sum2
ns.call_method("to"): 3
}
self.checkGraphModeFxOp(
Sum(), data, quant_type,
expected_node_occurrence=node_occurrence,
custom_qconfig_dict=custom_qconfig_dict)
@unittest.skip("This is no longer needed right now, can enable later with new api")
def test_bmm(self):
class BMMMethod(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x.bmm(y)
data = (torch.randn(1, 1, 1, dtype=torch.float),
torch.randn(1, 1, 1, dtype=torch.float))
quant_type = QuantType.STATIC
# testing for fp16 static quant
# we are producing fp16 patterns
custom_qconfig_dict = {
"object_type": [(torch.bmm, float16_static_qconfig),
("bmm", float16_static_qconfig)]
}
node_occurrence = {
# input_bmm1, input_bmm2, output_bmm
ns.call_method("to"): 3
}
self.checkGraphModeFxOp(
BinaryOpNonQuantizedInput(torch.bmm, None, False, False), data, quant_type,
expected_node_occurrence=node_occurrence,
custom_qconfig_dict=custom_qconfig_dict)
# TODO: support call_method("bmm")
# we can transform call_method("bmm") to call_function(torch.bmm)
# self.checkGraphModeFxOp(
# BMMMethod(), data, quant_type,
# expected_node_occurrence=node_occurrence,
# custom_qconfig_dict=custom_qconfig_dict,
# print_debug_info=True)
@skipIfNoFBGEMM
def test_add_relu(self):
self._test_binary_op_relu_int8_impl(
operator.add, operator.iadd, torch.ops.quantized.add_relu)
self._test_binary_op_relu_float16_impl(
operator.add, operator.iadd)
@skipIfNoFBGEMM
def test_add_relu_multiple_uses_of_relu(self):
class Sub(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU(inplace=True)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.sub = Sub()
def forward(self, x, y):
x = x + y
x = self.sub.relu(x)
x = x + y
x = self.sub.relu(x)
return x
m = M().eval()
example_inputs = (torch.randn(3), torch.randn(3))
m = prepare_fx(m, {"": default_qconfig}, example_inputs=example_inputs)
m = convert_fx(m)
node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 2,
ns.call_function(torch.ops.quantized.add_relu): 2,
ns.call_method("dequantize"): 1,
}
self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
# check the model is scriptable
m = torch.jit.script(m)
# check the model is runnable
m(*example_inputs)
@skipIfNoFBGEMM
def test_mul_relu(self):
self._test_binary_op_relu_int8_impl(
operator.mul, operator.imul, torch.ops.quantized.mul_relu)
self._test_binary_op_relu_float16_impl(
operator.mul, operator.imul)
# TODO(future PR): make more generic
def _test_quantized_add_mul_qat(self, model, example_inputs, expected_node_occurrence):
qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
mp = prepare_qat_fx(model, qconfig_dict, example_inputs=example_inputs)
self.checkGraphModuleNodes(
mp, expected_node_occurrence=expected_node_occurrence)
@skipIfNoFBGEMM
def test_quantized_add_qat(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = torch.add(x, 1.0)
x = self.conv1(x)
x = torch.add(x, 1.0)
x = torch.relu(x)
x = self.conv2(x)
return x
m = M()
example_inputs = (torch.randn(1, 1, 1, 1),)
expected_node_occurrence = {
ns.call_module(torch.ao.quantization.FusedMovingAvgObsFakeQuantize): 5,
}
self._test_quantized_add_mul_qat(m, example_inputs, expected_node_occurrence)
@skipIfNoFBGEMM
def test_quantized_mul_qat(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = torch.mul(x, 1.0)
x = self.conv1(x)
x = torch.mul(x, 1.0)
x = torch.relu(x)
x = self.conv2(x)
return x
m = M()
example_inputs = (torch.randn(1, 1, 1, 1),)
expected_node_occurrence = {
ns.call_module(torch.ao.quantization.FusedMovingAvgObsFakeQuantize): 5,
}
self._test_quantized_add_mul_qat(m, example_inputs, expected_node_occurrence)
def test_int8_input_no_unnecessary_fq(self):
"""
If the inputs to the graph are quantized and the only node
does not need an activation observer, verifies that the
activation observer is not inserted.
"""
class M(nn.Module):
def __init__(self, scalar):
super().__init__()
self.scalar = scalar
self.add_func = torch.nn.quantized.FloatFunctional()
def forward(self, x):
return self.add_func.add_scalar(x, self.scalar)
m = M(0.5)
mp = torch.ao.quantization.quantize_fx.prepare_qat_fx(
m, {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')},
example_inputs=(torch.randn(1),),
prepare_custom_config={"input_quantized_idxs": [0]})
expected_node_occurrence = {
ns.call_module(torch.ao.quantization.FusedMovingAvgObsFakeQuantize): 1,
}
self.checkGraphModuleNodes(
mp, expected_node_occurrence=expected_node_occurrence)
@skipIfNoFBGEMM
def test_cat(self):
""" quantization of the output of cat will depend on the
input of cat. we only quantize the output of cat when its inputs are quantized.
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
return torch.cat([x, y], 1)
example_inputs = (torch.randn(1, 2, 5, 5, dtype=torch.float),
torch.randn(1, 2, 5, 5, dtype=torch.float))
quantized_node = ns.call_function(torch.cat)
options = itertools.product(self.static_quant_types, [True, False])
for quant_type, is_reference in options:
if is_reference:
converted_node_list = [
ns.call_method("dequantize"),
ns.call_function(torch.cat),
ns.call_function(torch.quantize_per_tensor)
]
converted_node_occurrence = {
# inputs and outputs of the two conv, and output of cat
ns.call_method("dequantize"): 5,
ns.call_function(torch.cat): 1,
# inputs and outputs of the two conv, and output of cat
ns.call_function(torch.quantize_per_tensor): 5,
}
else:
converted_node_list = None
converted_node_occurrence = {
# output of cat
ns.call_method("dequantize"): 1,
ns.call_function(torch.cat): 1,
# for two inputs
ns.call_function(torch.quantize_per_tensor): 2,
}
self.checkGraphModeFxOp(
M(),
example_inputs,
quant_type,
quantized_node,
expected_node_list=converted_node_list,
expected_node_occurrence=converted_node_occurrence,
is_reference=is_reference)
# check cat is using the same observer for input and output
m = M().eval()
m = prepare_fx(m, {"": default_qconfig}, example_inputs=example_inputs)
        # the two inputs and the output of torch.cat share the same observer,
        # so 2 of the observer entries are replicated
all_observers = len(dict(m.named_modules(remove_duplicate=False)))
distinct_observers = len(dict(m.named_modules()))
self.assertEqual(all_observers, distinct_observers + 2)
# make sure the converted model runs
m = convert_fx(m)
m(*example_inputs)
@skipIfNoFBGEMM
def test_qbatch_norm(self):
bn_module = {
# TODO: quantized batchnorm 1d module is missing
# 1 : torch.nn.BatchNorm1d,
2 : torch.nn.BatchNorm2d,
3 : torch.nn.BatchNorm3d,
}
class M(torch.nn.Module):
def __init__(self, dim):
super(M, self).__init__()
self.bn = bn_module[dim](3).to(torch.float)
def forward(self, x):
return self.bn(x)
options = itertools.product(self.static_quant_types, [2, 3], [True, False])
quantized_nodes = {
False: {
# 1: ns.call_module(nnq.BatchNorm1d),
2: ns.call_module(nnq.BatchNorm2d),
3: ns.call_module(nnq.BatchNorm3d),
},
True: {
# 1: ns.call_module(nn.BatchNorm1d),
2: ns.call_module(nn.BatchNorm2d),
3: ns.call_module(nn.BatchNorm3d),
}
}
for quant_type, dim, is_reference in options:
self.checkGraphModeFxOp(
M(dim), self.img_data_dict[dim], quant_type, quantized_nodes[is_reference][dim], is_reference=is_reference)
@skipIfNoFBGEMM
def test_qbatch_norm_relu(self):
bn_module = {2 : torch.nn.BatchNorm2d, 3 : torch.nn.BatchNorm3d}
class BNRelu(torch.nn.Module):
def __init__(self, dim, inplace):
super(BNRelu, self).__init__()
self.bn = bn_module[dim](3).to(torch.float)
self.relu = torch.nn.ReLU(inplace=inplace)
def forward(self, x):
return self.relu(self.bn(x))
class BNFuncRelu(torch.nn.Module):
def __init__(self, dim):
super(BNFuncRelu, self).__init__()
self.bn = bn_module[dim](3).to(torch.float)
def forward(self, x):
return F.relu(self.bn(x), False)
class BNFuncInplaceRelu(torch.nn.Module):
def __init__(self, dim):
super(BNFuncInplaceRelu, self).__init__()
self.bn = bn_module[dim](3).to(torch.float)
def forward(self, x):
return F.relu(self.bn(x), True)
options = itertools.product(self.static_quant_types, [2, 3], [True, False])
quantized_nodes = {
True: {
2: ns.call_module(nni.BNReLU2d),
3: ns.call_module(nni.BNReLU3d),
},
False: {
2: ns.call_module(nniq.BNReLU2d),
3: ns.call_module(nniq.BNReLU3d),
}
}
for quant_type, dim, is_reference in options:
for instance in [BNRelu(dim, True), BNRelu(dim, False),
BNFuncRelu(dim), BNFuncInplaceRelu(dim)]:
self.checkGraphModeFxOp(
instance, self.img_data_dict[dim], quant_type,
quantized_nodes[is_reference][dim], is_reference=is_reference)
def _test_activation_impl(
self, float_module, float_op, quantized_module, quantized_op):
        ''' Test for activation ops (with inplace options), float_op can be
        a torch op or a functional op
        '''
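        # M either wraps the module form (e.g. nn.Hardswish(inplace)) or calls the
        # functional form (e.g. F.hardswish(x, inplace)); the quantized_nodes table
        # below maps (is_module, is_reference) to the node expected after conversion.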
class M(torch.nn.Module):
def __init__(self, is_module, inplace):
super(M, self).__init__()
self.is_module = is_module
self.inplace = inplace
if self.is_module:
self.op = float_module(self.inplace)
else:
self.op = float_op
def forward(self, input):
if self.is_module:
return self.op(input)
else:
return self.op(input, self.inplace)
options = itertools.product([True, False], [True, False], self.static_quant_types, [True, False])
quantized_nodes = {
# is_module
True: {
# is_reference
True: ns.call_module(float_module),
False: ns.call_module(quantized_module),
},
False: {
True: ns.call_function(float_op),
False: ns.call_function(quantized_op),
}
}
for is_module, is_inplace, quant_type, is_reference in options:
self.checkGraphModeFxOp(
M(is_module, is_inplace), self.img_data_2d,
quant_type, quantized_nodes[is_module][is_reference], is_reference=is_reference)
def test_hardswish(self):
self._test_activation_impl(nn.Hardswish, F.hardswish, nnq.Hardswish, torch.ops.quantized.hardswish)
def test_elu(self):
self._test_activation_impl(nn.ELU, F.elu, nnq.ELU, torch.ops.quantized.elu)
def test_leaky_relu(self):
self._test_activation_impl(nn.LeakyReLU, F.leaky_relu, nnq.LeakyReLU, torch.ops.quantized.leaky_relu)
def test_prelu(self):
class M(torch.nn.Module):
def __init__(self, num_param: int):
super(M, self).__init__()
self.op = torch.nn.PReLU(num_parameters=num_param)
def forward(self, input):
return self.op(input)
X = [[torch.randn(4, 4, 4, 4, dtype=torch.float)]]
options = itertools.product([1, 4], self.static_quant_types, [True, False])
quantized_nodes = {
# is_reference
True: ns.call_module(torch.nn.PReLU),
False: ns.call_module(torch.nn.quantized.PReLU),
}
for num_parameter, quant_type, is_reference in options:
self.checkGraphModeFxOp(
M(num_parameter), X, quant_type, quantized_nodes[is_reference],
is_reference=is_reference)
def _test_norm_impl(
self, float_module, float_op, op_args, data, quantized_module, quantized_op,
skip_op_arg_for_functional=False):
        ''' Test for normalization ops, float_op can be a torch op or a functional op,
        op_args is a list of positional arguments for the module/op
        '''
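        # skip_op_arg_for_functional handles ops whose functional form does not take
        # the same positional args as the module constructor, e.g. F.instance_norm in
        # test_instance_norm below, where op_args is only passed to the module.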
class M(torch.nn.Module):
def __init__(self, is_module):
super(M, self).__init__()
self.is_module = is_module
if self.is_module:
self.op = float_module(*op_args)
else:
self.op = float_op
def forward(self, input):
if self.is_module:
return self.op(input)
else:
args = [input]
if not skip_op_arg_for_functional:
args += op_args
return self.op(*args)
options = itertools.product([True, False], self.static_quant_types)
quantized_nodes = {
# is_module
True: ns.call_module(quantized_module),
False: ns.call_function(quantized_op),
}
for is_module, quant_type in options:
self.checkGraphModeFxOp(
M(is_module), data, quant_type, quantized_nodes[is_module])
def _test_norm_float16_impl(
self, float_module, float_op, op_args, data,
skip_op_arg_for_functional=False):
        ''' Test for normalization ops, float_op can be a torch op or a functional op,
        op_args is a list of positional arguments for the module/op
        '''
class M(torch.nn.Module):
def __init__(self, is_module):
super(M, self).__init__()
self.is_module = is_module
if self.is_module:
self.op = float_module(*op_args)
else:
self.op = float_op
def forward(self, input):
if self.is_module:
return self.op(input)
else:
args = [input]
if not skip_op_arg_for_functional:
args += op_args
return self.op(*args)
options = itertools.product([True, False], self.static_quant_types)
qconfig_dict = {
"object_type": [
(float_module, float16_static_qconfig),
(float_op, float16_static_qconfig)
]
}
node_occurrence = {
ns.call_method("to"): 2
}
for is_module, quant_type in options:
self.checkGraphModeFxOp(
M(is_module), data, quant_type, custom_qconfig_dict=qconfig_dict, expected_node_occurrence=node_occurrence)
def test_layer_norm(self):
data = (torch.rand((1, 2, 5, 5), dtype=torch.float),)
self._test_norm_impl(
nn.LayerNorm, F.layer_norm, [[2, 5, 5]], data, nnq.LayerNorm, torch.ops.quantized.layer_norm)
def test_instance_norm(self):
data_1d = (torch.rand((1, 4, 5), dtype=torch.float),)
data_2d = (torch.rand((1, 4, 5, 1), dtype=torch.float),)
data_3d = (torch.rand((1, 4, 5, 1, 1), dtype=torch.float),)
data_dict = {1 : data_1d, 2 : data_2d, 3 : data_3d}
instance_norm_modules = {1 : nn.InstanceNorm1d,
2 : nn.InstanceNorm2d,
3 : nn.InstanceNorm3d}
quantized_instance_norm_modules = {
1 : nnq.InstanceNorm1d,
2 : nnq.InstanceNorm2d,
3 : nnq.InstanceNorm3d
}
for dim in [1, 2, 3]:
data = data_dict[dim]
module = instance_norm_modules[dim]
quantized_module = quantized_instance_norm_modules[dim]
self._test_norm_impl(
module, F.instance_norm, [4], data,
quantized_module, torch.ops.quantized.instance_norm,
skip_op_arg_for_functional=True)
def test_norm_weight_bias(self):
class Linear(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.ones(5, 5)
self.b = torch.zeros(5)
def forward(self, x):
return torch.nn.functional.linear(x, self.w, self.b)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.mods1 = Linear()
self.scale = torch.randn(5, 5)
self.bias = torch.randn(5, 5)
def forward(self, x):
x1 = self.mods1(x)
y = F.layer_norm(x1, [5, 5], weight=self.scale, bias=self.bias)
return y
model = M()
expected_occurrence = {
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_function(torch.ops.quantized.linear): 1,
ns.call_function(torch.ops.quantized.layer_norm): 1,
ns.call_method("dequantize"): 1,
}
self.checkGraphModeFxOp(
model,
(torch.rand(5, 5),),
QuantType.STATIC,
expected_node_occurrence=expected_occurrence
)
def _test_default_node_quant_handler_ops(
self, module, functional, qconfig, is_reference=True, node_list=None, additional_quant_pattern_dict=None
):
class M(torch.nn.Module):
def __init__(self, mod, func):
super().__init__()
self.module = mod()
self.functional = func
def forward(self, x):
x = self.module(x)
x = self.functional(x)
return x
if node_list is None:
node_list = []
if additional_quant_pattern_dict is None:
additional_quant_pattern_dict = {}
data = torch.randn((2, 2, 2, 2))
quant_type = QuantType.STATIC
prepare_custom_qconfig_dict = {"additional_quant_pattern": additional_quant_pattern_dict}
qconfig_dict = {"": qconfig}
m = M(module, functional).eval()
m_prep = prepare_fx(m, qconfig_dict, prepare_custom_qconfig_dict)
m_prep(data)
convert_fn = convert_to_reference_fx if is_reference else convert_fx
m_quant = convert_fn(m_prep, is_reference=is_reference)
m_quant(data)
self.checkGraphModuleNodes(m_quant, expected_node_list=node_list)
@unittest.skip("TODO: reenable with backend_config api")
def test_gelu_normal(self):
module = torch.nn.GELU
functional = torch.nn.functional.gelu
qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
is_reference = False
node_list = [
ns.call_module(module),
ns.call_function(functional),
]
self._test_default_node_quant_handler_ops(
module, functional, qconfig, is_reference, node_list)
@unittest.skip("TODO: reenable with backend_config api")
def test_softmax_normal(self):
module = torch.nn.Softmax
functional = torch.nn.functional.softmax
qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
is_reference = False
node_list = [
ns.call_module(torch.nn.quantized.Softmax),
ns.call_function(functional),
]
self._test_default_node_quant_handler_ops(
module, functional, qconfig, is_reference, node_list)
@unittest.skip("This is no longer needed right now, can enable later with new api")
def test_gelu_reference(self):
module = torch.nn.GELU
functional = torch.nn.functional.gelu
qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
is_reference = True
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_method("dequantize"),
ns.call_module(module),
ns.call_function(torch.quantize_per_tensor),
ns.call_method('dequantize'),
ns.call_function(functional),
ns.call_function(torch.quantize_per_tensor),
ns.call_method('dequantize')
]
# TODO: change these to use backend_config
additional_patterns = {torch.nn.GELU: DefaultNodeQuantizeHandler,
torch.nn.functional.gelu: DefaultNodeQuantizeHandler}
self._test_default_node_quant_handler_ops(
module, functional, qconfig, is_reference, node_list, additional_patterns)
self._test_default_node_quant_handler_ops(module, functional, self.custom_qconfig, is_reference, node_list,
additional_quant_pattern_dict=self.common_quant_patterns)
@unittest.skip("This is no longer needed right now, can enable later with new api")
def test_softmax_reference(self):
module = torch.nn.Softmax
functional = torch.nn.functional.softmax
qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
is_reference = True
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_method("dequantize"),
ns.call_module(module),
ns.call_function(torch.quantize_per_tensor),
ns.call_method('dequantize'),
ns.call_function(functional),
ns.call_function(torch.quantize_per_tensor),
ns.call_method('dequantize')
]
additional_patterns = {torch.nn.Softmax: DefaultNodeQuantizeHandler,
torch.nn.functional.softmax: DefaultNodeQuantizeHandler}
self._test_default_node_quant_handler_ops(
module, functional, qconfig, is_reference, node_list, additional_patterns)
self._test_default_node_quant_handler_ops(module, functional, self.custom_qconfig, is_reference, node_list,
additional_quant_pattern_dict=self.common_quant_patterns)
@unittest.skip("This is no longer needed right now, can enable later with new api")
def test_silu_reference(self):
module = torch.nn.SiLU
functional = torch.nn.functional.silu
qconfig = float16_static_qconfig
is_reference = True
node_list = [
ns.call_method("to"),
ns.call_method("dequantize"),
ns.call_module(module),
ns.call_method("to"),
ns.call_method('dequantize'),
ns.call_function(functional),
ns.call_method("to"),
ns.call_method('dequantize')
]
self._test_default_node_quant_handler_ops(
module, functional, qconfig, is_reference, node_list)
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_method("dequantize"),
ns.call_module(module),
ns.call_function(torch.quantize_per_tensor),
ns.call_method("dequantize"),
ns.call_function(functional),
ns.call_function(torch.quantize_per_tensor),
ns.call_method("dequantize")
]
self._test_default_node_quant_handler_ops(module, functional, self.custom_qconfig, is_reference, node_list,
additional_quant_pattern_dict=self.common_quant_patterns)
@unittest.skip("This is no longer needed right now, can enable later with new api")
def test_mish_reference(self):
module = torch.nn.Mish
functional = torch.nn.functional.mish
qconfig = float16_static_qconfig
is_reference = True
node_list = [
ns.call_method("to"),
ns.call_method("dequantize"),
ns.call_module(module),
ns.call_method("to"),
ns.call_method('dequantize'),
ns.call_function(functional),
ns.call_method("to"),
ns.call_method('dequantize')
]
self._test_default_node_quant_handler_ops(
module, functional, qconfig, is_reference, node_list)
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_method("dequantize"),
ns.call_module(module),
ns.call_function(torch.quantize_per_tensor),
ns.call_method("dequantize"),
ns.call_function(functional),
ns.call_function(torch.quantize_per_tensor),
ns.call_method("dequantize")
]
self._test_default_node_quant_handler_ops(module, functional, self.custom_qconfig, is_reference, node_list,
additional_quant_pattern_dict=self.common_quant_patterns)
def test_bmm_int_reference(self):
""" int8 is not supported for bmm so we won't produce reference
pattern for it
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bmm = torch.bmm
def forward(self, x, y):
out = self.bmm(x, y)
return out
data_x = torch.randn((2, 2, 2,))
data_y = torch.randn((2, 2, 2,))
example_inputs = (data_x, data_y)
qconfig_dict = {"": torch.ao.quantization.get_default_qconfig("fbgemm")}
is_reference = True
node_list = [
ns.call_function(torch.bmm),
]
m = M().eval()
m_prep = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
m_prep(*example_inputs)
convert_fn = convert_to_reference_fx if is_reference else convert_fx
m_quant = convert_fn(m_prep)
m_quant(*example_inputs)
self.checkGraphModuleNodes(m_quant, expected_node_list=node_list)
@skipIfNoFBGEMM
def test_clamp(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
self.relu6 = torch.nn.ReLU6()
self.relu6_ = torch.nn.ReLU6(True)
self.hardtanh = torch.nn.Hardtanh()
self.hardtanh_ = torch.nn.Hardtanh(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.relu6(x)
self.relu6_(x)
x = F.relu6(x)
x = torch.clamp(x, -3, 3)
x = x.clamp(-2.5, 2.5)
# x = x.clamp_(-2, 2) # Enable when quantized `clamp_` is ready
x = self.hardtanh(x)
self.hardtanh_(x)
x = F.hardtanh(x)
return x
data = (torch.rand((1, 2, 5, 5), dtype=torch.float),)
# list of node that should occur in order
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Conv2d),
ns.call_method('dequantize')
]
for quant_type in self.static_quant_types:
self.checkGraphModeFxOp(
M(), data, quant_type, expected_node_list=node_list)
def test_fixed_qparams_ops_fp16(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.sigmoid = torch.nn.Sigmoid()
self.tanh = torch.nn.Tanh()
def forward(self, x):
x = self.sigmoid(x)
x = torch.sigmoid(x)
x = x.sigmoid()
x = self.tanh(x)
x = torch.tanh(x)
x = x.tanh()
return x
data = (torch.randn((2, 2, 2, 2), dtype=torch.float),)
quant_type = QuantType.STATIC
# TODO: use get_default_qconfig_mapping once it handles fp16
qconfig_mapping = QConfigMapping().set_global(float16_static_qconfig)
backend_config = get_test_only_legacy_native_backend_config()
node_occurrence = {
ns.call_method("to"): 7
}
self.checkGraphModeFxOp(
M(), data, quant_type, custom_qconfig_dict=qconfig_mapping,
expected_node_occurrence=node_occurrence,
backend_config=backend_config)
def test_fixed_qparams_ops_qint8(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.sigmoid = torch.nn.Sigmoid()
self.tanh = torch.nn.Tanh()
def forward(self, x):
x = self.sigmoid(x)
x = torch.sigmoid(x)
x = x.sigmoid()
x = self.tanh(x)
x = torch.tanh(x)
x = x.tanh()
return x
data = (torch.randn((2, 2, 2, 2), dtype=torch.float),)
quant_type = QuantType.STATIC
qconfig = torch.ao.quantization.QConfig(
activation=HistogramObserver.with_args(qscheme=torch.per_tensor_symmetric, dtype=torch.quint8),
weight=default_weight_observer)
qconfig_mapping = get_default_qconfig_mapping().set_global(qconfig)
node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 7,
ns.call_method("dequantize"): 7
}
self.checkGraphModeFxOp(
M(), data, quant_type, custom_qconfig_dict=qconfig_mapping,
expected_node_occurrence=node_occurrence, is_reference=True)
@skipIfNoFBGEMM
def test_general_shape_ops(self):
""" A test that checks dequantize will be swapped for
all supported general shape ops like aten::flatten
without actually checking for execution of these ops
"""
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.maxpool1d = torch.nn.MaxPool1d(kernel_size=3)
self.maxpool2d = torch.nn.MaxPool2d(kernel_size=3)
self.maxpool3d = torch.nn.MaxPool3d(kernel_size=3)
self.dropout = torch.nn.Dropout()
self.conv1 = torch.nn.Conv2d(3, 3, 3)
self.conv2 = torch.nn.Conv2d(3, 3, 3)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.conv1(x)
# add_scalar
x = x + 3
# mul_scalar
x = x * 3
# add_scalar_out
x += 3
# mul_scalar_out
x *= 3
# add_scalar_relu
x = x + 3
x = F.relu(x)
# add_scalar_relu_out
x += 3
x = F.relu(x)
# mul_scalar_relu
x = x * 3
x = F.relu(x)
# mul_scalar_relu_out
x *= 3
x = F.relu(x)
x = self.maxpool1d(x)
x = self.maxpool2d(x)
x = self.maxpool3d(x)
x = torch.flatten(x)
x = x.reshape([-1])
x = x.resize_(1, 1, x)
x = x.view(-1)
# prim::ListConstruct
xs = [x, x]
# prim::ListUnpack
x, y = xs
# prim::TupleConstruct
xs = (x, x)
# prim::TupleUnpack
x, y = xs
x = x.transpose(1, 2)
x = x.contiguous()
# chunk is not supported since observer only supports
# observing single Tensor currently
x, y = torch.chunk(x, 2)
x = F.dropout(x)
x = self.dropout(x)
x = x.permute(0, 2, 3, 1)
x = x.repeat_interleave(3, 1)
x = torch.repeat_interleave(x, 3, 1)
x = self.relu(x)
x = F.relu(x)
x = F.relu(x, inplace=True)
x = x.relu()
x.relu_()
x = x.squeeze(0)
x.squeeze_(0)
x = torch.squeeze(x, 0)
x = x.unsqueeze(0)
x.unsqueeze_(0)
x = torch.unsqueeze(x, 0)
x = x.detach()
x.detach_()
x = x.repeat(4, 2)
y = []
y.append(x)
z = torch.stack(y, 0)
z = [z, z]
x, _ = z
x = self.conv2(x)
return x
example_inputs = (torch.rand(1, 3, 10, 10),)
# This model is not executable since we just put all ops
# in the same forward
m = M().eval()
qconfig_dict = {'': default_qconfig}
prepared = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
# not runnable
quantized = convert_fx(prepared)
# This checks that the dequantize from the output of first conv
# is being propagated to the end, so that we don't insert extra
# observers and also successfully fused two quantized::conv2d
# patterns
# one quantize_per_tensor for input
# check exact counts of quantize and dequantize
count_check = {
# input of conv and two outputs of getitem
ns.call_function(torch.quantize_per_tensor) : 2,
# output of the model and two outputs of getitem
ns.call_method('dequantize') : 2
}
order_check = [
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Conv2d),
ns.call_module(nnq.Conv2d),
ns.call_method('dequantize'),
]
self.checkGraphModuleNodes(
quantized,
expected_node_occurrence=count_check,
expected_node_list=order_check)
# Checking the is_reference output
m = M().eval()
qconfig_dict = {'': default_qconfig}
prepared = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
# not runnable
quantized = convert_to_reference_fx(prepared)
@skipIfNoFBGEMM
def test_ave_pool_with_custom_cfg(self):
""" A test that checks correct patterns are produced for
avg_pool2d with customized config
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.avg_pool2d = torch.nn.AvgPool2d(3)
def forward(self, x):
x = self.avg_pool2d(x)
return x
# This model is not executable since we just put all ops
# in the same forward
m = M().eval()
# nothing to fuse so skipping the fuse step
qconfig_dict = {'': default_qconfig}
example_inputs = (torch.randn(1, 3, 3, 3),)
prepared = prepare_fx(
m, qconfig_dict, example_inputs=example_inputs,
prepare_custom_config={"input_quantized_idxs": [0]})
# not runnable
quantized = convert_fx(prepared)
# This checks that the dequantize from the output of first conv
# is being propagated to the end, so that we don't insert extra
# observers
# check exact counts of quantize and dequantize
count_check = {
ns.call_method('dequantize') : 1
}
order_check = [
ns.call_module(nn.AvgPool2d),
ns.call_method('dequantize'),
]
self.checkGraphModuleNodes(
quantized,
expected_node_occurrence=count_check,
expected_node_list=order_check)
@skipIfNoFBGEMM
def test_general_value_ops(self):
""" A test that checks correct patterns are produced for
all supported general value ops like aten::avg_pool2d \
without actually checking for execution of these ops
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 3)
self.avg_pool1d = torch.nn.AvgPool1d(3)
self.avg_pool2d = torch.nn.AvgPool2d(3)
self.avg_pool3d = torch.nn.AvgPool3d(3)
self.adaptive_avg_pool1d = torch.nn.AdaptiveAvgPool1d((1))
self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1))
self.adaptive_avg_pool3d = torch.nn.AdaptiveAvgPool3d((1, 1, 1))
def forward(self, x):
x = self.conv(x)
x = self.avg_pool1d(x)
x = self.avg_pool2d(x)
x = self.avg_pool3d(x)
x = self.adaptive_avg_pool1d(x)
x = self.adaptive_avg_pool2d(x)
x = self.adaptive_avg_pool3d(x)
x = F.avg_pool1d(x, 3)
x = F.avg_pool2d(x, 3)
x = F.avg_pool3d(x, 3)
x = F.adaptive_avg_pool1d(x, (1))
x = F.adaptive_avg_pool2d(x, (1, 1))
x = F.adaptive_avg_pool3d(x, (1, 1, 1))
x = torch.mean(x)
x = torch.mean(x, [2, 3], False)
x = x.mean()
x = x.mean([2, 3], True)
x = F.interpolate(x, 4, mode='nearest')
x = F.interpolate(x, 4, mode='linear')
x = self.conv(x)
return x
# This model is not executable since we just put all ops
# in the same forward
m = M().eval()
# nothing to fuse so skipping the fuse step
qconfig_dict = {'': default_qconfig}
example_inputs = (torch.randn(1, 3, 3, 3),)
prepared = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
# not runnable
quantized = convert_fx(prepared)
# This checks that the dequantize from the output of first conv
# is being propagated to the end, so that we don't insert extra
# observers
# check exact counts of quantize and dequantize
count_check = {
ns.call_function(torch.quantize_per_tensor) : 1,
ns.call_method('dequantize') : 1
}
order_check = [
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Conv2d),
ns.call_module(nnq.Conv2d),
ns.call_method('dequantize'),
]
self.checkGraphModuleNodes(
quantized,
expected_node_occurrence=count_check,
expected_node_list=order_check)
def test_copy_node_fp32_input(self):
""" CopyNode works for both fp32 and int8 inputs, this is a test to make
sure that a CopyNode can be successfully quantized in both cases
"""
class M(torch.nn.Module):
def forward(self, x):
x = x.relu()
return x
m = M().eval()
m = prepare_fx(m, {"": default_reuse_input_qconfig}, example_inputs=(torch.randn(1),))
m = convert_fx(m)
# make sure it runs
m(torch.rand(1))
def test_getitem(self):
""" Make sure we only insert observer for getitem if the following node is matched
or needs to be quantized
"""
class M(torch.nn.Module):
def forward(self, xs):
x = xs[0]
return x
m = M().eval()
example_inputs = (torch.rand(1, 2),)
qconfig_mapping = get_default_qconfig_mapping()
m = prepare_fx(m, qconfig_mapping, example_inputs=example_inputs)
self.checkGraphModuleNodes(m, expected_node_occurrence={
ns.call_module(torch.ao.quantization.MinMaxObserver): 0
})
m = convert_fx(m)
m(*example_inputs)
class M2(torch.nn.Module):
def forward(self, xs):
x = xs[0]
x = torch.sigmoid(x)
return x
m2 = M2().eval()
example_inputs = ([torch.rand(1, 2)],)
qconfig_mapping = get_default_qconfig_mapping()
m2 = prepare_fx(m2, qconfig_mapping, example_inputs=example_inputs)
self.checkGraphModuleNodes(m2, expected_node_occurrence={
ns.call_module(torch.ao.quantization.FixedQParamsObserver): 2
})
m2 = convert_fx(m2)
self.checkGraphModuleNodes(m2, expected_node_list=[
ns.call_function(torch.quantize_per_tensor),
ns.call_method("dequantize")
])
m2(*example_inputs)
# testing prepare recognizes non-Tensor input for getitem
class M3(torch.nn.Module):
def forward(self, x):
s = x.shape
n, c = s[:2]
x = torch.sigmoid(x)
return x
m3 = M3().eval()
example_inputs = (torch.rand(1, 2, 3, 4),)
qconfig_mapping = get_default_qconfig_mapping()
m3 = prepare_fx(m3, qconfig_mapping, example_inputs=example_inputs)
self.checkGraphModuleNodes(m3, expected_node_occurrence={
ns.call_module(torch.ao.quantization.FixedQParamsObserver): 2
})
m3 = convert_fx(m3)
self.checkGraphModuleNodes(m3, expected_node_list=[
ns.call_function(torch.quantize_per_tensor),
ns.call_method("dequantize")
])
m3(*example_inputs)
@skipIfNoFBGEMM
def test_fixed_qparams_ops(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 3)
self.sigmoid = torch.nn.Sigmoid()
self.hardsigmoid = torch.nn.Hardsigmoid()
self.tanh = torch.nn.Tanh()
self.softmax = torch.nn.Softmax(dim=0)
def forward(self, x):
x = self.conv(x)
# F.sigmoid is deprecated
x = self.sigmoid(x)
x = torch.sigmoid(x)
x = x.sigmoid()
x = self.hardsigmoid(x)
x = F.hardsigmoid(x)
x = F.hardsigmoid(x, inplace=True)
x = self.tanh(x)
# F.tanh is deprecated
x = torch.tanh(x)
x = x.tanh()
# TODO(future PR): handle F.softmax
x = self.softmax(x)
return x
for eval_mode in [True, False]:
# This model is not executable since we just put all ops
# in the same forward
m = M()
if eval_mode:
m.eval()
qconfig_mapping = get_default_qconfig_mapping()
prepare = prepare_fx
fq_count = 10
else:
m.train()
qconfig_mapping = get_default_qat_qconfig_mapping()
prepare = prepare_qat_fx
fq_count = 10
# nothing to fuse so skipping the fuse step
m_copy = copy.deepcopy(m)
example_inputs = (torch.rand(3, 3, 3, 3),)
prepared = prepare(m, qconfig_mapping, example_inputs=example_inputs)
prepared_copy = copy.deepcopy(prepared)
# check that prepare does not change model result
if eval_mode:
self.assertEqual(m_copy(*example_inputs), prepared_copy(*example_inputs))
# check the correct number of activation_post_process is inserted
expected_activation_post_process = FixedQParamsObserver if eval_mode else FixedQParamsFakeQuantize
count_check = {
ns.call_module(expected_activation_post_process) : fq_count,
}
self.checkGraphModuleNodes(
prepared,
expected_node_occurrence=count_check)
# not runnable
quantized = convert_fx(prepared)
quantized_reference = convert_to_reference_fx(prepared_copy)
# This checks that the dequantize from the output of first conv
# is being propagated to the end, so that we don't insert extra
# observers
# check exact counts of quantize and dequantize
count_check = {
ns.call_function(torch.quantize_per_tensor) : 1,
ns.call_method('dequantize') : 1
}
order_check = [
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Conv2d),
ns.call_module(nn.Sigmoid),
ns.call_module(nnq.Softmax),
ns.call_method('dequantize'),
]
self.checkGraphModuleNodes(
quantized,
expected_node_occurrence=count_check,
expected_node_list=order_check)
reference_count_check = {
ns.call_function(torch.quantize_per_tensor) : 12,
ns.call_method('dequantize') : 12
}
reference_order_check = [
ns.call_function(torch.quantize_per_tensor),
ns.call_method('dequantize'),
ns.call_module(nnqr.Conv2d),
ns.call_function(torch.quantize_per_tensor),
ns.call_method('dequantize'),
ns.call_module(nn.Sigmoid),
ns.call_function(torch.quantize_per_tensor),
ns.call_method('dequantize'),
ns.call_module(nn.Softmax),
ns.call_function(torch.quantize_per_tensor),
ns.call_method('dequantize'),
]
self.checkGraphModuleNodes(
quantized_reference,
expected_node_occurrence=reference_count_check,
expected_node_list=reference_order_check)
# Verify that softmax scale and zero_point are correct
            self.assertTrue(abs(quantized.softmax.scale - (1.0 / 256)) <= 1e-8)
self.assertTrue(quantized.softmax.zero_point == 0)
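    # Illustrative sketch only (not part of the original test suite): the scale and
    # zero_point assertions above rely on the fixed quantization parameters that
    # fixed-qparams ops such as Sigmoid/Softmax are expected to use for quint8
    # outputs, i.e. scale = 1 / 256 and zero_point = 0. The helper name below is
    # hypothetical.
    @staticmethod
    def _sketch_fixed_qparams_sigmoid_quantization():
        scale, zero_point = 1.0 / 256, 0
        x = torch.sigmoid(torch.randn(4))
        # sigmoid outputs lie in (0, 1), so the fixed [0, 255/256] range covers them
        return torch.quantize_per_tensor(x, scale, zero_point, torch.quint8)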
def test_float_functional(self):
class TorchAdd(nn.Module):
"""Wrapper around torch.add so that all ops can be found at build"""
def __init__(self):
super().__init__()
self.add_func = nnq.FloatFunctional()
def forward(self, x, y):
return self.add_func.add(x, y)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.ff1 = TorchAdd()
self.ff2 = nnq.FloatFunctional()
self.ff3 = nnq.FloatFunctional()
self.ff4 = nnq.FloatFunctional()
self.ff5 = nnq.FloatFunctional()
self.ff6 = nnq.FloatFunctional()
def forward(self, x):
x = self.ff1(x, x)
x = self.ff2.add_scalar(x, 3)
x = self.ff3.mul(x, x)
x = self.ff4.mul_scalar(x, 3)
x = self.ff5.add_relu(x, x)
x = self.ff6.cat([x])
return x
example_inputs = (torch.rand(3, 3),)
        # Note: the QAT test succeeded by chance; to make it actually work
# we need to fix eager mode FloatFunctional by removing
# activation_post_process in add_scalar and mul_scalar
for quant_type in self.static_quant_types:
m = M()
ref_m = torch.ao.quantization.QuantWrapper(M())
is_qat = quant_type == QuantType.QAT
if is_qat:
m.train()
ref_m.train()
qconfig = default_qat_qconfig
expected_act_post_process = torch.ao.quantization.FakeQuantize
else:
m.eval()
ref_m.eval()
qconfig = default_qconfig
expected_act_post_process = torch.ao.quantization.MinMaxObserver
prepare_fx_function = prepare_qat_fx if is_qat else prepare_fx
qconfig_dict = {"": qconfig}
m = prepare_fx_function(m, qconfig_dict, example_inputs=example_inputs)
node_occurrence = {
ns.call_module(expected_act_post_process): 7,
ns.call_module(torch.nn.quantized.FloatFunctional): 0
}
self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
m(*example_inputs)
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.add),
ns.call_function(torch.ops.quantized.add),
ns.call_function(torch.ops.quantized.mul),
ns.call_function(torch.ops.quantized.mul),
ns.call_function(torch.ops.quantized.add_relu),
ns.call_function(torch.cat),
ns.call_method('dequantize')
]
m = convert_fx(m)
self.checkGraphModuleNodes(m, expected_node_list=node_list)
# make sure numerics match with eager mode
ref_m.qconfig = qconfig
prepare_function = prepare_qat if is_qat else prepare
ref_m = prepare_function(ref_m)
ref_m(*example_inputs)
ref_m = convert(ref_m)
            # FX Graph Mode and Eager Mode now diverge in numerics of add_scalar and mul_scalar
# self.assertEqual(m(data), ref_m(data))
def test_embedding(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12)
def forward(self, indices):
return self.emb(indices)
for qconfig_type in [float_qparams_weight_only_qconfig, float_qparams_weight_only_qconfig_4bit]:
model = M().eval()
indices = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8, 6, 6, 9, 1, 6, 8, 8, 3, 2, 3, 6, 3, 6, 5, 7, 0, 8, 4, 6, 5, 8, 2, 3])
example_inputs = (indices,)
quantized_node = ns.call_module(nnq.Embedding)
configs = [
(qconfig_type, ns.call_module(nnq.Embedding)),
(None, ns.call_module(nn.Embedding)),
(default_qconfig, ns.call_module(nn.Embedding)),
]
for qconfig, node in configs:
qconfig_dict = {"": qconfig}
m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
self.checkGraphModuleNodes(m, expected_node_occurrence={
ns.call_module(torch.ao.quantization.MinMaxObserver): 0
})
m = convert_fx(m)
self.checkGraphModuleNodes(m, expected_node=node)
# make sure it runs
m(*example_inputs)
def test_embedding_bag(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, include_last_offset=True)
def forward(self, indices, offsets):
return self.emb(indices, offsets)
indices = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8, 6, 6, 9, 1, 6, 8, 8, 3, 2, 3, 6, 3, 6, 5, 7, 0, 8, 4, 6, 5, 8, 2, 3])
offsets = torch.tensor([0, 19, 20, 28, 28, 32])
quantized_node = ns.call_module(nnq.EmbeddingBag)
example_inputs = (indices, offsets)
for dtype in [torch.quint8, torch.quint4x2]:
model = M().eval()
float_qparams_observer = PerChannelMinMaxObserver.with_args(dtype=dtype,
qscheme=torch.per_channel_affine_float_qparams,
ch_axis=0)
float_qparams_qconfig = QConfig(activation=default_placeholder_observer,
weight=float_qparams_observer)
self.checkGraphModeFxOp(
model,
example_inputs,
QuantType.DYNAMIC,
quantized_node,
custom_qconfig_dict={"": float_qparams_qconfig}
)
# check it works in None and static qconfig
for qconfig in [None, default_qconfig]:
qconfig_dict = {"": default_qconfig}
m = M().eval()
m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
self.checkGraphModuleNodes(m, expected_node_occurrence={
ns.call_module(torch.ao.quantization.MinMaxObserver): 0
})
m = convert_fx(m)
self.checkGraphModuleNodes(m, expected_node=ns.call_module(nn.EmbeddingBag))
# make sure it runs
m(*example_inputs)
def _test_rnn_impl(self, qconfigs, M, module_type_strs, module_types, sample_input):
options = itertools.product(qconfigs, module_type_strs)
for qconfig, module_type_str in options:
model_eager = M(module_type_str).eval()
model_graph = copy.deepcopy(model_eager)
            # fp16 dynamic quant is not supported for qnnpack
            if torch.backends.quantized.engine == 'qnnpack' and \
                    qconfig is float16_dynamic_qconfig:
                continue
eager_qconfig_dict = {x : qconfig for x in module_types}
model_eager = quantize_dynamic(model_eager, qconfig_spec=eager_qconfig_dict)
graph_qconfig_dict = {
"object_type": [
(x, qconfig) for x in module_types
]
}
model_graph = prepare_fx(model_graph, graph_qconfig_dict, example_inputs=(sample_input,))
model_graph = convert_fx(model_graph)
self.assertEqual(model_eager(sample_input), model_graph(sample_input))
self.checkScriptable(model_graph, [[sample_input]], True)
@override_qengines
def test_rnn_cell(self):
if torch.backends.quantized.engine not in ('fbgemm', 'qnnpack'):
return
qconfigs = [per_channel_dynamic_qconfig, default_dynamic_qconfig, float16_dynamic_qconfig]
module_type_strs = ['LSTMCell', 'GRUCell', 'RNNTanh', 'RNNReLU']
module_types = [torch.nn.LSTMCell, torch.nn.GRUCell, torch.nn.RNNCell]
sample_input = torch.tensor([[100, -155],
[-155, 100],
[100, -155]], dtype=torch.float)
self._test_rnn_impl(qconfigs, RNNCellDynamicModel, module_type_strs, module_types, sample_input)
@override_qengines
def test_rnn(self):
if torch.backends.quantized.engine not in ('fbgemm', 'qnnpack'):
return
qconfigs = [per_channel_dynamic_qconfig, default_dynamic_qconfig, float16_dynamic_qconfig]
module_type_strs = ['LSTM']
module_types = [torch.nn.LSTM]
niter = 10
sample_input = torch.tensor([[100, -155],
[-155, 100],
[100, -155]], dtype=torch.float).unsqueeze(0).repeat(niter, 1, 1)
self._test_rnn_impl(qconfigs, RNNDynamicModel, module_type_strs, module_types, sample_input)
def _test_conv_transpose_impl(
self, float_cls: Callable, q_cls: Callable, data: torch.Tensor):
with override_quantized_engine('qnnpack'):
# Create fp32 versions of FX and Eager models
m1 = torch.nn.Sequential(float_cls(1, 1, 1))
m2 = torch.nn.Sequential(float_cls(1, 1, 1))
m2.load_state_dict(m1.state_dict())
m2 = torch.ao.quantization.QuantWrapper(m2)
# FX graph
result_dict = self.checkGraphModeFxOp(
m1, (data,), QuantType.STATIC,
expected_node_occurrence={
ns.call_module(q_cls): 1,
})
q_result1 = result_dict["quantized_output"]
# Eager
m2.qconfig = get_default_qconfig(torch.backends.quantized.engine)
m2.eval()
m2p = torch.ao.quantization.prepare(m2)
m2p(data)
m2q = torch.ao.quantization.convert(m2p)
q_result2 = m2q(data)
# verify results match
self.assertEqual(q_result1, q_result2)
@unittest.skipUnless('qnnpack' in supported_qengines,
"This Pytorch Build has not been built with or does not support QNNPACK")
def test_conv_transpose_1d(self):
self._test_conv_transpose_impl(
torch.nn.ConvTranspose1d, nnq.ConvTranspose1d, torch.randn(4, 1, 4))
@unittest.skipUnless('qnnpack' in supported_qengines,
"This Pytorch Build has not been built with or does not support QNNPACK")
def test_conv_transpose_2d(self):
self._test_conv_transpose_impl(
torch.nn.ConvTranspose2d, nnq.ConvTranspose2d, torch.randn(4, 1, 4, 4))
def test_reshape_fp16(self):
class M(torch.nn.Module):
def __init__(self, w, b):
super().__init__()
self.w = w
self.b = b
def forward(self, x):
x = torch.nn.functional.linear(x, self.w)
x = x.reshape(-1, 4)
x = torch.nn.functional.linear(x, self.w)
return x
w = torch.randn(4, 4)
b = torch.randn(4)
m = M(w, b).eval()
qconfig_dict = {
# reshape will be quantized to fp16 as requested by this qconfig
"": float16_static_qconfig,
"object_type": [
(torch.nn.functional.linear, default_qconfig)
]
}
backend_config = get_test_only_legacy_native_backend_config()
example_inputs = (torch.randn(1, 4),)
m = prepare_fx(
m, qconfig_dict, example_inputs=example_inputs,
backend_config=backend_config)
expected_occurrence = {
# input and weight of first and second linear, output of first and second linear
ns.call_module(torch.ao.quantization.MinMaxObserver): 6,
# we insert placeholder observer for both input and output of reshape
ns.call_module(torch.ao.quantization.PlaceholderObserver): 2
}
self.checkGraphModuleNodes(
m,
expected_node_occurrence=expected_occurrence
)
m = convert_fx(m, backend_config=backend_config)
expected_occurrence = {
ns.call_function(torch.quantize_per_tensor): 2,
# dequantize after first linear, before reshape and before output
ns.call_method("dequantize"): 3,
# before reshape, to(fp16)
ns.call_method("to"): 1,
ns.call_function(torch.ops.quantized.linear): 2
}
self.checkGraphModuleNodes(
m,
expected_node_occurrence=expected_occurrence
)
# make sure it runs
m(torch.randn(2, 4))
def test_multiple_qconfigs_for_single_value(self):
""" Test multiple qconfigs for a single value"""
class M(torch.nn.Module):
def __init__(self, w, b):
super().__init__()
self.w = w
self.b = b
def forward(self, x):
x = torch.nn.functional.linear(x, self.w)
x = torch.sigmoid(x)
return x
w = torch.randn(4, 4)
b = torch.randn(4)
m = M(w, b).eval()
# TODO: use get_default_qconfig_mapping once it handles fp16
qconfig_mapping = QConfigMapping() \
.set_global(float16_static_qconfig) \
.set_object_type(torch.nn.functional.linear, default_qconfig)
example_inputs = (torch.randn(1, 4),)
backend_config = get_test_only_legacy_native_backend_config()
m = prepare_fx(
m, qconfig_mapping, example_inputs=example_inputs,
backend_config=backend_config)
expected_occurrence = {
# input and weight of linear, output of linear
ns.call_module(torch.ao.quantization.MinMaxObserver): 3,
# input and output of sigmoid
ns.call_module(torch.ao.quantization.PlaceholderObserver): 2,
}
self.checkGraphModuleNodes(
m,
expected_node_occurrence=expected_occurrence
)
# make sure it runs
m = convert_fx(m)
expected_occurrence = {
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_method("dequantize"): 3,
ns.call_method("to"): 2
}
self.checkGraphModuleNodes(
m,
expected_node_occurrence=expected_occurrence
)
def test_boolean_tensor(self):
""" Make sure we don't insert observer for boolean Tensors """
class M(torch.nn.Module):
def forward(self, x, mask):
mask = mask.unsqueeze(0)
mask = mask.unsqueeze(1)
x = x.masked_fill(mask, 1)
return x
m = M().eval()
example_inputs = (torch.rand(1, 2, 3, 4), torch.rand(3, 4).bool())
m = prepare_fx(m, {"": default_qconfig}, example_inputs=example_inputs)
expected_occurrence = {
ns.call_module(torch.ao.quantization.MinMaxObserver): 0
}
self.checkGraphModuleNodes(
m,
expected_node_occurrence=expected_occurrence)
m = convert_fx(m)
m(*example_inputs)
def test_chunk(self):
class M(torch.nn.Module):
def forward(self, x):
x, y = torch.chunk(x, 2)
x = x + y
return x
m = M().eval()
example_inputs = (torch.rand(2, 2, 2, 2),)
m = prepare_fx(m, {"": default_qconfig}, example_inputs=example_inputs)
m(*example_inputs)
m = convert_fx(m)
m(*example_inputs)
# make sure everything runs
def test_ref_pattern_multi_use(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(5, 5)
self.linear1 = torch.nn.Linear(5, 5)
def forward(self, x):
y = self.linear(x)
z = self.linear1(x)
a = torch.mul(z, 5)
b = torch.add(z, 5)
return (y, a, b)
m = M().eval()
qconfig_dict = {
"": None,
"object_type": [
(torch.nn.Linear, get_default_qconfig("fbgemm")),
(torch.nn.ReLU, get_default_qconfig("fbgemm")),
],
}
example_inputs = (torch.randn(1, 5),)
m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
m = convert_fx(m)
expected_occurrence = {
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_module(nnq.Linear): 2,
ns.call_method("dequantize"): 2,
ns.call_function(torch.add): 1,
ns.call_function(torch.mul): 1,
}
self.checkGraphModuleNodes(
m,
expected_node_occurrence=expected_occurrence)
@unittest.skip("This is no longer needed right now, can enable later with new api")
def test_qmatmul(self):
class M(torch.nn.Module):
def forward(self, x, y):
z = torch.matmul(x, y)
return z
m = M().eval()
example_inputs = (torch.randn(2, 2), torch.randn(2, 2))
qconfig_dict = {"": torch.ao.quantization.default_qconfig}
mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
mp(*example_inputs)
mq = convert_fx(mp)
expected_occurrence = {
ns.call_function(torch.matmul): 0,
ns.call_function(torch.ops.quantized.matmul): 1,
}
self.checkGraphModuleNodes(
mq,
expected_node_occurrence=expected_occurrence)
# verify no crash
res = mq(*example_inputs)
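# Illustrative sketch only (not part of the original test suite): a minimal
# post-training static quantization flow through the FX graph mode APIs that the
# tests above exercise, assuming the default "fbgemm" qconfig mapping. The helper
# name and the toy model are hypothetical; the function is never called here.
def _sketch_fx_ptq_flow():
    import torch
    from torch.ao.quantization import get_default_qconfig_mapping
    from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx
    model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU()).eval()
    example_inputs = (torch.randn(1, 4),)
    qconfig_mapping = get_default_qconfig_mapping("fbgemm")
    # insert observers according to the qconfig mapping
    prepared = prepare_fx(model, qconfig_mapping, example_inputs=example_inputs)
    # calibrate with representative data
    prepared(*example_inputs)
    # swap observed patterns for quantized ops
    quantized = convert_fx(prepared)
    return quantized(*example_inputs)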
class TestQuantizeFxModels(QuantizationTestCase):
@skipIfNoFBGEMM
@unittest.skipIf(not TEST_CUDA, "gpu is not available.")
def test_static_gpu_convert_basic(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.relu1 = nn.ReLU()
self.conv1 = nn.Conv2d(1, 6, 5)
self.linear1 = nn.Linear(120, 1)
def forward(self, x):
x = self.relu1(self.conv1(x))
y = self.linear1(x.view(-1))
return y
input = torch.randn((5, 1, 6, 6)).to('cuda')
example_inputs = (input,)
model = Net().to('cuda').eval()
qconfig_dict = {"": torch.ao.quantization.get_default_qconfig('fbgemm')}
model_prepared = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
model_prepared(*example_inputs)
model_quantized = convert_to_reference_fx(model_prepared)
out = model_quantized(*example_inputs)
self.assertEqual(out.device.type, 'cuda')
@skipIfNoFBGEMM
@unittest.skipIf(not TEST_CUDA, "gpu is not available.")
def test_switch_device_prepare_convert(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.relu1 = nn.ReLU()
self.conv1 = nn.Conv2d(1, 6, 5)
self.linear1 = nn.Linear(120, 1)
def forward(self, x):
x = self.relu1(self.conv1(x))
y = self.linear1(x.view(-1))
return y
for device in ['cuda', 'cpu']:
device_after = 'cuda' if device == 'cpu' else 'cpu'
input = torch.randn((5, 1, 6, 6)).to(device)
model = Net().to(device).eval()
qconfig_dict = {"": torch.ao.quantization.get_default_qconfig('fbgemm')}
model_prepared = prepare_fx(model, qconfig_dict, example_inputs=(input,))
model_prepared(input)
model_prepared.to(device_after)
model_quantized = convert_to_reference_fx(model_prepared)
out = model_quantized(input.to(device_after))
self.assertEqual(out.device.type, device_after)
@skipIfNoFBGEMM
@unittest.skipIf(not TEST_CUDA, "gpu is not available.")
def test_prepare_serialize_switch_device_convert(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 6, 5)
self.linear1 = nn.Linear(120, 1)
def forward(self, x):
x = self.conv1(x)
y = self.linear1(x.view(-1))
return y
for device in ['cuda', 'cpu']:
for device_after in ['cuda', 'cpu']:
input = torch.randn((5, 1, 6, 6)).to(device)
model = Net().to(device).eval()
qconfig_dict = {"": torch.ao.quantization.get_default_qconfig('fbgemm')}
model_prepared_first = prepare_fx(model, qconfig_dict, example_inputs=(input,))
model_prepared_second = prepare_fx(model, qconfig_dict, example_inputs=(input,))
model_prepared_first(input)
state_dict = model_prepared_first.state_dict()
del model_prepared_first
model_prepared_second.load_state_dict(state_dict)
model_prepared_second.to(device_after)
model_quantized = convert_to_reference_fx(model_prepared_second)
out = model_quantized(input.to(device_after))
self.assertEqual(out.device.type, device_after)
@skip_if_no_torchvision
def test_model_dropout(self):
from torchvision import models
m = models.mobilenet_v3_small()
qconfig_mapping = torch.ao.quantization.get_default_qat_qconfig_mapping('fbgemm')
example_inputs = (torch.randn(1, 3, 224, 224),)
mp = prepare_qat_fx(m, qconfig_mapping, example_inputs=example_inputs)
mp(*example_inputs)
with override_quantized_engine("qnnpack") if IS_ARM64 else contextlib.nullcontext():
mq = convert_fx(mp)
mq(*example_inputs)
def _test_model_impl(
self, mode, name, model, eager_quantizable_model,
check_with_eager=True,
diff_of_quant=None,
diff_from_eager=None):
if diff_of_quant is None or diff_from_eager is None:
diff_of_quant = {}
diff_from_eager = {}
if mode not in diff_of_quant or mode not in diff_from_eager:
diff_of_quant[mode] = {}
diff_from_eager[mode] = {}
input_tensor = torch.rand(1, 3, 224, 224)
input_tensor_inception = torch.rand(1, 3, 299, 299)
output_value = torch.randint(0, 1, (1,))
# print('quantizing:', name, ' mode:', mode)
if name == 'inception_v3':
input_value = input_tensor_inception
else:
input_value = input_tensor
qconfig = default_qconfig if mode == 'static' else default_qat_qconfig
qconfig_dict = {'': qconfig}
script = torch.jit.script(model)
        # make sure graph module and script module are both runnable
original_out = model(input_value)
is_not_tuple_out = not isinstance(original_out, tuple)
script_out = script(input_value)
# set to train just before quantization
prepare_fx_fn = prepare_fx
if mode != 'static':
model.train()
prepare_fx_fn = prepare_qat_fx
prepared = prepare_fx_fn(model, qconfig_dict)
if mode == 'ddp':
mp.spawn(run_ddp,
args=(world_size, prepared),
nprocs=world_size,
join=True)
elif mode == 'qat':
assert prepared.training, 'prepared must be in training mode for qat'
optimizer = torch.optim.SGD(prepared.parameters(), lr=0.0001)
criterion = nn.CrossEntropyLoss()
train_one_epoch(prepared, criterion, optimizer, [(input_value, output_value)], torch.device('cpu'), 1)
else:
for i in range(10):
prepared(input_value)
# print('after observation root:', prepared.root)
qgraph = convert_fx(prepared)
# print('after quantization root:', qgraph.root)
# print('after quantization code:', qgraph.src)
qgraph.eval()
qgraph_script = torch.jit.script(qgraph)
# print('quantized and scripted:', qgraph_script.graph)
qgraph_out = qgraph(input_value)
qgraph_script = qgraph_script(input_value)
if is_not_tuple_out:
diff_of_quant[mode][name] = (original_out - qgraph_out).abs().max()
assert torch.allclose(qgraph_out, qgraph_script), 'graph, scripted graph'
else:
print('tuple output')
if eager_quantizable_model is not None:
# comparing to eager mode quantization
qeager = eager_quantizable_model
ref_out = qeager(input_value)
qeager.qconfig = qconfig
if mode == 'static':
qeager.fuse_model()
prepare(qeager, inplace=True)
else:
qeager.train()
qeager.fuse_model()
prepare_qat(qeager, inplace=True)
# calibration
if mode == 'ddp':
mp.spawn(run_ddp,
args=(world_size, qeager),
nprocs=world_size,
join=True)
elif mode == 'qat':
assert qeager.training, 'qeager should be in training mode for qat'
optimizer = torch.optim.SGD(qeager.parameters(), lr=0.0001)
train_one_epoch(qeager, criterion, optimizer, [(input_value, output_value)], torch.device('cpu'), 1)
else:
for i in range(10):
qeager(input_value)
# print('ref after observation:', qeager)
convert(qeager, inplace=True)
qeager.eval()
# print('ref after quantization:', qeager)
qeager_out = qeager(input_value)
qeager_script = torch.jit.script(qeager)
qscript_out = qeager_script(input_value)
if is_not_tuple_out:
diff_from_eager[mode][name] = (qeager_out - qgraph_out).abs().max()
if check_with_eager:
self.assertEqual(diff_from_eager[mode][name], 0,
'Result of graph mode quantization and ' +
'eager mode quantization on model: ' + name +
' should match. Mode: ' + mode +
' diff:' + str(diff_from_eager[mode][name]))
def _test_building_block(self, quant_type, BB):
eager = BB().float()
graph = copy.deepcopy(eager)
if quant_type == QuantType.STATIC:
qconfig = default_qconfig
eager_prepare = prepare
graph_prepare = prepare_fx
eager.eval()
graph.eval()
calibrate_or_train = test_only_eval_fn
data = self.img_data_2d
is_qat = False
else:
assert quant_type == QuantType.QAT
qconfig = default_qat_qconfig
eager_prepare = prepare_qat
graph_prepare = prepare_qat_fx
eager.train()
graph.train()
calibrate_or_train = test_only_train_fn
data = self.img_data_2d_train
is_qat = True
if hasattr(eager, "fuse_model"):
eager.fuse_model()
eager = QuantWrapper(eager)
eager.qconfig = qconfig
eager = eager_prepare(eager)
qconfig_dict = {"": qconfig}
graph = graph_prepare(graph, qconfig_dict, example_inputs=(data[0][0],))
eager_out = eager(data[0][0])
graph_out = graph(data[0][0])
# Eager Mode and FX Graph Mode QAT now differ in numerics both
        # in Post Training and QAT because FX Graph Mode uses the same fake_quant instances
# for input and output of CopyNode
# self.assertEqual(eager_out, graph_out)
calibrate_or_train(eager, data)
calibrate_or_train(graph, data)
eager = convert(eager)
graph = convert_fx(graph)
eager_out = eager(data[0][0])
graph_out = graph(data[0][0])
@override_qengines
def test_resnet_base(self):
models = [ResNetBase]
options = itertools.product(self.static_quant_types, models)
for quant_type, M in options:
self._test_building_block(quant_type, M)
@skip_if_no_torchvision
@skipIfNoFBGEMM
@unittest.skip("skip for now since tbb failed")
def test_torchvision(self):
from torchvision import models
from torchvision.models import quantization as quantized_models
from torchvision.models.quantization.utils import _replace_relu
def get_available_classification_models(models):
return [k for k, v in models.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]
model_list = get_available_classification_models(models)
quantized_model_list = get_available_classification_models(quantized_models)
quantized_model_list = set(quantized_model_list)
# test eager and graph consistency
model_list = quantized_model_list
# mobilenet/inception_v3/googlenet qat is not working due to AdaptiveAveragePool qat
# we might observe the output of AdaptiveAveragePool in the future
# and re-enable the test
fx_eager_not_matching = [
("mobilenet_v2", "qat"),
("inception_v3", "qat"),
("googlenet", "qat")
        ] # because relu6 is replaced with relu in mobilenetv2
diff_of_quant = {}
diff_from_eager = {}
modes = ['static', 'qat']
options = itertools.product(modes, model_list)
for mode, name in options:
pretrained = name in quantized_model_list # load pretrained model to compare with quantized model
kwargs = {}
# turn off transform input for inception_v3 since
# it's not quantized in eager mode and in fx graph
# mode we can't skip quantizing a method right now
# (might be supported in the future)
if name in ["inception_v3", "googlenet"]:
kwargs["transform_input"] = False
eager_quantizable_model = None
if name in quantized_model_list:
eager_quantizable_model = quantized_models.__dict__[name](pretrained=False, quantize=False, **kwargs).eval().float()
# compare with eager mode quantized model when it is available
pretrained = eager_quantizable_model is not None
model = models.__dict__[name](pretrained=pretrained, **kwargs).eval().float()
if name == "mobilenet_v2":
_replace_relu(model)
# disable aux logits
if hasattr(model, "aux_logits"):
model.aux_logits = False
model.AuxLogits = None
if eager_quantizable_model:
eager_quantizable_model.aux_logits = False
eager_quantizable_model.AuxLogits = None
check_with_eager = (name, mode) not in fx_eager_not_matching
self._test_model_impl(
mode, name, model, eager_quantizable_model,
check_with_eager,
diff_of_quant, diff_from_eager)
def print_diffs(diffs):
for mode, diffs_for_mode in diffs.items():
print('mode:', mode)
for name, diff in diffs_for_mode.items():
print(name, ':', diff)
# print('differences between float and quantized')
# print_diffs(diff_of_quant)
# print('----------------------')
# print('differences between graph mode and eager mode')
# print_diffs(diff_from_eager)
# print('----------------------')
@skip_if_no_torchvision
@skipIfNoFBGEMM
@unittest.skip("TODO: Test is always failing - https://github.com/pytorch/pytorch/issues/54979")
def test_resnet18_ddp(self):
from torchvision import models
from torchvision.models import quantization as quantized_models
        eager_quantizable_model = quantized_models.__dict__['resnet18'](pretrained=False, quantize=False).eval().float()
        model = models.__dict__['resnet18'](pretrained=False).eval().float()
self._test_model_impl(
'ddp', 'resnet18', model, eager_quantizable_model)
@override_qengines
def test_qat_embeddingbag_linear(self):
for device in get_supported_device_types():
class EmbeddingBagLinear(torch.nn.Module):
def __init__(self):
super(EmbeddingBagLinear, self).__init__()
self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, mode='sum')
self.linear = torch.nn.Linear(12, 1).to(dtype=torch.float)
def forward(self, input: torch.Tensor, offsets: Optional[torch.Tensor] = None,
per_sample_weights: Optional[torch.Tensor] = None):
x = self.emb(input, offsets, per_sample_weights)
x = self.linear(x)
return x
qengine = torch.backends.quantized.engine
qconfig_dict = {"": get_default_qat_qconfig(qengine),
"object_type": [(torch.nn.EmbeddingBag, default_embedding_qat_qconfig)]}
train_indices = [[torch.randint(0, 10, (12, 12)), torch.randn((12, 1))] for _ in range(2)]
eval_output = [[torch.randint(0, 10, (12, 1))]]
model = EmbeddingBagLinear().train()
prepared_fx_model = prepare_qat_fx(model, qconfig_dict, example_inputs=(train_indices[0][0],))
test_only_train_fn(prepared_fx_model, train_indices)
quant_model = convert_fx(prepared_fx_model,
qconfig_mapping=qconfig_dict)
def checkQuantized(model):
# Make sure EmbeddingBag is now a quantized EmbeddingBag.
                self.assertEqual(type(model.emb), nn.quantized.EmbeddingBag)
                # Also test that Linear has been quantized.
                self.assertEqual(type(model.linear), nnq.Linear)
test_only_eval_fn(model, eval_output)
self.checkScriptable(model, eval_output)
self.checkNoQconfig(model)
checkQuantized(quant_model)
@override_qengines
def test_qat_embedding_linear(self):
for device in get_supported_device_types():
class EmbeddingLinear(torch.nn.Module):
def __init__(self):
super(EmbeddingLinear, self).__init__()
self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12)
self.linear = torch.nn.Linear(12, 1).to(dtype=torch.float)
def forward(self, input: torch.Tensor):
x = torch.sum(self.emb(input), dim=1)
x = self.linear(x)
return x
qengine = torch.backends.quantized.engine
qconfig_dict = {"": get_default_qat_qconfig(qengine),
"object_type": [(torch.nn.Embedding, default_embedding_qat_qconfig)]}
train_indices = [[torch.randint(0, 10, (12, 12)), torch.randn((12, 1))] for _ in range(2)]
eval_output = [[torch.randint(0, 10, (12, 1))]]
model = EmbeddingLinear().train()
prepared_fx_model = prepare_qat_fx(model, qconfig_dict, example_inputs=(train_indices[0][0],))
test_only_train_fn(prepared_fx_model, train_indices)
quant_model = convert_fx(prepared_fx_model,
qconfig_mapping=qconfig_dict)
def checkQuantized(model):
                # Make sure Embedding is now a quantized Embedding.
                self.assertEqual(type(model.emb), nn.quantized.Embedding)
                # Also test that Linear has been quantized.
                self.assertEqual(type(model.linear), nnq.Linear)
test_only_eval_fn(model, eval_output)
self.checkScriptable(model, eval_output)
self.checkNoQconfig(model)
checkQuantized(quant_model)
@given(
device=st.sampled_from(
["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
)
)
@settings(deadline=None)
@override_qengines
def test_qat_functional_linear(self, device):
if torch.backends.quantized.engine not in ('fbgemm', 'qnnpack'):
return
class Linear(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.ones(5, 5)
self.b = torch.zeros(5)
def forward(self, x):
return torch.nn.functional.linear(x, self.w, self.b)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.mods1 = torch.nn.Sequential(Linear(), Linear())
self.mods2 = Linear()
def forward(self, x):
x = self.mods1(x)
x = self.mods2(x)
return x
model = M().train()
ref_fake_quant = FakeQuantize.with_args(
observer=MovingAverageMinMaxObserver,
quant_min=0,
quant_max=255,
dtype=torch.quint8,
reduce_range=False,
)
ref_weight_fake_quant = FakeQuantize.with_args(
observer=MovingAverageMinMaxObserver,
quant_min=-128,
quant_max=127,
dtype=torch.qint8,
reduce_range=False,
)
ref_qat_qconfig = QConfig(
activation=ref_fake_quant, weight=ref_weight_fake_quant
)
qconfig_dict = {"": ref_qat_qconfig}
example_inputs = (torch.randn(1, 5),)
prepared_ref = prepare_qat_fx(model, qconfig_dict, example_inputs=example_inputs)
custom_fake_quant = FusedMovingAvgObsFakeQuantize.with_args(
observer=MovingAverageMinMaxObserver,
quant_min=0,
quant_max=255,
dtype=torch.quint8,
reduce_range=False,
)
custom_weight_fake_quant = FusedMovingAvgObsFakeQuantize.with_args(
observer=MovingAverageMinMaxObserver,
quant_min=-128,
quant_max=127,
dtype=torch.qint8,
reduce_range=False,
)
custom_qconfig = QConfig(
activation=custom_fake_quant, weight=custom_weight_fake_quant
)
custom_qconfig_dict = {"": custom_qconfig}
prepared = prepare_qat_fx(model, custom_qconfig_dict, example_inputs=example_inputs)
prepared.to(device)
prepared_ref.to(device)
prepared.apply(torch.ao.quantization.disable_fake_quant)
prepared.apply(torch.ao.quantization.disable_observer)
prepared_ref.apply(torch.ao.quantization.disable_fake_quant)
prepared_ref.apply(torch.ao.quantization.disable_observer)
inp = torch.randn(5, 5, device=device, requires_grad=True)
for i in range(10):
if i == 2:
prepared.apply(torch.ao.quantization.enable_observer)
prepared_ref.apply(torch.ao.quantization.enable_observer)
if i == 4:
prepared.apply(torch.ao.quantization.enable_fake_quant)
prepared_ref.apply(torch.ao.quantization.enable_fake_quant)
inp = torch.randn(5, 5, device=device, requires_grad=True)
out_ref = prepared_ref(inp)
out = prepared(inp)
torch.testing.assert_allclose(out, out_ref)
# try backward pass
labels = torch.randn(5, 5, device=device)
loss = (out - labels).sum()
grad = torch.autograd.grad(loss, [inp])
loss_ref = (out_ref - labels).sum()
grad_ref = torch.autograd.grad(loss_ref, [inp])
torch.testing.assert_allclose(grad[0], grad_ref[0])
if 'fbgemm' in torch.backends.quantized.supported_engines:
# During the lowering step in convert, fold_weight calls quantized::linear_prepack
# which doesn't support QuantizedCuda backend
prepared.cpu()
prepared_ref.cpu()
converted = convert_fx(prepared)
converted_ref = convert_fx(prepared_ref)
inp = torch.rand(5, 5)
out = converted(inp)
out_ref = converted_ref(inp)
torch.testing.assert_allclose(out, out_ref)
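# Illustrative sketch only (not part of the original test suite): the QAT phasing
# exercised above, where observer statistics collection and fake quantization are
# toggled at chosen training steps via the torch.ao.quantization helpers. The
# helper name and the toy model are hypothetical; the function is never called here.
def _sketch_qat_observer_phasing():
    import torch
    from torch.ao.quantization import (
        disable_fake_quant,
        disable_observer,
        enable_fake_quant,
        enable_observer,
        get_default_qat_qconfig_mapping,
    )
    from torch.ao.quantization.quantize_fx import prepare_qat_fx
    model = torch.nn.Sequential(torch.nn.Linear(5, 5)).train()
    example_inputs = (torch.randn(2, 5),)
    prepared = prepare_qat_fx(
        model, get_default_qat_qconfig_mapping("fbgemm"), example_inputs=example_inputs)
    # start with statistics collection and fake quantization both turned off
    prepared.apply(disable_observer)
    prepared.apply(disable_fake_quant)
    for step in range(6):
        if step == 2:
            prepared.apply(enable_observer)  # begin collecting ranges
        if step == 4:
            prepared.apply(enable_fake_quant)  # begin simulating quantization
        prepared(*example_inputs)
    return prepared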
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_quantization.py TESTNAME\n\n"
"instead.")
|
pytorch-master
|
test/quantization/fx/test_quantize_fx.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: quantization"]
import torch
import torch.nn as nn
import torch.ao.quantization.quantize_fx as quantize_fx
import torch.nn.functional as F
from torch.ao.quantization import QConfig, QConfigMapping
from torch.ao.quantization.fx._model_report.detector import (
DynamicStaticDetector,
InputWeightEqualizationDetector,
PerChannelDetector,
OutlierDetector,
)
from torch.ao.quantization.fx._model_report.model_report_observer import ModelReportObserver
from torch.ao.quantization.fx._model_report.model_report_visualizer import ModelReportVisualizer
from torch.ao.quantization.fx._model_report.model_report import ModelReport
from torch.ao.quantization.observer import HistogramObserver, default_per_channel_weight_observer
from torch.nn.intrinsic.modules.fused import ConvReLU2d, LinearReLU
from torch.testing._internal.common_quantization import (
ConvModel,
QuantizationTestCase,
SingleLayerLinearModel,
TwoLayerLinearModel,
skipIfNoFBGEMM,
skipIfNoQNNPACK,
override_quantized_engine,
)
"""
Partition of input domain:
Model contains: conv or linear, both conv and linear
Model contains: ConvTransposeNd (not supported for per_channel)
Model is: post training quantization model, quantization aware training model
Model is: composed with nn.Sequential, composed in class structure
QConfig utilizes per_channel weight observer, backend uses non per_channel weight observer
QConfig_dict uses only one default qconfig, Qconfig dict uses > 1 unique qconfigs
Partition on output domain:
There are possible changes / suggestions, there are no changes / suggestions
"""
# Default output for string if no optimizations are possible
DEFAULT_NO_OPTIMS_ANSWER_STRING = (
"Further Optimizations for backend {}: \nNo further per_channel optimizations possible."
)
# Example Sequential Model with multiple Conv and Linear with nesting involved
NESTED_CONV_LINEAR_EXAMPLE = torch.nn.Sequential(
torch.nn.Conv2d(3, 3, 2, 1),
torch.nn.Sequential(torch.nn.Linear(9, 27), torch.nn.ReLU()),
torch.nn.Linear(27, 27),
torch.nn.ReLU(),
torch.nn.Conv2d(3, 3, 2, 1),
)
# Example Sequential Model with Conv sub-class example
LAZY_CONV_LINEAR_EXAMPLE = torch.nn.Sequential(
torch.nn.LazyConv2d(3, 3, 2, 1),
torch.nn.Sequential(torch.nn.Linear(5, 27), torch.nn.ReLU()),
torch.nn.ReLU(),
torch.nn.Linear(27, 27),
torch.nn.ReLU(),
torch.nn.LazyConv2d(3, 3, 2, 1),
)
# Example Sequential Model with Fusion directly built into model
FUSION_CONV_LINEAR_EXAMPLE = torch.nn.Sequential(
ConvReLU2d(torch.nn.Conv2d(3, 3, 2, 1), torch.nn.ReLU()),
torch.nn.Sequential(LinearReLU(torch.nn.Linear(9, 27), torch.nn.ReLU())),
LinearReLU(torch.nn.Linear(27, 27), torch.nn.ReLU()),
torch.nn.Conv2d(3, 3, 2, 1),
)
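# Illustrative sketch only (not part of the original file): fused blocks like the
# ConvReLU2d/LinearReLU instances above are more commonly produced by
# torch.ao.quantization.fuse_modules on an eval-mode model than constructed by
# hand. The helper name below is hypothetical; the function is never called here.
def _sketch_fuse_conv_relu():
    import torch
    from torch.ao.quantization import fuse_modules
    model = torch.nn.Sequential(torch.nn.Conv2d(3, 3, 2, 1), torch.nn.ReLU()).eval()
    # after fusion, submodule "0" is a ConvReLU2d and "1" becomes an nn.Identity
    return fuse_modules(model, [["0", "1"]])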
# Test class
# example model to use for tests
class ThreeOps(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 3)
self.bn = nn.BatchNorm2d(3)
self.relu = nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.bn(x)
x = self.relu(x)
return x
def get_example_inputs(self):
return (torch.randn(1, 3, 3, 3),)
class TwoThreeOps(nn.Module):
def __init__(self):
super().__init__()
self.block1 = ThreeOps()
self.block2 = ThreeOps()
def forward(self, x):
x = self.block1(x)
y = self.block2(x)
z = x + y
z = F.relu(z)
return z
def get_example_inputs(self):
return (torch.randn(1, 3, 3, 3),)
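# Illustrative sketch only (not part of the original file): how the example models
# above can be fed to a detector, mirroring the prepare/calibrate/report pattern of
# _prepare_model_and_run_input in the test class below. The helper name is
# hypothetical, it relies on the imports at the top of this file, and the backend is
# assumed to be whatever quantized engine is currently active.
def _sketch_per_channel_report(model, example_input):
    q_config_mapping = QConfigMapping()
    q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
    prepared = quantize_fx.prepare_fx(model, q_config_mapping, example_input)
    prepared(example_input)  # calibrate once
    per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
    # returns a human-readable summary string plus per-module per_channel info
    return per_channel_detector.generate_detector_report(prepared)
# e.g.: _sketch_per_channel_report(NESTED_CONV_LINEAR_EXAMPLE, torch.randn(1, 3, 10, 10))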
class TestFxModelReportDetector(QuantizationTestCase):
"""Prepares and callibrate the model"""
def _prepare_model_and_run_input(self, model, q_config_mapping, input):
model_prep = torch.ao.quantization.quantize_fx.prepare_fx(model, q_config_mapping, input) # prep model
        model_prep(input).sum() # calibrate the model
return model_prep
"""Case includes:
one conv or linear
        post training quantization
composed as module
qconfig uses per_channel weight observer
Only 1 qconfig in qconfig dict
Output has no changes / suggestions
"""
@skipIfNoFBGEMM
def test_simple_conv(self):
with override_quantized_engine('fbgemm'):
torch.backends.quantized.engine = "fbgemm"
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
input = torch.randn(1, 3, 10, 10)
prepared_model = self._prepare_model_and_run_input(ConvModel(), q_config_mapping, input)
# run the detector
per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)
# no optims possible and there should be nothing in per_channel_status
self.assertEqual(
optims_str,
DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
)
            # there should only be one conv in this model
self.assertEqual(per_channel_info["conv"]["backend"], torch.backends.quantized.engine)
self.assertEqual(len(per_channel_info), 1)
self.assertEqual(list(per_channel_info)[0], "conv")
self.assertEqual(
per_channel_info["conv"]["per_channel_quantization_supported"],
True,
)
self.assertEqual(per_channel_info["conv"]["per_channel_quantization_used"], True)
"""Case includes:
Multiple conv or linear
post training quantization
composed as module
qconfig doesn't use per_channel weight observer
Only 1 qconfig in qconfig dict
Output has possible changes / suggestions
"""
@skipIfNoQNNPACK
def test_multi_linear_model_without_per_channel(self):
with override_quantized_engine('qnnpack'):
torch.backends.quantized.engine = "qnnpack"
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
prepared_model = self._prepare_model_and_run_input(
TwoLayerLinearModel(),
q_config_mapping,
TwoLayerLinearModel().get_example_inputs()[0],
)
# run the detector
per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)
# there should be optims possible
self.assertNotEqual(
optims_str,
DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
)
# pick a random key to look at
rand_key: str = list(per_channel_info.keys())[0]
self.assertEqual(per_channel_info[rand_key]["backend"], torch.backends.quantized.engine)
self.assertEqual(len(per_channel_info), 2)
# for each linear layer, should be supported but not used
for linear_key in per_channel_info.keys():
module_entry = per_channel_info[linear_key]
self.assertEqual(module_entry["per_channel_quantization_supported"], True)
self.assertEqual(module_entry["per_channel_quantization_used"], False)
"""Case includes:
Multiple conv or linear
post training quantization
composed as Module
qconfig doesn't use per_channel weight observer
More than 1 qconfig in qconfig dict
Output has possible changes / suggestions
"""
@skipIfNoQNNPACK
def test_multiple_q_config_options(self):
with override_quantized_engine('qnnpack'):
torch.backends.quantized.engine = "qnnpack"
# qconfig with support for per_channel quantization
per_channel_qconfig = QConfig(
activation=HistogramObserver.with_args(reduce_range=True),
weight=default_per_channel_weight_observer,
)
# define a model with both conv and linear layers for this test
class ConvLinearModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(3, 3, 2, 1)
self.fc1 = torch.nn.Linear(9, 27)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(27, 27)
self.conv2 = torch.nn.Conv2d(3, 3, 2, 1)
def forward(self, x):
x = self.conv1(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.conv2(x)
return x
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(
torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine)
).set_object_type(torch.nn.Conv2d, per_channel_qconfig)
prepared_model = self._prepare_model_and_run_input(
ConvLinearModel(),
q_config_mapping,
torch.randn(1, 3, 10, 10),
)
# run the detector
per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)
# the only suggestions should be for the linear layers
# there should be optims possible
self.assertNotEqual(
optims_str,
DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
)
# to ensure it got into the nested layer
self.assertEqual(len(per_channel_info), 4)
# for each layer, should be supported but not used
for key in per_channel_info.keys():
module_entry = per_channel_info[key]
self.assertEqual(module_entry["per_channel_quantization_supported"], True)
# linear layers should be False, conv2d layers True because they use a different qconfig
if "fc" in key:
self.assertEqual(module_entry["per_channel_quantization_used"], False)
elif "conv" in key:
self.assertEqual(module_entry["per_channel_quantization_used"], True)
else:
raise ValueError("Should only contain conv and linear layers as key values")
"""Case includes:
Multiple conv or linear
post training quantization
composed as sequential
qconfig doesn't use per_channel weight observer
Only 1 qconfig in qconfig dict
Output has possible changes / suggestions
"""
@skipIfNoQNNPACK
def test_sequential_model_format(self):
with override_quantized_engine('qnnpack'):
torch.backends.quantized.engine = "qnnpack"
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
prepared_model = self._prepare_model_and_run_input(
NESTED_CONV_LINEAR_EXAMPLE,
q_config_mapping,
torch.randn(1, 3, 10, 10),
)
# run the detector
per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)
# there should be optims possible
self.assertNotEqual(
optims_str,
DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
)
# to ensure it got into the nested layer
self.assertEqual(len(per_channel_info), 4)
# for each layer, should be supported but not used
for key in per_channel_info.keys():
module_entry = per_channel_info[key]
self.assertEqual(module_entry["per_channel_quantization_supported"], True)
self.assertEqual(module_entry["per_channel_quantization_used"], False)
"""Case includes:
Multiple conv or linear
post training quantization
composed as sequential
qconfig doesn't use per_channel weight observer
Only 1 qconfig in qconfig dict
Output has possible changes / suggestions
"""
@skipIfNoQNNPACK
def test_conv_sub_class_considered(self):
with override_quantized_engine('qnnpack'):
torch.backends.quantized.engine = "qnnpack"
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
prepared_model = self._prepare_model_and_run_input(
LAZY_CONV_LINEAR_EXAMPLE,
q_config_mapping,
torch.randn(1, 3, 10, 10),
)
# run the detector
per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)
# there should be optims possible
self.assertNotEqual(
optims_str,
DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
)
# to ensure it got into the nested layer and it considered the lazyConv2d
self.assertEqual(len(per_channel_info), 4)
# for each layer, should be supported but not used
for key in per_channel_info.keys():
module_entry = per_channel_info[key]
self.assertEqual(module_entry["per_channel_quantization_supported"], True)
self.assertEqual(module_entry["per_channel_quantization_used"], False)
"""Case includes:
Multiple conv or linear
post training quantization
composed as sequential
qconfig uses per_channel weight observer
Only 1 qconfig in qconfig dict
Output has no possible changes / suggestions
"""
@skipIfNoFBGEMM
def test_fusion_layer_in_sequential(self):
with override_quantized_engine('fbgemm'):
torch.backends.quantized.engine = "fbgemm"
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
prepared_model = self._prepare_model_and_run_input(
FUSION_CONV_LINEAR_EXAMPLE,
q_config_mapping,
torch.randn(1, 3, 10, 10),
)
# run the detector
per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)
# no optims possible and there should be nothing in per_channel_status
self.assertEqual(
optims_str,
DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
)
# to ensure it got into the nested layer and it considered all the nested fusion components
self.assertEqual(len(per_channel_info), 4)
# for each layer, should be supported but not used
for key in per_channel_info.keys():
module_entry = per_channel_info[key]
self.assertEqual(module_entry["per_channel_quantization_supported"], True)
self.assertEqual(module_entry["per_channel_quantization_used"], True)
"""Case includes:
Multiple conv or linear
quantization aware training
composed as model
qconfig does not use per_channel weight observer
Only 1 qconfig in qconfig dict
Output has possible changes / suggestions
"""
@skipIfNoQNNPACK
def test_qat_aware_model_example(self):
# first we want a QAT model
class QATConvLinearReluModel(torch.nn.Module):
def __init__(self):
super(QATConvLinearReluModel, self).__init__()
# QuantStub converts tensors from floating point to quantized
self.quant = torch.quantization.QuantStub()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.bn = torch.nn.BatchNorm2d(1)
self.relu = torch.nn.ReLU()
# DeQuantStub converts tensors from quantized to floating point
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
x = self.dequant(x)
return x
with override_quantized_engine('qnnpack'):
# create a model instance
model_fp32 = QATConvLinearReluModel()
model_fp32.qconfig = torch.quantization.get_default_qat_qconfig("qnnpack")
# model must be in eval mode for fusion
model_fp32.eval()
model_fp32_fused = torch.quantization.fuse_modules(model_fp32, [["conv", "bn", "relu"]])
# model must be set to train mode for QAT logic to work
model_fp32_fused.train()
# prepare the model for QAT, different than for post training quantization
model_fp32_prepared = torch.quantization.prepare_qat(model_fp32_fused)
# run the detector
per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
optims_str, per_channel_info = per_channel_detector.generate_detector_report(model_fp32_prepared)
# there should be optims possible
self.assertNotEqual(
optims_str,
DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
)
# make sure it was able to find the single conv in the fused model
self.assertEqual(len(per_channel_info), 1)
# for the one conv, it should still give advice to use different qconfig
for key in per_channel_info.keys():
module_entry = per_channel_info[key]
self.assertEqual(module_entry["per_channel_quantization_supported"], True)
self.assertEqual(module_entry["per_channel_quantization_used"], False)
"""
Partition on Domain / Things to Test
- All zero tensor
- Multiple tensor dimensions
- All of the outward facing functions
- Epoch min max are correctly updating
- Batch range is correctly averaging as expected
- Reset for each epoch is correctly resetting the values
Partition on Output
- the calculation of the ratio is occurring correctly
"""
class TestFxModelReportObserver(QuantizationTestCase):
class NestedModifiedSingleLayerLinear(torch.nn.Module):
def __init__(self):
super().__init__()
self.obs1 = ModelReportObserver()
self.mod1 = SingleLayerLinearModel()
self.obs2 = ModelReportObserver()
self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.obs1(x)
x = self.mod1(x)
x = self.obs2(x)
x = self.fc1(x)
x = self.relu(x)
return x
def run_model_and_common_checks(self, model, ex_input, num_epochs, batch_size):
# split up data into batches
split_up_data = torch.split(ex_input, batch_size)
for epoch in range(num_epochs):
# reset all model report obs
model.apply(
lambda module: module.reset_batch_and_epoch_values()
if isinstance(module, ModelReportObserver)
else None
)
# quick check that a reset occurred
self.assertEqual(
getattr(model, "obs1").average_batch_activation_range,
torch.tensor(float(0)),
)
self.assertEqual(getattr(model, "obs1").epoch_activation_min, torch.tensor(float("inf")))
self.assertEqual(getattr(model, "obs1").epoch_activation_max, torch.tensor(float("-inf")))
# loop through the batches and run through
for index, batch in enumerate(split_up_data):
num_tracked_so_far = getattr(model, "obs1").num_batches_tracked
self.assertEqual(num_tracked_so_far, index)
# get general info about the batch and the model to use later
batch_min, batch_max = torch.aminmax(batch)
current_average_range = getattr(model, "obs1").average_batch_activation_range
current_epoch_min = getattr(model, "obs1").epoch_activation_min
current_epoch_max = getattr(model, "obs1").epoch_activation_max
# run input through
model(ex_input)
# check that average batch activation range updated correctly
correct_updated_value = (current_average_range * num_tracked_so_far + (batch_max - batch_min)) / (
num_tracked_so_far + 1
)
self.assertEqual(
getattr(model, "obs1").average_batch_activation_range,
correct_updated_value,
)
if current_epoch_max - current_epoch_min > 0:
self.assertEqual(
getattr(model, "obs1").get_batch_to_epoch_ratio(),
correct_updated_value / (current_epoch_max - current_epoch_min),
)
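# Editorial note (illustrative only): the check above mirrors the running average that
# ModelReportObserver keeps over batch activation ranges,
#     new_avg = (old_avg * num_batches_tracked + (batch_max - batch_min)) / (num_batches_tracked + 1)
# e.g. with batch ranges 2.0 and then 4.0 the average goes 2.0 -> (2.0 * 1 + 4.0) / 2 = 3.0,
# and get_batch_to_epoch_ratio() divides that average by (epoch_max - epoch_min).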
"""Case includes:
all zero tensor
dim size = 2
run for 1 epoch
run for 10 batch
tests input data observer
"""
def test_zero_tensor_errors(self):
# initialize the model
model = self.NestedModifiedSingleLayerLinear()
# generate the desired input
ex_input = torch.zeros((10, 1, 5))
# run it through the model and do general tests
self.run_model_and_common_checks(model, ex_input, 1, 1)
# make sure final values are all 0
self.assertEqual(getattr(model, "obs1").epoch_activation_min, 0)
self.assertEqual(getattr(model, "obs1").epoch_activation_max, 0)
self.assertEqual(getattr(model, "obs1").average_batch_activation_range, 0)
# we should get an error if we try to calculate the ratio
with self.assertRaises(ValueError):
ratio_val = getattr(model, "obs1").get_batch_to_epoch_ratio()
"""Case includes:
non-zero tensor
dim size = 2
run for 1 epoch
run for 1 batch
tests input data observer
"""
def test_single_batch_of_ones(self):
# initialize the model
model = self.NestedModifiedSingleLayerLinear()
# generate the desired input
ex_input = torch.ones((1, 1, 5))
# run it through the model and do general tests
self.run_model_and_common_checks(model, ex_input, 1, 1)
# epoch min and max should both be 1, and the average batch range should be 0
self.assertEqual(getattr(model, "obs1").epoch_activation_min, 1)
self.assertEqual(getattr(model, "obs1").epoch_activation_max, 1)
self.assertEqual(getattr(model, "obs1").average_batch_activation_range, 0)
# we should get an error if we try to calculate the ratio
with self.assertRaises(ValueError):
ratio_val = getattr(model, "obs1").get_batch_to_epoch_ratio()
"""Case includes:
non-zero tensor
dim size = 2
run for 10 epoch
run for 15 batch
tests non input data observer
"""
def test_observer_after_relu(self):
# model specific to this test
class NestedModifiedObserverAfterRelu(torch.nn.Module):
def __init__(self):
super().__init__()
self.obs1 = ModelReportObserver()
self.mod1 = SingleLayerLinearModel()
self.obs2 = ModelReportObserver()
self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.obs1(x)
x = self.mod1(x)
x = self.fc1(x)
x = self.relu(x)
x = self.obs2(x)
return x
# initialize the model
model = NestedModifiedObserverAfterRelu()
# generate the desired input
ex_input = torch.randn((15, 1, 5))
# run it through the model and do general tests
self.run_model_and_common_checks(model, ex_input, 10, 15)
"""Case includes:
non-zero tensor
dim size = 2
run for multiple epoch
run for multiple batch
tests input data observer
"""
def test_random_epochs_and_batches(self):
# set up a basic model
class TinyNestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.obs1 = ModelReportObserver()
self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
self.relu = torch.nn.ReLU()
self.obs2 = ModelReportObserver()
def forward(self, x):
x = self.obs1(x)
x = self.fc1(x)
x = self.relu(x)
x = self.obs2(x)
return x
class LargerIncludeNestModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.obs1 = ModelReportObserver()
self.nested = TinyNestModule()
self.fc1 = SingleLayerLinearModel()
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.obs1(x)
x = self.nested(x)
x = self.fc1(x)
x = self.relu(x)
return x
class ModifiedThreeOps(torch.nn.Module):
def __init__(self, batch_norm_dim):
super(ModifiedThreeOps, self).__init__()
self.obs1 = ModelReportObserver()
self.linear = torch.nn.Linear(7, 3, 2)
self.obs2 = ModelReportObserver()
if batch_norm_dim == 2:
self.bn = torch.nn.BatchNorm2d(2)
elif batch_norm_dim == 3:
self.bn = torch.nn.BatchNorm3d(4)
else:
raise ValueError("Dim should only be 2 or 3")
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.obs1(x)
x = self.linear(x)
x = self.obs2(x)
x = self.bn(x)
x = self.relu(x)
return x
class HighDimensionNet(torch.nn.Module):
def __init__(self):
super(HighDimensionNet, self).__init__()
self.obs1 = ModelReportObserver()
self.fc1 = torch.nn.Linear(3, 7)
self.block1 = ModifiedThreeOps(3)
self.fc2 = torch.nn.Linear(3, 7)
self.block2 = ModifiedThreeOps(3)
self.fc3 = torch.nn.Linear(3, 7)
def forward(self, x):
x = self.obs1(x)
x = self.fc1(x)
x = self.block1(x)
x = self.fc2(x)
y = self.block2(x)
y = self.fc3(y)
z = x + y
z = F.relu(z)
return z
# the purpose of this test is to give the observers a variety of data examples
# initialize the model
models = [
self.NestedModifiedSingleLayerLinear(),
LargerIncludeNestModel(),
ModifiedThreeOps(2),
HighDimensionNet(),
]
# get some number of epochs and batches
num_epochs = 10
num_batches = 15
input_shapes = [(1, 5), (1, 5), (2, 3, 7), (4, 1, 8, 3)]
# generate the desired inputs
inputs = []
for shape in input_shapes:
ex_input = torch.randn((num_batches, *shape))
inputs.append(ex_input)
# run it through the model and do general tests
for index, model in enumerate(models):
self.run_model_and_common_checks(model, inputs[index], num_epochs, num_batches)
"""
Partition on domain / things to test
There is only a single test case for now.
This will be more thoroughly tested with the implementation of the full end to end tool coming soon.
"""
class TestFxModelReportDetectDynamicStatic(QuantizationTestCase):
@skipIfNoFBGEMM
def test_nested_detection_case(self):
class SingleLinear(torch.nn.Module):
def __init__(self):
super(SingleLinear, self).__init__()
self.linear = torch.nn.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
class TwoBlockNet(torch.nn.Module):
def __init__(self):
super(TwoBlockNet, self).__init__()
self.block1 = SingleLinear()
self.block2 = SingleLinear()
def forward(self, x):
x = self.block1(x)
y = self.block2(x)
z = x + y
z = F.relu(z)
return z
with override_quantized_engine('fbgemm'):
# create model, example input, and qconfig mapping
torch.backends.quantized.engine = "fbgemm"
model = TwoBlockNet()
example_input = torch.randint(-10, 0, (1, 3, 3, 3))
example_input = example_input.to(torch.float)
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig("fbgemm"))
# prep model and select observer
model_prep = quantize_fx.prepare_fx(model, q_config_mapping, example_input)
obs_ctr = ModelReportObserver
# find layer to attach to and store
linear_fqn = "block2.linear" # fqn of target linear
target_linear = None
for node in model_prep.graph.nodes:
if node.target == linear_fqn:
target_linear = node
break
# insert into both module and graph pre and post
# set up to insert before target_linear (pre_observer)
with model_prep.graph.inserting_before(target_linear):
obs_to_insert = obs_ctr()
pre_obs_fqn = linear_fqn + ".model_report_pre_observer"
model_prep.add_submodule(pre_obs_fqn, obs_to_insert)
model_prep.graph.create_node(op="call_module", target=pre_obs_fqn, args=target_linear.args)
# set up and insert after the target_linear (post_observer)
with model_prep.graph.inserting_after(target_linear):
obs_to_insert = obs_ctr()
post_obs_fqn = linear_fqn + ".model_report_post_observer"
model_prep.add_submodule(post_obs_fqn, obs_to_insert)
model_prep.graph.create_node(op="call_module", target=post_obs_fqn, args=(target_linear,))
# need to recompile module after submodule added and pass input through
model_prep.recompile()
num_iterations = 10
for i in range(num_iterations):
if i % 2 == 0:
example_input = torch.randint(-10, 0, (1, 3, 3, 3)).to(torch.float)
else:
example_input = torch.randint(0, 10, (1, 3, 3, 3)).to(torch.float)
model_prep(example_input)
# run it through the dynamic vs static detector
dynamic_vs_static_detector = DynamicStaticDetector()
dynam_vs_stat_str, dynam_vs_stat_dict = dynamic_vs_static_detector.generate_detector_report(model_prep)
# one of the stats should be stationary, and the other non-stationary
# as a result, dynamic should be recommended
data_dist_info = [
dynam_vs_stat_dict[linear_fqn][DynamicStaticDetector.PRE_OBS_DATA_DIST_KEY],
dynam_vs_stat_dict[linear_fqn][DynamicStaticDetector.POST_OBS_DATA_DIST_KEY],
]
self.assertTrue("stationary" in data_dist_info)
self.assertTrue("non-stationary" in data_dist_info)
self.assertTrue(dynam_vs_stat_dict[linear_fqn]["dynamic_recommended"])
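# Editorial sketch (not part of the original tests): the manual observer-insertion pattern
# from test_nested_detection_case above, factored into one place for readability. It assumes
# an FX GraphModule produced by prepare_fx plus the ModelReportObserver imported by this
# file; ModelReport.prepare_detailed_calibration performs the same kind of graph surgery
# automatically for its registered detectors.
def _editorial_insert_pre_post_observers_sketch(model_prep, target_fqn):
    # find the node whose target matches the fully qualified name
    target_node = None
    for node in model_prep.graph.nodes:
        if node.target == target_fqn:
            target_node = node
            break
    # pre-observer: sees the same args as the target node, inserted just before it
    with model_prep.graph.inserting_before(target_node):
        pre_fqn = target_fqn + ".model_report_pre_observer"
        model_prep.add_submodule(pre_fqn, ModelReportObserver())
        model_prep.graph.create_node(op="call_module", target=pre_fqn, args=target_node.args)
    # post-observer: sees the target node's output, inserted just after it
    with model_prep.graph.inserting_after(target_node):
        post_fqn = target_fqn + ".model_report_post_observer"
        model_prep.add_submodule(post_fqn, ModelReportObserver())
        model_prep.graph.create_node(op="call_module", target=post_fqn, args=(target_node,))
    # recompile so the new submodules actually run during forward
    model_prep.recompile()
    return model_prep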
class TestFxModelReportClass(QuantizationTestCase):
@skipIfNoFBGEMM
def test_constructor(self):
"""
Tests the constructor of the ModelReport class.
Specifically looks at:
- The desired reports
- Ensures that the observers of interest are properly initialized
"""
with override_quantized_engine('fbgemm'):
# set the backend for this test
torch.backends.quantized.engine = "fbgemm"
backend = torch.backends.quantized.engine
# create a model
model = ThreeOps()
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
model_prep = quantize_fx.prepare_fx(model, q_config_mapping, model.get_example_inputs()[0])
# make an example set of detectors
test_detector_set = set([DynamicStaticDetector(), PerChannelDetector(backend)])
# initialize the ModelReport with the detector set
model_report = ModelReport(model_prep, test_detector_set)
# make sure internal valid reports matches
detector_name_set = set([detector.get_detector_name() for detector in test_detector_set])
self.assertEqual(model_report.get_desired_reports_names(), detector_name_set)
# now attempt with no valid reports, should raise error
with self.assertRaises(ValueError):
model_report = ModelReport(model, set([]))
# number of expected obs of interest entries
num_expected_entries = len(test_detector_set)
self.assertEqual(len(model_report.get_observers_of_interest()), num_expected_entries)
for value in model_report.get_observers_of_interest().values():
self.assertEqual(len(value), 0)
@skipIfNoFBGEMM
def test_prepare_model_callibration(self):
"""
Tests model_report.prepare_detailed_calibration, which prepares the model for calibration
Specifically looks at:
- Whether observers are properly inserted into regular nn.Module
- Whether the target and the arguments of the observers are proper
- Whether the internal representation of observers of interest is updated
"""
with override_quantized_engine('fbgemm'):
# create model report object
# create model
model = TwoThreeOps()
# make an example set of detectors
torch.backends.quantized.engine = "fbgemm"
backend = torch.backends.quantized.engine
test_detector_set = set([DynamicStaticDetector(), PerChannelDetector(backend)])
# prepare the model, then initialize the ModelReport with the detector set
example_input = model.get_example_inputs()[0]
current_backend = torch.backends.quantized.engine
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
model_prep = quantize_fx.prepare_fx(model, q_config_mapping, example_input)
model_report = ModelReport(model_prep, test_detector_set)
# prepare the model for calibration
prepared_for_callibrate_model = model_report.prepare_detailed_calibration()
# see whether observers are properly inserted into the regular nn.Module
# there should be 4 observers present in this case
modules_observer_cnt = 0
for fqn, module in prepared_for_callibrate_model.named_modules():
if isinstance(module, ModelReportObserver):
modules_observer_cnt += 1
self.assertEqual(modules_observer_cnt, 4)
model_report_str_check = "model_report"
# also make sure arguments for observers in the graph are proper
for node in prepared_for_callibrate_model.graph.nodes:
# not all node targets are strings, so check
if isinstance(node.target, str) and model_report_str_check in node.target:
# if pre-observer has same args as the linear (next node)
if "pre_observer" in node.target:
self.assertEqual(node.args, node.next.args)
# if post-observer, args are the target linear (previous node)
if "post_observer" in node.target:
self.assertEqual(node.args, (node.prev,))
# ensure model_report observers of interest updated
# there should be two entries
self.assertEqual(len(model_report.get_observers_of_interest()), 2)
for detector in test_detector_set:
self.assertTrue(detector.get_detector_name() in model_report.get_observers_of_interest().keys())
# get number of entries for this detector
detector_obs_of_interest_fqns = model_report.get_observers_of_interest()[detector.get_detector_name()]
# assert that the per channel detector has 0 and the dynamic static has 4
if isinstance(detector, PerChannelDetector):
self.assertEqual(len(detector_obs_of_interest_fqns), 0)
elif isinstance(detector, DynamicStaticDetector):
self.assertEqual(len(detector_obs_of_interest_fqns), 4)
# ensure that we can prepare for calibration only once
with self.assertRaises(ValueError):
prepared_for_callibrate_model = model_report.prepare_detailed_calibration()
def get_module_and_graph_cnts(self, callibrated_fx_module):
r"""
Calculates number of ModelReportObserver modules in the model as well as the graph structure.
Returns a tuple of two elements:
int: The number of ModelReportObservers found in the model
int: The number of model_report nodes found in the graph
"""
# get the number of observers stored as modules
modules_observer_cnt = 0
for fqn, module in callibrated_fx_module.named_modules():
if isinstance(module, ModelReportObserver):
modules_observer_cnt += 1
# get number of observers in the graph
model_report_str_check = "model_report"
graph_observer_cnt = 0
# walk the graph and count the observer nodes
for node in callibrated_fx_module.graph.nodes:
# not all node targets are strings, so check
if isinstance(node.target, str) and model_report_str_check in node.target:
# increment if we found a graph observer
graph_observer_cnt += 1
return (modules_observer_cnt, graph_observer_cnt)
@skipIfNoFBGEMM
def test_generate_report(self):
"""
Tests model_report.generate_model_report to ensure report generation
Specifically looks at:
- Whether correct number of reports are being generated
- Whether observers are being properly removed if specified
- Whether correct blocking from generating report twice if obs removed
"""
with override_quantized_engine('fbgemm'):
# set the backend for this test
torch.backends.quantized.engine = "fbgemm"
# check whether the correct number of reports are being generated
filled_detector_set = set([DynamicStaticDetector(), PerChannelDetector(torch.backends.quantized.engine)])
single_detector_set = set([DynamicStaticDetector()])
# create our models
model_full = TwoThreeOps()
model_single = TwoThreeOps()
# prepare and calibrate two different instances of the same model
# prepare the model
example_input = model_full.get_example_inputs()[0]
current_backend = torch.backends.quantized.engine
q_config_mapping = QConfigMapping()
q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
model_prep_full = quantize_fx.prepare_fx(model_full, q_config_mapping, example_input)
model_prep_single = quantize_fx.prepare_fx(model_single, q_config_mapping, example_input)
# initialize one with filled detector
model_report_full = ModelReport(model_prep_full, filled_detector_set)
# initialize another with a single detector set
model_report_single = ModelReport(model_prep_single, single_detector_set)
# prepare the models for calibration
prepared_for_callibrate_model_full = model_report_full.prepare_detailed_calibration()
prepared_for_callibrate_model_single = model_report_single.prepare_detailed_calibration()
# now calibrate the two models
num_iterations = 10
for i in range(num_iterations):
example_input = torch.tensor(torch.randint(100, (1, 3, 3, 3)), dtype=torch.float)
prepared_for_callibrate_model_full(example_input)
prepared_for_callibrate_model_single(example_input)
# now generate the reports
model_full_report = model_report_full.generate_model_report(True)
model_single_report = model_report_single.generate_model_report(False)
# check that sizes are appropriate
self.assertEqual(len(model_full_report), len(filled_detector_set))
self.assertEqual(len(model_single_report), len(single_detector_set))
# make sure observers are properly removed for the full report since we passed the removal flag
modules_observer_cnt, graph_observer_cnt = self.get_module_and_graph_cnts(prepared_for_callibrate_model_full)
self.assertEqual(modules_observer_cnt, 0) # assert no more observer modules
self.assertEqual(graph_observer_cnt, 0) # assert no more observer nodes in graph
# make sure observers aren't being removed for single report since not specified
modules_observer_cnt, graph_observer_cnt = self.get_module_and_graph_cnts(prepared_for_callibrate_model_single)
self.assertNotEqual(modules_observer_cnt, 0)
self.assertNotEqual(graph_observer_cnt, 0)
# make sure an error is raised when rerunning report generation for the full report, but not for the single report
with self.assertRaises(Exception):
model_full_report = model_report_full.generate_model_report(
prepared_for_callibrate_model_full, False
)
# make sure we don't run into error for single report
model_single_report = model_report_single.generate_model_report(False)
@skipIfNoFBGEMM
def test_generate_visualizer(self):
"""
Tests that the ModelReport class can properly create the ModelReportVisualizer instance
Checks that:
- Correct number of modules are represented
- Modules are sorted
- Correct number of features for each module
"""
with override_quantized_engine('fbgemm'):
# set the backend for this test
torch.backends.quantized.engine = "fbgemm"
# test with multiple detectors
detector_set = set()
detector_set.add(OutlierDetector(reference_percentile=0.95))
detector_set.add(InputWeightEqualizationDetector(0.5))
model = TwoThreeOps()
# get test model and calibrate
prepared_for_callibrate_model, mod_report = _get_prepped_for_calibration_model_helper(
model, detector_set, model.get_example_inputs()[0]
)
# now we actually calibrate the model
example_input = model.get_example_inputs()[0]
example_input = example_input.to(torch.float)
prepared_for_callibrate_model(example_input)
# try to visualize without generating report, should throw error
with self.assertRaises(Exception):
mod_rep_visualization = mod_report.generate_visualizer()
# now get the report by running it through ModelReport instance
generated_report = mod_report.generate_model_report(remove_inserted_observers=False)
# now get the visualizer; this should not error
mod_rep_visualizer: ModelReportVisualizer = mod_report.generate_visualizer()
# since we tested with outlier detector, which looks at every base level module
# should be six entries in the ordered dict
mod_fqns_to_features = mod_rep_visualizer.generated_reports
self.assertEqual(len(mod_fqns_to_features), 6)
# the outlier detector has 9 features per module
# the input-weight detector has 12 features per module
# there is 1 common data point, so there should be 12 + 9 - 1 = 20 unique features for common modules
# all the linears will be common
for module_fqn in mod_fqns_to_features:
if ".linear" in module_fqn:
linear_info = mod_fqns_to_features[module_fqn]
self.assertEqual(len(linear_info), 20)
@skipIfNoFBGEMM
def test_qconfig_mapping_generation(self):
"""
Tests for generation of qconfigs by ModelReport API
- Tests that a QConfigMapping is generated
- Tests that mappings include information for relevant modules
"""
pass
@skipIfNoFBGEMM
def test_equalization_mapping_generation(self):
"""
Tests for generation of equalization configs by the ModelReport API
- Tests that an equalization config is generated when the input-weight equalization detector is used
- Tests that mappings include information for relevant modules
"""
pass
class TestFxDetectInputWeightEqualization(QuantizationTestCase):
class SimpleConv(torch.nn.Module):
def __init__(self, con_dims):
super().__init__()
self.relu = torch.nn.ReLU()
self.conv = torch.nn.Conv2d(con_dims[0], con_dims[1], kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
return x
class TwoBlockComplexNet(torch.nn.Module):
def __init__(self):
super().__init__()
self.block1 = TestFxDetectInputWeightEqualization.SimpleConv((3, 32))
self.block2 = TestFxDetectInputWeightEqualization.SimpleConv((3, 3))
self.conv = torch.nn.Conv2d(32, 3, kernel_size=(1, 1), stride=(1, 1), padding=(1, 1), bias=False)
self.linear = torch.nn.Linear(768, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.block1(x)
x = self.conv(x)
y = self.block2(x)
y = y.repeat(1, 1, 2, 2)
z = x + y
z = z.flatten(start_dim=1)
z = self.linear(z)
z = self.relu(z)
return z
def get_fusion_modules(self):
return [['conv', 'relu']]
def get_example_inputs(self):
return (torch.randn((1, 3, 28, 28)),)
class ReluOnly(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.relu(x)
return x
def get_example_inputs(self):
return (torch.arange(27).reshape((1, 3, 3, 3)),)
def _get_prepped_for_calibration_model(self, model, detector_set, fused=False):
r"""Returns a model that has been prepared for callibration and corresponding model_report"""
# pass in necessary inputs to helper
example_input = model.get_example_inputs()[0]
return _get_prepped_for_calibration_model_helper(model, detector_set, example_input, fused)
@skipIfNoFBGEMM
def test_input_weight_equalization_determine_points(self):
# use fbgemm and create our model instance
# then create model report instance with detector
with override_quantized_engine('fbgemm'):
detector_set = set([InputWeightEqualizationDetector(0.5)])
# get tst model and callibrate
non_fused = self._get_prepped_for_calibration_model(self.TwoBlockComplexNet(), detector_set)
fused = self._get_prepped_for_calibration_model(self.TwoBlockComplexNet(), detector_set, fused=True)
# reporter should still give same counts even for fused model
for prepared_for_callibrate_model, mod_report in [non_fused, fused]:
# supported modules to check
mods_to_check = set([nn.Linear, nn.Conv2d])
# get the set of all node fqns in the graph
node_fqns = set([node.target for node in prepared_for_callibrate_model.graph.nodes])
# there should be 4 node fqns that have the observer inserted
correct_number_of_obs_inserted = 4
number_of_obs_found = 0
obs_name_to_find = InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME
for node in prepared_for_callibrate_model.graph.nodes:
# if the obs name is inside the target, we found an observer
if obs_name_to_find in str(node.target):
number_of_obs_found += 1
self.assertEqual(number_of_obs_found, correct_number_of_obs_inserted)
# assert that each of the desired modules have the observers inserted
for fqn, module in prepared_for_callibrate_model.named_modules():
# check if module is a supported module
is_in_include_list = sum(list(map(lambda x: isinstance(module, x), mods_to_check))) > 0
if is_in_include_list:
# make sure it has the observer attribute
self.assertTrue(hasattr(module, InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME))
else:
# if it's not a supported type, it shouldn't have observer attached
self.assertTrue(not hasattr(module, InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME))
@skipIfNoFBGEMM
def test_input_weight_equalization_report_gen(self):
# use fbgemm and create our model instance
# then create model report instance with detector
with override_quantized_engine('fbgemm'):
test_input_weight_detector = InputWeightEqualizationDetector(0.4)
detector_set = set([test_input_weight_detector])
model = self.TwoBlockComplexNet()
# prepare the model for calibration
prepared_for_callibrate_model, model_report = self._get_prepped_for_calibration_model(
model, detector_set
)
# now we actually calibrate the model
example_input = model.get_example_inputs()[0]
example_input = example_input.to(torch.float)
prepared_for_callibrate_model(example_input)
# now get the report by running it through ModelReport instance
generated_report = model_report.generate_model_report(True)
# check that sizes are appropriate only 1 detector
self.assertEqual(len(generated_report), 1)
# get the specific report for input weight equalization
input_weight_str, input_weight_dict = generated_report[test_input_weight_detector.get_detector_name()]
# we should have 4 layers looked at since there are 4 conv / linear layers
self.assertEqual(len(input_weight_dict), 4)
# we can validate that the max and min values of the detector were recorded properly for the first one
# this is because no data has been processed yet, so it should be values from original input
example_input = example_input.reshape((3, 28, 28)) # reshape input
for module_fqn in input_weight_dict:
# look for the first linear
if "block1.linear" in module_fqn:
block_1_lin_recs = input_weight_dict[module_fqn]
# get input range info and the channel axis
ch_axis = block_1_lin_recs[InputWeightEqualizationDetector.CHANNEL_KEY]
# ensure that the min and max values extracted match properly
example_min, example_max = torch.aminmax(example_input, dim=ch_axis)
dimension_min = torch.amin(example_min, dim=ch_axis)
dimension_max = torch.amax(example_max, dim=ch_axis)
# make sure per channel min and max are as expected
min_per_key = InputWeightEqualizationDetector.ACTIVATION_PREFIX
min_per_key += InputWeightEqualizationDetector.PER_CHANNEL_MIN_KEY
max_per_key = InputWeightEqualizationDetector.ACTIVATION_PREFIX
max_per_key += InputWeightEqualizationDetector.PER_CHANNEL_MAX_KEY
per_channel_min = block_1_lin_recs[min_per_key]
per_channel_max = block_1_lin_recs[max_per_key]
self.assertEqual(per_channel_min, dimension_min)
self.assertEqual(per_channel_max, dimension_max)
# make sure per channel min and max are as expected
min_key = InputWeightEqualizationDetector.ACTIVATION_PREFIX
min_key += InputWeightEqualizationDetector.GLOBAL_MIN_KEY
max_key = InputWeightEqualizationDetector.ACTIVATION_PREFIX
max_key += InputWeightEqualizationDetector.GLOBAL_MAX_KEY
# make sure the global min and max were correctly recorded and presented
global_min = block_1_lin_recs[min_key]
global_max = block_1_lin_recs[max_key]
self.assertEqual(global_min, min(dimension_min))
self.assertEqual(global_max, max(dimension_max))
input_ratio = torch.sqrt((per_channel_max - per_channel_min) / (global_max - global_min))
# ensure the comparison stat passed back is computed from the sqrt range ratios
# need to get the weight ratios first
# make sure per channel min and max are as expected
min_per_key = InputWeightEqualizationDetector.WEIGHT_PREFIX
min_per_key += InputWeightEqualizationDetector.PER_CHANNEL_MIN_KEY
max_per_key = InputWeightEqualizationDetector.WEIGHT_PREFIX
max_per_key += InputWeightEqualizationDetector.PER_CHANNEL_MAX_KEY
# get weight per channel and global info
per_channel_min = block_1_lin_recs[min_per_key]
per_channel_max = block_1_lin_recs[max_per_key]
# make sure per channel min and max are as expected
min_key = InputWeightEqualizationDetector.WEIGHT_PREFIX
min_key += InputWeightEqualizationDetector.GLOBAL_MIN_KEY
max_key = InputWeightEqualizationDetector.WEIGHT_PREFIX
max_key += InputWeightEqualizationDetector.GLOBAL_MAX_KEY
global_min = block_1_lin_recs[min_key]
global_max = block_1_lin_recs[max_key]
weight_ratio = torch.sqrt((per_channel_max - per_channel_min) / (global_max - global_min))
# also get comp stat for this specific layer
comp_stat = block_1_lin_recs[InputWeightEqualizationDetector.COMP_METRIC_KEY]
weight_to_input_ratio = weight_ratio / input_ratio
self.assertEqual(comp_stat, weight_to_input_ratio)
# only looking at the first matching module, so we can break
break
@skipIfNoFBGEMM
def test_input_weight_equalization_report_gen_empty(self):
# tests report gen on a model that doesn't have any supported layers
# use fbgemm and create our model instance
# then create model report instance with detector
with override_quantized_engine('fbgemm'):
test_input_weight_detector = InputWeightEqualizationDetector(0.4)
detector_set = set([test_input_weight_detector])
model = self.ReluOnly()
# prepare the model for calibration
prepared_for_callibrate_model, model_report = self._get_prepped_for_calibration_model(model, detector_set)
# now we actually calibrate the model
example_input = model.get_example_inputs()[0]
example_input = example_input.to(torch.float)
prepared_for_callibrate_model(example_input)
# now get the report by running it through ModelReport instance
generated_report = model_report.generate_model_report(True)
# check that sizes are appropriate only 1 detector
self.assertEqual(len(generated_report), 1)
# get the specific report for input weight equalization
input_weight_str, input_weight_dict = generated_report[test_input_weight_detector.get_detector_name()]
# we should have 0 layers since there is only a Relu
self.assertEqual(len(input_weight_dict), 0)
# make sure that the string only has two lines, as it should if there are no suggestions
self.assertEqual(input_weight_str.count("\n"), 2)
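# Editorial sketch (not part of the original tests): the statistic reconstructed in
# test_input_weight_equalization_report_gen above. Each call takes the per-channel and
# global min/max values the detector records; the detector's comparison metric is then
# sqrt_range_ratio(weight) / sqrt_range_ratio(activation). This is a reading aid, not the
# detector's actual implementation.
def _editorial_sqrt_range_ratio_sketch(per_channel_min, per_channel_max, global_min, global_max):
    # sqrt of (per-channel range / global range); values near 1 mean the channel
    # spans roughly the same range as the tensor as a whole
    return torch.sqrt((per_channel_max - per_channel_min) / (global_max - global_min))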
class TestFxDetectOutliers(QuantizationTestCase):
class LargeBatchModel(torch.nn.Module):
def __init__(self, param_size):
super().__init__()
self.param_size = param_size
self.linear = torch.nn.Linear(param_size, param_size)
self.relu_1 = torch.nn.ReLU()
self.conv = torch.nn.Conv2d(param_size, param_size, 1)
self.relu_2 = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu_1(x)
x = self.conv(x)
x = self.relu_2(x)
return x
def get_example_inputs(self):
param_size = self.param_size
return (torch.randn((1, param_size, param_size, param_size)),)
def get_outlier_inputs(self):
param_size = self.param_size
random_vals = torch.randn((1, param_size, param_size, param_size))
# set one entry in every other channel to a massive value
random_vals[:, 0:param_size:2, 0, 3] = torch.tensor([3.28e8])
return (random_vals,)
def _get_prepped_for_calibration_model(self, model, detector_set, use_outlier_data=False):
r"""Returns a model that has been prepared for callibration and corresponding model_report"""
# call the general helper function to prepare for calibration
example_input = model.get_example_inputs()[0]
# if we specifically want to test data with outliers replace input
if use_outlier_data:
example_input = model.get_outlier_inputs()[0]
return _get_prepped_for_calibration_model_helper(model, detector_set, example_input)
@skipIfNoFBGEMM
def test_outlier_detection_determine_points(self):
# use fbgemm and create our model instance
# then create model report instance with detector
# similar to the InputWeightEqualization test, but with key differences that made refactoring not viable
# not explicitly testing fusion because the fx workflow handles it automatically
with override_quantized_engine('fbgemm'):
detector_set = set([OutlierDetector(reference_percentile=0.95)])
# get test model and calibrate
prepared_for_callibrate_model, mod_report = self._get_prepped_for_calibration_model(
self.LargeBatchModel(param_size=128), detector_set
)
# supported modules to check
mods_to_check = set([nn.Linear, nn.Conv2d, nn.ReLU])
# there should be 4 node fqns that have the observer inserted
correct_number_of_obs_inserted = 4
number_of_obs_found = 0
obs_name_to_find = InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME
number_of_obs_found = sum(
[1 if obs_name_to_find in str(node.target) else 0 for node in prepared_for_callibrate_model.graph.nodes]
)
self.assertEqual(number_of_obs_found, correct_number_of_obs_inserted)
# assert that each of the desired modules have the observers inserted
for fqn, module in prepared_for_callibrate_model.named_modules():
# check if module is a supported module
is_in_include_list = isinstance(module, tuple(mods_to_check))
if is_in_include_list:
# make sure it has the observer attribute
self.assertTrue(hasattr(module, InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME))
else:
# if it's not a supported type, it shouldn't have observer attached
self.assertTrue(not hasattr(module, InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME))
@skipIfNoFBGEMM
def test_no_outlier_report_gen(self):
# use fbgemm and create our model instance
# then create model report instance with detector
with override_quantized_engine('fbgemm'):
# test with multiple detectors
outlier_detector = OutlierDetector(reference_percentile=0.95)
dynamic_static_detector = DynamicStaticDetector(tolerance=0.5)
param_size: int = 4
detector_set = set([outlier_detector, dynamic_static_detector])
model = self.LargeBatchModel(param_size=param_size)
# get test model and calibrate
prepared_for_callibrate_model, mod_report = self._get_prepped_for_calibration_model(
model, detector_set
)
# now we actually calibrate the model
example_input = model.get_example_inputs()[0]
example_input = example_input.to(torch.float)
prepared_for_callibrate_model(example_input)
# now get the report by running it through ModelReport instance
generated_report = mod_report.generate_model_report(True)
# check that sizes are appropriate only 2 detectors
self.assertEqual(len(generated_report), 2)
# get the specific report for input weight equalization
outlier_str, outlier_dict = generated_report[outlier_detector.get_detector_name()]
# we should have 4 layers looked at: the linear, conv, and two relu layers
self.assertEqual(len(outlier_dict), 4)
# assert the following are true for all the modules
for module_fqn in outlier_dict:
# get the info for the specific module
module_dict = outlier_dict[module_fqn]
# there really should not be any outliers since we used a normal distribution to perform this calculation
outlier_info = module_dict[OutlierDetector.OUTLIER_KEY]
self.assertEqual(sum(outlier_info), 0)
# ensure that the number of ratios and batches counted is the same as the number of params
self.assertEqual(len(module_dict[OutlierDetector.COMP_METRIC_KEY]), param_size)
self.assertEqual(len(module_dict[OutlierDetector.NUM_BATCHES_KEY]), param_size)
@skipIfNoFBGEMM
def test_all_outlier_report_gen(self):
# make the percentile 0 and the ratio 1, then check that everything is reported as an outlier
# use fbgemm and create our model instance
# then create model report instance with detector
with override_quantized_engine('fbgemm'):
# create detector of interest
outlier_detector = OutlierDetector(ratio_threshold=1, reference_percentile=0)
param_size: int = 16
detector_set = set([outlier_detector])
model = self.LargeBatchModel(param_size=param_size)
# get test model and calibrate
prepared_for_callibrate_model, mod_report = self._get_prepped_for_calibration_model(
model, detector_set
)
# now we actually calibrate the model
example_input = model.get_example_inputs()[0]
example_input = example_input.to(torch.float)
prepared_for_callibrate_model(example_input)
# now get the report by running it through ModelReport instance
generated_report = mod_report.generate_model_report(True)
# check that sizes are appropriate only 1 detector
self.assertEqual(len(generated_report), 1)
# get the specific report for input weight equalization
outlier_str, outlier_dict = generated_report[outlier_detector.get_detector_name()]
# we should have 4 layers looked at: the linear, conv, and two relu layers
self.assertEqual(len(outlier_dict), 4)
# assert the following are true for all the modules
for module_fqn in outlier_dict:
# get the info for the specific module
module_dict = outlier_dict[module_fqn]
# everything should be an outlier because we set the reference percentile so the max equals the min for every channel
# however we only check that most are outliers, in case several channels have all-zero values
outlier_info = module_dict[OutlierDetector.OUTLIER_KEY]
assert sum(outlier_info) >= len(outlier_info) / 2
# ensure that the number of ratios and batches counted is the same as the number of params
self.assertEqual(len(module_dict[OutlierDetector.COMP_METRIC_KEY]), param_size)
self.assertEqual(len(module_dict[OutlierDetector.NUM_BATCHES_KEY]), param_size)
@skipIfNoFBGEMM
def test_multiple_run_consistent_spike_outlier_report_gen(self):
# consistently set one row very high across the batches being tested
# calibrate for many runs (30) so the sufficient-batches threshold is crossed, then generate the report
with override_quantized_engine('fbgemm'):
# detector of interest
outlier_detector = OutlierDetector(reference_percentile=0.95)
param_size: int = 8
detector_set = set([outlier_detector])
model = self.LargeBatchModel(param_size=param_size)
# get test model and calibrate
prepared_for_callibrate_model, mod_report = self._get_prepped_for_calibration_model(
model, detector_set, use_outlier_data=True
)
# now we actually calibrate the model
example_input = model.get_outlier_inputs()[0]
example_input = example_input.to(torch.float)
# now calibrate at least 30 times to get above the minimum batch threshold
for i in range(30):
example_input = model.get_outlier_inputs()[0]
example_input = example_input.to(torch.float)
# make a few of the batches have a constant (all-zero) channel
if i % 14 == 0:
# make one channel constant
example_input[0][1] = torch.zeros_like(example_input[0][1])
prepared_for_callibrate_model(example_input)
# now get the report by running it through ModelReport instance
generated_report = mod_report.generate_model_report(True)
# check that sizes are appropriate only 1 detector
self.assertEqual(len(generated_report), 1)
# get the specific report for input weight equalization
outlier_str, outlier_dict = generated_report[outlier_detector.get_detector_name()]
# we should have 4 layers looked at: the linear, conv, and two relu layers
self.assertEqual(len(outlier_dict), 4)
# assert the following are true for all the modules
for module_fqn in outlier_dict:
# get the info for the specific module
module_dict = outlier_dict[module_fqn]
# because we ran 30 times, we should have at least a couple be significant
# could be less because some channels could possibly be all 0
sufficient_batches_info = module_dict[OutlierDetector.IS_SUFFICIENT_BATCHES_KEY]
assert sum(sufficient_batches_info) >= len(sufficient_batches_info) / 2
# half of them should be outliers, because we set a really high value in every other channel
outlier_info = module_dict[OutlierDetector.OUTLIER_KEY]
self.assertEqual(sum(outlier_info), len(outlier_info) / 2)
# ensure that the number of ratios and batches counted is the same as the number of params
self.assertEqual(len(module_dict[OutlierDetector.COMP_METRIC_KEY]), param_size)
self.assertEqual(len(module_dict[OutlierDetector.NUM_BATCHES_KEY]), param_size)
# for the first one ensure the per channel max values are what we set
if module_fqn == "linear.0":
# check the constant channel counts; at least 2 should be recorded
# for the first module
counts_info = module_dict[OutlierDetector.CONSTANT_COUNTS_KEY]
assert sum(counts_info) >= 2
# half of the recorded max values should be what we set
matched_max = sum([val == 3.28e8 for val in module_dict[OutlierDetector.MAX_VALS_KEY]])
self.assertEqual(matched_max, param_size / 2)
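# Editorial sketch (not part of the original tests, and not OutlierDetector's actual
# implementation): a rough analogue of the ratio-vs-reference-percentile idea that the
# detector's parameters expose. For each channel it compares the observed max against the
# value at `reference_percentile`; ratios above `ratio_threshold` are the kind of spike the
# tests above plant with their 3.28e8 values. The default thresholds here are illustrative only.
def _editorial_percentile_ratio_outlier_sketch(per_channel_values, ratio_threshold=3.5, reference_percentile=0.95):
    # per_channel_values: tensor of shape (num_channels, num_observations), assumed non-negative
    reference = torch.quantile(per_channel_values, reference_percentile, dim=1)
    channel_max = per_channel_values.max(dim=1).values
    # channels whose max dwarfs their reference-percentile value look like outliers
    return (channel_max / reference) > ratio_threshold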
class TestFxModelReportVisualizer(QuantizationTestCase):
def _callibrate_and_generate_visualizer(self, model, prepared_for_callibrate_model, mod_report):
r"""
Calibrates the passed-in model, generates the report, and returns the visualizer
"""
# now we actually calibrate the model
example_input = model.get_example_inputs()[0]
example_input = example_input.to(torch.float)
prepared_for_callibrate_model(example_input)
# now get the report by running it through ModelReport instance
generated_report = mod_report.generate_model_report(remove_inserted_observers=False)
# now get the visualizer; this should not error
mod_rep_visualizer: ModelReportVisualizer = mod_report.generate_visualizer()
return mod_rep_visualizer
@skipIfNoFBGEMM
def test_get_modules_and_features(self):
"""
Tests the get_all_unique_module_fqns and get_all_unique_feature_names methods of
ModelReportVisualizer
Checks whether returned sets are of proper size and filtered properly
"""
with override_quantized_engine('fbgemm'):
# set the backend for this test
torch.backends.quantized.engine = "fbgemm"
# test with multiple detectors
detector_set = set()
detector_set.add(OutlierDetector(reference_percentile=0.95))
detector_set.add(InputWeightEqualizationDetector(0.5))
model = TwoThreeOps()
# get test model and calibrate
prepared_for_callibrate_model, mod_report = _get_prepped_for_calibration_model_helper(
model, detector_set, model.get_example_inputs()[0]
)
mod_rep_visualizer: ModelReportVisualizer = self._callibrate_and_generate_visualizer(
model, prepared_for_callibrate_model, mod_report
)
# ensure the module fqns match the ones given by the get_all_unique_module_fqns method
actual_model_fqns = set(mod_rep_visualizer.generated_reports.keys())
returned_model_fqns = mod_rep_visualizer.get_all_unique_module_fqns()
self.assertEqual(returned_model_fqns, actual_model_fqns)
# now ensure that features are all properly returned
# all the linears have all the features for two detectors
# can use those as check that method is working reliably
b_1_linear_features = mod_rep_visualizer.generated_reports["block1.linear"]
# first test all features
returned_all_feats = mod_rep_visualizer.get_all_unique_feature_names(False)
self.assertEqual(returned_all_feats, set(b_1_linear_features.keys()))
# now test plottable features
plottable_set = set()
for feature_name in b_1_linear_features:
if type(b_1_linear_features[feature_name]) == torch.Tensor:
plottable_set.add(feature_name)
returned_plottable_feats = mod_rep_visualizer.get_all_unique_feature_names()
self.assertEqual(returned_plottable_feats, plottable_set)
def _prep_visualizer_helper(self):
r"""
Returns a mod rep visualizer that we test in various ways
"""
# set backend for test
torch.backends.quantized.engine = "fbgemm"
# test with multiple detectors
detector_set = set()
detector_set.add(OutlierDetector(reference_percentile=0.95))
detector_set.add(InputWeightEqualizationDetector(0.5))
model = TwoThreeOps()
# get test model and calibrate
prepared_for_callibrate_model, mod_report = _get_prepped_for_calibration_model_helper(
model, detector_set, model.get_example_inputs()[0]
)
mod_rep_visualizer: ModelReportVisualizer = self._callibrate_and_generate_visualizer(
model, prepared_for_callibrate_model, mod_report
)
return mod_rep_visualizer
@skipIfNoFBGEMM
def test_generate_tables_match_with_report(self):
"""
Tests the generate_table_view()
ModelReportVisualizer
Checks whether the generated dict has proper information
Visual check that the tables look correct performed during testing
"""
with override_quantized_engine('fbgemm'):
# get the visualizer
mod_rep_visualizer = self._prep_visualizer_helper()
table_dict = mod_rep_visualizer.generate_filtered_tables()
# test primarily the dict since it has same info as str
tensor_headers, tensor_table = table_dict[ModelReportVisualizer.TABLE_TENSOR_KEY]
channel_headers, channel_table = table_dict[ModelReportVisualizer.TABLE_CHANNEL_KEY]
# these two together should be the same as the generated report info in terms of keys
tensor_info_modules = set(row[1] for row in tensor_table)
channel_info_modules = set(row[1] for row in channel_table)
combined_modules: Set = tensor_info_modules.union(channel_info_modules)
generated_report_keys: Set = set(mod_rep_visualizer.generated_reports.keys())
self.assertEqual(combined_modules, generated_report_keys)
@skipIfNoFBGEMM
def test_generate_tables_no_match(self):
"""
Tests the generate_table_view()
ModelReportVisualizer
Checks whether the generated dict has proper information
Visual check that the tables look correct performed during testing
"""
with override_quantized_engine('fbgemm'):
# get the visualizer
mod_rep_visualizer = self._prep_visualizer_helper()
# try a random filter and make sure that there are no rows for either table
empty_tables_dict = mod_rep_visualizer.generate_filtered_tables(module_fqn_filter="random not there module")
# test primarily the dict since it has same info as str
tensor_headers, tensor_table = empty_tables_dict[ModelReportVisualizer.TABLE_TENSOR_KEY]
channel_headers, channel_table = empty_tables_dict[ModelReportVisualizer.TABLE_CHANNEL_KEY]
tensor_info_modules = set(row[1] for row in tensor_table)
channel_info_modules = set(row[1] for row in channel_table)
combined_modules: Set = tensor_info_modules.union(channel_info_modules)
self.assertEqual(len(combined_modules), 0) # should be no matching modules
@skipIfNoFBGEMM
def test_generate_tables_single_feat_match(self):
"""
Tests the generate_table_view()
ModelReportVisualizer
Checks whether the generated dict has proper information
Visual check that the tables look correct performed during testing
"""
with override_quantized_engine('fbgemm'):
# get the visualizer
mod_rep_visualizer = self._prep_visualizer_helper()
# try a matching filter for feature and make sure only those features show up
# if we filter to a very specific feature name, should only have 1 additional column in each table row
single_feat_dict = mod_rep_visualizer.generate_filtered_tables(feature_filter=OutlierDetector.MAX_VALS_KEY)
# test primarily the dict since it has same info as str
tensor_headers, tensor_table = single_feat_dict[ModelReportVisualizer.TABLE_TENSOR_KEY]
channel_headers, channel_table = single_feat_dict[ModelReportVisualizer.TABLE_CHANNEL_KEY]
# get the number of features in each of these
tensor_info_features = len(tensor_headers)
channel_info_features = len(channel_headers) - ModelReportVisualizer.NUM_NON_FEATURE_CHANNEL_HEADERS
# make sure that there are no tensor features, and that there is one channel level feature
self.assertEqual(tensor_info_features, 0)
self.assertEqual(channel_info_features, 1)
def _get_prepped_for_calibration_model_helper(model, detector_set, example_input, fused: bool = False):
r"""Returns a model that has been prepared for callibration and corresponding model_report"""
# set the backend for this test
torch.backends.quantized.engine = "fbgemm"
# create model instance and prepare it
example_input = example_input.to(torch.float)
q_config_mapping = torch.ao.quantization.get_default_qconfig_mapping()
# if they passed in the fusion parameter, make sure to test that
if fused:
model = torch.quantization.fuse_modules(model, model.get_fusion_modules())
model_prep = quantize_fx.prepare_fx(model, q_config_mapping, example_input)
model_report = ModelReport(model_prep, detector_set)
# prepare the model for calibration
prepared_for_callibrate_model = model_report.prepare_detailed_calibration()
return (prepared_for_callibrate_model, model_report)
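# A minimal usage sketch for the helper above; the model class and the report
# call are illustrative assumptions, not taken from the tests in this file:
#     detector_set = set([OutlierDetector()])
#     example_input = torch.randn(1, 3, 3, 3)
#     prepared, model_report = _get_prepped_for_calibration_model_helper(
#         SomeConvModel(), detector_set, example_input)
#     prepared(example_input)  # calibrate on the example input
#     report_dict = model_report.generate_model_report(True)  # assumed reporting API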
|
pytorch-master
|
test/quantization/fx/test_model_report_fx.py
|
# Owner(s): ["oncall: quantization"]
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.intrinsic.quantized as nniq
import torch.nn.quantized as nnq
from torch.ao.quantization import default_qconfig
from torch.ao.quantization.observer import MinMaxObserver, PerChannelMinMaxObserver
from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx
from torch.ao.quantization.fx._equalize import (
_InputEqualizationObserver,
_WeightEqualizationObserver,
calculate_equalization_scale,
default_equalization_qconfig,
_convert_equalization_ref,
get_layer_sqnr_dict,
get_equalization_qconfig_dict,
)
from torch.testing._internal.common_quantization import (
NodeSpec as ns,
QuantizationTestCase,
SingleLayerLinearModel,
TwoLayerLinearModel,
LinearAddModel,
SingleLayerFunctionalLinearModel,
TwoLayerFunctionalLinearModel,
FunctionalLinearAddModel,
ConvModel,
TwoLayerConvModel,
SingleLayerFunctionalConvModel,
TwoLayerFunctionalConvModel,
skipIfNoFBGEMM,
LinearReluModel,
LinearReluLinearModel,
LinearReluAddModel,
FunctionalLinearReluModel,
FunctionalLinearReluLinearModel,
ConvReluModel,
ConvReluConvModel,
ConvReluAddModel,
FunctionalConvReluModel,
FunctionalConvReluConvModel,
)
# Standard Libraries
import copy
import numpy as np
# Testing utils
from hypothesis import given
from hypothesis import strategies as st
default_qconfig_dict = {"": default_qconfig}
specific_qconfig_dict = {
"": None,
"object_type": [(nn.Linear, default_qconfig),
(F.linear, default_qconfig),
(nn.ReLU, default_qconfig),
(F.relu, default_qconfig),
(nn.Conv2d, default_qconfig),
(F.conv2d, default_qconfig)]
}
default_equalization_qconfig_dict = {
"": None,
"object_type": [(nn.Linear, default_equalization_qconfig),
(F.linear, default_equalization_qconfig),
(nn.ReLU, default_equalization_qconfig),
(F.relu, default_equalization_qconfig),
(nn.Conv2d, default_equalization_qconfig),
(F.conv2d, default_equalization_qconfig)]
}
class TestEqualizeFx(QuantizationTestCase):
def channel_minmax(self, input, axis=1):
''' Finds the min/max of inputs associated with a specific channel
'''
size_of_tensor_dim = input.ndim
axis_list = list(range(size_of_tensor_dim))
axis_list.remove(axis)
axis_list.sort(reverse=True)
mins = input.copy()
maxs = input.copy()
for a in axis_list:
mins = mins.min(a)
maxs = maxs.max(a)
return (mins, maxs)
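# A small worked example for channel_minmax with the default axis=1 (channels)
# on a hypothetical 2x3 input:
#     x = np.array([[1., 5., 3.],
#                   [4., 2., 6.]])
#     mins, maxs = self.channel_minmax(x)
#     # mins == array([1., 2., 3.]), maxs == array([4., 5., 6.])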
@given(ndim=st.sampled_from((2, 3, 4, 5)),
input_qdtype=st.sampled_from((torch.qint8, torch.quint8)),
input_qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)),
weight_qdtype=st.sampled_from((torch.qint8, torch.quint8)),
weight_qscheme=st.sampled_from((torch.per_channel_affine, torch.per_channel_symmetric,
torch.per_channel_affine_float_qparams)))
def test_input_weight_eq_observer(self, ndim, input_qdtype, input_qscheme, weight_qdtype, weight_qscheme):
sizes = []
for _ in range((ndim - 1) * 2):
sizes.append(np.random.randint(2, 10))
channel = np.random.randint(1, 10)
if ndim == 2:
x = np.random.random(size=(sizes[0], channel))
w = np.random.random(size=(sizes[1], channel))
elif ndim == 3:
x = np.random.random(size=(sizes[0], channel, sizes[1]))
w = np.random.random(size=(sizes[2], channel, sizes[3]))
elif ndim == 4:
x = np.random.random(size=(sizes[0], channel, sizes[1], sizes[2]))
w = np.random.random(size=(sizes[3], channel, sizes[4], sizes[5]))
elif ndim == 5:
x = np.random.random(size=(sizes[0], channel, sizes[1], sizes[2], sizes[3]))
w = np.random.random(size=(sizes[4], channel, sizes[5], sizes[6], sizes[7]))
x = (x * 10).round(decimals=2).astype(np.float32)
w = (w * 10).round(decimals=2).astype(np.float32)
input_eq_obs = _InputEqualizationObserver(dtype=input_qdtype, qscheme=input_qscheme)
weight_eq_obs = _WeightEqualizationObserver(dtype=weight_qdtype, qscheme=weight_qscheme)
ret_x = input_eq_obs(torch.tensor(x))
ret_w = weight_eq_obs(torch.tensor(w))
self.assertEqual((ret_x, ret_w), (x, w))
# Check the min/max input columns are correct
ref_min_inputs, ref_max_inputs = self.channel_minmax(x)
min_inputs, max_inputs = input_eq_obs.get_input_minmax()
self.assertEqual(min_inputs, torch.tensor(ref_min_inputs, dtype=torch.float32))
self.assertEqual(max_inputs, torch.tensor(ref_max_inputs, dtype=torch.float32))
# Check the min/max weight columns are correct
ref_min_weights_col, ref_max_weights_col = self.channel_minmax(w)
min_weights_col, max_weights_col = weight_eq_obs.get_weight_col_minmax()
self.assertEqual(min_weights_col, torch.tensor(ref_min_weights_col, dtype=torch.float32))
self.assertEqual(max_weights_col, torch.tensor(ref_max_weights_col, dtype=torch.float32))
# Check the equalization scale is correct
equalization_scale = calculate_equalization_scale(input_eq_obs, weight_eq_obs)
ref_equalization_scale = np.sqrt((ref_max_weights_col - ref_min_weights_col) /
(ref_max_inputs - ref_min_inputs))
self.assertEqual(equalization_scale, torch.tensor(ref_equalization_scale, dtype=torch.float32))
input_eq_obs.set_equalization_scale(equalization_scale)
weight_eq_obs.set_equalization_scale(equalization_scale)
# Check the input scale/zero-point values
min_input_scaled, max_input_scaled = input_eq_obs.calculate_scaled_minmax()
input_quant_obs = MinMaxObserver(dtype=input_qdtype, qscheme=input_qscheme)
input_quant_obs.min_val = min_input_scaled
input_quant_obs.max_val = max_input_scaled
input_qparams = input_quant_obs.calculate_qparams()
ref_min_input_scaled = np.min(ref_min_inputs * ref_equalization_scale)
ref_min_input_scaled = min(0, ref_min_input_scaled)
ref_max_input_scaled = np.max(ref_max_inputs * ref_equalization_scale)
ref_max_input_scaled = max(0, ref_max_input_scaled)
if input_qscheme == torch.per_tensor_symmetric:
ref_scale = 2 * max(abs(ref_min_input_scaled), ref_max_input_scaled) / 255
ref_zero_point = 0 if input_qdtype is torch.qint8 else 128
else:
ref_scale = (ref_max_input_scaled - ref_min_input_scaled) / 255
quant_min = -128 if input_qdtype is torch.qint8 else 0
quant_max = 127 if input_qdtype is torch.qint8 else 255
ref_zero_point = quant_min - np.round(ref_min_input_scaled / ref_scale)
ref_zero_point = np.clip(ref_zero_point, quant_min, quant_max)
self.assertEqual(input_qparams[0].item(), ref_scale, atol=1e-5, rtol=0)
self.assertEqual(input_qparams[1].item(), ref_zero_point)
# During input-weight equalization, we will scale the weights so that
# the following weight quantized observer will have the correct scaled qparams
# Check the weight scale/zero-point values of the quantized observer
weight_quant_obs = PerChannelMinMaxObserver(ch_axis=1, dtype=weight_qdtype, qscheme=weight_qscheme)
# Scale the weights for input-weight equalization
new_shape = [1] * w.ndim
new_shape[1] = w.shape[1]
ref_w_scaled = w * np.reciprocal(ref_equalization_scale.reshape(tuple(new_shape)))
w = torch.tensor(w)
new_shape[1] = w.size(1)
w_scaled = torch.mul(w, torch.reciprocal(equalization_scale.view(new_shape)))
self.assertEqual(w_scaled, ref_w_scaled)
# Call forward on the weight quantization observer
weight_quant_obs(w_scaled)
# Check the min/max weight rows are correct
ref_min_weights_scaled, ref_max_weights_scaled = self.channel_minmax(ref_w_scaled)
self.assertEqual(weight_quant_obs.min_val, torch.tensor(ref_min_weights_scaled, dtype=torch.float32))
self.assertEqual(weight_quant_obs.max_val, torch.tensor(ref_max_weights_scaled, dtype=torch.float32))
weight_qparams = weight_quant_obs.calculate_qparams()
if weight_qscheme == torch.per_channel_symmetric:
ref_min_weights_scaled = np.minimum(np.zeros(ref_min_weights_scaled.shape), ref_min_weights_scaled)
ref_max_weights_scaled = np.maximum(np.zeros(ref_max_weights_scaled.shape), ref_max_weights_scaled)
ref_scales = 2 * np.maximum(np.abs(ref_min_weights_scaled), ref_max_weights_scaled) / 255
ref_zero_points = np.zeros_like(
ref_scales) if weight_qdtype is torch.qint8 else np.ones_like(ref_scales) * 128
elif weight_qscheme == torch.per_channel_affine_float_qparams:
ref_scales = (ref_max_weights_scaled - ref_min_weights_scaled) / 255
ref_scales = np.where(ref_scales > 1e-7, ref_scales, np.ones_like(ref_scales))
ref_zero_points = -1 * ref_min_weights_scaled / ref_scales
else:
ref_min_weights_scaled = np.minimum(np.zeros_like(ref_min_weights_scaled), ref_min_weights_scaled)
ref_max_weights_scaled = np.maximum(np.zeros_like(ref_max_weights_scaled), ref_max_weights_scaled)
ref_scales = (ref_max_weights_scaled - ref_min_weights_scaled) / 255
ref_zero_points = -128 if weight_qdtype is torch.qint8 else 0
ref_zero_points = ref_zero_points - np.round(ref_min_weights_scaled / ref_scales)
self.assertEqual(weight_qparams[0], torch.tensor(
ref_scales, dtype=weight_qparams[0].dtype), rtol=1e-5, atol=0.0001)
self.assertEqual(weight_qparams[1], torch.tensor(
ref_zero_points, dtype=weight_qparams[1].dtype), rtol=1e-5, atol=1)
def test_input_weight_equalization_prepare(self):
""" Tests that graphs created after prepare_fx is as expected
"""
single_nn_layer_node_occurrence = {
ns.call_module(_InputEqualizationObserver): 1,
ns.call_module(MinMaxObserver): 2,
}
two_nn_layer_node_occurrence = {
ns.call_module(_InputEqualizationObserver): 2,
ns.call_module(MinMaxObserver): 3,
}
single_F_layer_node_occurrence = {
ns.call_module(_InputEqualizationObserver): 1,
ns.call_module(_WeightEqualizationObserver): 1,
ns.call_module(MinMaxObserver): 3,
}
two_F_layer_node_occurrence = {
ns.call_module(_InputEqualizationObserver): 2,
ns.call_module(_WeightEqualizationObserver): 2,
ns.call_module(MinMaxObserver): 5,
}
fp_F_layer_node_occurrence = {
ns.call_module(_InputEqualizationObserver): 2,
ns.call_module(_WeightEqualizationObserver): 2,
ns.call_module(MinMaxObserver): 6,
}
tests = [(SingleLayerLinearModel, single_nn_layer_node_occurrence),
(TwoLayerLinearModel, two_nn_layer_node_occurrence),
(TwoLayerFunctionalLinearModel, two_F_layer_node_occurrence),
(FunctionalLinearAddModel, fp_F_layer_node_occurrence),
(LinearReluModel, single_nn_layer_node_occurrence),
(LinearReluLinearModel, two_nn_layer_node_occurrence),
(FunctionalLinearReluModel, single_F_layer_node_occurrence),
(FunctionalLinearReluLinearModel, two_F_layer_node_occurrence),
(ConvModel, single_nn_layer_node_occurrence),
(TwoLayerConvModel, two_nn_layer_node_occurrence),
(TwoLayerFunctionalConvModel, two_F_layer_node_occurrence),
(ConvReluModel, single_nn_layer_node_occurrence),
(ConvReluConvModel, two_nn_layer_node_occurrence),
(FunctionalConvReluModel, single_F_layer_node_occurrence),
(FunctionalConvReluConvModel, two_F_layer_node_occurrence)]
for (M, node_occurrence) in tests:
m = M().eval()
example_inputs = m.get_example_inputs()
prepared = prepare_fx(
m,
specific_qconfig_dict,
example_inputs=example_inputs,
_equalization_config=default_equalization_qconfig_dict)
self.checkGraphModuleNodes(prepared, expected_node_occurrence=node_occurrence)
def test_input_weight_equalization_branching(self):
""" Tests that graphs containing branches are prepared correctly.
Specifically, equalization observers should not be inserted in front of
branches in which both initial layers in the branches plan to be
quantized.
"""
# Tests that we do not add an equalization observer due to both initial
# nodes in the branch containing layers that need to be equalized.
# Note that this should print out 2 warning messages for not being able
# to equalize layers linear1 and linear2 because they are part of a branch
class TestBranchingWithoutEqualizationModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear1 = nn.Linear(5, 5)
self.linear2 = nn.Linear(5, 5)
def forward(self, x):
y = self.linear1(x)
z = self.linear2(x)
return torch.add(y, z)
no_eq_branching_node_occurrence = {
ns.call_module(_InputEqualizationObserver): 0,
ns.call_module(MinMaxObserver): 3,
}
m = TestBranchingWithoutEqualizationModel().eval()
example_inputs = (torch.rand(1, 5),)
prepared = prepare_fx(
m, specific_qconfig_dict, example_inputs=example_inputs,
_equalization_config=default_equalization_qconfig_dict)
self.checkGraphModuleNodes(prepared, expected_node_occurrence=no_eq_branching_node_occurrence)
# Tests that we will add an equalization observer because there is only
# one initial node in the branch that needs to be equalized
class TestBranchingWithEqualizationModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear1 = nn.Linear(5, 5)
def forward(self, x):
y = self.linear1(x)
z = torch.add(x, 5)
return torch.add(y, z)
eq_branching_node_occurrence = {
ns.call_module(_InputEqualizationObserver): 1,
ns.call_module(MinMaxObserver): 2,
}
m = TestBranchingWithEqualizationModel().eval()
example_inputs = (torch.randn(1, 5),)
prepared = prepare_fx(
m, specific_qconfig_dict, example_inputs=example_inputs,
_equalization_config=default_equalization_qconfig_dict)
self.checkGraphModuleNodes(prepared, expected_node_occurrence=eq_branching_node_occurrence)
@skipIfNoFBGEMM
def test_input_weight_equalization_convert(self):
""" Tests that the modified model for equalization (before quantization)
returns the same output as the original model
"""
tests = [(SingleLayerLinearModel, 2), (LinearAddModel, 2), (TwoLayerLinearModel, 2),
(SingleLayerFunctionalLinearModel, 2), (FunctionalLinearAddModel, 2),
(TwoLayerFunctionalLinearModel, 2),
(LinearReluModel, 2), (LinearReluLinearModel, 2), (LinearReluAddModel, 2),
(FunctionalLinearReluModel, 2), (FunctionalLinearReluLinearModel, 2),
(ConvModel, 4), (TwoLayerConvModel, 4), (SingleLayerFunctionalConvModel, 4),
(TwoLayerFunctionalConvModel, 4),
(ConvReluModel, 4), (ConvReluConvModel, 4), (ConvReluAddModel, 4),
(FunctionalConvReluModel, 4), (FunctionalConvReluConvModel, 4)]
for (M, ndim) in tests:
m = M().eval()
if ndim == 2:
x = torch.rand((5, 5))
elif ndim == 4:
x = torch.rand((16, 3, 224, 224))
example_inputs = (x,)
prepared = prepare_fx(
copy.deepcopy(m),
specific_qconfig_dict,
example_inputs=example_inputs,
_equalization_config=default_equalization_qconfig_dict
)
output = prepared(x)
convert_ref = _convert_equalization_ref(prepared)
convert_ref_output = convert_ref(x)
prepared = prepare_fx(
m, specific_qconfig_dict,
example_inputs=example_inputs,
_equalization_config=default_equalization_qconfig_dict)
prepared(x)
convert_fx(prepared)  # check that conversion compiles
self.assertEqual(output, convert_ref_output)
def calculate_equalization_scale_ref(self, x, w):
""" Calculates the equalization scale based on the input and weight
"""
min_inputs = x.min(axis=0)
max_inputs = x.max(axis=0)
min_weights_col = w.min(axis=0)
max_weights_col = w.max(axis=0)
equalization_scale = np.sqrt((max_weights_col - min_weights_col) /
(max_inputs - min_inputs))
return equalization_scale
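# The per-column scale above is s_c = sqrt(range(w[:, c]) / range(x[:, c])), so
# that multiplying the input by s and dividing the weight columns by s leaves
# both with the same range sqrt(range(x) * range(w)). A quick sanity check with
# hypothetical one-column data:
#     x = np.array([[0.], [2.]])   # input range 2
#     w = np.array([[0.], [8.]])   # weight range 8
#     # self.calculate_equalization_scale_ref(x, w) == sqrt(8 / 2) == 2.0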
def get_expected_eq_scales(self, model, x):
""" For each module in the graph, we want to calculate the equalization
scale at that point. This only works for models containing single or
connected linear layers.
"""
exp_eq_scales = []
for _, module in model.named_children():
weight = module.weight.detach().numpy()
bias = module.bias.detach().numpy()
eq_scale = self.calculate_equalization_scale_ref(x, weight)
exp_eq_scales.append(eq_scale)
x = x @ weight.T + bias
return exp_eq_scales
def test_input_weight_equalization_equalization_scales(self):
""" After applying the equalization functions, check if the equalization
scales are the expected values
"""
tests = [SingleLayerLinearModel, TwoLayerLinearModel,
SingleLayerFunctionalLinearModel, TwoLayerFunctionalLinearModel]
x = torch.rand((5, 5))
for M in tests:
m = M().eval()
exp_eq_scales = self.get_expected_eq_scales(m, x.detach().numpy())
example_inputs = (x,)
prepared = prepare_fx(
m, specific_qconfig_dict,
example_inputs=example_inputs,
_equalization_config=default_equalization_qconfig_dict)
prepared(*example_inputs)
convert_ref = _convert_equalization_ref(prepared)
convert_ref(x)
counter = 0
for node in convert_ref.graph.nodes:
if 'equalization_scale' in node.name and node.op == 'get_attr':
self.assertEqual(convert_ref.get_buffer(str(node.target)).reshape(-1), exp_eq_scales[counter])
counter += 1
def get_expected_weights_bias(self, model, x, exp_eq_scales):
""" For each module in the graph, we want to calculate the expected
scaled weight and bias values. This only works for models containing
single or connected linear layers.
"""
exp_weights = []
exp_bias = []
for i, (_, module) in enumerate(model.named_children()):
weight = module.weight.detach().numpy()
bias = module.bias.detach().numpy()
scaled_weight = weight * np.reciprocal(exp_eq_scales[i])
scaled_bias = bias
if i + 1 < len(exp_eq_scales):
scaled_weight = (scaled_weight.T * exp_eq_scales[i + 1]).T
scaled_bias = (scaled_bias.T * exp_eq_scales[i + 1]).T
exp_weights.append(scaled_weight)
exp_bias.append(scaled_bias)
x = x @ weight.T + bias
return exp_weights, exp_bias
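# In the reference above, each layer's weight columns are divided by that
# layer's own equalization scale, and when a following equalized layer exists,
# the weight rows and the bias are additionally multiplied by the next layer's
# scale, mirroring how the equalization pass folds the scales into consecutive
# linear layers.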
def test_input_weight_equalization_weights_bias(self):
""" After applying the equalization functions check if the weights and
biases are as expected
"""
tests = [SingleLayerLinearModel, TwoLayerLinearModel,
SingleLayerFunctionalLinearModel, TwoLayerFunctionalLinearModel]
x = torch.rand((5, 5))
for M in tests:
m = M().eval()
exp_eq_scales = self.get_expected_eq_scales(m, x.detach().numpy())
exp_weights, exp_bias = self.get_expected_weights_bias(m, x.detach().numpy(), exp_eq_scales)
example_inputs = (x,)
prepared = prepare_fx(
m, specific_qconfig_dict,
example_inputs=example_inputs,
_equalization_config=default_equalization_qconfig_dict)
prepared(x)
convert_ref = _convert_equalization_ref(prepared)
convert_ref(x)
modules = dict(convert_ref.named_modules(remove_duplicate=False))
counter = 0
for node in convert_ref.graph.nodes:
if node.op == 'call_module' and isinstance(modules[str(node.target)], nn.Linear):
self.assertEqual(modules[str(node.target)].weight, exp_weights[counter])
self.assertEqual(modules[str(node.target)].bias, exp_bias[counter])
counter += 1
def get_expected_inp_act_vals(self, model, x, exp_eq_scales, exp_weights, exp_bias):
""" For each module in the graph, we want to calculate the expected
min/max values for every input activation node. This only works for
models containing only single or connected linear layers.
"""
x = x * exp_eq_scales[0]
exp_inp_activation_vals = []
for i, _ in enumerate(model.named_children()):
exp_inp_activation_vals.append((x.min(), x.max()))
x = x @ exp_weights[i].T + exp_bias[i]
exp_inp_activation_vals.append((x.min(), x.max()))
return exp_inp_activation_vals
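# Note that the expected activation ranges above are tracked on the equalized
# path: the raw input is first multiplied by the first equalization scale, and
# every subsequent activation is produced with the scaled weights and biases
# computed in get_expected_weights_bias.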
def get_expected_weight_act_vals(self, exp_weights):
""" For each module in the graph, we want to calculate the expected
min/max values for every weight activation node. This is assuming that
the weight observers are all MinMaxObservers.
"""
exp_weight_activation_vals = []
for w in exp_weights:
exp_weight_activation_vals.append((w.min(), w.max()))
return exp_weight_activation_vals
def test_input_weight_equalization_activation_values(self):
""" After applying the equalization functions check if the input
observer's min/max values are as expected
"""
tests = [SingleLayerLinearModel, TwoLayerLinearModel, SingleLayerFunctionalLinearModel]
x = torch.rand((5, 5))
torch.manual_seed(0)
for M in tests:
m = M().eval()
exp_eq_scales = self.get_expected_eq_scales(m, x.detach().numpy())
exp_weights, exp_bias = self.get_expected_weights_bias(m, x.detach().numpy(), exp_eq_scales)
exp_inp_act_vals = self.get_expected_inp_act_vals(m, x, exp_eq_scales, exp_weights, exp_bias)
exp_weight_act_vals = self.get_expected_weight_act_vals(exp_weights)
example_inputs = (x,)
prepared = prepare_fx(
m, specific_qconfig_dict,
example_inputs=example_inputs,
_equalization_config=default_equalization_qconfig_dict)
prepared(x)
convert_ref = _convert_equalization_ref(prepared)
convert_ref(x)
modules = dict(convert_ref.named_modules(remove_duplicate=False))
inp_counter = 0
weight_counter = 0
for node in convert_ref.graph.nodes:
users = list(node.users)
if node.op == 'call_module' and isinstance(modules[str(node.target)], MinMaxObserver):
if len(users) == 1 and users[0].target == torch.nn.functional.linear and users[0].args[1] == node:
# Check min/max values of weight activation layers
exp_min_val, exp_max_val = exp_weight_act_vals[weight_counter]
self.assertEqual(modules[str(node.target)].min_val, exp_min_val)
self.assertEqual(modules[str(node.target)].max_val, exp_max_val)
weight_counter += 1
else:
# Check min/max values of input activation layers
exp_min_val, exp_max_val = exp_inp_act_vals[inp_counter]
self.assertEqual(modules[str(node.target)].min_val, exp_min_val)
self.assertEqual(modules[str(node.target)].max_val, exp_max_val)
inp_counter += 1
def check_orig_and_eq_graphs(self, orig_model, eq_model):
""" Given a non-equalized model and an equalized model, check that the
graphs are structured in the same way, except the equalized model has
additional 'equalization_scale' and 'mul' nodes.
"""
orig_idx = 0
orig_nodes = list(orig_model.graph.nodes)
orig_modules = dict(orig_model.named_modules(remove_duplicate=False))
eq_idx = 0
eq_nodes = list(eq_model.graph.nodes)
eq_modules = dict(eq_model.named_modules(remove_duplicate=False))
while orig_idx < len(orig_nodes) and eq_idx < len(eq_nodes):
if 'equalization_scale' in eq_nodes[eq_idx].name and 'mul' in eq_nodes[eq_idx + 1].name:
# Skip the equalization and mul nodes
eq_idx += 2
continue
elif orig_nodes[orig_idx].op != eq_nodes[eq_idx].op:
return False
elif orig_nodes[orig_idx].op == 'call_module':
# Check that the type of call_modules are the same (ex. nn.Linear, MinMaxObserver)
orig_node = orig_nodes[orig_idx]
eq_node = eq_nodes[eq_idx]
if type(orig_modules[orig_node.target]) is not type(eq_modules[eq_node.target]):
return False
elif orig_nodes[orig_idx].op == 'call_function':
# Check that the call_functions are the same (ex. F.linear)
orig_node = orig_nodes[orig_idx]
eq_node = eq_nodes[eq_idx]
if orig_node.target != eq_node.target:
return False
eq_idx += 1
orig_idx += 1
return True
@skipIfNoFBGEMM
def test_input_weight_equalization_graphs(self):
""" Tests that the modified model for equalization has the same graph
structure as the model without equalization (before and after
quantization).
"""
linear_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Linear),
ns.call_method('dequantize')
]
linearAdd_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Linear),
ns.call_method('dequantize'),
ns.call_function(torch.add),
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Linear),
ns.call_method('dequantize')
]
linear2_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Linear),
ns.call_module(nnq.Linear),
ns.call_method('dequantize')
]
functionalLinear_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.linear),
ns.call_method('dequantize')
]
functionalLinearAdd_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.linear),
ns.call_method('dequantize'),
ns.call_function(torch.add),
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.linear),
ns.call_method('dequantize')
]
functionalLinear2_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.linear),
ns.call_function(torch.ops.quantized.linear),
ns.call_method('dequantize')
]
linearRelu_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nniq.LinearReLU),
ns.call_method('dequantize')
]
linearReluLinear_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nniq.LinearReLU),
ns.call_module(nnq.Linear),
ns.call_method('dequantize')
]
functionalLinearRelu_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.linear_relu),
ns.call_method('dequantize')
]
functionalLinearReluLinear_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.linear_relu),
ns.call_function(torch.ops.quantized.linear),
ns.call_method('dequantize')
]
conv_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Conv2d),
ns.call_method('dequantize')
]
conv2_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Conv2d),
ns.call_module(nnq.Conv2d),
ns.call_method('dequantize')
]
functionalConv_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.conv2d),
ns.call_method('dequantize')
]
functionalConv2_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.conv2d),
ns.call_function(torch.ops.quantized.conv2d),
ns.call_method('dequantize')
]
convRelu_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nniq.ConvReLU2d),
ns.call_method('dequantize')
]
convReluConv_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nniq.ConvReLU2d),
ns.call_module(nnq.Conv2d),
ns.call_method('dequantize')
]
functionalConvRelu_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.conv2d_relu),
ns.call_method('dequantize')
]
functionalConvReluConv_node_list = [
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_function(torch.ops.quantized.conv2d_relu),
ns.call_function(torch.ops.quantized.conv2d),
ns.call_method('dequantize')
]
tests = [(SingleLayerLinearModel, linear_node_list),
(LinearAddModel, linearAdd_node_list),
(TwoLayerLinearModel, linear2_node_list),
(SingleLayerFunctionalLinearModel, functionalLinear_node_list),
(FunctionalLinearAddModel, functionalLinearAdd_node_list),
(TwoLayerFunctionalLinearModel, functionalLinear2_node_list),
(LinearReluModel, linearRelu_node_list),
(LinearReluLinearModel, linearReluLinear_node_list),
(FunctionalLinearReluModel, functionalLinearRelu_node_list),
(FunctionalLinearReluLinearModel, functionalLinearReluLinear_node_list),
(ConvModel, conv_node_list),
(TwoLayerConvModel, conv2_node_list),
(SingleLayerFunctionalConvModel, functionalConv_node_list),
(TwoLayerFunctionalConvModel, functionalConv2_node_list),
(ConvReluModel, convRelu_node_list),
(ConvReluConvModel, convReluConv_node_list),
(FunctionalConvReluModel, functionalConvRelu_node_list),
(FunctionalConvReluConvModel, functionalConvReluConv_node_list)]
for (M, node_list) in tests:
m = M().eval()
example_inputs = m.get_example_inputs()
prepared = prepare_fx(
m, specific_qconfig_dict,
example_inputs=example_inputs,
_equalization_config=default_equalization_qconfig_dict)
equalized_quantized_model = convert_fx(prepared)
# Check the order of nodes in the graph
self.checkGraphModuleNodes(equalized_quantized_model, expected_node_list=node_list)
@skipIfNoFBGEMM
def test_input_weight_equalization_results(self):
""" Tests that for small models, the results of quantized models that
have been equalized are very close to models that have not been equalized.
"""
tests = [SingleLayerLinearModel, TwoLayerLinearModel, LinearAddModel,
SingleLayerFunctionalLinearModel, TwoLayerFunctionalLinearModel]
x = torch.rand((5, 5))
for M in tests:
m = M().eval()
# No equalization
example_inputs = (x,)
prepared = prepare_fx(
copy.deepcopy(m),
specific_qconfig_dict,
example_inputs=example_inputs,
_equalization_config={})
prepared(x)
quantized = convert_fx(prepared)  # check that conversion compiles
quantized_output = quantized(x)
# With equalization
prepared = prepare_fx(
copy.deepcopy(m),
specific_qconfig_dict,
example_inputs=example_inputs,
_equalization_config=default_equalization_qconfig_dict
)
prepared(x)
equalized_and_quantized = convert_fx(prepared)  # check that conversion compiles
equalized_and_quantized_output = equalized_and_quantized(x)
self.assertEqual(quantized_output, equalized_and_quantized_output, rtol=1e-5, atol=0.1)
@skipIfNoFBGEMM
def test_selective_equalization(self):
""" Tests that we are able to run numeric suite on the equalized model
and construct a valid equalization_config equalizing only the
layer with the highest quantization error.
"""
torch.manual_seed(1)
class M(nn.Module):
def __init__(self):
super().__init__()
self.bot = torch.nn.Sequential(torch.nn.Linear(5, 5))
self.top = torch.nn.Sequential(torch.nn.Linear(5, 5))
def forward(self, x):
x = self.bot(x)
x = torch.add(x, 5)
x = self.top(x)
return x
float_model = M().eval()
# Hard coded so that the top layer has a higher quantization error
x = torch.tensor([[0.0642, 0.7824, 0.4255, 0.7106, 0.5957],
[0.8373, 0.8851, 0.8229, 0.0212, 0.8987],
[0.9077, 0.7538, 0.4530, 0.5772, 0.1376],
[0.0690, 0.9002, 0.7998, 0.2768, 0.8985],
[0.0282, 0.5068, 0.6725, 0.1829, 0.5480]])
# Quantize the float model
example_inputs = (x,)
prepared_model = prepare_fx(
copy.deepcopy(float_model),
specific_qconfig_dict,
example_inputs=example_inputs
)
prepared_model(x)
quantized_model = convert_fx(copy.deepcopy(prepared_model))
# Get the SQNR between the float and quantized model
layer_to_sqnr_dict = get_layer_sqnr_dict(copy.deepcopy(prepared_model), quantized_model, x)
# Construct the equalization_qconfig_dict equalizing layers with the highest
# quantization errors
selective_equalization_qconfig_dict = get_equalization_qconfig_dict(layer_to_sqnr_dict, 1)
# Create the selectively equalized model
prepared_model = prepare_fx(
copy.deepcopy(float_model),
specific_qconfig_dict,
example_inputs=example_inputs,
_equalization_config=selective_equalization_qconfig_dict,
)
prepared_model(x)
equalized_model = convert_fx(prepared_model)
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Linear),
ns.call_method('dequantize'),
ns.call_function(torch.add),
ns.call_function(torch.mul),
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Linear),
ns.call_method('dequantize')
]
# Check the order of nodes in the graph
self.checkGraphModuleNodes(equalized_model, expected_node_list=node_list)
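# Only the second (top) linear in the expected node list above is preceded by a
# torch.mul scaling node, since get_equalization_qconfig_dict was asked for a
# single layer and the input was hard coded so that the top layer has the
# larger quantization error.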
|
pytorch-master
|
test/quantization/fx/test_equalize_fx.py
|
pytorch-master
|
test/quantization/dbr/__init__.py
|
|
# Owner(s): ["oncall: quantization"]
import collections
import copy
import math
import tempfile
import unittest
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.intrinsic as nni
import torch.nn.quantized as nnq
toq = torch.ops.quantized
from torch.testing._internal.common_quantization import (
skipIfNoFBGEMM,
skip_if_no_torchvision,
QuantizationTestCase,
NodeSpec,
)
from torch.testing import FileCheck
from torch.quantization import (
ObserverBase,
FakeQuantizeBase,
QConfig,
MinMaxObserver,
)
from torch.quantization.quantize_fx import (
prepare_fx,
convert_fx,
)
from torch.ao.quantization._dbr.quantization_state import AutoQuantizationState
import torch.ao.quantization._quantize_dbr as _quantize_dbr
import torch.ao.ns._numeric_suite_dbr as ns
# TODO(future PR): move these utils out of the FX folder
import torch.ao.ns._numeric_suite_fx as ns_fx
from torch.ao.quantization._dbr.torchscript_utils import (
remove_redundant_aliases,
)
def _allclose(a, b):
if isinstance(a, tuple):
assert isinstance(b, tuple)
result = True
for a_inner, b_inner in zip(a, b):
result = result and torch.allclose(a_inner, b_inner)
return result
elif isinstance(a, torch.Tensor):
assert isinstance(b, torch.Tensor)
return torch.allclose(a, b)
raise AssertionError('unhandled type')
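# A minimal sketch of how _allclose is used below, with hypothetical outputs:
#     a = (torch.ones(2), torch.zeros(2))
#     b = (torch.ones(2), torch.zeros(2))
#     assert _allclose(a, b)  # tuples compared element-wise, tensors directly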
class QuantizeDBRTestCase(QuantizationTestCase):
def _test_auto_tracing(
self,
m,
qconfig,
example_args,
fuse_modules=True,
do_fx_comparison=True,
do_torchscript_checks=True,
# there are some keys in DBR prepare_custom_config_dict which
# are not supported in FX, this argument is for DBR only
dbr_prepare_custom_config_dict=None,
):
m_copy = copy.deepcopy(m)
qconfig_dict = {'': qconfig}
mp = _quantize_dbr.prepare(
m, qconfig_dict, example_args, fuse_modules=fuse_modules,
prepare_custom_config_dict=dbr_prepare_custom_config_dict)
out_p = mp(*example_args)
# print(mp)
mq = _quantize_dbr.convert(mp)
# print(mq)
# verify it runs
out_q = mq(*example_args)
# print(out_q)
# compare it against FX
if do_fx_comparison:
m_copy_p = prepare_fx(m_copy, {'': qconfig}, example_inputs=example_args)
out_m_copy_p = m_copy_p(*example_args)
# print(m_copy_p)
m_copy_q = convert_fx(m_copy_p)
# print(m_copy_q)
# print(m_copy_q.graph)
out_q_fx = m_copy_q(*example_args)
# print(out_q)
# print(out_q_fx)
self.assertTrue(_allclose(out_p, out_m_copy_p))
# print(out_q)
# print(out_q_fx)
self.assertTrue(_allclose(out_q, out_q_fx))
if do_torchscript_checks:
# verify torch.jit.trace works
mq_jit_traced = torch.jit.trace(
mq, example_args, check_trace=False)
# print(mq_jit_traced.graph)
traced_out = mq_jit_traced(*example_args)
self.assertTrue(_allclose(traced_out, out_q))
# verify torch.jit.script works
rewritten = mq.rewrite_for_scripting()
rewritten_out = rewritten(*example_args)
# print(rewritten)
self.assertTrue(_allclose(rewritten_out, out_q))
scripted_rewritten = torch.jit.script(rewritten)
# print(scripted_rewritten.graph)
scripted_rewritten_out = scripted_rewritten(*example_args)
# print('scripted_rewritten_out', scripted_rewritten_out)
self.assertTrue(_allclose(scripted_rewritten_out, out_q))
traced_rewritten = torch.jit.trace(
rewritten, example_args, check_trace=False)
traced_rewritten_out = traced_rewritten(*example_args)
self.assertTrue(_allclose(traced_rewritten_out, out_q))
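# In short, _test_auto_tracing runs the DBR flow (prepare -> calibrate on the
# example args -> convert), optionally compares both the prepared and converted
# outputs against FX graph mode quantization, and optionally verifies that the
# converted model still works under torch.jit.trace and, after
# rewrite_for_scripting, under torch.jit.script.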
@skipIfNoFBGEMM
class TestQuantizeDBRIndividualOps(QuantizeDBRTestCase):
"""
Tests that DBR quantization covers individual ops
"""
def test_conv(self):
convs = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
class M(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.conv = convs[dim](3, 3, 3)
def forward(self, x):
x1 = self.conv(x)
return x1
data = {
1: torch.randn(1, 3, 10),
2: torch.randn(1, 3, 10, 10),
3: torch.randn(1, 3, 5, 5, 5)
}
for dim in range(1, 4):
m = M(dim).eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(m, qconfig, (data[dim],))
def test_conv_functional(self):
convs = {1: F.conv1d, 2: F.conv2d, 3: F.conv3d}
class M(torch.nn.Module):
def __init__(self, dim, weight, bias):
super().__init__()
self.conv_func = convs[dim]
self.weight = torch.nn.Parameter(weight)
self.bias = torch.nn.Parameter(bias)
self.stride = (1,) * dim
self.padding = (0,) * dim
self.dilation = (1,) * dim
self.groups = 1
def forward(self, x):
x = self.conv_func(
x, self.weight, self.bias, self.stride, self.padding,
self.dilation, self.groups)
return x
data = {
1: torch.randn(1, 3, 10),
2: torch.randn(1, 3, 10, 10),
3: torch.randn(1, 3, 5, 5, 5)
}
bias = torch.randn(1)
for dim in range(1, 4):
model_fp32 = M(dim, data[dim], bias).eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(model_fp32, qconfig, (data[dim],))
def test_linear_dynamic(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(1, 1)
def forward(self, x):
x1 = self.linear(x)
return x1
m = M().eval()
qconfig = torch.quantization.default_dynamic_qconfig
# qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(m, qconfig, (torch.randn(1, 1, 1, 1),))
def test_linear_functional(self):
class LinearFunctional(nn.Module):
def __init__(self):
super().__init__()
self.w1 = nn.Parameter(torch.empty(4, 4))
self.b1 = nn.Parameter(torch.ones(4))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = F.linear(x, self.w1, self.b1)
return x
model_fp32 = LinearFunctional().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(
model_fp32, qconfig, (torch.randn(1, 1, 4, 4),))
def test_linear_functional_nobias(self):
class LinearFunctional(nn.Module):
def __init__(self):
super().__init__()
self.w1 = nn.Parameter(torch.empty(4, 4))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
x = F.linear(x, self.w1)
return x
model_fp32 = LinearFunctional().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(
model_fp32, qconfig, (torch.randn(1, 1, 4, 4),))
# TODO(future PR): implement observer sharing to match FX
def test_cat_fp32(self):
class M(torch.nn.Module):
def forward(self, x):
x = torch.cat([x, x], dim=1)
return x
m = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(m, qconfig, (torch.randn(1, 1, 2, 2),))
class M(torch.nn.Module):
def forward(self, x):
x = torch.cat((x, x), dim=1)
return x
m = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(m, qconfig, (torch.randn(1, 1, 2, 2),))
def test_cat_int(self):
class M(torch.nn.Module):
def forward(self, x):
x = torch.cat([x, x], dim=1)
return x
qconfig = torch.quantization.default_qconfig
for dtype in (torch.int32, torch.int64):
m = M().eval()
self._test_auto_tracing(
m, qconfig, (torch.zeros(1, 1, 1, 1, dtype=dtype),),
# FX graph mode quant does not support this yet
do_fx_comparison=False)
def test_add(self):
class M(torch.nn.Module):
def forward(self, x):
x = x + x
x = x + 1.0
x = 1.0 + x
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(model_fp32, qconfig, (torch.randn(1, 1, 2, 2),))
def test_add_int32(self):
class M(torch.nn.Module):
def forward(self, x):
x = x + x
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(
model_fp32, qconfig, (torch.ones(1, 1, 2, 2, dtype=torch.int32),),
# FX graph mode quantization does not automatically detect
# tensor inputs in non-float dtypes.
do_fx_comparison=False)
def test_sub(self):
class M(torch.nn.Module):
def forward(self, x):
x = x - x
x = x - 1.0
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(model_fp32, qconfig, (torch.randn(1, 1, 2, 2),))
def test_mul(self):
class M(torch.nn.Module):
def forward(self, x):
x = x * x
x = x * 1.0
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(model_fp32, qconfig, (torch.randn(1, 1, 2, 2),))
def test_mul_int(self):
# TODO: make all the math functions work correctly for integer types
# TODO: make the same improvement in FX graph mode quant, if possible
class M(torch.nn.Module):
def forward(self, x):
x = x * x
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
for dtype in (torch.int32, torch.int64):
self._test_auto_tracing(
copy.deepcopy(model_fp32), qconfig,
(torch.ones(1, 1, 2, 2, dtype=dtype),),
# FX graph mode quant does not support this yet
do_fx_comparison=False)
def test_div(self):
class M(torch.nn.Module):
def forward(self, x):
x = x / x
x = x / 1.0
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(model_fp32, qconfig, (torch.randn(1, 1, 2, 2),))
def test_embedding(self):
# Note: this test is just testing that models with embeddings
# do not crash with a global qconfig defined. Embedding quantization
# is not actually happening in this prototype yet.
# TODO(future PR): fix this and update this code.
# test subclass
class EmbeddingSubclass(nn.Embedding):
pass
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.embedding = EmbeddingSubclass(1, 1)
def forward(self, x):
x = self.embedding(x)
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_dynamic_qconfig
self._test_auto_tracing(
model_fp32, qconfig, (torch.LongTensor([[0]]),),
fuse_modules=False)
# test regular embedding
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.embedding = nn.Embedding(1, 1)
def forward(self, x):
x = self.embedding(x)
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_dynamic_qconfig
self._test_auto_tracing(
model_fp32, qconfig, (torch.LongTensor([[0]]),),
fuse_modules=False)
@skipIfNoFBGEMM
class TestQuantizeDBR(QuantizeDBRTestCase):
def test_fusion(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.relu = torch.nn.ReLU()
self.child = nn.Sequential(
nn.Conv2d(1, 1, 1),
nn.ReLU(),
)
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
x = self.child(x)
return x
m = M().eval()
qconfig = torch.quantization.default_qconfig
mp = _quantize_dbr.prepare(m, {'': qconfig}, (torch.randn(1, 1, 1, 1),))
self.assertTrue(isinstance(mp.conv, nni.ConvReLU2d))
self.assertTrue(isinstance(mp.child[0], nni.ConvReLU2d))
def test_fusion2(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.bn = torch.nn.BatchNorm2d(1)
# self.conv2 = torch.nn.Conv2d(1, 1, 1)
self.relu = torch.nn.LeakyReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
m = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(m, qconfig, (torch.randn(1, 1, 2, 2),))
def test_fusion_called_multiple_times(self):
"""
Tests that fusion works if the modules to fuse get called multiple
times in the same forward.
Currently, observers are not shared between successive calls of
the same module.
TODO(future PR): make them shared (this is easy to detect)
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.relu = torch.nn.ReLU()
def forward(self, x):
for _ in range(2):
x = self.conv(x)
x = self.relu(x)
return x
m = M().eval()
qconfig = torch.quantization.default_qconfig
# FX graph mode quant doesn't support using a single module multiple times
# right now, so the FX comparison would crash; we can handle this case later
# if it is needed
self._test_auto_tracing(m, qconfig, (torch.randn(1, 1, 2, 2),), do_fx_comparison=False)
def test_fusion_functions(self):
class M(torch.nn.Module):
def forward(self, x):
x = x + x
x = torch.relu(x)
return x
m = M().eval()
qconfig = torch.quantization.default_qconfig
mp = _quantize_dbr.prepare(m, {'': qconfig}, (torch.randn(1, 1, 1, 1),))
self.assertTrue(
mp._auto_quant_state.idx_to_seen_q_op_infos[0].fusion_info is not None)
self.assertTrue(
mp._auto_quant_state.idx_to_seen_q_op_infos[1].fusion_info is not None)
# verify that the add relu is not observed
self.assertTrue(
'1' not in mp._auto_quant_state.tensor_id_to_observer)
# verify that the relu is observed
self.assertTrue(
'2' in mp._auto_quant_state.tensor_id_to_observer)
mp(torch.randn(1, 1, 1, 1))
mq = _quantize_dbr.convert(mp)
# verify that the add-relu got fused
mqt = torch.jit.trace(mq, (torch.randn(1, 1, 1, 1),))
FileCheck().check_count("quantized::add_relu", 1, exactly=True).run(
mqt.graph)
# TODO(future PR): use information about non-quantizeable ops during
# matching fusion patterns
def test_observers_not_touched_by_tracing(self):
"""
Verifies that running dynamic tracing does not change any data
stored in observers and fake quants.
"""
m = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
qconfig = torch.quantization.default_qconfig
mp = _quantize_dbr.prepare(m, {'': qconfig}, (torch.randn(1, 1, 1, 1),))
for _, mod in mp.named_modules():
if isinstance(mod, (ObserverBase, FakeQuantizeBase)):
scale, zp = mod.calculate_qparams()
# Assume that if scale is 1.0 and zp is 0, no calibration
# has happened.
self.assertTrue(torch.allclose(scale, torch.ones(1)))
self.assertTrue(torch.equal(zp, torch.zeros(1, dtype=torch.long)))
def test_multiple_modules(self):
m = nn.Sequential(
nn.Sequential(nn.Conv2d(1, 1, 1)),
nn.Sequential(nn.Conv2d(1, 1, 1)),
).eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(m, qconfig, (torch.randn(1, 1, 2, 2),))
def test_child_modules(self):
m = nn.Sequential(nn.Sequential(nn.Conv2d(1, 1, 1))).eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(m, qconfig, (torch.randn(1, 1, 2, 2),))
def test_conv_mod_qat(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x1 = self.conv(x)
return x1
m = M().eval()
qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')
self._test_auto_tracing(
copy.deepcopy(m), qconfig, (torch.randn(1, 1, 2, 2),))
# test backprop does not crash
inputs = torch.randn(1, 1, 1, 1)
inputs.requires_grad = True
mp = _quantize_dbr.prepare(m, {'': qconfig}, (inputs,))
output = mp(inputs)
labels = torch.randn(1, 1, 1, 1)
loss = (output - labels).sum()
loss.backward()
optim = torch.optim.SGD(mp.parameters(), lr=0.01)
optim.step()
def test_conv_functional_qat(self):
class M(torch.nn.Module):
def __init__(self, weight2d, bias2d):
super().__init__()
self.weight2d = torch.nn.Parameter(weight2d)
self.bias2d = torch.nn.Parameter(bias2d)
self.stride2d = (1, 1)
self.padding2d = (0, 0)
self.dilation2d = (1, 1)
self.groups = 1
def forward(self, x):
x = F.conv2d(
x, self.weight2d, self.bias2d, self.stride2d, self.padding2d,
self.dilation2d, self.groups)
return x
m = M(torch.randn(1, 1, 1, 1), torch.randn(1)).eval()
qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')
self._test_auto_tracing(m, qconfig, (torch.randn(1, 1, 2, 2),))
# test backprop does not crash
inputs = torch.randn(1, 1, 1, 1)
inputs.requires_grad = True
m = M(torch.randn(1, 1, 1, 1), torch.randn(1)).eval()
mp = _quantize_dbr.prepare(m, {'': qconfig}, (inputs,))
output = mp(inputs)
labels = torch.randn(1, 1, 1, 1)
loss = (output - labels).sum()
loss.backward()
optim = torch.optim.SGD(mp.parameters(), lr=0.01)
optim.step()
@unittest.skip('FX graph mode is using fake_quantize with PTQ, TODO verify')
def test_conv_unsupported_inplace_conv(self):
"""
Verifies that a quantizeable op which is executed in-place
is handled well
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv(x)
x = F.hardsigmoid(x, inplace=True)
x = self.conv2(x)
return x
m = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(m, qconfig, (torch.randn(1, 1, 2, 2),))
def test_conv_functional_dynamic_weights(self):
class M(torch.nn.Module):
def __init__(self, weight2d, bias2d):
super().__init__()
self.weight2d = torch.nn.Parameter(weight2d)
self.bias2d = torch.nn.Parameter(bias2d)
self.stride2d = (1, 1)
self.padding2d = (0, 0)
self.dilation2d = (1, 1)
self.groups = 1
def forward(self, x):
updated_weight = self.weight2d * x
x = F.conv2d(
x, updated_weight, self.bias2d, self.stride2d, self.padding2d,
self.dilation2d, self.groups)
return x
model_fp32 = M(torch.randn(1, 1, 1, 1), torch.randn(1)).eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(
model_fp32, qconfig, (torch.randn(1, 1, 2, 2),),
# FX implements this functionality instead of skipping it
do_fx_comparison=False,
# TODO enable scripting support for this
do_torchscript_checks=False)
def test_method(self):
class M(torch.nn.Module):
def forward(self, x):
x = x + x
x = torch.relu(x)
# x = x.relu()
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(model_fp32, qconfig, (torch.randn(1, 1, 2, 2),))
def test_module_created_during_forward(self):
"""Some BERT models have this pattern"""
class M(torch.nn.Module):
def forward(self, x):
x = nn.Softmax(dim=-1)(x)
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(
model_fp32, qconfig, (torch.randn(1, 1, 1, 1),),
# This syntax is not supported by FX or TorchScript
do_fx_comparison=False, do_torchscript_checks=False)
def test_module_returns_namedtuple(self):
# Some HF models have this pattern
NamedTuple = collections.namedtuple("NamedTuple", ["x0", "x1"])
class M1(torch.nn.Module):
def forward(self, x):
return NamedTuple(x, x)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.m1 = M1()
def forward(self, x):
m1 = self.m1(x)
return (m1.x0, m1.x1)
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(
model_fp32, qconfig, (torch.randn(1, 1, 1, 1),),
# TODO(future PR): add FX rewrite support
do_fx_comparison=False, do_torchscript_checks=False)
def test_child_module_does_not_return_tensor(self):
class M1(torch.nn.Module):
def forward(self, x):
pass
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.m1 = M1()
def forward(self, x):
self.m1(x)
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(
model_fp32, qconfig, (torch.randn(1, 1, 1, 1),),
# TODO(future PR): add FX rewrite support
do_fx_comparison=False, do_torchscript_checks=False)
def _get_non_traceable_module_class_test_model(self):
class M1(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv(x)
x = x + x
return x
class M2(torch.nn.Module):
def __init__(self):
super().__init__()
self.m1 = M1()
self.conv = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.m1(x)
x = self.conv(x)
x = x + x
return x
class M3(torch.nn.Module):
def __init__(self):
super().__init__()
self.m2 = M2()
def forward(self, x):
x = self.m2(x)
return x
return M3().eval(), M1, M2, M3
def test_prepare_custom_config_dict_non_traceable_module_class_child_leaf(self):
# if M1 is set as leaf, M2 and M3 should have auto_quant_state
qconfig_dict = {'': torch.quantization.default_qconfig}
m, M1, M2, M3 = self._get_non_traceable_module_class_test_model()
prepare_custom_config_dict = {
'non_traceable_module_class': [M1],
}
mp = _quantize_dbr.prepare(
m, qconfig_dict, (torch.randn(1, 1, 1, 1),),
prepare_custom_config_dict=prepare_custom_config_dict)
self.assertTrue(not hasattr(mp.m2.m1, '_auto_quant_state'))
self.assertTrue(hasattr(mp.m2, '_auto_quant_state'))
self.assertTrue(hasattr(mp, '_auto_quant_state'))
mq = _quantize_dbr.convert(mp)
self.assertTrue(isinstance(mq.m2.m1.conv, nn.Conv2d))
self.assertTrue(isinstance(mq.m2.conv, nnq.Conv2d))
mqt = torch.jit.trace(mq, (torch.randn(1, 1, 1, 1),))
# mqt.m2.m1 should not have quantized ops
FileCheck().check_count("aten::add", 1, exactly=True).run(mqt.m2.m1.graph)
FileCheck().check_count("quantized::add", 0, exactly=True).run(mqt.m2.m1.graph)
# mqt.m2 should have quantized ops
FileCheck().check_count("aten::add", 0, exactly=True).run(mqt.m2.graph)
FileCheck().check_count("quantized::add", 1, exactly=True).run(mqt.m2.graph)
# TODO(future PR): ensure modules in leaves do not get quantized
def test_prepare_custom_config_dict_non_traceable_module_class_mid_leaf(self):
# if M2 is set as leaf, only M1 should have auto_quant_state
qconfig_dict = {'': torch.quantization.default_qconfig}
m, M1, M2, M3 = self._get_non_traceable_module_class_test_model()
prepare_custom_config_dict = {
'non_traceable_module_class': [M2],
}
mp = _quantize_dbr.prepare(
m, qconfig_dict, (torch.randn(1, 1, 1, 1),),
prepare_custom_config_dict=prepare_custom_config_dict)
self.assertTrue(not hasattr(mp.m2.m1, '_auto_quant_state'))
self.assertTrue(not hasattr(mp.m2, '_auto_quant_state'))
self.assertTrue(hasattr(mp, '_auto_quant_state'))
mq = _quantize_dbr.convert(mp)
self.assertTrue(isinstance(mq.m2.m1.conv, nn.Conv2d))
self.assertTrue(isinstance(mq.m2.conv, nn.Conv2d))
mqt = torch.jit.trace(mq, (torch.randn(1, 1, 1, 1),))
# mqt.m2 and all children should not have quantized ops
FileCheck().check_count("aten::add", 1, exactly=True).run(mqt.m2.m1.graph)
FileCheck().check_count("quantized::add", 0, exactly=True).run(mqt.m2.m1.graph)
FileCheck().check_count("aten::add", 1, exactly=True).run(mqt.m2.graph)
FileCheck().check_count("quantized::add", 0, exactly=True).run(mqt.m2.graph)
def test_module_list(self):
class Child(torch.nn.Module):
def forward(self, x):
return x + x
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.module_list = torch.nn.ModuleList([
Child(),
])
def forward(self, x):
for module in self.module_list:
# TODO(future PR): we should see if there is a better
# solution other than asking users to do this
if not isinstance(module, AutoQuantizationState):
x = module(x)
return x
m = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(
m, qconfig, (torch.randn(8, 1, 1, 1),),
# TODO(future PR): enable scripting for ModuleList + DBR
do_fx_comparison=True, do_torchscript_checks=False)
@unittest.skip('TODO build this')
def test_module_input_types(self):
class M(torch.nn.Module):
def forward(self, x=None, y=None):
print('x', x)
print('y', y)
assert x is not None and y is not None
return (x, y)
model_fp32 = M().eval()
example_inputs = {'y': torch.randn(1), 'x': torch.randn(1)}
ExampleInputsTupleCtr = collections.namedtuple('ExampleInputs', example_inputs)
example_inputs_tuple = ExampleInputsTupleCtr(**example_inputs)
ms = torch.jit.trace(model_fp32, example_inputs_tuple)
return
qconfig = torch.quantization.default_qconfig
# dict
kwargs = {'x': torch.randn(1, 1, 2, 2)}
self._test_auto_tracing(model_fp32, qconfig, (), kwargs)
def test_module_return_types(self):
class M1(torch.nn.Module):
def forward(self, x):
return x, x
class M2(torch.nn.Module):
def __init__(self):
super().__init__()
self.m1 = M1()
def forward(self, x):
x1, x2 = self.m1(x)
return x1
model_fp32 = M2().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(model_fp32, qconfig, (torch.randn(1, 1, 2, 2),))
def test_inplace_unquantizeable_op(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 1, 1)
self.silu = nn.SiLU(inplace=True)
# self.silu = nn.SiLU()
self.conv2 = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = self.silu(x)
x = self.conv2(x)
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(model_fp32, qconfig, (torch.randn(1, 1, 2, 2),))
def test_vovnet_sequential(self):
# We cannot quantize SequentialAppendList directly because
# AutoQuantizationStateModuleDict would appear in self.items.
# However, we can wrap it and quantize the wrapper.
class SequentialAppendList(nn.Sequential):
def __init__(self, *args):
super(SequentialAppendList, self).__init__(*args)
def forward(self, x: torch.Tensor) -> torch.Tensor:
concat_list = []
for i, module in enumerate(self):
if i == 0:
concat_list.append(module(x))
else:
concat_list.append(module(concat_list[-1]))
x = torch.cat(concat_list, dim=1)
return x
class Wrapper(nn.Module):
def __init__(self, *args):
super().__init__()
self.append_list = SequentialAppendList(*args)
def forward(self, x):
x = self.append_list(x)
return x
m = Wrapper(torch.nn.Conv2d(1, 1, 1)).eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(m, qconfig, (torch.randn(1, 1, 1, 1),))
def test_unsupported_ops(self):
class M(torch.nn.Module):
def forward(self, x):
x = F.tanhshrink(x)
x = x + x
x = F.tanhshrink(x)
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(model_fp32, qconfig, (torch.randn(1, 1, 2, 2),))
def test_unsupported_ops_recorded(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv2d = nn.Conv2d(1, 1, 1)
self.softshrink = nn.Softshrink()
def forward(self, x):
# supported
x = self.conv2d(x)
x = x + x
# not supported
x = self.softshrink(x)
x = F.tanhshrink(x)
return x
m = M().eval()
qconfig_dict = {'': torch.quantization.default_qconfig}
mp = _quantize_dbr.prepare(m, qconfig_dict, (torch.randn(1, 1, 1, 1),))
self.assertTrue(len(mp._auto_quant_state.seen_nonq_op_infos) == 2)
self.assertTrue(mp._auto_quant_state.seen_nonq_op_infos[0].type == nn.Softshrink)
self.assertTrue(mp._auto_quant_state.seen_nonq_op_infos[1].type == F.tanhshrink)
def test_unknown_op_after_quantized(self):
class M(torch.nn.Module):
def forward(self, x):
x = x + x
std = x.std()
return std
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(
model_fp32, qconfig, (torch.randn(1, 1, 2, 2),),
fuse_modules=False)
def test_module_calls_items(self):
# We cannot quantize M1 directly because
# AutoQuantizationStateModuleDict would appear in self.items.
# However, we can wrap it and quantize the wrapper.
class M1(torch.nn.ModuleDict):
def __init__(self):
super().__init__()
for i in range(2):
layer = nn.ReLU()
self.add_module("layer_" + str(i), layer)
def forward(self, x):
layers = [x]
for name, layer in self.items():
layers.append(layer(x))
return torch.cat(layers, dim=1)
class M2(torch.nn.Module):
def __init__(self):
super().__init__()
self.m1 = M1()
def forward(self, x):
x = self.m1(x)
return x
model_fp32 = M2().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(
model_fp32, qconfig, (torch.randn(1, 1, 2, 2),),
# TODO(future PR): implement observer sharing for torch.cat
# in DBR quant, to ensure that numerical behavior matches
do_fx_comparison=False)
def test_subclass_of_quantizeable_module(self):
"""
If a user creates a subclass of nn.BatchNorm2d, that subclass
should not be quantized unless the user defines a custom module.
"""
class BN2d(torch.nn.BatchNorm2d):
pass
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.bn = BN2d(1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.conv2(x)
return x
m = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(
m, qconfig, (torch.randn(1, 1, 2, 2),),
# the module is not symbolically traceable
do_fx_comparison=False)
# TODO(future PR): move into a separate test file
def test_numeric_suite(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
self.conv2 = nn.Sequential(nn.Conv2d(1, 1, 1))
def forward(self, x):
x = self.conv(x)
x = self.conv2(x)
x = x + x
return x
m = M().eval()
qconfig = torch.quantization.default_qconfig
example_args = (torch.randn(1, 1, 2, 2),)
mp = _quantize_dbr.prepare(m, {'': qconfig}, example_args)
out_p = mp(*example_args)
mq = _quantize_dbr.convert(copy.deepcopy(mp))
out_q = mq(*example_args)
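# attach loggers to both models and re-run them to collect per-op activations for comparison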
mp, mq = ns.add_loggers('mp', mp, 'mq', mq)
mp(*example_args)
mq(*example_args)
act_comparison = ns.extract_logger_info(mp, mq, 'mq')
ns_fx.extend_logger_results_with_comparison(
act_comparison, 'mp', 'mq', torch.ao.ns.fx.utils.compute_sqnr,
'sqnr')
# TODO(future PR): enforce validity of the result above, using
# NS for FX utils. Will need some refactoring.
# TODO(future PR): consider adding a util for below
to_print = []
for idx, (layer_name, v) in enumerate(act_comparison.items()):
to_print.append([
layer_name,
v['node_output']['mq'][0]['fqn'],
v['node_output']['mq'][0]['ref_node_target_type'],
v['node_output']['mq'][0]['sqnr']])
def test_qconfig_dict_global(self):
"""
Verifies that the '' option of qconfig_dict works
"""
# regular case
m = nn.Sequential(nn.Conv2d(1, 1, 1))
qconfig_dict = {'': torch.quantization.default_qconfig}
example_args = (torch.randn(1, 1, 1, 1),)
mp = _quantize_dbr.prepare(m, qconfig_dict, example_args)
mp(*example_args)
mq = _quantize_dbr.convert(mp)
mq(*example_args)
self.assertTrue(isinstance(mq[0], nnq.Conv2d))
# quantization turned off
m = nn.Sequential(nn.Conv2d(1, 1, 1))
qconfig_dict = {'': None}
example_args = (torch.randn(1, 1, 1, 1),)
mp = _quantize_dbr.prepare(m, qconfig_dict, example_args)
mp(*example_args)
mq = _quantize_dbr.convert(mp)
mq(*example_args)
self.assertTrue(isinstance(mq[0], nn.Conv2d))
def test_qconfig_dict_object_type_module(self):
"""
Verifies that the 'object_type' option of qconfig_dict works
on module types.
"""
m = nn.Sequential(
nn.Conv2d(1, 1, 1),
nn.Hardswish(),
nn.Conv2d(1, 1, 1),
)
qconfig_dict = {
'': torch.quantization.default_qconfig,
'object_type': [
(nn.Conv2d, torch.quantization.default_qconfig),
(nn.Hardswish, None),
],
}
example_args = (torch.randn(1, 1, 1, 1),)
mp = _quantize_dbr.prepare(m, qconfig_dict, example_args)
mp(*example_args)
mq = _quantize_dbr.convert(mp)
mq(*example_args)
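# both convs are swapped to quantized modules; Hardswish keeps its fp32 form (qconfig is None)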
self.assertTrue(isinstance(mq[0], nnq.Conv2d))
self.assertTrue(isinstance(mq[1], nn.Hardswish))
self.assertTrue(isinstance(mq[2], nnq.Conv2d))
def test_qconfig_dict_object_type_function(self):
"""
Verifies that the 'object_type' option of qconfig_dict works
on function types.
"""
class M(nn.Module):
def forward(self, x):
x = x + x
x = x * x
return x
m = M()
qconfig_dict = {
'': torch.quantization.default_qconfig,
'object_type': [
(torch.add, None),
],
}
example_args = (torch.randn(1, 1, 1, 1),)
mp = _quantize_dbr.prepare(m, qconfig_dict, example_args)
mp(*example_args)
mq = _quantize_dbr.convert(mp)
mq(*example_args)
rewritten = mq.rewrite_for_scripting()
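# add keeps its fp32 form (qconfig is None for torch.add), while mul is swapped to the quantized op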
expected_occurrence = {
NodeSpec.call_function(torch.add): 1,
NodeSpec.call_function(toq.add): 0,
NodeSpec.call_function(toq.mul): 1,
}
self.checkGraphModuleNodes(
rewritten, expected_node_occurrence=expected_occurrence)
def test_qconfig_dict_object_type_method(self):
"""
Verifies that the 'object_type' option of qconfig_dict works
on method types.
"""
class M(nn.Module):
def forward(self, x):
x = x.add(x)
x = x.mul(x)
return x
m = M()
qconfig_dict = {
'': torch.quantization.default_qconfig,
'object_type': [
(torch.Tensor.add, None),
],
}
example_args = (torch.randn(1, 1, 1, 1),)
mp = _quantize_dbr.prepare(m, qconfig_dict, example_args)
mp(*example_args)
mq = _quantize_dbr.convert(mp)
mq(*example_args)
rewritten = mq.rewrite_for_scripting()
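# Tensor.add keeps its fp32 form (qconfig is None), while mul is swapped to the quantized op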
expected_occurrence = {
NodeSpec.call_function(torch.add): 1,
NodeSpec.call_function(toq.add): 0,
NodeSpec.call_function(toq.mul): 1,
}
self.checkGraphModuleNodes(
rewritten, expected_node_occurrence=expected_occurrence)
def test_qconfig_dict_object_type_function_global_none(self):
"""
Verifies that the 'object_type' option of qconfig_dict works
on function types when global qconfig is None.
"""
class M(nn.Module):
def forward(self, x):
x = x + x
return x
m = M()
qconfig_dict = {
'': None,
'object_type': [
(torch.add, torch.quantization.default_qconfig),
],
}
example_args = (torch.randn(1, 1, 1, 1),)
mp = _quantize_dbr.prepare(m, qconfig_dict, example_args)
mp(*example_args)
mq = _quantize_dbr.convert(mp)
mq(*example_args)
rewritten = mq.rewrite_for_scripting()
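# the object_type override quantizes add even though the global qconfig is None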
expected_occurrence = {
NodeSpec.call_function(torch.add): 0,
NodeSpec.call_function(toq.add): 1,
}
self.checkGraphModuleNodes(
rewritten, expected_node_occurrence=expected_occurrence)
def test_qconfig_dict_module_name(self):
"""
Verifies that the 'module_name' option of qconfig_dict works
on module types.
"""
m = nn.Sequential(
nn.Sequential(
nn.Conv2d(1, 1, 1),
),
nn.Conv2d(1, 1, 1),
nn.Sequential(
nn.Conv2d(1, 1, 1),
nn.Conv2d(1, 1, 1),
),
)
qconfig_dict = {
'': torch.quantization.default_qconfig,
'module_name': [
('0', torch.quantization.default_qconfig),
('1', None),
('2.0', None),
],
}
example_args = (torch.randn(1, 1, 1, 1),)
mp = _quantize_dbr.prepare(m, qconfig_dict, example_args)
mp(*example_args)
mq = _quantize_dbr.convert(mp)
mq(*example_args)
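# modules '1' and '2.0' were skipped via None qconfigs; '0' and '2.1' are quantized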
self.assertTrue(isinstance(mq[0][0], nnq.Conv2d))
self.assertTrue(isinstance(mq[1], nn.Conv2d))
self.assertTrue(isinstance(mq[2][0], nn.Conv2d))
self.assertTrue(isinstance(mq[2][1], nnq.Conv2d))
def test_qconfig_dict_unsupported_does_not_crash_when_empty(self):
"""
Verifies that the not-yet-implemented keys of qconfig_dict only
cause a crash when they have non-empty values.
"""
m = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
qconfig_dict = {'': torch.quantization.default_qconfig}
example_inputs = (torch.randn(1, 1, 1, 1),)
# this modifies qconfig_dict inplace to include more keys
mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
# need this line to not crash
mp = _quantize_dbr.prepare(m, qconfig_dict, example_inputs)
def _test_serialization(self, model, input_shape):
example_inputs = (torch.randn(*input_shape),)
qconfig_dict = {'': torch.quantization.default_qconfig}
m = model().eval()
m = _quantize_dbr.prepare(m, qconfig_dict, example_inputs)
# calibrate, to populate statistics
m(example_inputs[0])
m = _quantize_dbr.convert(m)
qconfig_dict = {'': torch.quantization.default_qconfig}
m2 = model().eval()
m2 = _quantize_dbr.prepare(m2, qconfig_dict, example_inputs)
# do not calibrate, so that the quantization parameters are populated differently and
# the results differ at every node, including the quantize_per_tensor node
m2 = _quantize_dbr.convert(m2)
# Results should be different without loading from serialized state_dict
expected = m(example_inputs[0])
actual = m2(example_inputs[0])
self.assertFalse(_allclose(expected, actual))
# Results should be the same after loading from serialized state_dict
with tempfile.NamedTemporaryFile(delete=False) as f:
torch.save(m.state_dict(), f)
with open(f.name, 'rb') as f2:
loaded_state_dict = torch.load(f2)
m2.load_state_dict(loaded_state_dict)
expected = m(example_inputs[0])
actual = m2(example_inputs[0])
self.assertTrue(_allclose(expected, actual))
def test_serialization(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.linear = torch.nn.Linear(1, 1)
def forward(self, x):
x1 = self.conv(x)
x2 = self.linear(x1)
return x2
input_shape = (1, 1, 1, 1)
self._test_serialization(M, input_shape)
def test_serialization_functional(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
# conv
self.weight2d = torch.nn.Parameter(torch.randn(1, 1, 1, 1))
self.bias2d = torch.nn.Parameter(torch.randn(1))
self.stride2d = (1, 1)
self.padding2d = (0, 0)
self.dilation2d = (1, 1)
self.groups = 1
# linear
self.w1 = nn.Parameter(torch.empty(1, 1))
self.b1 = nn.Parameter(torch.ones(1))
torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5))
def forward(self, x):
updated_weight = self.weight2d * x
x = F.conv2d(
x, updated_weight, self.bias2d, self.stride2d, self.padding2d,
self.dilation2d, self.groups)
# TODO: Investigate why serialization does not work with functional linear
# x = F.linear(x, self.w1, self.b1)
return x
input_shape = (1, 1, 1, 1)
self._test_serialization(M, input_shape)
def test_jit_tracing_removes_aliases(self):
m = nn.Sequential(
nn.Conv2d(1, 1, 1),
nn.Sequential(
nn.Conv2d(1, 1, 1),
),
)
qconfig_dict = {'': torch.quantization.default_qconfig}
example_inputs = (torch.randn(1, 1, 1, 1),)
mp = _quantize_dbr.prepare(m, qconfig_dict, example_inputs)
mq = _quantize_dbr.convert(mp)
mqs = torch.jit.trace(mq, example_inputs)
FileCheck().check_count("aten::alias", 5, exactly=True).run(
mqs.inlined_graph)
res1 = mqs(*example_inputs)
mqs = remove_redundant_aliases(mqs)
res2 = mqs(*example_inputs)
self.assertTrue(torch.allclose(res1, res2))
# TODO(future PR): figure out why aliasing still appears in the inlined
# graph, and if that is fixed then just check the inlined graph.
for graph in (
mqs.graph,
getattr(mqs, '1').graph,
getattr(getattr(mqs, '1'), '0').graph,
):
FileCheck().check_count("aten::alias", 0, exactly=True).run(graph)
def test_conv_int32_reference_model(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
int32_obs_ctr = MinMaxObserver.with_args(dtype=torch.qint32)
int32_qconfig = QConfig(weight=int32_obs_ctr, activation=int32_obs_ctr)
qconfig_dict = {'': int32_qconfig}
mp = _quantize_dbr.prepare(m, qconfig_dict, (torch.randn(1, 1, 1, 1),))
mp(torch.randn(1, 1, 1, 1))
mq = _quantize_dbr.convert(mp)
res = mq(torch.randn(1, 1, 1, 1))
mqt = torch.jit.trace(mq, (torch.randn(1, 1, 1, 1),))
# verify the right ops are present:
# x0 -> quant -> (dequant -> conv_ref -> quant) -> dequant -> x1
FileCheck()\
.check_count("aten::quantize_per_tensor", 2, exactly=True)\
.run(mqt.graph)
FileCheck()\
.check_count("aten::dequantize", 2, exactly=True)\
.run(mqt.graph)
@skipIfNoFBGEMM
class TestQuantizeDBRMultipleOps(QuantizeDBRTestCase):
"""
Tests that DBR quantization covers interactions between multiple ops.
Most of these tests were added when the code was an early prototype
and were one-off test cases for patterns which happened to break
the code on various models at the time of writing. A lot of these
can probably be removed in the future as they are replaced by more
systematic individual and fusion tests.
"""
def test_dropout_conv(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.dropout = nn.Dropout()
self.conv = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
# dropout can sometimes be inplace
x1 = self.dropout(x)
x1 = self.conv(x)
return x1
m = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(m, qconfig, (torch.randn(1, 1, 2, 2),))
def test_conv_flatten_linear(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.linear = torch.nn.Linear(1, 1)
def forward(self, x):
x1 = self.conv(x)
# TODO(future PR): unbreak this
# x1 = torch.nn.functional.adaptive_avg_pool2d(x, (1, 1))
x1 = torch.nn.functional.adaptive_avg_pool2d(x1, (1, 1))
x2 = torch.flatten(x1, 1)
x3 = self.linear(x2)
return x3
m = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(m, qconfig, (torch.randn(1, 1, 1, 1),))
def test_conv_add(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x1 = self.conv(x)
x2 = x1 + x
return x2
m = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(m, qconfig, (torch.randn(1, 1, 2, 2),))
def test_conv_scalar_add(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv(x)
x = x + 1.0
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(model_fp32, qconfig, (torch.randn(1, 1, 2, 2),))
def test_conv_relu_add(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.relu = torch.nn.ReLU()
def forward(self, x):
x1 = self.conv(x)
x2 = self.relu(x1)
x3 = x1 + x
return x3
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(model_fp32, qconfig, (torch.randn(1, 1, 2, 2),))
def test_linear_torch_relu(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.u1 = nn.Linear(1, 1)
self.v1 = nn.Linear(1, 1)
self.u2 = nn.Linear(1, 1)
self.v2 = nn.Linear(1, 1)
self.w = nn.Linear(1, 1)
def forward(self, x):
x = self.w(x)
x = x + torch.relu(self.v1(torch.relu(self.u1(x))))
return x + torch.relu(self.v2(torch.relu(self.u2(x))))
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(model_fp32, qconfig, (torch.randn(1, 1, 1, 1),))
def test_gelu_linear(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.gelu = torch.nn.GELU()
self.linear = torch.nn.Linear(1, 1)
def forward(self, x):
x = self.linear(x)
x = self.gelu(x)
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(model_fp32, qconfig, (torch.randn(1, 1, 1, 1),))
def test_dropout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.dropout = nn.Dropout()
self.linear = torch.nn.Linear(1, 1)
self.linear2 = torch.nn.Linear(1, 1)
def forward(self, x):
x = self.linear(x)
x = self.dropout(x)
x = self.linear2(x)
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(model_fp32, qconfig, (torch.randn(1, 1, 1, 1),))
def test_module_then_add(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(1, 1)
def forward(self, x):
x = self.linear(x)
x = x + 1.0
x = x + 1.0
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(model_fp32, qconfig, (torch.randn(1, 1, 1, 1),))
def test_add_linear(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(1, 1)
def forward(self, x):
x = x + x
x = self.linear(x)
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(model_fp32, qconfig, (torch.randn(1, 1, 1, 1),))
def test_inplace_add(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.embedding1 = nn.Embedding(1, 1)
self.embedding2 = nn.Embedding(1, 1)
self.layernorm = nn.LayerNorm(1)
def forward(self, x):
x1 = self.embedding1(x)
x1 += self.embedding2(x)
x2 = self.layernorm(x1)
return x
model_fp32 = M().eval()
qconfig = torch.quantization.default_qconfig
prepare_custom_config_dict = {
'output_dtypes': (torch.int64,),
}
self._test_auto_tracing(
model_fp32, qconfig, (torch.LongTensor([[0]]),),
fuse_modules=False,
dbr_prepare_custom_config_dict=prepare_custom_config_dict)
def test_lstm(self):
# building block of torchbenchmark/tts_angular
class LSTMWithProjection(nn.Module):
def __init__(self, input_size, hidden_size, proj_size):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.proj_size = proj_size
self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True)
self.linear = nn.Linear(hidden_size, proj_size, bias=False)
def forward(self, x):
self.lstm.flatten_parameters()
o, (_, _) = self.lstm(x)
return self.linear(o)
m = LSTMWithProjection(1, 1, 1).eval()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(
m, qconfig, (torch.randn(1, 1, 1),),
# the module is not symbolically traceable
do_fx_comparison=False)
@skipIfNoFBGEMM
class TestQuantizeDBRModels(QuantizeDBRTestCase):
@skip_if_no_torchvision
def test_mobilenet_v2(self):
import torchvision
m = torchvision.models.__dict__['mobilenet_v2'](pretrained=False).eval().float()
qconfig = torch.quantization.default_qconfig
self._test_auto_tracing(
m, qconfig, (torch.randn(1, 3, 224, 224),),
# TODO fix this (reason TBD)
do_torchscript_checks=False)
@skip_if_no_torchvision
def test_mobilenet_v2_removes_aliases(self):
import torchvision
m = torchvision.models.__dict__['mobilenet_v2'](pretrained=False)\
.eval().float()
qconfig_dict = {'': torch.quantization.default_qconfig}
example_inputs = (torch.randn(1, 3, 224, 224),)
mp = _quantize_dbr.prepare(m, qconfig_dict, example_inputs)
mq = _quantize_dbr.convert(mp)
mqs = torch.jit.trace(mq, example_inputs)
res1 = mqs(*example_inputs)
mqs = remove_redundant_aliases(mqs)
res2 = mqs(*example_inputs)
self.assertTrue(torch.allclose(res1, res2))
|
pytorch-master
|
test/quantization/dbr/test_quantize_dbr.py
|
# Owner(s): ["oncall: quantization"]
from .common import AOMigrationTestCase
class TestAOMigrationQuantizationFx(AOMigrationTestCase):
def test_package_import_quantize_fx(self):
self._test_package_import('quantize_fx')
def test_function_import_quantize_fx(self):
function_list = [
'_check_is_graph_module',
'_swap_ff_with_fxff',
'_fuse_fx',
'Scope',
'ScopeContextManager',
'QuantizationTracer',
'_prepare_fx',
'_prepare_standalone_module_fx',
'fuse_fx',
'prepare_fx',
'prepare_qat_fx',
'_convert_fx',
'convert_fx',
'_convert_standalone_module_fx',
]
self._test_function_import('quantize_fx', function_list)
def test_package_import_fx(self):
self._test_package_import('fx')
def test_function_import_fx(self):
function_list = [
'prepare',
'convert',
'fuse',
]
self._test_function_import('fx', function_list)
def test_package_import_fx_graph_module(self):
self._test_package_import('fx.graph_module')
def test_function_import_fx_graph_module(self):
function_list = [
'FusedGraphModule',
'ObservedGraphModule',
'is_observed_module',
'ObservedStandaloneGraphModule',
'is_observed_standalone_module',
'QuantizedGraphModule'
]
self._test_function_import('fx.graph_module', function_list)
def test_package_import_fx_pattern_utils(self):
self._test_package_import('fx.pattern_utils')
def test_function_import_fx_pattern_utils(self):
function_list = [
'QuantizeHandler',
'register_fusion_pattern',
'get_default_fusion_patterns',
'register_quant_pattern',
'get_default_quant_patterns',
'get_default_output_activation_post_process_map'
]
self._test_function_import('fx.pattern_utils', function_list)
def test_package_import_fx_equalize(self):
self._test_package_import('fx._equalize')
def test_function_import_fx_equalize(self):
function_list = [
'reshape_scale',
'_InputEqualizationObserver',
'_WeightEqualizationObserver',
'calculate_equalization_scale',
'EqualizationQConfig',
'input_equalization_observer',
'weight_equalization_observer',
'default_equalization_qconfig',
'fused_module_supports_equalization',
'nn_module_supports_equalization',
'node_supports_equalization',
'is_equalization_observer',
'get_op_node_and_weight_eq_obs',
'maybe_get_weight_eq_obs_node',
'maybe_get_next_input_eq_obs',
'maybe_get_next_equalization_scale',
'scale_input_observer',
'scale_weight_node',
'scale_weight_functional',
'clear_weight_quant_obs_node',
'remove_node',
'update_obs_for_equalization',
'convert_eq_obs',
'_convert_equalization_ref',
'get_layer_sqnr_dict',
'get_equalization_qconfig_dict'
]
self._test_function_import('fx._equalize', function_list)
def test_package_import_fx_quantization_patterns(self):
self._test_package_import('fx.quantization_patterns')
def test_function_import_fx_quantization_patterns(self):
function_list = [
'QuantizeHandler',
'BinaryOpQuantizeHandler',
'CatQuantizeHandler',
'ConvReluQuantizeHandler',
'LinearReLUQuantizeHandler',
'BatchNormQuantizeHandler',
'EmbeddingQuantizeHandler',
'RNNDynamicQuantizeHandler',
'DefaultNodeQuantizeHandler',
'FixedQParamsOpQuantizeHandler',
'CopyNodeQuantizeHandler',
'CustomModuleQuantizeHandler',
'GeneralTensorShapeOpQuantizeHandler',
'StandaloneModuleQuantizeHandler'
]
self._test_function_import('fx.quantization_patterns', function_list)
def test_package_import_fx_match_utils(self):
self._test_package_import('fx.match_utils')
def test_function_import_fx_match_utils(self):
function_list = [
'MatchResult',
'MatchAllNode',
'is_match',
'find_matches'
]
self._test_function_import('fx.match_utils', function_list)
def test_package_import_fx_prepare(self):
self._test_package_import('fx.prepare')
def test_function_import_fx_prepare(self):
function_list = [
'prepare'
]
self._test_function_import('fx.prepare', function_list)
def test_package_import_fx_convert(self):
self._test_package_import('fx.convert')
def test_function_import_fx_convert(self):
function_list = [
'convert'
]
self._test_function_import('fx.convert', function_list)
def test_package_import_fx_fuse(self):
self._test_package_import('fx.fuse')
def test_function_import_fx_fuse(self):
function_list = ['fuse']
self._test_function_import('fx.fuse', function_list)
def test_package_import_fx_fusion_patterns(self):
self._test_package_import('fx.fusion_patterns')
def test_function_import_fx_fusion_patterns(self):
function_list = [
'FuseHandler',
'DefaultFuseHandler'
]
self._test_function_import('fx.fusion_patterns', function_list)
# we removed matching test for torch.quantization.fx.quantization_types
# old: torch.quantization.fx.quantization_types
# new: torch.ao.quantization.quantization_types
# both are valid, but we'll deprecate the old path in the future
def test_package_import_fx_utils(self):
self._test_package_import('fx.utils')
def test_function_import_fx_utils(self):
function_list = [
'graph_pretty_str',
'get_per_tensor_qparams',
'quantize_node',
'get_custom_module_class_keys',
'get_linear_prepack_op_for_dtype',
'get_qconv_prepack_op',
'get_qconv_op',
'get_new_attr_name_with_prefix',
'graph_module_from_producer_nodes',
'assert_and_get_unique_device',
'create_getattr_from_value',
'create_qparam_nodes',
'all_node_args_have_no_tensors',
'node_return_type_is_int',
'get_non_observable_arg_indexes_and_types',
'is_get_tensor_info_node',
'maybe_get_next_module'
]
self._test_function_import('fx.utils', function_list)
|
pytorch-master
|
test/quantization/ao_migration/test_quantization_fx.py
|
pytorch-master
|
test/quantization/ao_migration/__init__.py
|
|
# Owner(s): ["oncall: quantization"]
from .common import AOMigrationTestCase
class TestAOMigrationQuantization(AOMigrationTestCase):
def test_package_import_quantize(self):
self._test_package_import('quantize')
def test_function_import_quantize(self):
function_list = [
'_convert',
'_observer_forward_hook',
'_propagate_qconfig_helper',
'_remove_activation_post_process',
'_remove_qconfig',
'add_observer_',
'add_quant_dequant',
'convert',
'get_observer_dict',
'get_unique_devices_',
'is_activation_post_process',
'prepare',
'prepare_qat',
'propagate_qconfig_',
'quantize',
'quantize_dynamic',
'quantize_qat',
'register_activation_post_process_hook',
'swap_module',
]
self._test_function_import('quantize', function_list)
def test_package_import_stubs(self):
self._test_package_import('stubs')
def test_function_import_stubs(self):
function_list = [
'QuantStub',
'DeQuantStub',
'QuantWrapper',
]
self._test_function_import('stubs', function_list)
def test_package_import_quantize_jit(self):
self._test_package_import('quantize_jit')
def test_function_import_quantize_jit(self):
function_list = [
'_check_is_script_module',
'_check_forward_method',
'script_qconfig',
'script_qconfig_dict',
'fuse_conv_bn_jit',
'_prepare_jit',
'prepare_jit',
'prepare_dynamic_jit',
'_convert_jit',
'convert_jit',
'convert_dynamic_jit',
'_quantize_jit',
'quantize_jit',
'quantize_dynamic_jit',
]
self._test_function_import('quantize_jit', function_list)
def test_package_import_fake_quantize(self):
self._test_package_import('fake_quantize')
def test_function_import_fake_quantize(self):
function_list = [
'_is_per_channel',
'_is_per_tensor',
'_is_symmetric_quant',
'FakeQuantizeBase',
'FakeQuantize',
'FixedQParamsFakeQuantize',
'FusedMovingAvgObsFakeQuantize',
'default_fake_quant',
'default_weight_fake_quant',
'default_fixed_qparams_range_neg1to1_fake_quant',
'default_fixed_qparams_range_0to1_fake_quant',
'default_per_channel_weight_fake_quant',
'default_histogram_fake_quant',
'default_fused_act_fake_quant',
'default_fused_wt_fake_quant',
'default_fused_per_channel_wt_fake_quant',
'_is_fake_quant_script_module',
'disable_fake_quant',
'enable_fake_quant',
'disable_observer',
'enable_observer',
]
self._test_function_import('fake_quantize', function_list)
|
pytorch-master
|
test/quantization/ao_migration/test_ao_migration.py
|
# Owner(s): ["oncall: quantization"]
from .common import AOMigrationTestCase
class TestAOMigrationQuantization(AOMigrationTestCase):
r"""Modules and functions related to the
`torch/quantization` migration to `torch/ao/quantization`.
"""
def test_package_import_quantize(self):
self._test_package_import('quantize')
def test_function_import_quantize(self):
function_list = [
'_convert',
'_observer_forward_hook',
'_propagate_qconfig_helper',
'_remove_activation_post_process',
'_remove_qconfig',
'add_observer_',
'add_quant_dequant',
'convert',
'get_observer_dict',
'get_unique_devices_',
'is_activation_post_process',
'prepare',
'prepare_qat',
'propagate_qconfig_',
'quantize',
'quantize_dynamic',
'quantize_qat',
'register_activation_post_process_hook',
'swap_module',
]
self._test_function_import('quantize', function_list)
def test_package_import_stubs(self):
self._test_package_import('stubs')
def test_function_import_stubs(self):
function_list = [
'QuantStub',
'DeQuantStub',
'QuantWrapper',
]
self._test_function_import('stubs', function_list)
def test_package_import_quantize_jit(self):
self._test_package_import('quantize_jit')
def test_function_import_quantize_jit(self):
function_list = [
'_check_is_script_module',
'_check_forward_method',
'script_qconfig',
'script_qconfig_dict',
'fuse_conv_bn_jit',
'_prepare_jit',
'prepare_jit',
'prepare_dynamic_jit',
'_convert_jit',
'convert_jit',
'convert_dynamic_jit',
'_quantize_jit',
'quantize_jit',
'quantize_dynamic_jit',
]
self._test_function_import('quantize_jit', function_list)
def test_package_import_fake_quantize(self):
self._test_package_import('fake_quantize')
def test_function_import_fake_quantize(self):
function_list = [
'_is_per_channel',
'_is_per_tensor',
'_is_symmetric_quant',
'FakeQuantizeBase',
'FakeQuantize',
'FixedQParamsFakeQuantize',
'FusedMovingAvgObsFakeQuantize',
'default_fake_quant',
'default_weight_fake_quant',
'default_fixed_qparams_range_neg1to1_fake_quant',
'default_fixed_qparams_range_0to1_fake_quant',
'default_per_channel_weight_fake_quant',
'default_histogram_fake_quant',
'default_fused_act_fake_quant',
'default_fused_wt_fake_quant',
'default_fused_per_channel_wt_fake_quant',
'_is_fake_quant_script_module',
'disable_fake_quant',
'enable_fake_quant',
'disable_observer',
'enable_observer',
]
self._test_function_import('fake_quantize', function_list)
def test_package_import_fuse_modules(self):
self._test_package_import('fuse_modules')
def test_function_import_fuse_modules(self):
function_list = [
'_fuse_modules',
'_get_module',
'_set_module',
'fuse_conv_bn',
'fuse_conv_bn_relu',
'fuse_known_modules',
'fuse_modules',
'get_fuser_method',
]
self._test_function_import('fuse_modules', function_list)
def test_package_import_quant_type(self):
self._test_package_import('quant_type')
def test_function_import_quant_type(self):
function_list = [
'QuantType',
'quant_type_to_str',
]
self._test_function_import('quant_type', function_list)
def test_package_import_observer(self):
self._test_package_import('observer')
def test_function_import_observer(self):
function_list = [
"_PartialWrapper",
"_with_args",
"_with_callable_args",
"ABC",
"ObserverBase",
"_ObserverBase",
"MinMaxObserver",
"MovingAverageMinMaxObserver",
"PerChannelMinMaxObserver",
"MovingAveragePerChannelMinMaxObserver",
"HistogramObserver",
"PlaceholderObserver",
"RecordingObserver",
"NoopObserver",
"_is_activation_post_process",
"_is_per_channel_script_obs_instance",
"get_observer_state_dict",
"load_observer_state_dict",
"default_observer",
"default_placeholder_observer",
"default_debug_observer",
"default_weight_observer",
"default_histogram_observer",
"default_per_channel_weight_observer",
"default_dynamic_quant_observer",
"default_float_qparams_observer",
]
self._test_function_import('observer', function_list)
def test_package_import_qconfig(self):
self._test_package_import('qconfig')
def test_function_import_qconfig(self):
function_list = [
"QConfig",
"default_qconfig",
"default_debug_qconfig",
"default_per_channel_qconfig",
"QConfigDynamic",
"default_dynamic_qconfig",
"float16_dynamic_qconfig",
"float16_static_qconfig",
"per_channel_dynamic_qconfig",
"float_qparams_weight_only_qconfig",
"default_qat_qconfig",
"default_weight_only_qconfig",
"default_activation_only_qconfig",
"default_qat_qconfig_v2",
"get_default_qconfig",
"get_default_qat_qconfig",
"assert_valid_qconfig",
"QConfigAny",
"add_module_to_qconfig_obs_ctr",
"qconfig_equals"
]
self._test_function_import('qconfig', function_list)
def test_package_import_quantization_mappings(self):
self._test_package_import('quantization_mappings')
def test_function_import_quantization_mappings(self):
function_list = [
"no_observer_set",
"get_default_static_quant_module_mappings",
"get_static_quant_module_class",
"get_dynamic_quant_module_class",
"get_default_qat_module_mappings",
"get_default_dynamic_quant_module_mappings",
"get_default_qconfig_propagation_list",
"get_default_compare_output_module_list",
"get_default_float_to_quantized_operator_mappings",
"get_quantized_operator",
"_get_special_act_post_process",
"_has_special_act_post_process",
]
dict_list = [
"DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS",
"DEFAULT_STATIC_QUANT_MODULE_MAPPINGS",
"DEFAULT_QAT_MODULE_MAPPINGS",
"DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS",
# "_INCLUDE_QCONFIG_PROPAGATE_LIST",
"DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS",
"DEFAULT_MODULE_TO_ACT_POST_PROCESS",
]
self._test_function_import('quantization_mappings', function_list)
self._test_dict_import('quantization_mappings', dict_list)
def test_package_import_fuser_method_mappings(self):
self._test_package_import('fuser_method_mappings')
def test_function_import_fuser_method_mappings(self):
function_list = [
"fuse_conv_bn",
"fuse_conv_bn_relu",
"fuse_linear_bn",
"get_fuser_method",
]
dict_list = [
"DEFAULT_OP_LIST_TO_FUSER_METHOD"
]
self._test_function_import('fuser_method_mappings', function_list)
self._test_dict_import('fuser_method_mappings', dict_list)
def test_package_import_utils(self):
self._test_package_import('utils')
def test_function_import_utils(self):
function_list = [
'activation_dtype',
'activation_is_int8_quantized',
'activation_is_statically_quantized',
'calculate_qmin_qmax',
'check_min_max_valid',
'get_combined_dict',
'get_qconfig_dtypes',
'get_qparam_dict',
'get_quant_type',
'get_swapped_custom_module_class',
'getattr_from_fqn',
'is_per_channel',
'is_per_tensor',
'weight_dtype',
'weight_is_quantized',
'weight_is_statically_quantized',
]
self._test_function_import('utils', function_list)
|
pytorch-master
|
test/quantization/ao_migration/test_quantization.py
|
from torch.testing._internal.common_utils import TestCase
import importlib
from typing import List, Optional
class AOMigrationTestCase(TestCase):
def _test_package_import(self, package_name: str, base: Optional[str] = None):
r"""Tests the module import by making sure that all the internals match
(except the dunder methods)."""
if base is None:
base = 'quantization'
old_base = 'torch.' + base
new_base = 'torch.ao.' + base
old_module = importlib.import_module(f'{old_base}.{package_name}')
new_module = importlib.import_module(f'{new_base}.{package_name}')
old_module_dir = set(dir(old_module))
new_module_dir = set(dir(new_module))
# Remove dunder attributes before the subset check
for el in list(old_module_dir):
if el[:2] == '__' and el[-2:] == '__':
old_module_dir.remove(el)
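# every public name from the old namespace must also exist in the new torch.ao namespace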
assert (old_module_dir <= new_module_dir), \
f"Importing {old_module} vs. {new_module} does not match: " \
f"{old_module_dir - new_module_dir}"
def _test_function_import(self, package_name: str, function_list: List[str],
base: Optional[str] = None):
r"""Tests individual function list import by comparing the functions
and their hashes."""
if base is None:
base = 'quantization'
old_base = 'torch.' + base
new_base = 'torch.ao.' + base
old_location = importlib.import_module(f'{old_base}.{package_name}')
new_location = importlib.import_module(f'{new_base}.{package_name}')
for fn_name in function_list:
old_function = getattr(old_location, fn_name)
new_function = getattr(new_location, fn_name)
assert old_function == new_function, f"Functions don't match: {fn_name}"
assert hash(old_function) == hash(new_function), \
f"Hashes don't match: {old_function}({hash(old_function)}) vs. " \
f"{new_function}({hash(new_function)})"
def _test_dict_import(self, package_name: str, dict_list: List[str],
base: Optional[str] = None):
r"""Tests individual function list import by comparing the functions
and their hashes."""
if base is None:
base = 'quantization'
old_base = 'torch.' + base
new_base = 'torch.ao.' + base
old_location = importlib.import_module(f'{old_base}.{package_name}')
new_location = importlib.import_module(f'{new_base}.{package_name}')
for dict_name in dict_list:
old_dict = getattr(old_location, dict_name)
new_dict = getattr(new_location, dict_name)
assert old_dict == new_dict, f"Dicts don't match: {dict_name}"
for key in new_dict.keys():
assert old_dict[key] == new_dict[key], f"Dicts don't match: {dict_name} for key {key}"
|
pytorch-master
|
test/quantization/ao_migration/common.py
|
# Owner(s): ["oncall: quantization"]
import torch
from torch.testing._internal.common_quantization import (
skipIfNoFBGEMM
)
from torch.testing._internal.common_utils import suppress_warnings
from torch.testing._internal.jit_utils import JitTestCase
from typing import Tuple
import copy
class TestDeprecatedJitQuantized(JitTestCase):
@skipIfNoFBGEMM
def test_rnn_cell_quantized(self):
d_in, d_hid = 2, 2
for cell in [
torch.nn.LSTMCell(d_in, d_hid).float(),
torch.nn.GRUCell(d_in, d_hid).float(),
torch.nn.RNNCell(d_in, d_hid).float(),
]:
if isinstance(cell, torch.nn.LSTMCell):
num_chunks = 4
elif isinstance(cell, torch.nn.GRUCell):
num_chunks = 3
elif isinstance(cell, torch.nn.RNNCell):
num_chunks = 1
# Replace parameter values s.t. the range of values is exactly
# 255, thus we will have 0 quantization error in the quantized
# GEMM call. This is for testing purposes.
#
# Note that the current implementation does not support
# accumulation values outside of the range representable by a
# 16 bit integer, instead resulting in a saturated value. We
# must take care that in our test we do not end up with a dot
# product that overflows the int16 range, e.g.
# (255*127+255*127) = 64770. So, we hardcode the test values
# here and ensure a mix of signedness.
vals = [[100, -155],
[100, -155],
[-155, 100],
[-155, 100],
[100, -155],
[-155, 100],
[-155, 100],
[100, -155]]
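# keep only as many weight rows as this cell type's gates need (num_chunks gates of size d_hid)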
vals = vals[:d_hid * num_chunks]
cell.weight_ih = torch.nn.Parameter(
torch.tensor(vals, dtype=torch.float),
requires_grad=False)
cell.weight_hh = torch.nn.Parameter(
torch.tensor(vals, dtype=torch.float),
requires_grad=False)
ref = copy.deepcopy(cell)
cell = torch.jit.quantized.quantize_rnn_cell_modules(cell)
x = torch.tensor([[100, -155],
[-155, 100],
[100, -155]], dtype=torch.float)
h0_vals = [[-155, 100],
[-155, 155],
[100, -155]]
hx = torch.tensor(h0_vals, dtype=torch.float)
if isinstance(cell, torch.jit.quantized.QuantizedLSTMCell):
cx = torch.tensor(h0_vals, dtype=torch.float)
hiddens = (hx, cx)
else:
hiddens = hx
if isinstance(cell, torch.jit.quantized.QuantizedLSTMCell):
class ScriptWrapper(torch.jit.ScriptModule):
def __init__(self, cell):
super(ScriptWrapper, self).__init__()
self.cell = cell
@torch.jit.script_method
def forward(self, x: torch.Tensor,
hiddens: Tuple[torch.Tensor, torch.Tensor]
) -> Tuple[torch.Tensor, torch.Tensor]:
return self.cell(x, hiddens)
else:
class ScriptWrapper(torch.jit.ScriptModule):
def __init__(self, cell):
super(ScriptWrapper, self).__init__()
self.cell = cell
@torch.jit.script_method
def forward(self, x: torch.Tensor, hiddens: torch.Tensor) -> torch.Tensor:
return self.cell(x, hiddens)
cell = ScriptWrapper(cell)
outs = cell(x, hiddens)
cell = self.getExportImportCopyWithPacking(cell)
outs = cell(x, hiddens)
ref_outs = ref(x, hiddens)
self.assertEqual(len(outs), len(ref_outs))
for out, ref_out in zip(outs, ref_outs):
torch.testing.assert_close(out, ref_out)
@skipIfNoFBGEMM
def test_rnn_quantized(self):
d_in, d_hid = 2, 2
for cell in [
torch.nn.LSTM(d_in, d_hid).float(),
torch.nn.GRU(d_in, d_hid).float(),
]:
# Replace parameter values s.t. the range of values is exactly
# 255, thus we will have 0 quantization error in the quantized
# GEMM call. This is for testing purposes.
#
# Note that the current implementation does not support
# accumulation values outside of the range representable by a
# 16 bit integer, instead resulting in a saturated value. We
# must take care that in our test we do not end up with a dot
# product that overflows the int16 range, e.g.
# (255*127+255*127) = 64770. So, we hardcode the test values
# here and ensure a mix of signedness.
vals = [[100, -155],
[100, -155],
[-155, 100],
[-155, 100],
[100, -155],
[-155, 100],
[-155, 100],
[100, -155]]
if isinstance(cell, torch.nn.LSTM):
num_chunks = 4
elif isinstance(cell, torch.nn.GRU):
num_chunks = 3
vals = vals[:d_hid * num_chunks]
cell.weight_ih_l0 = torch.nn.Parameter(
torch.tensor(vals, dtype=torch.float),
requires_grad=False)
cell.weight_hh_l0 = torch.nn.Parameter(
torch.tensor(vals, dtype=torch.float),
requires_grad=False)
ref = copy.deepcopy(cell)
cell_int8 = torch.jit.quantized.quantize_rnn_modules(cell, dtype=torch.int8)
cell_fp16 = torch.jit.quantized.quantize_rnn_modules(cell, dtype=torch.float16)
niter = 10
x = torch.tensor([[100, -155],
[-155, 100],
[100, -155]], dtype=torch.float).unsqueeze(0).repeat(niter, 1, 1)
h0_vals = [[-155, 100],
[-155, 155],
[100, -155]]
hx = torch.tensor(h0_vals, dtype=torch.float).unsqueeze(0)
cx = torch.tensor(h0_vals, dtype=torch.float).unsqueeze(0)
if isinstance(ref, torch.nn.LSTM):
hiddens = (hx, cx)
elif isinstance(ref, torch.nn.GRU):
hiddens = hx
ref_out, ref_hid = ref(x, hiddens)
# Compare int8 quantized to unquantized
output_int8, final_hiddens_int8 = cell_int8(x, hiddens)
torch.testing.assert_close(output_int8, ref_out)
for out, ref in zip(final_hiddens_int8, ref_hid):
torch.testing.assert_close(out, ref)
# Compare fp16 quantized to unquantized
output_fp16, final_hiddens_fp16 = cell_fp16(x, hiddens)
torch.testing.assert_close(output_fp16, ref_out)
for out, ref in zip(final_hiddens_fp16, ref_hid):
torch.testing.assert_close(out, ref)
def compare_quantized_unquantized(ScriptWrapper, cell):
wrapper = ScriptWrapper(cell)
# Compare quantize scripted module to unquantized
script_out, script_hid = wrapper(x, hiddens)
torch.testing.assert_close(script_out, ref_out)
for out, ref in zip(script_hid, ref_hid):
torch.testing.assert_close(out, ref)
# Compare export/import to unquantized
export_import_wrapper = self.getExportImportCopyWithPacking(wrapper)
ei_out, ei_hid = export_import_wrapper(x, hiddens)
torch.testing.assert_close(ei_out, ref_out)
for out, ref in zip(ei_hid, ref_hid):
torch.testing.assert_close(out, ref)
if isinstance(cell, torch.jit.quantized.QuantizedGRU):
class ScriptWrapper(torch.jit.ScriptModule):
def __init__(self, cell):
super(ScriptWrapper, self).__init__()
self.cell = cell
@torch.jit.script_method
def forward(self, x: torch.Tensor, hiddens: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return self.cell(x, hiddens)
compare_quantized_unquantized(ScriptWrapper, cell)
elif isinstance(cell, torch.jit.quantized.QuantizedLSTM):
for cell in [cell_int8, cell_fp16]:
class ScriptWrapper(torch.jit.ScriptModule):
def __init__(self, cell):
super(ScriptWrapper, self).__init__()
self.cell = cell
@torch.jit.script_method
def forward(self, x, hiddens):
# type: (torch.Tensor, Tuple[torch.Tensor, torch.Tensor])
# -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]
return self.cell(x, hiddens)
compare_quantized_unquantized(ScriptWrapper, cell)
if 'fbgemm' in torch.backends.quantized.supported_engines:
# Suppression: using deprecated quant api
@suppress_warnings
def test_quantization_modules(self):
K1, N1 = 2, 2
class FooBar(torch.nn.Module):
def __init__(self):
super(FooBar, self).__init__()
self.linear1 = torch.nn.Linear(K1, N1).float()
def forward(self, x):
x = self.linear1(x)
return x
fb = FooBar()
fb.linear1.weight = torch.nn.Parameter(
torch.tensor([[-150, 100], [100, -150]], dtype=torch.float), requires_grad=False)
fb.linear1.bias = torch.nn.Parameter(torch.zeros_like(fb.linear1.bias), requires_grad=False)
x = (torch.rand(1, K1).float() - 0.5) / 10.0
value = torch.tensor([[100, -150]], dtype=torch.float)
y_ref = fb(value)
fb_int8 = torch.jit.quantized.quantize_linear_modules(fb)
traced_int8 = torch.jit.trace(fb_int8, (x,))
fb_int8 = self.getExportImportCopyWithPacking(traced_int8)
y_int8 = fb_int8(value)
fb_fp16 = torch.jit.quantized.quantize_linear_modules(fb, torch.float16)
traced_fp16 = torch.jit.trace(fb_fp16, (x,))
fb_fp16 = self.getExportImportCopyWithPacking(traced_fp16)
y_fp16 = fb_fp16(value)
torch.testing.assert_close(y_int8, y_ref, rtol=0.0001, atol=1e-3)
torch.testing.assert_close(y_fp16, y_ref, rtol=0.0001, atol=1e-3)
@skipIfNoFBGEMM
def test_erase_class_tensor_shapes(self):
class Linear(torch.nn.Module):
def __init__(self, in_features, out_features):
super(Linear, self).__init__()
qweight = torch._empty_affine_quantized(
[out_features, in_features], scale=1, zero_point=0,
dtype=torch.qint8)
self._packed_weight = torch.ops.quantized.linear_prepack(qweight)
@torch.jit.export
def __getstate__(self):
return (torch.ops.quantized.linear_unpack(self._packed_weight)[0], self.training)
def forward(self):
return self._packed_weight
@torch.jit.export
def __setstate__(self, state):
self._packed_weight = torch.ops.quantized.linear_prepack(state[0])
self.training = state[1]
@property
def weight(self):
return torch.ops.quantized.linear_unpack(self._packed_weight)[0]
@weight.setter
def weight(self, w):
self._packed_weight = torch.ops.quantized.linear_prepack(w)
with torch._jit_internal._disable_emit_hooks():
x = torch.jit.script(Linear(10, 10))
torch._C._jit_pass_erase_shape_information(x.graph)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_quantization.py TESTNAME\n\n"
"instead.")
|
pytorch-master
|
test/quantization/jit/test_deprecated_jit_quant.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: quantization"]
# torch
import torch
from torch.testing import FileCheck
from torch.testing._internal.common_quantization import QuantizationTestCase
class TestFusionPasses(QuantizationTestCase):
def test_quantized_add_relu_fusion(self):
class MAdd(torch.nn.Module):
def __init__(self):
super(MAdd, self).__init__()
def forward(self, x, y):
a = torch.ops.quantized.add(x, y, 1., 0)
relu_out = torch.relu(a)
return relu_out
A = torch.arange(-128, 130, dtype=torch.float)
B = torch.arange(-128, 130, dtype=torch.float)
scale = 2.0
zero_point = 127
qA = torch.quantize_per_tensor(A, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
qB = torch.quantize_per_tensor(B, scale=scale, zero_point=zero_point,
dtype=torch.quint8)
# Check quantized add + relu fusion
m = MAdd()
scripted_m = torch.jit.script(m)
ref_output = scripted_m(qA, qB)
# Must inline the graph.
# It does not matter in this test case since we call the ops
# directly, but when calling nn modules the graph has to be inlined.
torch._C._jit_pass_inline(scripted_m.graph)
torch._C._jit_pass_fuse_quantized_add_relu(scripted_m.graph)
FileCheck().check_not("aten::relu") \
.check("quantized::add_relu") \
.run(scripted_m.graph)
output = scripted_m(qA, qB)
self.assertEqual(ref_output, output)
class MAddOut(torch.nn.Module):
def __init__(self):
super(MAddOut, self).__init__()
def forward(self, x, y, z):
a = torch.ops.quantized.add_out(x, y, z)
relu_out = torch.relu(a)
return relu_out
qC = torch._empty_affine_quantized(qA.shape,
scale=scale,
zero_point=zero_point,
dtype=torch.quint8)
# Check quantized add + relu fusion
m = MAddOut()
scripted_m = torch.jit.script(m)
ref_output = scripted_m(qA, qB, qC)
# Must inline the graph.
# It does not matter in this test case since we call the ops
# directly, but when calling nn modules the graph has to be inlined.
torch._C._jit_pass_inline(scripted_m.graph)
torch._C._jit_pass_fuse_quantized_add_relu(scripted_m.graph)
FileCheck().check_not("aten::relu") \
.check_not("quantized::add_out") \
.check("quantized::add_relu_out") \
.run(scripted_m.graph)
output = scripted_m(qA, qB, qC)
self.assertEqual(ref_output, output)
class MAddScalar(torch.nn.Module):
def __init__(self):
super(MAddScalar, self).__init__()
def forward(self, x, y : float):
a = torch.ops.quantized.add_scalar(x, y)
relu_out = torch.relu(a)
return relu_out
# Check quantized add + relu fusion
m = MAddScalar()
scripted_m = torch.jit.script(m)
ref_output = scripted_m(qA, 3.)
torch._C._jit_pass_inline(scripted_m.graph)
torch._C._jit_pass_fuse_quantized_add_relu(scripted_m.graph)
FileCheck().check_not("aten::relu") \
.check_not("quantized::add_scalar(") \
.check("quantized::add_scalar_relu") \
.run(scripted_m.graph)
output = scripted_m(qA, 3.)
self.assertEqual(ref_output, output)
class MAddScalarOut(torch.nn.Module):
def __init__(self):
super(MAddScalarOut, self).__init__()
def forward(self, x, y : float, z):
a = torch.ops.quantized.add_scalar_out(x, y, z)
relu_out = torch.relu(a)
return relu_out
qC = torch._empty_affine_quantized(qA.shape,
scale=scale,
zero_point=zero_point,
dtype=torch.quint8)
m = MAddScalarOut()
scripted_m = torch.jit.script(m)
ref_output = scripted_m(qA, 3., qC)
torch._C._jit_pass_inline(scripted_m.graph)
torch._C._jit_pass_fuse_quantized_add_relu(scripted_m.graph)
FileCheck().check_not("aten::relu") \
.check_not("quantized::add_scalar_out") \
.check("quantized::add_scalar_relu_out") \
.run(scripted_m.graph)
output = scripted_m(qA, 3., qC)
self.assertEqual(ref_output, output)
|
pytorch-master
|
test/quantization/jit/test_fusion_passes.py
|
# -*- coding: utf-8 -*-
# Owner(s): ["oncall: quantization"]
# torch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.jit
import torch.jit.quantized
# torch.ao.quantization
from torch.ao.quantization import (
QConfig,
default_dynamic_qconfig,
float16_dynamic_qconfig,
default_observer,
per_channel_dynamic_qconfig,
default_per_channel_weight_observer,
default_qconfig,
get_default_qconfig,
quantize,
quantize_dynamic,
default_weight_observer,
default_histogram_observer,
fuse_modules,
quantize_jit,
quantize_dynamic_jit,
PlaceholderObserver,
)
# torch.ao.quantization.quantize_jit
from torch.ao.quantization.quantize_jit import (
convert_jit,
convert_dynamic_jit,
fuse_conv_bn_jit,
prepare_jit,
prepare_dynamic_jit,
script_qconfig,
)
# Testing utils
from torch.testing._internal.common_quantized import (
override_qengines,
qengine_is_fbgemm,
qengine_is_qnnpack,
)
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
skipIfNoFBGEMM,
get_script_module,
SingleLayerLinearModel,
SkipQuantModel,
NestedModel,
ConvModel,
ConvTransposeModel,
default_per_channel_qconfig,
test_only_eval_fn,
ConvBnModel,
)
# Annotated models
from torch.testing._internal.common_quantization import (
AnnotatedSingleLayerLinearModel,
AnnotatedSkipQuantModel,
AnnotatedNestedModel,
AnnotatedConvModel,
AnnotatedConvTransposeModel,
AnnotatedConvBnModel,
)
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import attrs_with_prefix
from torch.testing._internal.jit_utils import get_forward
from torch.testing._internal.jit_utils import get_forward_graph
from torch.testing._internal.common_utils import skipIfSlowGradcheckEnv
from torch.jit._recursive import wrap_cpp_module
# Standard library
from typing import List, Tuple
import io
import itertools
import unittest
class TestQuantizeJitPasses(QuantizationTestCase):
"""Test graph mode quantization passes used by quantize_jit"""
def test_skip_dequant_constant_prop(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(3, 5, 3).float()
def forward(self, x):
return self.conv(x)
m = torch.jit.script(M())
observer = (
default_per_channel_weight_observer.with_args(ch_axis=1)
)
qconfig_dict = {"": QConfig(activation=default_observer, weight=observer)}
m = prepare_jit(m, qconfig_dict)
data = torch.randn(1, 3, 10, 10, dtype=torch.float)
m(data)
m = convert_jit(m, debug=True)
freezed = torch.jit.freeze(m)
freezed(data)
# After freezing, weight becomes Constant.
# We have this pattern in the original graph: Constant f32_weight -> quant -> dequant
# After skipping dequant during Constant Propagation, the resulting graph will be:
# Constant int8_weight -> dequant
FileCheck().check_count("aten::quantize_per_tensor", 2, exactly=True).run(freezed.graph)
FileCheck().check_count("aten::quantize_per_channel", 0, exactly=True).run(freezed.graph)
FileCheck().check_count("aten::dequantize", 3, exactly=True).run(freezed.graph)
FileCheck().check("aten::quantize_per_tensor").check_next("aten::dequantize").check_not(
"aten::quantize_per_channel"
).check("aten::dequantize").check_next("aten::conv2d").check_next(
"aten::quantize_per_tensor"
).check_next(
"aten::dequantize"
).run(
freezed.graph
)
def test_foldbn_trivial(self):
bn_module = {2: torch.nn.BatchNorm2d, 3: torch.nn.BatchNorm3d}
conv_module = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
# Test trivial case
class TestModule(torch.nn.Module):
def __init__(self, dim):
super(TestModule, self).__init__()
self.conv = conv_module[dim](1, 20, 5, 1)
self.bn = bn_module[dim](num_features=20)
self.bn.eps = 0.0023
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
options = itertools.product([True, False], [2, 3])
data = {2: torch.rand(1, 1, 6, 6), 3: torch.rand(1, 1, 6, 6, 6)}
# Check that the transformation doesn't change numerics
for tracing, dim in options:
eager = TestModule(dim).eval()
x = data[dim]
scripted_or_traced = get_script_module(eager, tracing, x).eval()
# Check that in the original script module's forward we have two
# CallMethod nodes. One of them should be for conv.forward and the other
# for bn.forward.
FileCheck().check_count(
'prim::CallMethod[name="forward"]', 2, exactly=True
).run(str(get_forward(scripted_or_traced._c).graph))
# Run FoldConvBatchnorm pass.
scripted_or_traced = fuse_conv_bn_jit(scripted_or_traced)
# Check that after the pass one of the CallMethods is gone (supposedly,
# the bn.forward).
FileCheck().check_count(
'prim::CallMethod[name="forward"]', 1, exactly=True
).run(str(get_forward_graph(scripted_or_traced._c)))
# Check that the transformation doesn't change numerics
self.assertEqual(eager(x), scripted_or_traced(x))
def test_foldbn_trivial_nobias(self):
bn_module = {2: torch.nn.BatchNorm2d, 3: torch.nn.BatchNorm3d}
conv_module = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
# Test trivial case
class TestModule(torch.nn.Module):
def __init__(self, dim):
super(TestModule, self).__init__()
self.conv = conv_module[dim](1, 20, 5, 1, bias=False)
self.bn = bn_module[dim](num_features=20)
# to make sure new bias is not zero
self.bn.eps = 0.0027
self.bn.bias = torch.nn.Parameter(torch.rand([20]))
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
options = itertools.product([True, False], [2, 3])
data = {2: torch.rand(1, 1, 6, 6), 3: torch.rand(1, 1, 6, 6, 6)}
for tracing, dim in options:
eager = TestModule(dim).eval()
x = data[dim]
scripted_or_traced = get_script_module(eager, tracing, x).eval()
# Check that in the original script module's forward we have two
# CallMethod nodes. One of them should be for conv.forward and the other
# for bn.forward.
FileCheck().check_count(
'prim::CallMethod[name="forward"]', 2, exactly=True
).run(str(get_forward_graph(scripted_or_traced._c)))
# Run FoldConvBatchnorm pass.
scripted_or_traced = fuse_conv_bn_jit(scripted_or_traced)
# Check that after the pass one of the CallMethods is gone (supposedly,
# the bn.forward).
FileCheck().check_count(
'prim::CallMethod[name="forward"]', 1, exactly=True
).run(str(get_forward_graph(scripted_or_traced._c)))
# Check that the transformation doesn't change numerics
self.assertEqual(eager(x), scripted_or_traced(x))
def test_foldbn_in_submodule(self):
bn_module = {2: torch.nn.BatchNorm2d, 3: torch.nn.BatchNorm3d}
conv_module = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
# Test that we find Conv-BN patterns in submodules
class SubModule(torch.nn.Module):
def __init__(self, dim):
super(SubModule, self).__init__()
self.conv = conv_module[dim](1, 20, 5, 1)
self.bn = bn_module[dim](num_features=20)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class TestModule(torch.nn.Module):
def __init__(self, dim):
super(TestModule, self).__init__()
self.sub = SubModule(dim)
def forward(self, x):
x = self.sub(x)
return x
options = itertools.product([True, False], [2, 3])
data = {2: torch.rand(1, 1, 10, 10), 3: torch.rand(1, 1, 10, 10, 10)}
for tracing, dim in options:
eager = TestModule(dim).eval()
x = data[dim]
scripted_or_traced = get_script_module(eager, tracing, x).eval()
FileCheck().check_count(
'prim::CallMethod[name="forward"]', 2, exactly=True
).run(str(get_forward_graph(scripted_or_traced.sub._c)))
scripted_or_traced = fuse_conv_bn_jit(scripted_or_traced)
FileCheck().check_count(
'prim::CallMethod[name="forward"]', 1, exactly=True
).run(str(get_forward_graph(scripted_or_traced.sub._c)))
self.assertEqual(eager(x), scripted_or_traced(x))
def test_foldbn_shared_classtype(self):
bn_module = {2: torch.nn.BatchNorm2d, 3: torch.nn.BatchNorm3d}
conv_module = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
class TestModule(torch.nn.Module):
def __init__(self, dim, bias=False):
super(TestModule, self).__init__()
self.conv1 = conv_module[dim](5, 5, 3, bias=bias)
self.bn1 = bn_module[dim](num_features=5)
self.bn1.running_mean.fill_(-0.2)
self.bn1.bias = torch.nn.Parameter(torch.rand([5]))
# to make sure new bias is not zero
self.bn1.eps = 0.0023
self.conv2 = conv_module[dim](5, 5, 3, bias=bias)
self.bn2 = bn_module[dim](num_features=5)
self.bn2.eps = 0.0029
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
return x
options = itertools.product([True, False], [2, 2], [True, False])
data = {2: torch.rand(1, 5, 6, 6), 3: torch.rand(1, 5, 6, 6, 6)}
for tracing, dim, bias in options:
eager = TestModule(dim, bias).eval()
x = data[dim]
scripted_or_traced = get_script_module(eager, tracing, x)
folded = fuse_conv_bn_jit(scripted_or_traced)
self.assertEqual(eager(x), scripted_or_traced(x))
def test_foldbn_no_fusion(self):
"""Test that we don't fuse the cases when module type does not match"""
class CustomConv(torch.nn.Module):
def __init__(self):
super(CustomConv, self).__init__()
def forward(self, x):
return x
class CustomBn(torch.nn.Module):
def __init__(self):
super(CustomBn, self).__init__()
def forward(self, x):
return x
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv = CustomConv()
self.bn = CustomBn()
def forward(self, x):
return self.bn(self.conv(x))
m = torch.jit.script(M())
m = fuse_conv_bn_jit(m)
FileCheck().check_count("prim::CallMethod", 2, exactly=True).run(m.graph)
def test_foldbn_complex_cases(self):
# This test case attempts combinations of conv2d/conv3d with bias/no-bias,
# as well as BatchNorm with affine/no-affine, while varying the
# number of layers.
# this only works when default dtype is double
torch.set_default_dtype(torch.double)
bn_module = {2: torch.nn.BatchNorm2d, 3: torch.nn.BatchNorm3d}
conv_module = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
class SubModule(torch.nn.Module):
def __init__(self, dim, num_blocks, enable_bias, enable_affine):
super(SubModule, self).__init__()
layers = []
for i in range(num_blocks):
layers.append(conv_module[dim](20, 20, 5, 1, bias=enable_bias))
bn_obj = bn_module[dim](num_features=20, affine=enable_affine)
if enable_affine:
bn_obj.weight = torch.nn.Parameter(
torch.rand_like(bn_obj.weight)
)
bn_obj.bias = torch.nn.Parameter(torch.rand_like(bn_obj.bias))
bn_obj.running_mean = torch.rand_like(bn_obj.running_mean)
bn_obj.running_var = torch.rand_like(bn_obj.running_var)
layers.append(bn_obj)
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class TestModule(torch.nn.Module):
def __init__(self, dim, num_blocks, enable_bias, enable_affine):
super(TestModule, self).__init__()
self.sub = SubModule(dim, num_blocks, enable_bias, enable_affine)
def forward(self, x):
x = self.sub(x)
return x
options = itertools.product(
[True, False], [2, 3], [True, False], [True, False], [1, 2]
)
data = {2: torch.rand(1, 20, 10, 10), 3: torch.rand(1, 20, 10, 10, 10)}
for tracing, dim, enable_bias, enable_bn_affine, num_layers in options:
eager = TestModule(dim, num_layers, enable_bias, enable_bn_affine).eval()
x = data[dim]
scripted_or_traced = get_script_module(eager, tracing, x).eval()
FileCheck().check_count(
'prim::CallMethod[name="forward"]', num_layers * 2, exactly=True
).run(str(get_forward_graph(scripted_or_traced.sub.layers._c)))
scripted_or_traced = fuse_conv_bn_jit(scripted_or_traced)
FileCheck().check_count(
'prim::CallMethod[name="forward"]', num_layers, exactly=True
).run(str(get_forward_graph(scripted_or_traced.sub.layers._c)))
self.assertEqual(eager(x), scripted_or_traced(x))
torch.set_default_dtype(torch.float)
def test_fuse_linear(self):
class FunctionalLinear(torch.nn.Module):
def __init__(self, weight, bias):
super(FunctionalLinear, self).__init__()
self.weight = weight
self.bias = bias
def forward(self, x):
res = torch.matmul(x, self.weight.t())
if self.bias is not None:
res.add_(self.bias)
return res
x1 = torch.rand(3)
w1 = torch.rand(5, 3)
b1 = torch.rand(5)
x2 = torch.rand(5, 5)
w2 = torch.rand(5, 5)
b2 = torch.rand(5)
x3 = torch.rand(5, 5, 5)
w3 = torch.rand(5, 5)
b3 = torch.rand(5)
for has_bias, (x, weight, b) in itertools.product(
[True, False], [(x1, w1, b1), (x2, w2, b2), (x3, w3, b3)]
):
bias = b if has_bias else None
model = torch.jit.trace(FunctionalLinear(weight, bias), [x])
for node in model.graph.nodes():
if node.kind() == "aten::matmul":
source_range_1 = node.sourceRange()
torch._C._jit_pass_fuse_linear(model.graph)
for node in model.graph.nodes():
if node.kind() == "aten::linear":
source_range_2 = node.sourceRange()
FileCheck().check("aten::linear").run(model.graph)
check_not = ["aten::matmul", "aten::addmm", "aten::add_", "aten::t("]
for cn in check_not:
FileCheck().check_not(cn).run(model.graph)
            # the source range of the original matmul should be preserved on the fused linear
            self.assertTrue(source_range_1 == source_range_2)
            # make sure the fused model still runs
            model(x)
# check matmuls are not fused
class Matmul(torch.nn.Module):
def __init__(self, weight):
super(Matmul, self).__init__()
self.weight = weight
def forward(self, x):
return torch.matmul(x, self.weight)
x = torch.rand(5, 6, 5)
w = torch.rand(5, 5, 100)
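        # the weight here is 3D, so this is a batched matmul rather than a
        # linear; the fusion pass should leave it alone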
model = torch.jit.trace(Matmul(w), [x])
torch._C._jit_pass_fuse_linear(model.graph)
# check 3d matmul is not fused
FileCheck().check("aten::matmul").run(model.graph)
FileCheck().check_not("aten::linear").run(model.graph)
# make sure it runs
model(x)
def test_insert_observers(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(3, 5, 3)
def forward(self, x):
return self.conv(x)
m = torch.jit.script(M())
qconfig_dict = {"": default_qconfig}
m = prepare_jit(m, qconfig_dict)
# for input and output of conv
assert len(attrs_with_prefix(m, "_observer_")) == 2
# for weight
assert len(attrs_with_prefix(m.conv, "_observer_")) == 1
def test_insert_observers_interface(self):
@torch.jit.interface
class SubInterface(torch.nn.Module):
def addOne(self, inp) -> torch.Tensor:
pass
class Sub(torch.nn.Module):
def __init__(self):
super(Sub, self).__init__()
self.fc = torch.nn.Linear(5, 5)
def addOne(self, inp):
return self.fc(inp) + 1
def forward(self, x):
return self.addOne(x)
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(3, 5, 3)
self.sub = Sub()
def forward(self, x):
return self.sub(self.conv(x))
m = torch.jit.script(M())
qconfig_dict = {"sub.conv": default_qconfig}
m = prepare_jit(m, qconfig_dict)
def test_insert_observers_interface_unshare_type(self):
@torch.jit.interface
class OperatorIf(nn.Module):
def forward(self, inp: torch.Tensor) -> torch.Tensor:
pass
class Operator(nn.Module):
def __init__(self, a):
super().__init__()
self.a = a
def forward(self, inp: torch.Tensor) -> torch.Tensor:
return self.a * (inp + self.a)
class Inner(nn.Module):
op: OperatorIf
def __init__(self, op):
super().__init__()
self.op = op
def forward(self, inp):
return self.op(inp)
class Outer(nn.Module):
def __init__(self):
super().__init__()
self.inner_a = Inner(Operator(1))
self.inner_b = Inner(Operator(3.0))
def forward(self, inp):
return self.inner_a(inp) + self.inner_b(inp)
qconfig_dict = {"inner_a": default_qconfig, "inner_b": default_qconfig}
eager_model = Outer()
for tracing in [True, False]:
x = torch.rand(3)
script_model = get_script_module(eager_model, tracing, x)
# make sure it runs
prepare_jit(script_model, qconfig_dict)
def test_insert_observers_child_qconfig(self):
class Sub(torch.nn.Module):
def __init__(self):
super(Sub, self).__init__()
self.fc = torch.nn.Linear(5, 5)
def forward(self, x):
return self.fc(x)
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(3, 5, 3)
self.sub = Sub()
def forward(self, x):
return self.sub(self.conv(x))
m = torch.jit.script(M())
qconfig_dict = {"sub.fc": default_qconfig}
m = prepare_jit(m, qconfig_dict)
# input and output of sub
assert len(attrs_with_prefix(m, "_observer_")) == 2
# not quantized
assert len(attrs_with_prefix(m.conv, "_observer_")) == 0
# no observers since we observe in the outer most call site
assert len(attrs_with_prefix(m.sub, "_observer_")) == 0
# weight of linear
assert len(attrs_with_prefix(m.sub.fc, "_observer_")) == 1
@unittest.skipUnless(
"fbgemm" in torch.backends.quantized.supported_engines,
" Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
" with instruction set support avx2 or newer.",
)
def test_insert_observers_skip_values(self):
class ConvFunctionalReLU(torch.nn.Module):
def __init__(self):
super(ConvFunctionalReLU, self).__init__()
self.conv = torch.nn.Conv2d(3, 5, 3)
def forward(self, x):
return F.relu(self.conv(x))
class ConvReLUModule(torch.nn.Module):
def __init__(self):
super(ConvReLUModule, self).__init__()
self.conv = torch.nn.Conv2d(3, 5, 3)
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(self.conv(x))
class AddReLUModule(torch.nn.Module):
def __init__(self):
super(AddReLUModule, self).__init__()
self.relu = torch.nn.ReLU()
self.conv = torch.nn.Conv2d(3, 3, 3).float()
def forward(self, x):
out = self.conv(x)
out += x
return self.relu(out)
class AddFunctionalReLU(torch.nn.Module):
def __init__(self):
super(AddFunctionalReLU, self).__init__()
self.conv = torch.nn.Conv2d(3, 3, 3).float()
def forward(self, x):
out = self.conv(x)
out += x
return F.relu(out)
def attrs_with_prefix(module, prefix):
return [x for x, _ in module._modules._c.items() if x.startswith(prefix)]
qconfig_dict = {"": default_qconfig}
m = torch.jit.script(ConvFunctionalReLU())
m = prepare_jit(m, qconfig_dict)
# observer for weight of conv
assert len(attrs_with_prefix(m.conv, "_observer_")) == 1
# observer for input of conv and output of relu
assert len(attrs_with_prefix(m, "_observer_")) == 2
m = torch.jit.script(ConvReLUModule())
m = prepare_jit(m, qconfig_dict)
# observer for input of conv and output of relu
assert len(attrs_with_prefix(m, "_observer_")) == 2
# observer for weight of conv
assert len(attrs_with_prefix(m.conv, "_observer_")) == 1
        # no observer attached to the relu submodule itself
        # (its output observer lives on the parent module)
assert len(attrs_with_prefix(m.relu, "_observer_")) == 0
m = torch.jit.script(AddReLUModule())
qconfig_dict = {"": default_qconfig}
m = prepare_jit(m, qconfig_dict)
assert len(attrs_with_prefix(m, "_observer")) == 3
assert len(attrs_with_prefix(m.relu, "_observer")) == 0
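        # no observer should be inserted between the in-place add and the ReLU,
        # since that pair is expected to be fused into a single quantized op later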
FileCheck().check("aten::add_").check_not(
'Observer = prim::GetAttr[name="_observer_'
).check("ReLU = prim::GetAttr").run(str(get_forward_graph(m._c)))
m = torch.jit.script(AddFunctionalReLU())
qconfig_dict = {"": default_qconfig}
m = prepare_jit(m, qconfig_dict)
assert len(attrs_with_prefix(m, "_observer")) == 3
FileCheck().check("aten::add_").check_not(
'Observer = prim::GetAttr[name="_observer_'
).check("CallFunction").check('Observer = prim::GetAttr[name="_observer_').run(
str(get_forward_graph(m._c))
)
def test_insert_observers_weight_dtype(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(3, 5, 3)
def forward(self, x):
return F.relu(self.conv(x))
m = torch.jit.script(M())
qconfig_dict = {"": default_qconfig}
m = prepare_jit(m, qconfig_dict)
activation_dtypes = set(
obs.getattr("dtype")
for x, obs in m._modules._c.items()
if x.startswith("_observer_")
)
weight_dtypes = set(
obs.getattr("dtype")
for x, obs in m.conv._modules._c.items()
if x.startswith("_observer_")
)
assert len(activation_dtypes) == 1, "Expected to have 1 activation dtype"
assert len(weight_dtypes) == 1, "Expected to have 1 weight dtype"
        assert (
            list(activation_dtypes)[0] != list(weight_dtypes)[0]
        ), "Expected activation dtype to be different from weight dtype"
def test_insert_observers_for_reused_weight(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, x, y, weight):
x = F.conv2d(x, weight)
y = F.conv2d(y, weight)
return x + y
m = torch.jit.script(M()).eval()
m = prepare_jit(m, {"": default_qconfig})
# 3 for x, y, weight, one for output of each F.conv2d and one for output of add
assert len(attrs_with_prefix(m, "_observer")) == 6
def test_insert_observers_shared_class_type(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 5, 3).float()
self.conv2 = torch.nn.Conv2d(3, 5, 3).float()
def forward(self, x):
return self.conv2(self.conv1(x))
m = torch.jit.script(M())
qconfig_dict = {"": default_qconfig}
m = prepare_jit(m, qconfig_dict)
        # conv1 and conv2 share the same class type; we need to
        # make sure we don't insert observers into the type twice
conv1_observers = attrs_with_prefix(m.conv1, "_observer_")
conv2_observers = attrs_with_prefix(m.conv2, "_observer_")
        assert len(conv1_observers) == 1, "Expected to have 1 observer submodule"
        assert len(conv2_observers) == 1, "Expected to have 1 observer submodule"
assert (
conv1_observers == conv2_observers
), "Expect conv1 and conv2 to have same observers since the class type is shared"
def test_insert_observers_for_general_ops(self):
"""Make sure we skip observers for ops that doesn't require
observation, e.g. flatten
"""
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(3, 3, 3).float()
def forward(self, x):
x = self.conv(x)
x = torch.flatten(x)
return x
m = torch.jit.script(M())
qconfig_dict = {"": default_qconfig}
m = prepare_jit(m, qconfig_dict)
# input and output of conv
assert len(attrs_with_prefix(m, "_observer_")) == 2
FileCheck().check('Observer = prim::GetAttr[name="_observer_').check(
'prim::GetAttr[name="conv"]'
).check("prim::CallMethod").check(
'Observer = prim::GetAttr[name="_observer_'
).check(
"aten::flatten"
).check_not(
'Observer = prim::GetAttr[name="_observer_'
).run(
m.graph
)
    # TODO: this is too long, split this into test_insert_observers.py and remove
    # the insert_observers prefix
def test_insert_observers_propagate_observed(self):
"""Make sure we propagate observed property through general ops"""
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 3, 3).float()
self.conv2 = torch.nn.Conv2d(3, 3, 3).float()
def forward(self, x):
x = self.conv1(x)
x = torch.flatten(x)
# we don't want to insert observer for input of self.conv2
# because output of self.conv1 is already observed
x = self.conv2(x)
return x
m = torch.jit.script(M())
qconfig_dict = {"": default_qconfig}
m = prepare_jit(m, qconfig_dict)
        # input of conv1, output of conv1 (propagated through flatten to conv2),
        # and output of conv2
assert len(attrs_with_prefix(m, "_observer_")) == 3
FileCheck().check('Observer = prim::GetAttr[name="_observer_').check(
'prim::GetAttr[name="conv1"]'
).check("prim::CallMethod").check(
'Observer = prim::GetAttr[name="_observer_'
).check(
"aten::flatten"
).check_not(
'Observer = prim::GetAttr[name="_observer_'
).check(
'prim::GetAttr[name="conv2"]'
).check(
'Observer = prim::GetAttr[name="_observer_'
).run(
m.graph
)
def test_insert_observers_propagate_observed_in_submodule(self):
"""Make sure we propagate observed property through general ops"""
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 3, 3).float()
self.conv2 = torch.nn.Conv2d(3, 3, 3).float()
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
def forward(self, x):
x = self.conv1(x)
x = self.avgpool(x)
# we don't want to insert observer for input of self.conv2
# because output of self.conv1 is already observed
x = self.conv2(x)
return x
m = torch.jit.script(M())
qconfig_dict = {"": default_qconfig}
m = prepare_jit(m, qconfig_dict)
        # input of conv1, output of conv1 (propagated through avgpool to conv2),
        # and output of conv2
assert len(attrs_with_prefix(m, "_observer_")) == 3
FileCheck().check('Observer = prim::GetAttr[name="_observer_').check(
'prim::GetAttr[name="conv1"]'
).check("prim::CallMethod").check(
'Observer = prim::GetAttr[name="_observer_'
).check(
"prim::CallMethod"
).check_not(
'Observer = prim::GetAttr[name="_observer_'
).check(
'prim::GetAttr[name="conv2"]'
).check(
'Observer = prim::GetAttr[name="_observer_'
).run(
m.graph
)
def test_insert_observers_propagate_observed_for_function(self):
def channel_shuffle(x: torch.Tensor, groups: int) -> torch.Tensor:
batchsize, num_channels, height, width = x.data.size()
channels_per_group = num_channels // groups
# reshape
x = x.view(batchsize, groups, channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
# flatten
x = x.view(batchsize, -1, height, width)
return x
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 3, 1).float()
self.conv2 = torch.nn.Conv2d(3, 3, 1).float()
def forward(self, x):
x = self.conv1(x)
x = channel_shuffle(x, 1)
x = self.conv2(x)
return x
data = [
(
torch.rand((1, 3, 10, 10), dtype=torch.float),
torch.randint(0, 1, (1,), dtype=torch.long),
)
for _ in range(2)
]
m = torch.jit.script(M()).eval()
m = prepare_jit(m, {"": default_qconfig})
# we want to test that channel_shuffle is going to pass
# the observed property from the output of conv1 to input of conv2
# so that we don't insert observers for input of conv2
assert (
len(
attrs_with_prefix(
m,
"_observer_",
)
)
== 3
)
def test_insert_observers_for_if(self):
class QuantProp(torch.nn.Module):
def __init__(self, use_skip):
super(QuantProp, self).__init__()
self.conv = torch.nn.Conv2d(3, 3, 1).float()
self.use_skip = use_skip
def forward(self, x):
if self.use_skip:
x = self.conv(x)
return torch.reshape(x, x.shape)
else:
x = self.conv(x)
return torch.reshape(x, x.shape)
class Res(torch.nn.Module):
def __init__(self, use_skip):
super(Res, self).__init__()
self.conv = torch.nn.Conv2d(3, 3, 1).float()
self.use_skip = use_skip
def forward(self, x):
if self.use_skip:
return self.conv(x)
else:
return self.conv(x)
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.quant_prop = QuantProp(True)
self.res = Res(False)
def forward(self, x):
x = self.quant_prop(x)
x = self.res(x)
return x
data = [torch.rand(1, 3, 10, 10, dtype=torch.float)]
result = {False: [1, 2, 2], True: [2, 1, 0]}
for tracing in [True, False]:
if tracing:
m = torch.jit.trace(M(), data).eval()
else:
m = torch.jit.script(M()).eval()
m = prepare_jit(m, {"": default_qconfig})
assert (
len(
attrs_with_prefix(
m,
"_observer_",
)
)
== result[tracing][0]
)
assert (
len(
attrs_with_prefix(
m.quant_prop,
"_observer_",
)
)
== result[tracing][1]
)
assert (
len(
attrs_with_prefix(
m.res,
"_observer_",
)
)
== result[tracing][2]
)
def test_insert_observers_for_nested_if(self):
class Res(torch.nn.Module):
def __init__(self, use_skip):
super(Res, self).__init__()
self.conv = torch.nn.Conv2d(3, 3, 1).float()
self.cond = use_skip
self.use_skip = use_skip
def forward(self, x):
if self.use_skip:
if self.cond:
return self.conv(x)
else:
return self.conv(x)
else:
return self.conv(x)
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.res1 = Res(True)
self.res2 = Res(False)
def forward(self, x):
x = self.res1(x)
x = self.res2(x)
return x
data = torch.rand((1, 3, 10, 10), dtype=torch.float)
result = {True: 3, False: 1}
for tracing in [True, False]:
if tracing:
m = torch.jit.trace(M(), data).eval()
else:
m = torch.jit.script(M()).eval()
m = prepare_jit(m, {"": default_qconfig})
assert len(attrs_with_prefix(m, "_observer_")) == result[tracing]
def test_insert_observers_for_if_consistent_observation(self):
"""check quantization for if works as long as
output of all branches are quantized/observed consistently
"""
class M(torch.nn.Module):
def __init__(self, cond):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(3, 3, 3).float()
self.cond = cond
def forward(self, x):
x = self.conv(x)
# x is already observed
if self.cond:
x = torch.flatten(x)
return x
class M2(torch.nn.Module):
def __init__(self, cond):
super(M2, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 3, 3).float()
self.conv2 = torch.nn.Conv2d(3, 3, 3).float()
self.cond = cond
def forward(self, x):
x = self.conv1(x)
if self.cond:
x = self.conv2(x)
# x will be observed in the branch
else:
x = torch.flatten(x)
# since output for both branch are quantized
# the if node is quantized consistently
return x
data = torch.rand((1, 3, 5, 5), dtype=torch.float)
options = list(itertools.product([True, False], [True, False]))
for cond, tracing in options:
if tracing:
m = torch.jit.trace(M(cond), data)
else:
m = torch.jit.script(M(cond))
m = prepare_jit(m, {"": default_qconfig})
assert len(attrs_with_prefix(m, "_observer_")) == 2
for cond, tracing in options:
if tracing:
m = torch.jit.trace(M2(cond), data)
else:
m = torch.jit.script(M2(cond))
m = prepare_jit(m, {"": default_qconfig})
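            # tracing with cond=False only captures the flatten branch, so the
            # observer for conv2's output is never inserted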
num_observers = 2 if tracing and not cond else 3
assert len(attrs_with_prefix(m, "_observer_")) == num_observers
def test_insert_quant_dequant(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(3, 5, 3).float()
def forward(self, x):
return self.conv(x)
for is_per_channel in [True, False]:
m = torch.jit.script(M())
observer = (
default_per_channel_weight_observer.with_args(ch_axis=1)
if is_per_channel
else default_observer
)
qconfig_dict = {"": QConfig(activation=observer, weight=observer)}
m = prepare_jit(m, qconfig_dict)
data = torch.randn(1, 3, 10, 10, dtype=torch.float)
m(data)
m = convert_jit(m, debug=True)
            assert (
                len(m._modules._c.items()) == 1
            ), "Expected to have a single conv submodule"
# make sure the quantized model is executable
m(data)
quant_func = (
"aten::quantize_per_channel"
if is_per_channel
else "aten::quantize_per_tensor"
)
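            # debug mode keeps explicit quantize/dequantize calls instead of fused
            # quantized ops: one quantize call each for the conv input, the conv
            # weight, and the conv output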
FileCheck().check_count(quant_func, 3, exactly=True).run(m.graph)
def test_insert_quant_dequant_shared_class_type(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 3, 3).float()
self.conv2 = torch.nn.Conv2d(3, 3, 3).float()
def forward(self, x):
return self.conv2(self.conv1(x))
for is_per_channel in [True, False]:
m = torch.jit.script(M())
observer = (
default_per_channel_weight_observer.with_args(ch_axis=1)
if is_per_channel
else default_observer
)
qconfig = QConfig(activation=observer, weight=observer)
qconfig_dict = {"": qconfig}
m = prepare_jit(m, qconfig_dict)
# observers for input, output and value between conv1/conv2
assert (
len(attrs_with_prefix(m, "_observer_")) == 3
), "Expected to have 3 obervers"
# observer for weight
assert (
len(attrs_with_prefix(m.conv1, "_observer_")) == 1
), "Expected to have 1 obervers"
# observer for weight
assert (
len(attrs_with_prefix(m.conv2, "_observer_")) == 1
), "Expected to have 1 obervers"
data = torch.randn(1, 3, 10, 10, dtype=torch.float)
m(data)
m = convert_jit(m, debug=True)
m(data)
assert m.conv1._c._type() == m.conv2._c._type()
# check all observers have been removed
assert (
len(attrs_with_prefix(m, "_observer_")) == 0
), "Expected to have 0 obervers"
assert (
len(attrs_with_prefix(m.conv1, "_observer_")) == 0
), "Expected to have 0 obervers"
assert (
len(attrs_with_prefix(m.conv2, "_observer_")) == 0
), "Expected to have 0 obervers"
quant_func = (
"aten::quantize_per_channel"
if is_per_channel
else "aten::quantize_per_tensor"
)
for module in ["conv1", "conv2"]:
conv = m._c.getattr(module)
# quantize weight
FileCheck().check(quant_func).check_next("aten::dequantize").check(
'prim::CallMethod[name="_conv_forward"]'
).check("return").run(get_forward_graph(conv))
# no quantize node in _conv_forward
FileCheck().check_not(quant_func).check("aten::conv2d").check_not(
quant_func
).check("return").run(conv._get_method("_conv_forward").graph)
def test_dedup_module_uses(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.relu(x)
x -= 0.5
return self.relu(x)
data = torch.randn((2, 2))
m = torch.jit.script(M())
ref_res = m(data)
assert (
len([x for x, _ in m._modules._c.items() if x.startswith("relu")]) == 1
), "Expected to have 1 relu modules after dedup module uses"
torch._C._jit_pass_dedup_module_uses(m._c)
m = torch.jit._recursive.wrap_cpp_module(m._c)
res = m(data)
assert (
len([x for x, _ in m._modules._c.items() if x.startswith("relu")]) == 2
), "Expected to have 2 relu modules after dedup module uses"
self.assertEqual(res, ref_res)
def test_replicate_dequantize(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(3, 3, 1).float()
def forward(self, x):
x = torch.dequantize(x)
r = self.conv(x)
r += x
return r
x = torch.randn([1, 3, 10, 10], dtype=torch.float)
x = torch.quantize_per_tensor(x, 0.5, 1, torch.quint8)
m = torch.jit.script(M())
ref_res = m(x)
FileCheck().check_count("aten::dequantize", 1, exactly=True).run(m.graph)
torch._C._jit_pass_replicate_dequantize(m.graph)
FileCheck().check_count("aten::dequantize", 2, exactly=True).run(m.graph)
res = get_forward(m._c)(x)
self.assertEqual(res, ref_res)
def test_replicate_dequantize_in_block(self):
class M(torch.nn.Module):
def __init__(self, cond):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(3, 3, 1).float()
self.cond = cond
def forward(self, x):
x = torch.dequantize(x)
if self.cond:
x = self.conv(x)
else:
x = x + 3
return x
x = torch.randn([1, 3, 10, 10], dtype=torch.float)
x = torch.quantize_per_tensor(x, 0.5, 1, torch.quint8)
m = torch.jit.script(M(True))
ref_res = m(x)
FileCheck().check_count("aten::dequantize", 1, exactly=True).run(m.graph)
torch._C._jit_pass_replicate_dequantize(m.graph)
FileCheck().check_count("aten::dequantize", 2, exactly=True).run(m.graph)
# check dequantize is right before CallMethod of conv
FileCheck().check("aten::dequantize").check_next("CallMethod").run(m.graph)
# check dequantize is right before add
FileCheck().check("aten::dequantize").check("aten::dequantize").check_next(
"aten::add"
).run(m.graph)
res = get_forward(m._c)(x)
self.assertEqual(res, ref_res)
def test_swap_functional_linear(self):
# TODO: This pass replaces any function called "linear" with "aten::linear"
# No longer necessary, and also quite surprising
def linear(input, weight, bias):
return torch.nn.functional.linear(input, weight, bias)
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, x, weight, bias):
x = torch.dequantize(x)
weight = torch.dequantize(weight)
x = linear(x, weight, bias)
x = torch.quantize_per_tensor(
x, scale=1.0, zero_point=0, dtype=torch.quint8
)
return x
x = torch.rand((10, 5), dtype=torch.float)
x = torch.quantize_per_tensor(x, scale=0.5, zero_point=1, dtype=torch.quint8)
weight = torch.rand((5, 5), dtype=torch.float)
weight = torch.quantize_per_tensor(
weight, scale=0.5, zero_point=1, dtype=torch.qint8
)
bias = torch.rand((5), dtype=torch.float)
m = torch.jit.script(M())
ref_res = m(x, weight, bias)
FileCheck().check("CallFunction").run(m.graph)
torch._C._jit_pass_swap_functional_linear(m.graph)
FileCheck().check("aten::linear").check_not("CallFunction").run(m.graph)
res = m(x, weight, bias)
self.assertEqual(res, ref_res)
def test_replicate_quantize_for_if(self):
"""We want to move quantize nodes for output of prim::If
inside the prim::If blocks so that we can match quantization
patterns.
"""
class Res(torch.nn.Module):
def __init__(self):
super(Res, self).__init__()
self.conv = torch.nn.Conv2d(3, 3, 1).float()
self.conv2 = torch.nn.Conv2d(3, 3, 1).float()
self.use_skip = True
def forward(self, x: torch.Tensor, cond: bool) -> torch.Tensor:
# to avoid being frozen
self.use_skip = cond
if self.use_skip:
return self.conv(x)
else:
return self.conv2(x)
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.res1 = Res()
self.res2 = Res()
def forward(self, x):
x = self.res1(x, True)
x = self.res2(x, False)
return x
data = [[torch.rand((1, 3, 10, 10), dtype=torch.float)]]
qconfig_dict = {"": default_qconfig}
m = torch.jit.script(M()).eval()
m = quantize_jit(m, qconfig_dict, test_only_eval_fn, [data])
# make sure patterns in both branches are fused
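        # each Res module has a conv call in both the if and the else branch,
        # and there are two Res modules, hence 4 quantized::conv2d calls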
FileCheck().check_count("quantized::conv2d(", 4, exactly=True).run(m.graph)
def test_finalize_for_linear(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.fc = torch.nn.Linear(5, 5).float()
def forward(self, x):
return self.fc(x)
data = [[torch.rand((1, 5), dtype=torch.float)]]
qconfig_dict = {"": default_qconfig}
model = torch.jit.script(M()).eval()
model = quantize_jit(model, qconfig_dict, test_only_eval_fn, [data])
# make sure there is only one quantize_per_tensor for input
# and linear_prepack is folded
FileCheck().check_count("aten::quantize_per_tensor", 1, exactly=True).check_not(
"quantized::linear_prepack"
).check("quantized::linear").run(model.graph)
def test_inplace_option(self):
for tracing in [True, False]:
model = get_script_module(
torch.nn.Conv2d(3, 3, 3).float(), tracing, self.img_data_2d[0][0]
)
qconfig_dict = {"": default_qconfig}
quantize_jit(
model, qconfig_dict, test_only_eval_fn, [self.img_data_2d], inplace=True
)
FileCheck().check("quantized::conv2d").run(model.graph)
FileCheck().check_not("aten::conv2d").run(model.graph)
def test_finalize_debug(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(3, 3, 3).float()
self.avgpool = torch.nn.AvgPool2d(3)
def forward(self, x):
x = self.conv(x)
x = self.avgpool(x)
return x
data = [[torch.rand((1, 3, 10, 10), dtype=torch.float)]]
qconfig_dict = {"": default_qconfig}
model = torch.jit.script(M()).eval()
model = quantize_jit(model, qconfig_dict, test_only_eval_fn, [data], debug=True)
FileCheck().check_not("quantized::conv2d").check("aten::conv2d").check(
"aten::avg_pool2d"
).check("aten::q_scale").check_next("aten::q_zero_point").check_next(
"prim::dtype"
).check_next(
"aten::quantize_per_tensor"
).check(
"aten::dequantize"
).run(
model.graph
)
def test_module_list(self):
class SimpleLinearLayer(torch.nn.Module):
def __init__(self):
super(SimpleLinearLayer, self).__init__()
self.fc = torch.nn.Linear(5, 5).float()
def forward(self, x):
return self.fc(x)
class ComplexModel(torch.nn.Module):
def __init__(self):
super(ComplexModel, self).__init__()
self.layers = torch.nn.ModuleList(
[SimpleLinearLayer() for i in range(2)]
)
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
states = []
for layer in self.layers:
val = layer(x)
states.append(val)
return states
data = torch.rand((1, 5), dtype=torch.float)
qconfig_dict = {"": default_qconfig}
model = torch.jit.script(ComplexModel()).eval()
model = prepare_jit(model, qconfig_dict)
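        # one observer for the shared input and one for the output of each of
        # the two linear layers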
assert len(attrs_with_prefix(model, "_observer")) == 3
model(data)
model = convert_jit(model, debug=False)
FileCheck().check("quantized::linear").check("quantized::linear").run(
model.graph
)
def test_conv_trace(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv1d = torch.nn.Conv1d(3, 3, 3).float()
self.conv2d = torch.nn.Conv2d(3, 3, 3).float()
self.conv3d = torch.nn.Conv3d(3, 3, 3).float()
def forward(self, x, y, z):
a = self.conv1d(x)
b = self.conv2d(y)
c = self.conv3d(z)
return (a, b, c)
qconfig_dict = {"": default_qconfig}
inputs = (
torch.rand((1, 3, 10), dtype=torch.float),
torch.rand((1, 3, 10, 10), dtype=torch.float),
torch.rand((1, 3, 10, 10, 10), dtype=torch.float),
)
model = torch.jit.trace(M(), inputs).eval()
m = prepare_jit(model, qconfig_dict)
FileCheck().check("aten::conv1d").check_not("aten::_convolution").run(
str(get_forward_graph(m.conv1d._c))
)
FileCheck().check("aten::conv2d").check_not("aten::_convolution").run(
str(get_forward_graph(m.conv2d._c))
)
FileCheck().check("aten::conv3d").check_not("aten::_convolution").run(
str(get_forward_graph(m.conv3d._c))
)
def test_convtranspose_trace(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.convtranspose1d = torch.nn.ConvTranspose1d(3, 3, 3).float()
self.convtranspose2d = torch.nn.ConvTranspose2d(3, 3, 3).float()
self.convtranspose3d = torch.nn.ConvTranspose3d(3, 3, 3).float()
def forward(self, x, y, z):
a = self.convtranspose1d(x)
b = self.convtranspose2d(y)
c = self.convtranspose3d(z)
return (a, b, c)
qconfig_dict = {"": default_qconfig}
inputs = (
torch.rand((1, 3, 10), dtype=torch.float),
torch.rand((1, 3, 10, 10), dtype=torch.float),
torch.rand((1, 3, 10, 10, 10), dtype=torch.float),
)
model = torch.jit.trace(M(), inputs).eval()
m = prepare_jit(model, qconfig_dict)
FileCheck().check("aten::conv_transpose1d").check_not("aten::_convolution").run(
str(get_forward_graph(m.convtranspose1d._c))
)
FileCheck().check("aten::conv_transpose2d").check_not("aten::_convolution").run(
str(get_forward_graph(m.convtranspose2d._c))
)
FileCheck().check("aten::conv_transpose3d").check_not("aten::_convolution").run(
str(get_forward_graph(m.convtranspose3d._c))
)
@unittest.skipUnless(
"fbgemm" in torch.backends.quantized.supported_engines,
" Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
" with instruction set support avx2 or newer.",
)
def test_replicate_dequant_same_value(self):
class Mul(torch.nn.Module):
def __init__(self):
super(Mul, self).__init__()
self.conv = torch.nn.Conv2d(3, 3, 3).float()
def forward(self, x):
x = self.conv(x)
return x * x
data = [[torch.rand((1, 3, 10, 10), dtype=torch.float)]]
qconfig_dict = {"": default_qconfig}
model = torch.jit.script(Mul()).eval()
m = quantize_jit(model, qconfig_dict, test_only_eval_fn, [data])
FileCheck().check("quantized::mul(").check_not("aten::mul").run(m.graph)
def test_interface_with_fork(self):
class SubModule(torch.nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.embedding1 = torch.nn.EmbeddingBag(
num_embeddings=10,
embedding_dim=12,
include_last_offset=True,
sparse=False,
mode="sum",
)
def forward(self, x, y):
return self.embedding1(x, y)
class OrigMod(torch.nn.Module):
def __init__(self):
super(OrigMod, self).__init__()
self.embedding1 = torch.nn.EmbeddingBag(
num_embeddings=10,
embedding_dim=12,
include_last_offset=True,
sparse=False,
mode="sum",
)
def forward(self, x, y):
return self.embedding1(x, y)
@torch.jit.interface
class ModInterface(torch.nn.Module):
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
pass
class TestModule(torch.nn.Module):
proxy_mod: ModInterface
def __init__(self):
super(TestModule, self).__init__()
self.proxy_mod = OrigMod()
self.sub = SubModule()
def forward(self, x, y):
a = self.proxy_mod(x, y)
b = self.sub(x, y)
return b
class MainModule(torch.nn.Module):
def __init__(self):
super(MainModule, self).__init__()
self.test = TestModule()
def forward(self, x, y):
fut = torch.jit._fork(self.test.forward, x, y)
z = torch.jit._wait(fut)
return z
indices = torch.tensor(
[
9,
6,
5,
7,
8,
8,
9,
2,
8,
6,
6,
9,
1,
6,
8,
8,
3,
2,
3,
6,
3,
6,
5,
7,
0,
8,
4,
6,
5,
8,
2,
3,
]
)
offsets = torch.tensor([0, 19, 20, 28, 28, 32])
m = torch.jit.trace(MainModule(), (indices, offsets))
m.eval()
int8_qconfig = QConfig(
activation=PlaceholderObserver.with_args(
dtype=torch.float, custom_op_name="embedding_bag_byte"
),
weight=PlaceholderObserver.with_args(custom_op_name="embedding_bag_byte"),
)
m = prepare_jit(m, {"": int8_qconfig})
m = convert_jit(m)
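        # the placeholder observers with custom_op_name="embedding_bag_byte"
        # should lower the EmbeddingBag to the byte rowwise-offsets quantized op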
FileCheck().check("quantized::embedding_bag_byte_rowwise_offsets").run(m.graph)
@skipIfNoFBGEMM
def test_quantize_fork_wait(self):
"""Tests the case where fork and wait calls are in different subgraphs
Calling inline fork-wait only removes the fork call and leaves aten::wait
calls in the graph, with Tensor as input (instead of Future[Tensor])
"""
class MainModule(nn.Module):
def __init__(self):
super(MainModule, self).__init__()
self.fork_ops = ForkModule()
def init_values(self, x):
shared_module = self.fork_ops(x)
self.fork_dict = shared_module
def forward(self, x):
val = torch.jit._wait(self.fork_ops(x))
return val
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
def forward(self, x):
w = torch.ones(5, 5)
b = torch.zeros(5)
return torch.nn.functional.linear(x, w, b)
class ForkModule(nn.Module):
def __init__(self):
super(ForkModule, self).__init__()
self.test = TestModule()
def forward(self, x):
fut = torch.jit._fork(self.test.forward, x)
return fut
model = MainModule().eval()
traced = torch.jit.trace(model, (torch.randn(5, 5),))
model = prepare_dynamic_jit(traced, {"": default_qconfig})
model = convert_dynamic_jit(model)
FileCheck().check("quantized::linear_dynamic").run(model.graph)
# Make sure model save works
b = io.BytesIO()
torch.jit.save(model, b)
@skipIfSlowGradcheckEnv
class TestQuantizeJitOps(QuantizationTestCase):
"""Test graph mode post training static quantization works
for individual ops end to end.
"""
@skipIfNoFBGEMM
def test_linear(self):
class ModuleLinear(torch.nn.Module):
def __init__(self, has_relu=False, f_relu=False):
super(ModuleLinear, self).__init__()
self.linear = torch.nn.Linear(30, 4).float()
if has_relu:
if f_relu:
self.relu = F.relu
else:
self.relu = torch.nn.ReLU()
else:
self.relu = torch.nn.Identity()
def forward(self, x):
return self.relu(self.linear(x))
class FuncLinear(torch.nn.Module):
def __init__(self, has_relu=False, f_relu=False):
super(FuncLinear, self).__init__()
self.w = torch.randn(4, 30)
self.b = torch.randn(4)
if has_relu:
if f_relu:
self.relu = F.relu
else:
self.relu = torch.nn.ReLU()
else:
self.relu = torch.nn.Identity()
def forward(self, x):
return self.relu(F.linear(x, self.w, self.b))
data = [[torch.rand((1, 30), dtype=torch.float)]]
for model, tracing in itertools.product(
[ModuleLinear(has_relu=False), FuncLinear(has_relu=False)], [True, False]
):
model = self.checkGraphModeOp(model, data, "quantized::linear", tracing)
FileCheck().check_count("aten::quantize_per_tensor", 1, exactly=True).run(
model.graph
)
FileCheck().check_not("quantized::linear_prepack").run(model.graph)
for f_relu, tracing in itertools.product([True, False], [True, False]):
for model in [
ModuleLinear(has_relu=True, f_relu=f_relu),
FuncLinear(has_relu=True, f_relu=f_relu),
]:
model = self.checkGraphModeOp(
model, data, "quantized::linear_relu", tracing
)
checker = (
FileCheck()
.check_not("aten::linear")
.check_not("aten::relu")
.check_not("quantized::linear(")
.check_not("quantized::relu(")
.run(model.graph)
)
@skipIfNoFBGEMM
def test_quantized_conv(self):
conv_module = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
class Conv(torch.nn.Module):
def __init__(self, dim):
super(Conv, self).__init__()
self.conv = conv_module[dim](3, 3, 3).float()
def forward(self, x):
return self.conv(x)
options = itertools.product([1, 2, 3], [True, False])
for dim, tracing in options:
model = self.checkGraphModeOp(
Conv(dim),
self.img_data_dict[dim],
"quantized::conv{}d".format(dim),
tracing,
)
# make sure there is only one quantize_per_tensor for input
# and conv2d_prepack is folded
FileCheck().check_count("aten::quantize_per_tensor", 1, exactly=True).run(
model.graph
)
FileCheck().check_not("quantized::conv{}d_prepack".format(dim)).run(
model.graph
)
@skipIfNoFBGEMM
def test_quantized_conv_relu(self):
"""tests for conv1d_relu/conv2d_relu/conv3d_relu"""
conv_module = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
class ConvNdRelu(torch.nn.Module):
def __init__(self, dim, inplace):
super(ConvNdRelu, self).__init__()
self.conv = conv_module[dim](3, 3, 3).float()
self.relu = torch.nn.ReLU(inplace)
def forward(self, x):
return self.relu(self.conv(x))
class ConvNdFunctionalRelu(torch.nn.Module):
def __init__(self, dim):
super(ConvNdFunctionalRelu, self).__init__()
self.conv = conv_module[dim](3, 3, 3).float()
def forward(self, x):
return F.relu(self.conv(x))
class ConvNdInplaceFunctionalRelu(torch.nn.Module):
def __init__(self, dim):
super(ConvNdInplaceFunctionalRelu, self).__init__()
self.conv = conv_module[dim](3, 3, 3).float()
def forward(self, x):
return F.relu(self.conv(x), True)
options = itertools.product([1, 2, 3], [True, False])
for dim, tracing in options:
for orig_m in [
ConvNdRelu(dim, True),
ConvNdRelu(dim, False),
ConvNdFunctionalRelu(dim),
ConvNdInplaceFunctionalRelu(dim),
]:
conv_name = "conv{}d".format(dim)
m = self.checkGraphModeOp(
orig_m,
self.img_data_dict[dim],
"quantized::conv{}d_relu(".format(dim),
tracing=tracing,
)
FileCheck().check_not("aten::conv{}d(".format(dim)).check_not(
"aten::relu"
).check_not("quantized::conv{}d(".format(dim)).check_not(
"quantized::relu("
).run(
m.graph
)
@skipIfNoFBGEMM
def test_quantized_add_alpha(self):
"""Test quant fusion for multiple aten::add using same
constant alpha as the third argument
"""
class QuantizedAdd(torch.nn.Module):
def __init__(self):
super(QuantizedAdd, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
z = x + y
w = y + z
return z + w
data = [
[
torch.randn(1, 2, 5, 5, dtype=torch.float),
torch.randn(1, 2, 5, 5, dtype=torch.float),
]
]
for tracing in [True, False]:
m = self.checkGraphModeOp(QuantizedAdd(), data, "quantized::add", tracing)
FileCheck().check_count("quantized::add", 3, exactly=True).run(m.graph)
FileCheck().check_not("aten::add").check_not("aten::add_").run(m.graph)
@skipIfNoFBGEMM
def test_quantized_add_relu_alpha(self):
"""Test quant fusion for multiple aten::add using same
constant alpha as the third argument in add_relu pattern
"""
class AddRelu(torch.nn.Module):
def __init__(self, inplace):
super(AddRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
self.relu = torch.nn.ReLU(inplace)
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x = x + y
x = self.relu(x)
x = x + y
return self.relu(x)
class InplaceAddRelu(torch.nn.Module):
def __init__(self, inplace):
super(InplaceAddRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
self.relu = torch.nn.ReLU(inplace)
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x += y
x = self.relu(x)
x += y
return self.relu(x)
class AddFunctionalRelu(torch.nn.Module):
def __init__(self):
super(AddFunctionalRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x = x + y
x = F.relu(x)
x = x + y
return F.relu(x)
class InplaceAddFunctionalRelu(torch.nn.Module):
def __init__(self):
super(InplaceAddFunctionalRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x += y
x = F.relu(x)
x += y
return F.relu(x)
class AddInplaceFunctionalRelu(torch.nn.Module):
def __init__(self):
super(AddInplaceFunctionalRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x = x + y
x = F.relu(x, True)
x = x + y
return F.relu(x, True)
class InplaceAddInplaceFunctionalRelu(torch.nn.Module):
def __init__(self):
super(InplaceAddInplaceFunctionalRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x += y
x = F.relu(x, True)
x += y
return F.relu(x, True)
data = [
[
torch.rand((1, 2, 5, 5), dtype=torch.float),
torch.rand((1, 2, 5, 5), dtype=torch.float),
]
]
for m_orig in [
AddRelu(True),
AddRelu(False),
InplaceAddRelu(True),
InplaceAddRelu(False),
AddFunctionalRelu(),
InplaceAddFunctionalRelu(),
AddInplaceFunctionalRelu(),
InplaceAddInplaceFunctionalRelu(),
]:
for tracing in [True, False]:
m = self.checkGraphModeOp(
m_orig, data, "quantized::add_relu(", tracing=tracing
)
FileCheck().check_count("quantized::add_relu(", 2, exactly=True).run(
m.graph
)
FileCheck().check_not("aten::add(").check_not("aten::add_(").check_not(
"aten::relu("
).check_not("aten::relu_(").check_not("quantized::add(").check_not(
"quantized::relu("
).run(
m.graph
)
@skipIfNoFBGEMM
def test_quantized_add(self):
class QuantizedAdd(torch.nn.Module):
def __init__(self):
super(QuantizedAdd, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
return x + y
class QuantizedInplaceAdd(torch.nn.Module):
def __init__(self):
super(QuantizedInplaceAdd, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x += y
return x
class NonQuantizedAdd(torch.nn.Module):
def __init__(self):
super(NonQuantizedAdd, self).__init__()
def forward(self, x, y):
return x + y
class NonQuantizedInplaceAdd(torch.nn.Module):
def __init__(self):
super(NonQuantizedInplaceAdd, self).__init__()
def forward(self, x, y):
x += y
return x
data = [
[
torch.randn(1, 2, 3, 3, dtype=torch.float),
torch.randn(1, 2, 3, 3, dtype=torch.float),
]
]
for m, quantized in [
(QuantizedAdd(), True),
(QuantizedInplaceAdd(), True),
(NonQuantizedAdd(), False),
(NonQuantizedInplaceAdd(), False),
]:
for tracing in [True, False]:
op = "quantized::add" if quantized else "aten::add"
m = self.checkGraphModeOp(m, data, op, tracing)
# TODO: remove after refactor of checkGraphModeOp
if quantized:
FileCheck().check_not("aten::add").check_not("aten::add_").run(
m.graph
)
else:
FileCheck().check_not("quantized::add").run(m.graph)
@skipIfNoFBGEMM
def test_quantized_add_scalar(self):
class QuantizedAddScalar(torch.nn.Module):
def __init__(self):
super(QuantizedAddScalar, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x):
x = self.conv(x)
return x + 3
class QuantizedInplaceAddScalar(torch.nn.Module):
def __init__(self):
super(QuantizedInplaceAddScalar, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x):
x = self.conv(x)
x += 3
return x
class NonQuantizedAddScalar(torch.nn.Module):
def __init__(self):
super(NonQuantizedAddScalar, self).__init__()
def forward(self, x):
return x + 3
class NonQuantizedInplaceAddScalar(torch.nn.Module):
def __init__(self):
super(NonQuantizedInplaceAddScalar, self).__init__()
def forward(self, x):
x += 3
return x
data = [[torch.randn(1, 2, 3, 3, dtype=torch.float)]]
for m, quantized in [
(QuantizedAddScalar(), True),
(QuantizedInplaceAddScalar(), True),
(NonQuantizedAddScalar(), False),
(NonQuantizedInplaceAddScalar(), False),
]:
for tracing in [True, False]:
op = "quantized::add_scalar" if quantized else "aten::add"
# we don't check the numerical consistency for add_scalar
# since it's not supported
m = self.checkGraphModeOp(m, data, op, tracing, check=False)
# TODO: remove after refactor of checkGraphModeOp
if quantized:
FileCheck().check_not("aten::add").check_not("aten::add_").run(
m.graph
)
else:
FileCheck().check_not("quantized::add_scalar").run(m.graph)
@skipIfNoFBGEMM
def test_quantized_add_relu(self):
class AddRelu(torch.nn.Module):
def __init__(self, inplace):
super(AddRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
self.relu = torch.nn.ReLU(inplace)
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x = x + y
return self.relu(x)
class InplaceAddRelu(torch.nn.Module):
def __init__(self, inplace):
super(InplaceAddRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
self.relu = torch.nn.ReLU(inplace)
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x += y
return self.relu(x)
class AddFunctionalRelu(torch.nn.Module):
def __init__(self):
super(AddFunctionalRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x = x + y
return F.relu(x)
class InplaceAddFunctionalRelu(torch.nn.Module):
def __init__(self):
super(InplaceAddFunctionalRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x += y
return F.relu(x)
class AddInplaceFunctionalRelu(torch.nn.Module):
def __init__(self):
super(AddInplaceFunctionalRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x = x + y
return F.relu(x, True)
class InplaceAddInplaceFunctionalRelu(torch.nn.Module):
def __init__(self):
super(InplaceAddInplaceFunctionalRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x += y
return F.relu(x, True)
data = [
[
torch.rand((1, 2, 5, 5), dtype=torch.float),
torch.rand((1, 2, 5, 5), dtype=torch.float),
]
]
for m in [
AddRelu(True),
AddRelu(False),
InplaceAddRelu(True),
InplaceAddRelu(False),
AddFunctionalRelu(),
InplaceAddFunctionalRelu(),
AddInplaceFunctionalRelu(),
InplaceAddInplaceFunctionalRelu(),
]:
for tracing in [True, False]:
m = self.checkGraphModeOp(m, data, "quantized::add_relu(", tracing)
FileCheck().check_not("aten::add(").check_not("aten::add_(").check_not(
"aten::relu("
).check_not("aten::relu_(").check_not("quantized::add(").check_not(
"quantized::relu("
).run(
m.graph
)
@skipIfNoFBGEMM
def test_quantized_add_scalar_relu(self):
class AddScalarRelu(torch.nn.Module):
def __init__(self, inplace):
super(AddScalarRelu, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
self.relu = torch.nn.ReLU(inplace)
def forward(self, x):
x = self.conv(x)
return self.relu(x + 3)
class InplaceAddScalarRelu(torch.nn.Module):
def __init__(self, inplace):
super(InplaceAddScalarRelu, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
self.relu = torch.nn.ReLU(inplace)
def forward(self, x):
x = self.conv(x)
x += 3
return self.relu(x)
class AddScalarFunctionalRelu(torch.nn.Module):
def __init__(self):
super(AddScalarFunctionalRelu, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x):
x = self.conv(x)
return F.relu(x + 3)
class InplaceAddScalarFunctionalRelu(torch.nn.Module):
def __init__(self):
super(InplaceAddScalarFunctionalRelu, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x):
x = self.conv(x)
x += 3
return F.relu(x)
class AddScalarInplaceFunctionalRelu(torch.nn.Module):
def __init__(self):
super(AddScalarInplaceFunctionalRelu, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x):
x = self.conv(x)
return F.relu(x + 3, True)
class InplaceAddScalarInplaceFunctionalRelu(torch.nn.Module):
def __init__(self):
super(InplaceAddScalarInplaceFunctionalRelu, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x):
x = self.conv(x)
x += 3
return F.relu(x, True)
data = [[torch.rand((1, 2, 5, 5), dtype=torch.float)]]
for m in [
AddScalarRelu(True),
AddScalarRelu(False),
InplaceAddScalarRelu(True),
InplaceAddScalarRelu(False),
AddScalarFunctionalRelu(),
InplaceAddScalarFunctionalRelu(),
AddScalarInplaceFunctionalRelu(),
InplaceAddScalarInplaceFunctionalRelu(),
]:
for tracing in [True, False]:
# quantized::add_scalar_relu or quantized::add_scalar_relu_out
# TODO: split this after refactor of checkGraphModeOp
m = self.checkGraphModeOp(
m, data, "quantized::add_scalar_relu", tracing, check=False
)
FileCheck().check_not("aten::add(").check_not("aten::add_(").check_not(
"aten::relu("
).check_not("aten::relu_(").check_not(
"quantized::add_scalar("
).check_not(
"quantized::relu("
).run(
m.graph
)
@skipIfNoFBGEMM
def test_quantized_cat(self):
"""quantization of the output of cat will be depend on the
input of cat. we only quantize the output of cat when its inputs are quantized.
"""
class QuantizedCat(torch.nn.Module):
def __init__(self):
super(QuantizedCat, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
return torch.cat([x, y], 1)
class NonQuantizedCat(torch.nn.Module):
def __init__(self):
super(NonQuantizedCat, self).__init__()
def forward(self, x, y):
return torch.cat([x, y], 1)
data = [
[
torch.randn(1, 2, 5, 5, dtype=torch.float),
torch.randn(1, 2, 5, 5, dtype=torch.float),
]
]
for tracing in [True, False]:
m = self.checkGraphModeOp(QuantizedCat(), data, "quantized::cat", tracing)
FileCheck().check_not("aten::cat").run(m.graph)
m = self.checkGraphModeOp(NonQuantizedCat(), data, "aten::cat", tracing)
FileCheck().check_not("quantized::cat").run(m.graph)
@skipIfNoFBGEMM
def test_qbatch_norm(self):
bn_module = {
1: torch.nn.BatchNorm1d,
2: torch.nn.BatchNorm2d,
3: torch.nn.BatchNorm3d,
}
class M(torch.nn.Module):
def __init__(self, dim):
super(M, self).__init__()
self.bn = bn_module[dim](3).to(torch.float)
def forward(self, x):
return self.bn(x)
options = itertools.product([True, False], [1, 2, 3])
for tracing, dim in options:
model = self.checkGraphModeOp(
M(dim), self.img_data_dict[dim], "quantized::batch_norm", tracing
)
FileCheck().check_not("aten::batch_norm").run(model.graph)
@skipIfNoFBGEMM
def test_qbatch_norm_relu_BNRelu(self):
bn_module = {2: torch.nn.BatchNorm2d, 3: torch.nn.BatchNorm3d}
class BNRelu(torch.nn.Module):
def __init__(self, dim, inplace):
super(BNRelu, self).__init__()
self.bn = bn_module[dim](3).to(torch.float)
self.relu = torch.nn.ReLU(inplace=inplace)
def forward(self, x):
return self.relu(self.bn(x))
options = itertools.product([True, False], [2, 3])
for tracing, dim in options:
for instance in [BNRelu(dim, True), BNRelu(dim, False)]:
model = self.checkGraphModeOp(instance, self.img_data_dict[dim],
"quantized::batch_norm_relu", tracing)
FileCheck().check_not("aten::batch_norm") \
.check_not("aten::relu") \
.check_not("aten::relu_") \
.run(model.graph)
@skipIfNoFBGEMM
def test_qbatch_norm_relu_BNFuncRelu(self):
bn_module = {2 : torch.nn.BatchNorm2d, 3 : torch.nn.BatchNorm3d}
class BNFuncRelu(torch.nn.Module):
def __init__(self, dim):
super(BNFuncRelu, self).__init__()
self.bn = bn_module[dim](3).to(torch.float)
def forward(self, x):
return F.relu(self.bn(x), False)
options = itertools.product([True, False], [2, 3])
for tracing, dim in options:
instance = BNFuncRelu(dim)
model = self.checkGraphModeOp(instance, self.img_data_dict[dim],
"quantized::batch_norm_relu", tracing)
FileCheck().check_not("aten::batch_norm") \
.check_not("aten::relu") \
.check_not("aten::relu_") \
.run(model.graph)
@skipIfNoFBGEMM
def test_qbatch_norm_relu_BNFuncInplaceRelu(self):
bn_module = {2 : torch.nn.BatchNorm2d, 3 : torch.nn.BatchNorm3d}
class BNFuncInplaceRelu(torch.nn.Module):
def __init__(self, dim):
super(BNFuncInplaceRelu, self).__init__()
self.bn = bn_module[dim](3).to(torch.float)
def forward(self, x):
return F.relu(self.bn(x), True)
options = itertools.product([True, False], [2, 3])
for tracing, dim in options:
instance = BNFuncInplaceRelu(dim)
model = self.checkGraphModeOp(instance, self.img_data_dict[dim],
"quantized::batch_norm_relu", tracing)
FileCheck().check_not("aten::batch_norm") \
.check_not("aten::relu") \
.check_not("aten::relu_") \
.run(model.graph)
@skipIfNoFBGEMM
def test_quantized_mul(self):
class QuantizedMul(torch.nn.Module):
def __init__(self):
super(QuantizedMul, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
return x * y
class QuantizedInplaceMul(torch.nn.Module):
def __init__(self):
super(QuantizedInplaceMul, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x *= y
return x
class NonQuantizedMul(torch.nn.Module):
def __init__(self):
super(NonQuantizedMul, self).__init__()
def forward(self, x, y):
return x * y
class NonQuantizedInplaceMul(torch.nn.Module):
def __init__(self):
super(NonQuantizedInplaceMul, self).__init__()
def forward(self, x, y):
x *= y
return x
data = [
[
torch.randn(1, 2, 10, 10, dtype=torch.float),
torch.randn(1, 2, 10, 10, dtype=torch.float),
]
]
for m, quantized in [
(QuantizedMul(), True),
(QuantizedInplaceMul(), True),
(NonQuantizedMul(), False),
(NonQuantizedInplaceMul(), False),
]:
for tracing in [True, False]:
op = "quantized::mul" if quantized else "aten::mul"
m = self.checkGraphModeOp(m, data, op, tracing)
# TODO: remove after refactor of checkGraphModeOp
if quantized:
FileCheck().check_not("aten::mul").check_not("aten::mul_").run(
m.graph
)
else:
FileCheck().check_not("quantized::mul").run(m.graph)
@skipIfNoFBGEMM
def test_quantized_mul_scalar(self):
class QuantizedMulScalar(torch.nn.Module):
def __init__(self):
super(QuantizedMulScalar, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x):
x = self.conv(x)
return x * 3
class QuantizedInplaceMulScalar(torch.nn.Module):
def __init__(self):
super(QuantizedInplaceMulScalar, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x):
x = self.conv(x)
x *= 3
return x
class NonQuantizedMulScalar(torch.nn.Module):
def __init__(self):
super(NonQuantizedMulScalar, self).__init__()
def forward(self, x):
return x * 3
class NonQuantizedInplaceMulScalar(torch.nn.Module):
def __init__(self):
super(NonQuantizedInplaceMulScalar, self).__init__()
def forward(self, x):
x *= 3
return x
data = [[torch.randn(1, 2, 5, 5, dtype=torch.float)]]
for m, quantized in [
(QuantizedMulScalar(), True),
(QuantizedInplaceMulScalar(), True),
(NonQuantizedMulScalar(), False),
(NonQuantizedInplaceMulScalar(), False),
]:
for tracing in [True, False]:
op = "quantized::mul_scalar" if quantized else "aten::mul"
                # we don't check the numerical consistency for mul_scalar
                # since it's not supported
m = self.checkGraphModeOp(m, data, op, tracing, check=False)
# TODO: remove after refactor of checkGraphModeOp
if quantized:
FileCheck().check_not("aten::mul").check_not("aten::mul_").run(
m.graph
)
else:
FileCheck().check_not("quantized::mul_scalar").run(m.graph)
@skipIfNoFBGEMM
def test_quantized_mul_relu(self):
class MulRelu(torch.nn.Module):
def __init__(self, inplace):
super(MulRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
self.relu = torch.nn.ReLU(inplace)
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x = x * y
return self.relu(x)
class InplaceMulRelu(torch.nn.Module):
def __init__(self, inplace):
super(InplaceMulRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
self.relu = torch.nn.ReLU(inplace)
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x *= y
return self.relu(x)
class MulFunctionalRelu(torch.nn.Module):
def __init__(self):
super(MulFunctionalRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x = x * y
return F.relu(x)
class InplaceMulFunctionalRelu(torch.nn.Module):
def __init__(self):
super(InplaceMulFunctionalRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x *= y
return F.relu(x)
class MulInplaceFunctionalRelu(torch.nn.Module):
def __init__(self):
super(MulInplaceFunctionalRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x = x * y
return F.relu(x, True)
class InplaceMulInplaceFunctionalRelu(torch.nn.Module):
def __init__(self):
super(InplaceMulInplaceFunctionalRelu, self).__init__()
self.conv1 = torch.nn.Conv2d(2, 2, 2).float()
self.conv2 = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x, y):
x = self.conv1(x)
y = self.conv2(y)
x *= y
return F.relu(x, True)
data = [
[
torch.rand((1, 2, 5, 5), dtype=torch.float),
torch.rand((1, 2, 5, 5), dtype=torch.float),
]
]
for m in [
MulRelu(True),
MulRelu(False),
InplaceMulRelu(True),
InplaceMulRelu(False),
MulFunctionalRelu(),
InplaceMulFunctionalRelu(),
MulInplaceFunctionalRelu(),
InplaceMulInplaceFunctionalRelu(),
]:
for tracing in [True, False]:
m = self.checkGraphModeOp(m, data, "quantized::mul_relu(", tracing)
FileCheck().check_not("aten::mul(").check_not("aten::mul_(").check_not(
"aten::relu("
).check_not("aten::relu_(").check_not("quantized::mul(").check_not(
"quantized::relu("
).run(
m.graph
)
@skipIfNoFBGEMM
def test_quantized_mul_scalar_relu(self):
class MulScalarRelu(torch.nn.Module):
def __init__(self, inplace):
super(MulScalarRelu, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
self.relu = torch.nn.ReLU(inplace)
def forward(self, x):
x = self.conv(x)
return self.relu(x * 3)
class InplaceMulScalarRelu(torch.nn.Module):
def __init__(self, inplace):
super(InplaceMulScalarRelu, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
self.relu = torch.nn.ReLU(inplace)
def forward(self, x):
x = self.conv(x)
x *= 3
return self.relu(x)
class MulScalarFunctionalRelu(torch.nn.Module):
def __init__(self):
super(MulScalarFunctionalRelu, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x):
x = self.conv(x)
return F.relu(x * 3)
class InplaceMulScalarFunctionalRelu(torch.nn.Module):
def __init__(self):
super(InplaceMulScalarFunctionalRelu, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x):
x = self.conv(x)
x *= 3
return F.relu(x)
class MulScalarInplaceFunctionalRelu(torch.nn.Module):
def __init__(self):
super(MulScalarInplaceFunctionalRelu, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x):
x = self.conv(x)
return F.relu(x * 3, True)
class InplaceMulScalarInplaceFunctionalRelu(torch.nn.Module):
def __init__(self):
super(InplaceMulScalarInplaceFunctionalRelu, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
def forward(self, x):
x = self.conv(x)
x *= 3
return F.relu(x, True)
data = [[torch.randn(1, 2, 5, 5, dtype=torch.float)]]
for m in [
MulScalarRelu(True),
MulScalarRelu(False),
InplaceMulScalarRelu(True),
InplaceMulScalarRelu(False),
MulScalarFunctionalRelu(),
InplaceMulScalarFunctionalRelu(),
MulScalarInplaceFunctionalRelu(),
InplaceMulScalarInplaceFunctionalRelu(),
]:
for tracing in [True, False]:
# quantized::mul_scalar_relu or quantized::mul_scalar_relu_out
m = self.checkGraphModeOp(
m, data, "quantized::mul_scalar_relu", tracing, check=False
)
FileCheck().check_not("aten::mul(").check_not("aten::mul_(").check_not(
"aten::relu("
).check_not("aten::relu_(").check_not(
"quantized::mul_scalar("
).check_not(
"quantized::relu("
).run(
m.graph
)
def test_hardswish(self):
class FunctionalHardswish(torch.nn.Module):
def __init__(self, inplace):
super(FunctionalHardswish, self).__init__()
self.inplace = inplace
def forward(self, input):
return torch.nn.functional.hardswish(input, inplace=self.inplace)
modules = [
torch.nn.Hardswish(),
FunctionalHardswish(True),
FunctionalHardswish(False),
]
for test_case in itertools.product([True, False], modules):
tracing, m = test_case
m = self.checkGraphModeOp(
m, self.img_data_2d, "quantized::hardswish", tracing
)
FileCheck().check_not("aten::hardswish").check_not("aten::hardswish_").run(
m.graph
)
def test_elu(self):
class FunctionalELU(torch.nn.Module):
def __init__(self, inplace=False):
super(FunctionalELU, self).__init__()
self.inplace = inplace
def forward(self, input):
return torch.nn.functional.elu(input, inplace=self.inplace)
modules = [torch.nn.ELU, FunctionalELU]
for test_case in itertools.product([True, False], [True, False], modules):
tracing, inplace, mod_class = test_case
m = mod_class(inplace=inplace)
m = self.checkGraphModeOp(m, self.img_data_2d, "quantized::elu", tracing)
FileCheck().check_not("aten::elu").check_not("aten::elu_").run(m.graph)
def test_layer_norm(self):
data = [[torch.rand((1, 2, 5, 5), dtype=torch.float)] for _ in range(2)]
layer_norm = torch.nn.LayerNorm([2, 5, 5])
for tracing in [True, False]:
m = self.checkGraphModeOp(
layer_norm, data, "quantized::layer_norm", tracing
)
FileCheck().check_not("aten::layer_norm").run(m.graph)
def test_group_norm(self):
data = [[torch.rand((1, 4, 5, 5), dtype=torch.float)] for _ in range(2)]
group_norm = torch.nn.GroupNorm(2, 4)
for tracing in [True, False]:
m = self.checkGraphModeOp(
group_norm, data, "quantized::group_norm", tracing
)
FileCheck().check_not("aten::group_norm").run(m.graph)
def test_instance_norm(self):
data_1d = [[torch.rand((1, 4, 5), dtype=torch.float)] for _ in range(2)]
data_2d = [[torch.rand((1, 4, 5, 1), dtype=torch.float)] for _ in range(2)]
data_3d = [[torch.rand((1, 4, 5, 1, 1), dtype=torch.float)] for _ in range(2)]
data = {1: data_1d, 2: data_2d, 3: data_3d}
instance_norm_modules = {
1: torch.nn.InstanceNorm1d,
2: torch.nn.InstanceNorm2d,
3: torch.nn.InstanceNorm3d,
}
options = itertools.product([1, 2, 3], [True, False])
for dim, tracing in options:
instance_norm = instance_norm_modules[dim](4)
m = self.checkGraphModeOp(
instance_norm, data[dim], "quantized::instance_norm", tracing
)
FileCheck().check_not("aten::instance_norm").run(m.graph)
@skipIfNoFBGEMM
def test_dequantize_tuple(self):
"""Make sure dequantize can support Tuple of tensor"""
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 3, 3).float()
self.conv2 = torch.nn.Conv2d(3, 3, 3).float()
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
x1 = self.conv1(x)
x2 = self.conv2(x)
return x1, x2
for tracing in [True, False]:
self.checkGraphModeOp(M(), self.img_data_2d, "quantized::conv2d", tracing)
@skipIfNoFBGEMM
def test_clamp(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 2).float()
self.relu6 = torch.nn.ReLU6()
self.relu6_ = torch.nn.ReLU6(True)
self.hardtanh = torch.nn.Hardtanh()
self.hardtanh_ = torch.nn.Hardtanh(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.relu6(x)
self.relu6_(x)
x = F.relu6(x)
x = torch.clamp(x, -3, 3)
x = x.clamp(-2.5, 2.5)
# x = x.clamp_(-2, 2) # Enable when quantized `clamp_` is ready
x = self.hardtanh(x)
self.hardtanh_(x)
x = F.hardtanh(x)
F.hardtanh_(x)
return x
data = [[torch.rand((1, 2, 5, 5), dtype=torch.float)]]
options = itertools.product(
["aten::clamp", "aten::hardtanh", "aten::hardtanh_"], [True, False]
)
for op, tracing in options:
m = self.checkGraphModeOp(M(), data, op, tracing)
FileCheck().check_count("aten::quantize_per_tensor", 1, exactly=True).run(
m.graph
)
FileCheck().check_count("aten::dequantize", 1, exactly=True).run(m.graph)
def test_general_shape_ops(self):
"""A test that checks dequantize will be swapped for
all supported general shape ops like aten::flatten
without actually checking for execution of these ops
"""
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.maxpool1d = torch.nn.MaxPool1d(kernel_size=3)
self.maxpool2d = torch.nn.MaxPool2d(kernel_size=3)
self.maxpool3d = torch.nn.MaxPool3d(kernel_size=3)
self.dropout = torch.nn.Dropout()
self.conv1 = torch.nn.Conv2d(3, 3, 3)
self.conv2 = torch.nn.Conv2d(3, 3, 3)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.conv1(x)
# add_scalar
x = x + 3
# mul_scalar
x = x * 3
# add_scalar_out
x += 3
# mul_scalar_out
x *= 3
# add_scalar_relu
x = x + 3
x = F.relu(x)
# add_scalar_relu_out
x += 3
x = F.relu(x)
# mul_scalar_relu
x = x * 3
x = F.relu(x)
# mul_scalar_relu_out
x *= 3
x = F.relu(x)
x = self.maxpool1d(x)
x = self.maxpool2d(x)
x = self.maxpool3d(x)
x = torch.flatten(x)
x = torch.max(x)
x = torch.min(x)
x = x.reshape([-1])
x = x.resize_(1, 1, x.numel())
x = x.view(-1)
# prim::ListConstruct
xs = [x, x]
# prim::ListUnpack
x, y = xs
# prim::TupleConstruct
xs = (x, x)
# prim::TupleUnpack
x, y = xs
x = x.transpose(1, 2)
x = x.contiguous()
x, y = torch.chunk(x, 2)
x = F.dropout(x)
x = self.dropout(x)
x, _ = torch.sort(x)
x = x.permute(0, 2, 3, 1)
x = torch.repeat_interleave(x, 3, 1)
x = self.relu(x)
x = F.relu(x)
x.relu_()
x = x.squeeze(0)
x.squeeze_(0)
x = torch.squeeze(x, 0)
x = x.unsqueeze(0)
x.unsqueeze_(0)
x = torch.unsqueeze(x, 0)
x = x.detach()
x.detach_()
x = x.repeat(4, 2)
y = []
y.append(x)
z = torch.stack(y, 0)
z = [z, z]
x, _ = z
x = self.conv2(x)
return x
data = torch.rand(1, 3, 10, 10)
# This model is not executable since we just put all ops
# in the same forward, therefore we only test scripting
m = torch.jit.script(M())
qconfig = script_qconfig(default_qconfig)
# dummy data to suppress warning
get_forward(qconfig.activation)(data)
get_forward(qconfig.weight)(data)
m = wrap_cpp_module(
torch._C._jit_pass_insert_observers(
m._c, "forward", {"": qconfig}, inplace=False
)
)
m = convert_jit(m)
# This checks that the dequantize from the output of first conv
# is being propagated to the end, so that we don't insert extra
# observers and also successfully fused two quantized::conv2d
# patterns
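        # Concretely, the converted graph should look roughly like:
        #   quantize_per_tensor -> quantized::conv2d -> quantized::add_scalar /
        #   quantized::mul_scalar / shape ops on the quantized tensor ->
        #   quantized::conv2d -> dequantize
        # which is what the FileCheck patterns below assert.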
# one quantize_per_tensor for input
FileCheck().check_count("aten::quantize_per_tensor", 1, exactly=True).run(
m.graph
)
FileCheck().check_count("quantized::conv2d(", 2, exactly=True).run(m.graph)
FileCheck().check_count("aten::dequantize", 1, exactly=True).run(m.graph)
FileCheck().check("quantized::add_scalar").check("quantized::mul_scalar").run(
m.graph
)
def test_general_value_ops(self):
""" A test that checks correct patterns are produced for
all supported general value ops like aten::avg_pool2d \
without actually checking for execution of these ops
"""
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(3, 3, 3)
self.avg_pool1d = torch.nn.AvgPool1d(3)
self.avg_pool2d = torch.nn.AvgPool2d(3)
self.avg_pool3d = torch.nn.AvgPool3d(3)
self.adaptive_avg_pool1d = torch.nn.AdaptiveAvgPool1d((1))
self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1))
self.adaptive_avg_pool3d = torch.nn.AdaptiveAvgPool3d((1, 1, 1))
self.leaky_relu = torch.nn.LeakyReLU()
self.hardsigmoid = torch.nn.Hardsigmoid()
self.sigmoid = torch.nn.Sigmoid()
self.tanh = torch.nn.Tanh()
def forward(self, x):
x = self.conv(x)
x = self.avg_pool1d(x)
x = self.avg_pool2d(x)
x = self.avg_pool3d(x)
x = self.adaptive_avg_pool1d(x)
x = self.adaptive_avg_pool2d(x)
x = self.adaptive_avg_pool3d(x)
x = F.avg_pool1d(x, 3)
x = F.avg_pool2d(x, 3)
x = F.avg_pool3d(x, 3)
x = F.adaptive_avg_pool1d(x, (1))
x = F.adaptive_avg_pool2d(x, (1, 1))
x = F.adaptive_avg_pool3d(x, (1, 1, 1))
x = torch.mean(x)
x = torch.mean(x, [2, 3], False)
x = x.mean()
x = x.mean([2, 3], True)
# interpolate node will introduce 3 quantize_per_tensor ops
x = F.interpolate(x, 4, mode="nearest") # interpolate node
x = F.upsample(x, (32, 32)) # interpolate node
x = F.upsample_nearest(x, (32, 32)) # interpolate node
x = F.interpolate(x, 4, mode="linear") # common node
x = F.upsample_bilinear(x, (32, 32)) # common node
x = self.leaky_relu(x)
x = F.leaky_relu(x)
x.leaky_relu_()
x = self.hardsigmoid(x)
x = F.hardsigmoid(x)
x.hardsigmoid_()
x = self.sigmoid(x)
x = torch.sigmoid(x)
# F.sigmoid is deprecated
x = x.sigmoid()
x.sigmoid_()
x = self.tanh(x)
# F.tanh is deprecated
x = torch.tanh(x)
x = x.tanh()
x.tanh_()
x = self.conv(x)
return x
# This model is not executable since we just put all ops
# in the same forward, therefore we only test scripting
m = torch.jit.script(M())
qconfig = script_qconfig(default_qconfig)
# dummy data to suppress warning
data = torch.rand(1, 3, 10, 10)
get_forward(qconfig.activation)(data)
get_forward(qconfig.weight)(data)
m = wrap_cpp_module(
torch._C._jit_pass_insert_observers(
m._c, "forward", {"": qconfig}, inplace=False
)
)
        # Check that the model before finalize contains unfused patterns
        # that numerically match the final quantized model, by counting the
        # number of aten::quantize_per_tensor calls.
        # The conv has 3 quantize_per_tensor calls for its activations and 1 for
        # its weight, and for N general value ops between the convs we should
        # have N + 1 quantize_per_tensor calls between these ops.
m1 = convert_jit(m, debug=True)
        # NB: this needs to be updated when we add more ops to the test.
        # Maps the number of quantize_per_tensor calls an op introduces to the
        # number of such ops in the model; for example, a key of `3` means each
        # op of that kind introduces 3 quantize_per_tensor calls.
num_op_by_num_quant = {1: 32, 2: 2, 3: 3}
num_quantize_per_tensor = 1 # for output
for num_quant, num_op in num_op_by_num_quant.items():
num_quantize_per_tensor += num_op * num_quant
num_quantize_per_tensor -= 4 # constant propagation removes some prepacks
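        # With the numbers above this works out to
        # 1 + 32 * 1 + 2 * 2 + 3 * 3 - 4 = 42 quantize_per_tensor calls.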
FileCheck().check_count(
"aten::quantize_per_tensor(", num_quantize_per_tensor, exactly=True
).run(m1.graph)
# This checks that the dequantize from the output of first conv
# is being propagated to the end, so that we don't insert extra
# observers and also successfully fused two quantized::conv2d
# patterns
# one quantize_per_tensor for input
m2 = convert_jit(m, debug=False)
FileCheck().check_count("aten::quantize_per_tensor(", 1, exactly=True).run(
m2.graph
)
FileCheck().check_count("quantized::conv2d(", 2, exactly=True).check(
"aten::dequantize("
).run(m2.graph)
@override_qengines
def test_conv_with_benchmark_flag(self):
r"""Verifies that convolutions get quantized when
torch.backends.cudnn.benchmark is enabled
"""
if not qengine_is_qnnpack():
return
with torch.backends.cudnn.flags(enabled=True):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
m.eval()
m = torch.jit.trace(m, torch.rand(4, 1, 4, 4))
qconfig = torch.ao.quantization.get_default_qconfig("qnnpack")
prepared_model = torch.ao.quantization.prepare_jit(m, {"": qconfig})
prepared_model(torch.rand(4, 1, 4, 4))
converted_model = torch.ao.quantization.convert_jit(prepared_model)
FileCheck().check("quantized::conv2d").run(converted_model.graph)
@skipIfNoFBGEMM
def test_cat_linear(self):
class LinearModel(torch.nn.Module):
def __init__(self):
super(LinearModel, self).__init__()
self.weight = torch.randn(5, 5)
def forward(self, x, y):
a = torch.cat([x, y])
b = F.linear(a, self.weight)
c = F.linear(b, self.weight)
return b, c
model = LinearModel().eval()
qconfig = {"": default_qconfig}
float_model = torch.jit.script(model)
prepared_model = prepare_jit(float_model, qconfig)
prepared_model(torch.rand(5, 5), torch.rand(5, 5))
converted_model = convert_jit(prepared_model)
FileCheck().check("quantized::linear").check("quantized::linear").run(
converted_model.graph
)
class TestQuantizeDynamicJitPasses(QuantizationTestCase):
def test_prepare_dynamic(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.fc = torch.nn.Linear(5, 5)
def forward(self, x):
return self.fc(x)
model = torch.jit.script(M())
for qconfig in [float16_dynamic_qconfig, default_dynamic_qconfig]:
m = prepare_dynamic_jit(model, {"": qconfig})
# observer for weight
assert len(attrs_with_prefix(m.fc, "_observer_")) == 1
if qconfig == float16_dynamic_qconfig:
observer_name = 'PlaceholderObserver = prim::GetAttr[name="_observer_'
FileCheck().check(observer_name).run(m.fc.graph)
else:
# for input of FC for dynamic quant
assert len(attrs_with_prefix(m, "_observer_")) == 1
observer_name = 'Observer = prim::GetAttr[name="_observer_'
FileCheck().check(observer_name).check(
'prim::GetAttr[name="fc"]'
).check("prim::CallMethod").check_not(observer_name).run(m.graph)
def test_prepare_dynamic_child_qconfig(self):
class Sub(torch.nn.Module):
def __init__(self):
super(Sub, self).__init__()
self.fc = torch.nn.Linear(5, 5)
def forward(self, x):
return self.fc(x)
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv = torch.nn.Conv2d(3, 5, 3)
self.sub = Sub()
def forward(self, x):
return self.sub(self.conv(x))
m = torch.jit.script(M())
# only quantize child module.
m = prepare_dynamic_jit(m, {"sub.fc": default_dynamic_qconfig})
# input of sub for dynamic quant
assert len(attrs_with_prefix(m, "_observer_")) == 1
# not quantized
assert len(attrs_with_prefix(m.conv, "_observer_")) == 0
        # no observers since we observe in the outermost call site
assert len(attrs_with_prefix(m.sub, "_observer_")) == 0
# weight of linear
assert len(attrs_with_prefix(m.sub.fc, "_observer_")) == 1
FileCheck().check('prim::GetAttr[name="sub').check("prim::CallMethod").check(
'Observer = prim::GetAttr[name="_observer_'
).check("prim::CallMethod").check_not(
'Observer = prim::GetAttr[name="_observer_'
).run(
m.graph
)
def test_insert_quant_dequant_linear_dynamic(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.fc1 = torch.nn.Linear(5, 5).float()
self.fc2 = torch.nn.Linear(5, 5).float()
def forward(self, x):
x = self.fc1(x)
return self.fc2(x)
for is_per_channel in [True, False]:
m = torch.jit.script(M())
qconfig = (
per_channel_dynamic_qconfig
if is_per_channel is True
else default_dynamic_qconfig
)
m = quantize_dynamic_jit(m, {"": qconfig}, debug=True)
assert (
len(m._modules._c.items()) == 2
), "Expected to have two submodule of linear"
wt_quant_func = (
"aten::quantize_per_channel"
if is_per_channel
else "aten::quantize_per_tensor"
)
act_quant_func = "aten::quantize_per_tensor"
# quantizing activations
FileCheck().check("aten::_choose_qparams_per_tensor").check_next(
act_quant_func
).check_next("aten::dequantize").check(
"aten::_choose_qparams_per_tensor"
).check_next(
act_quant_func
).check_next(
"aten::dequantize"
).check(
wt_quant_func
).check_next(
"aten::dequantize"
).check_not(
wt_quant_func
).check(
"return"
).run(
m.graph
)
@override_qengines
def test_dynamic_multi_op(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
def forward(self, x):
x = x + 5
return self.fc1(x)
x = torch.randn(5, 5)
for tracing in [True, False]:
model = self.checkGraphModeOp(
M(), x, "quantized::linear_dynamic", tracing=tracing, dynamic=True
)
# add op is not dynamically quantized.
FileCheck().check("aten::add").run(model.graph)
@override_qengines
def test_dynamic_quant_multi_uses(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.fc = torch.nn.Linear(5, 5).float()
def forward(self, x):
size1 = x.size()
size2 = x.size()
return self.fc(x), size1, size2
x = torch.randn(5, 5)
for tracing in [True, False]:
model = self.checkGraphModeOp(
M(), x, "quantized::linear_dynamic", tracing=tracing, dynamic=True
)
FileCheck().check_not("aten::_choose_qparams_per_tensor").run(model.graph)
@override_qengines
def test_dynamic_shared_weights(self):
class myMod(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.linear = nn.Linear(5, 5)
self.linear.weight = weight
def forward(self, x):
return self.linear(x)
class DynamicModel(torch.nn.Module):
def __init__(self):
super(DynamicModel, self).__init__()
self.weight = torch.nn.Parameter(torch.ones(5, 5))
self.mod1 = myMod(self.weight)
def forward(self, x):
y = self.mod1(x)
z = torch.nn.functional.linear(y, self.weight)
return z
model = torch.jit.script(DynamicModel()).eval()
data = torch.randn(5, 5, dtype=torch.float)
quant_ops = ["mod1", ""]
counts = [1, 2]
for op, count in zip(quant_ops, counts):
qconfig_dict = {op: default_dynamic_qconfig}
m1 = quantize_dynamic_jit(model, qconfig_dict)
out_graph = m1(data)
FileCheck().check_count(
"quantized::linear_dynamic(", count, exactly=True
).check_not("aten::_choose_qparams_per_tensor").run(m1.graph)
# Explicitly call forward on model before convert
m2 = prepare_dynamic_jit(model, qconfig_dict)
m2(data)
m2 = convert_dynamic_jit(m2, debug=False)
out_ref = m2(data)
self.assertEqual(out_graph, out_ref)
@override_qengines
def test_dynamic_with_if(self):
class Res(torch.nn.Module):
def __init__(self):
super(Res, self).__init__()
self.weight = torch.nn.Parameter(torch.ones(5, 5))
def forward(self, x: torch.Tensor, cond: bool) -> torch.Tensor:
if cond:
return torch.nn.functional.linear(x, self.weight)
else:
return torch.nn.functional.linear(x, self.weight)
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.res1 = Res()
self.res2 = Res()
def forward(self, x):
x = self.res1(x, True)
x = self.res2(x, False)
return x
model = torch.jit.script(M()).eval()
data = torch.randn(5, 5, dtype=torch.float)
qconfig_dict = {"": default_dynamic_qconfig}
for tracing in [True, False]:
m1 = self.checkGraphModeOp(
M(), data, "quantized::linear_dynamic", tracing=tracing, dynamic=True
)
FileCheck().check_count(
"quantized::linear_dynamic(", 2, exactly=True
).check_not("aten::_choose_qparams_per_tensor").run(m1.graph)
# Check to make sure weight observers run correctly
ref_qparams = []
qconfig = script_qconfig(default_dynamic_qconfig)
wt_module = wrap_cpp_module(qconfig.weight)
for wt in [model.res1.weight, model.res2.weight]:
wt_module(wt)
qparams = wt_module.calculate_qparams()
ref_qparams.append((qparams[0].item(), qparams[1].item()))
m2 = quantize_dynamic_jit(model, qconfig_dict, debug=True)
graph_params = []
for x, obs in m2._modules._c.items():
if x == "res1":
graph_params.append(
(obs.getattr("weight.2_scale_0"), obs.getattr("weight.2_zero_point_0"))
)
elif x == "res2":
graph_params.append(
(obs.getattr("weight.4_scale_0"), obs.getattr("weight.4_zero_point_0"))
)
self.assertEqual(ref_qparams, graph_params)
def test_dynamic_weight_observer(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.fc = torch.nn.Linear(5, 5).float()
self.fc2 = torch.nn.Linear(5, 5).float()
def forward(self, x):
x = self.fc(x)
return self.fc2(x)
qconfig_dict = {"": default_dynamic_qconfig}
eager_model = M().eval()
for tracing in [True, False]:
x = torch.rand(5, 5)
model = get_script_module(eager_model, tracing, x)
ref_qparams = []
for wt in [model.fc.weight, model.fc2.weight]:
wt_module = default_dynamic_qconfig.weight()
wt_module(wt)
qparams = wt_module.calculate_qparams()
ref_qparams.append((qparams[0].item(), qparams[1].item()))
model = quantize_dynamic_jit(model, qconfig_dict, debug=True)
graph_qparams = []
for x, obs in model._modules._c.items():
n = 2 if x == 'fc' and tracing else 1
graph_qparams.append(
(obs.getattr(f"weight.{n}_scale_0"),
obs.getattr(f"weight.{n}_zero_point_0"))
)
self.assertEqual(ref_qparams, graph_qparams)
def test_convert_dynamic_fp16(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.fc = torch.nn.Linear(5, 5)
def forward(self, x):
return self.fc(x)
m = torch.jit.script(M())
m = quantize_dynamic_jit(m, {"": float16_dynamic_qconfig}, debug=True)
FileCheck().check("aten::_saturate_weight_to_fp16").check(
"aten::linear"
).check_not("aten::dequantize").check_not("aten::quantize").run(m.graph)
def test_quantize_dynamic_fp16(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.fc = torch.nn.Linear(5, 5)
def forward(self, x):
return self.fc(x)
m = torch.jit.script(M())
m = quantize_dynamic_jit(m, {"": float16_dynamic_qconfig})
FileCheck().check("quantized::linear_dynamic_fp16").check_not(
"aten::linear"
).check_not("aten::dequantize").check_not("aten::quantize").run(m.graph)
class TestQuantizeDynamicJitOps(QuantizationTestCase):
"""Test graph mode post training dynamic quantization works
for individual ops end to end.
"""
@override_qengines
def test_linear(self):
class FunctionalLinear(torch.nn.Module):
def __init__(self, weight, bias):
super(FunctionalLinear, self).__init__()
self.weight = weight
self.bias = bias
def forward(self, x):
return F.linear(x, self.weight, self.bias)
x = torch.rand(5, 5)
for tracing in [True, False]:
model = self.checkGraphModeOp(
torch.nn.Linear(5, 5),
x,
"quantized::linear_dynamic",
tracing=tracing,
dynamic=True,
)
weight = torch.rand(5, 5)
b = torch.rand(5)
for tracing, has_bias in itertools.product([True, False], [True, False]):
bias = b if has_bias else None
model = self.checkGraphModeOp(
FunctionalLinear(weight, bias),
x,
"quantized::linear_dynamic",
tracing=tracing,
dynamic=True,
)
@skipIfNoFBGEMM
def test_embedding_bag(self):
class M(torch.nn.Module):
def __init__(self, weights):
super(M, self).__init__()
self.embedding1 = torch.nn.EmbeddingBag(
num_embeddings=10,
embedding_dim=12,
include_last_offset=True,
sparse=True,
_weight=weights,
mode="sum",
)
self.embedding2 = torch.nn.EmbeddingBag(
num_embeddings=10,
embedding_dim=12,
include_last_offset=True,
sparse=True,
_weight=weights,
mode="sum",
)
def forward(self, indices1, offsets1, indices2, offsets2):
e1 = self.embedding1(indices1, offsets1)
e2 = self.embedding2(indices2, offsets2)
return e1, e2
weights = torch.randn(10, 12, dtype=torch.float32)
module = M(weights)
indices = torch.tensor(
[
9,
6,
5,
7,
8,
8,
9,
2,
8,
6,
6,
9,
1,
6,
8,
8,
3,
2,
3,
6,
3,
6,
5,
7,
0,
8,
4,
6,
5,
8,
2,
3,
]
)
offsets = torch.tensor([0, 19, 20, 28, 28, 32])
dummy_inputs = (indices, offsets, indices, offsets)
for trace in [True, False]:
if trace:
m = torch.jit.trace(module, dummy_inputs)
else:
m = torch.jit.script(module)
int4_qconfig = QConfig(
activation=PlaceholderObserver.with_args(
dtype=torch.float, custom_op_name="embedding_bag_4bit"
),
weight=PlaceholderObserver.with_args(
custom_op_name="embedding_bag_4bit"
),
)
int8_qconfig = QConfig(
activation=PlaceholderObserver.with_args(
dtype=torch.float, custom_op_name="embedding_bag_byte"
),
weight=PlaceholderObserver.with_args(
custom_op_name="embedding_bag_byte"
),
)
m = prepare_jit(m, {"embedding1": int4_qconfig, "embedding2": int8_qconfig})
m = convert_jit(m)
FileCheck().check("quantized::embedding_bag_4bit_rowwise_offsets").check(
"quantized::embedding_bag_byte_rowwise_offsets"
).run(m.graph)
m(*dummy_inputs)
# Ensure that attempting to quantize an EmbeddingBag throws an error if
# padding_idx is not None
@skipIfNoFBGEMM
def test_embedding_bag_padding_idx_error(self):
class M(torch.nn.Module):
def __init__(self, weights):
super(M, self).__init__()
self.embedding = torch.nn.EmbeddingBag(
num_embeddings=10,
embedding_dim=12,
include_last_offset=True,
sparse=True,
_weight=weights,
mode="sum",
padding_idx=0,
)
def forward(self, indices, offsets):
e = self.embedding(indices, offsets)
return e
weights = torch.randn(10, 12, dtype=torch.float32)
module = M(weights)
indices = torch.tensor([0, 1, 2, 3, 4])
offsets = torch.tensor([0, 2, 5])
dummy_inputs = (indices, offsets)
int4_qconfig = QConfig(
activation=PlaceholderObserver.with_args(
dtype=torch.float, custom_op_name="embedding_bag_4bit"
),
weight=PlaceholderObserver.with_args(
custom_op_name="embedding_bag_4bit"
),
)
int8_qconfig = QConfig(
activation=PlaceholderObserver.with_args(
dtype=torch.float, custom_op_name="embedding_bag_byte"
),
weight=PlaceholderObserver.with_args(
custom_op_name="embedding_bag_byte"
),
)
error_msg = r'Expected aten::embedding_bag padding_idx input to be None'
for trace, qconfig in itertools.product([True, False], [int4_qconfig, int8_qconfig]):
if trace:
m = torch.jit.trace(module, dummy_inputs)
else:
m = torch.jit.script(module)
m = prepare_jit(m, {"embedding": qconfig})
with self.assertRaisesRegex(RuntimeError, error_msg):
m = convert_jit(m)
class TestQuantizeJit(QuantizationTestCase):
@override_qengines
def test_single_linear(self):
r"""Compare the result of quantizing single linear layer in
eager mode and graph mode
"""
# eager mode
annotated_linear_model = AnnotatedSingleLayerLinearModel(
torch.backends.quantized.engine
).eval()
linear_model = SingleLayerLinearModel().eval()
# copy the weight from eager mode so that we can
# compare the result of the two quantized models later
linear_model.fc1.weight = torch.nn.Parameter(
annotated_linear_model.fc1.module.weight.detach()
)
linear_model.fc1.bias = torch.nn.Parameter(
annotated_linear_model.fc1.module.bias.detach()
)
model_eager = quantize(
annotated_linear_model, test_only_eval_fn, [self.calib_data]
)
qconfig_dict = {"": get_default_qconfig(torch.backends.quantized.engine)}
model_traced = torch.jit.trace(linear_model, self.calib_data[0][0])
model_script = torch.jit.script(linear_model)
result_eager = model_eager(self.calib_data[0][0])
for model_under_test in [model_traced, model_script]:
model_quantized = quantize_jit(
model_under_test,
qconfig_dict,
test_only_eval_fn,
[self.calib_data],
inplace=False,
)
self.assertEqual(model_quantized(self.calib_data[0][0]), result_eager)
@skipIfNoFBGEMM
def test_observer_with_ignored_function(self):
r"""Test observers with ignored function and make sure it works in
graph mode
"""
# eager mode
annotated_linear_model = AnnotatedSingleLayerLinearModel("fbgemm").eval()
for qconfig in [
QConfig(activation=default_observer, weight=default_weight_observer),
QConfig(
activation=default_histogram_observer, weight=default_weight_observer
),
QConfig(
activation=default_observer, weight=default_per_channel_weight_observer
),
]:
annotated_linear_model.qconfig = qconfig
linear_model = SingleLayerLinearModel().eval()
# copy the weight from eager mode so that we can
# compare the result of the two quantized models later
linear_model.fc1.weight = torch.nn.Parameter(
annotated_linear_model.fc1.module.weight.detach()
)
linear_model.fc1.bias = torch.nn.Parameter(
annotated_linear_model.fc1.module.bias.detach()
)
model_eager = quantize(
annotated_linear_model, test_only_eval_fn, [self.calib_data]
)
qconfig_dict = {"": qconfig}
model_traced = torch.jit.trace(linear_model, self.calib_data[0][0])
model_script = torch.jit.script(linear_model)
result_eager = model_eager(self.calib_data[0][0])
for model_under_test in [model_traced, model_script]:
model_quantized = quantize_jit(
model_under_test,
qconfig_dict,
test_only_eval_fn,
[self.calib_data],
inplace=False,
)
self.assertEqual(model_quantized(self.calib_data[0][0]), result_eager)
@override_qengines
def test_conv(self):
r"""Compare the result of quantizing conv layer in
eager mode and graph mode
"""
# eager mode
annotated_conv_model = AnnotatedConvModel(
torch.backends.quantized.engine
).eval()
conv_model = ConvModel().eval()
# copy the weight from eager mode so that we can
# compare the result of the two quantized models later
conv_model.conv.weight = torch.nn.Parameter(
annotated_conv_model.conv.weight.detach()
)
model_eager = quantize(
annotated_conv_model, test_only_eval_fn, [self.img_data_2d]
)
qconfig_dict = {"": get_default_qconfig(torch.backends.quantized.engine)}
model_traced = torch.jit.trace(conv_model, self.img_data_2d[0][0])
model_script = torch.jit.script(conv_model)
result_eager = model_eager(self.img_data_2d[0][0])
for model_under_test in [model_traced, model_script]:
model_quantized = quantize_jit(
model_under_test,
qconfig_dict,
test_only_eval_fn,
[self.img_data_2d],
inplace=False,
)
self.assertEqual(model_quantized(self.img_data_2d[0][0]), result_eager)
@override_qengines
def test_conv_transpose(self):
r"""Compare the result of quantizing conv_transpose layer in
eager mode and graph mode
"""
if not qengine_is_qnnpack():
return # Currently only qnnpack is supported
# eager mode
annotated_conv_model = AnnotatedConvTransposeModel(
torch.backends.quantized.engine
).eval()
conv_model = ConvTransposeModel().eval()
# copy the weight from eager mode so that we can
# compare the result of the two quantized models later
conv_model.conv.weight = torch.nn.Parameter(
annotated_conv_model.conv.weight.detach()
)
model_eager = quantize(
annotated_conv_model, test_only_eval_fn, [self.img_data_2d]
)
qconfig_dict = {"": get_default_qconfig(torch.backends.quantized.engine)}
model_traced = torch.jit.trace(conv_model, self.img_data_2d[0][0])
model_script = torch.jit.script(conv_model)
result_eager = model_eager(self.img_data_2d[0][0])
for model_under_test in [model_traced, model_script]:
model_quantized = quantize_jit(
model_under_test,
qconfig_dict,
test_only_eval_fn,
[self.img_data_2d],
inplace=False,
)
self.assertEqual(model_quantized(self.img_data_2d[0][0]), result_eager)
@override_qengines
def test_conv_bn(self):
r"""Compare the result of quantizing conv + bn layer in
eager mode and graph mode
"""
# eager mode
conv_model = AnnotatedConvBnModel().eval()
conv_model_to_script = ConvBnModel().eval()
# copy the weight from eager mode so that we can
# compare the result of the two quantized models later
conv_model_to_script.conv.weight = torch.nn.Parameter(
conv_model.conv.weight.detach()
)
fuse_modules(conv_model, ["conv", "bn"], inplace=True)
model_eager = quantize(conv_model, test_only_eval_fn, [self.img_data_2d])
qconfig_dict = {"": default_qconfig}
model_script = quantize_jit(
torch.jit.script(conv_model_to_script),
qconfig_dict,
test_only_eval_fn,
[self.img_data_2d],
inplace=False,
)
result_eager = model_eager(self.img_data_2d[0][0])
result_script = model_script(self.img_data_2d[0][0])
self.assertEqual(result_eager, result_script)
@override_qengines
def test_nested(self):
# Eager mode
eager_model = AnnotatedNestedModel(torch.backends.quantized.engine).eval()
# Graph mode
script_model = NestedModel().eval()
        # Copy weights from eager_model
script_model.sub1.fc.weight = torch.nn.Parameter(
eager_model.sub1.fc.weight.detach()
)
script_model.sub1.fc.bias = torch.nn.Parameter(
eager_model.sub1.fc.bias.detach()
)
script_model.sub2.fc1.weight = torch.nn.Parameter(
eager_model.sub2.fc1.module.weight.detach()
)
script_model.sub2.fc1.bias = torch.nn.Parameter(
eager_model.sub2.fc1.module.bias.detach()
)
script_model.sub2.fc2.weight = torch.nn.Parameter(
eager_model.sub2.fc2.weight.detach()
)
script_model.sub2.fc2.bias = torch.nn.Parameter(
eager_model.sub2.fc2.bias.detach()
)
script_model.fc3.weight = torch.nn.Parameter(
eager_model.fc3.module.weight.detach()
)
script_model.fc3.bias = torch.nn.Parameter(eager_model.fc3.module.bias.detach())
model_eager = quantize(eager_model, test_only_eval_fn, [self.calib_data])
qconfig_dict = {
"sub2.fc1": default_per_channel_qconfig
if qengine_is_fbgemm()
else default_qconfig,
"fc3": default_qconfig,
}
model_traced = torch.jit.trace(script_model, self.calib_data[0][0])
model_script = torch.jit.script(script_model)
result_eager = model_eager(self.calib_data[0][0])
for model_under_test in [model_traced, model_script]:
model_quantized = quantize_jit(
model_under_test,
qconfig_dict,
test_only_eval_fn,
[self.calib_data],
inplace=False,
)
self.assertEqual(model_quantized(self.calib_data[0][0]), result_eager)
@override_qengines
def test_skip_quant(self):
"""Test None qconfig"""
# Eager mode
eager_model = AnnotatedSkipQuantModel(torch.backends.quantized.engine).eval()
# Graph mode
script_model = SkipQuantModel().eval()
        # Copy weights from eager_model
script_model.sub.fc1.weight = torch.nn.Parameter(
eager_model.sub.module.fc1.weight.detach()
)
script_model.sub.fc1.bias = torch.nn.Parameter(
eager_model.sub.module.fc1.bias.detach()
)
script_model.sub.fc2.weight = torch.nn.Parameter(
eager_model.sub.module.fc2.weight.detach()
)
script_model.sub.fc2.bias = torch.nn.Parameter(
eager_model.sub.module.fc2.bias.detach()
)
script_model.fc.weight = torch.nn.Parameter(eager_model.fc.weight.detach())
script_model.fc.bias = torch.nn.Parameter(eager_model.fc.bias.detach())
eager_model.fuse_modules()
model_eager = quantize(eager_model, test_only_eval_fn, [self.calib_data])
qconfig_dict = {
"": get_default_qconfig(torch.backends.quantized.engine),
"fc": None,
}
model_traced = torch.jit.trace(script_model, self.calib_data[0][0])
model_script = torch.jit.script(script_model)
result_eager = model_eager(self.calib_data[0][0])
for model_under_test in [model_traced, model_script]:
model_quantized = quantize_jit(
model_under_test,
qconfig_dict,
test_only_eval_fn,
[self.calib_data],
inplace=False,
)
self.assertEqual(model_quantized(self.calib_data[0][0]), result_eager)
@override_qengines
def test_single_linear_dynamic(self):
r"""Compare the result of dynamic quantization of single linear layer in
eager mode and graph mode.
"""
if qengine_is_qnnpack():
# eager mode
annotated_linear_model = AnnotatedSingleLayerLinearModel("qnnpack").eval()
linear_model = SingleLayerLinearModel().eval()
# copy the weight from eager mode so that we can
# compare the result of the two quantized models later
linear_model.fc1.weight = torch.nn.Parameter(
annotated_linear_model.fc1.module.weight.detach()
)
linear_model.fc1.bias = torch.nn.Parameter(
annotated_linear_model.fc1.module.bias.detach()
)
qconfig_dict = {"": default_dynamic_qconfig}
model_eager = quantize_dynamic(annotated_linear_model, qconfig_dict)
model_traced = torch.jit.trace(linear_model, self.calib_data[0][0])
model_script = torch.jit.script(linear_model)
result_eager = model_eager(self.calib_data[0][0])
for model_under_test in [model_traced, model_script]:
model_quantized = quantize_dynamic_jit(model_under_test, qconfig_dict)
self.assertEqual(model_quantized(self.calib_data[0][0]), result_eager)
# Check to make sure choose_qparams->quant->dequant->linear is numerically
# equivalent to the final quantized model.
model_fake_quantized = quantize_dynamic_jit(
model_under_test, qconfig_dict, debug=True
)
self.assertEqual(
model_fake_quantized(self.calib_data[0][0]), result_eager
)
@skipIfNoFBGEMM
def test_linear_dynamic_fp16(self):
linear_model = SingleLayerLinearModel().eval()
# Create weight tensor values that are beyond fp16 max
x = torch.ones(5, 5) * 65532
linear_model.fc1.weight = torch.nn.Parameter(x)
import warnings
model_eager = quantize_dynamic(linear_model, dtype=torch.float16)
result_eager = model_eager(self.calib_data[0][0])
for trace in [True]:
with warnings.catch_warnings(record=True) as w:
quantized_model = self.checkGraphModeOp(
linear_model,
self.calib_data[0][0],
"quantized::linear_dynamic_fp16",
tracing=trace,
dynamic=True,
qconfig=float16_dynamic_qconfig,
)
# compare result with eager mode
self.assertEqual(quantized_model(self.calib_data[0][0]), result_eager)
|
pytorch-master
|
test/quantization/jit/test_quantize_jit.py
|
pytorch-master
|
test/quantization/jit/__init__.py
|
|
# Owner(s): ["oncall: quantization"]
import unittest
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.ao.quantization import (
DeQuantStub,
QuantStub,
convert,
default_qconfig,
prepare,
quantize,
quantize_dynamic,
)
from torch.ao.ns._numeric_suite import (
OutputLogger,
Shadow,
ShadowLogger,
compare_model_outputs,
compare_model_stub,
compare_weights,
prepare_model_outputs,
get_matching_activations,
)
from torch.testing._internal.common_quantization import (
AnnotatedConvBnReLUModel,
AnnotatedConvModel,
AnnotatedConvTransposeModel,
AnnotatedSingleLayerLinearModel,
LSTMwithHiddenDynamicModel,
AnnotatedTwoLayerLinearModel,
QuantizationTestCase,
SingleLayerLinearDynamicModel,
test_only_eval_fn,
skip_if_no_torchvision,
)
from torch.testing._internal.common_quantized import override_qengines
from torch.testing._internal.common_utils import IS_ARM64
class SubModule(torch.nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.qconfig = default_qconfig
self.mod1 = torch.nn.Conv2d(3, 3, 3, bias=False).to(dtype=torch.float)
self.mod2 = nn.ReLU()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.mod1(x)
x = self.mod2(x)
x = self.dequant(x)
return x
class ModelWithSubModules(torch.nn.Module):
def __init__(self):
super(ModelWithSubModules, self).__init__()
self.mod1 = SubModule()
self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
def forward(self, x):
x = self.mod1(x)
x = self.conv(x)
return x
class ModelWithFunctionals(torch.nn.Module):
def __init__(self):
super(ModelWithFunctionals, self).__init__()
self.mycat = nnq.FloatFunctional()
self.myadd = nnq.FloatFunctional()
self.mymul = nnq.FloatFunctional()
self.myadd_relu = nnq.FloatFunctional()
self.my_scalar_add = nnq.FloatFunctional()
self.my_scalar_mul = nnq.FloatFunctional()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.mycat.cat([x, x, x])
x = self.myadd.add(x, x)
x = self.mymul.mul(x, x)
x = self.myadd_relu.add_relu(x, x)
w = self.my_scalar_add.add_scalar(x, -0.5)
w = self.my_scalar_mul.mul_scalar(w, 0.5)
w = self.dequant(w)
return w
class TestNumericSuiteEager(QuantizationTestCase):
@override_qengines
def test_compare_weights_conv_static(self):
r"""Compare the weights of float and static quantized conv layer"""
qengine = torch.backends.quantized.engine
def compare_and_validate_results(float_model, q_model):
weight_dict = compare_weights(
float_model.state_dict(), q_model.state_dict()
)
self.assertEqual(len(weight_dict), 1)
for k, v in weight_dict.items():
self.assertTrue(v["float"].shape == v["quantized"].shape)
model_list = [AnnotatedConvModel(qengine), AnnotatedConvBnReLUModel(qengine)]
for model in model_list:
model.eval()
if hasattr(model, "fuse_model"):
model.fuse_model()
q_model = quantize(model, test_only_eval_fn, [self.img_data_2d])
compare_and_validate_results(model, q_model)
@override_qengines
def test_compare_weights_linear_static(self):
r"""Compare the weights of float and static quantized linear layer"""
qengine = torch.backends.quantized.engine
def compare_and_validate_results(float_model, q_model):
weight_dict = compare_weights(
float_model.state_dict(), q_model.state_dict()
)
self.assertEqual(len(weight_dict), 1)
for k, v in weight_dict.items():
self.assertTrue(v["float"].shape == v["quantized"].shape)
model_list = [AnnotatedSingleLayerLinearModel(qengine)]
for model in model_list:
model.eval()
if hasattr(model, "fuse_model"):
model.fuse_model()
q_model = quantize(model, test_only_eval_fn, [self.calib_data])
compare_and_validate_results(model, q_model)
@override_qengines
def test_compare_weights_linear_dynamic(self):
r"""Compare the weights of float and dynamic quantized linear layer"""
qengine = torch.backends.quantized.engine
def compare_and_validate_results(float_model, q_model):
weight_dict = compare_weights(
float_model.state_dict(), q_model.state_dict()
)
self.assertEqual(len(weight_dict), 1)
for k, v in weight_dict.items():
self.assertTrue(len(v["float"]) == len(v["quantized"]))
for i, val in enumerate(v["quantized"]):
self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
model_list = [SingleLayerLinearDynamicModel(qengine)]
for model in model_list:
model.eval()
if hasattr(model, "fuse_model"):
model.fuse_model()
q_model = quantize_dynamic(model)
compare_and_validate_results(model, q_model)
@override_qengines
def test_compare_weights_lstm_dynamic(self):
r"""Compare the weights of float and dynamic quantized LSTM layer"""
qengine = torch.backends.quantized.engine
def compare_and_validate_results(float_model, q_model):
weight_dict = compare_weights(
float_model.state_dict(), q_model.state_dict()
)
self.assertEqual(len(weight_dict), 1)
for k, v in weight_dict.items():
self.assertTrue(len(v["float"]) == len(v["quantized"]))
for i, val in enumerate(v["quantized"]):
self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
model_list = [LSTMwithHiddenDynamicModel(qengine)]
for model in model_list:
model.eval()
if hasattr(model, "fuse_model"):
model.fuse_model()
q_model = quantize_dynamic(model)
compare_and_validate_results(model, q_model)
@override_qengines
def test_compare_model_stub_conv_static(self):
r"""Compare the output of static quantized conv layer and its float shadow module"""
qengine = torch.backends.quantized.engine
def compare_and_validate_results(float_model, q_model, module_swap_list, data):
ob_dict = compare_model_stub(float_model, q_model, module_swap_list, data)
self.assertEqual(len(ob_dict), 1)
for k, v in ob_dict.items():
self.assertTrue(len(v["float"]) == len(v["quantized"]))
for i, val in enumerate(v["quantized"]):
self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
model_list = [AnnotatedConvModel(qengine),
AnnotatedConvTransposeModel("qnnpack"), # ConvT cannot use per channel weights
AnnotatedConvBnReLUModel(qengine)]
module_swap_list = [nn.Conv2d, nn.intrinsic.modules.fused.ConvReLU2d, nn.ConvTranspose2d]
for model in model_list:
model.eval()
if hasattr(model, "fuse_model"):
model.fuse_model()
q_model = quantize(model, test_only_eval_fn, [self.img_data_2d])
compare_and_validate_results(
model, q_model, module_swap_list, self.img_data_2d[0][0]
)
@override_qengines
def test_compare_model_stub_linear_static(self):
r"""Compare the output of static quantized linear layer and its float shadow module"""
qengine = torch.backends.quantized.engine
def compare_and_validate_results(float_model, q_model, module_swap_list, data):
ob_dict = compare_model_stub(float_model, q_model, module_swap_list, data)
self.assertEqual(len(ob_dict), 1)
for k, v in ob_dict.items():
self.assertTrue(len(v["float"]) == len(v["quantized"]))
for i, val in enumerate(v["quantized"]):
self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
linear_data = self.calib_data[0][0]
module_swap_list = [nn.Linear]
model_list = [AnnotatedSingleLayerLinearModel(qengine)]
for model in model_list:
model.eval()
if hasattr(model, "fuse_model"):
model.fuse_model()
q_model = quantize(model, test_only_eval_fn, [self.calib_data])
compare_and_validate_results(model, q_model, module_swap_list, linear_data)
@override_qengines
def test_compare_model_stub_partial(self):
r"""Compare the output of static quantized linear layer and its float shadow module"""
qengine = torch.backends.quantized.engine
# TODO: Rebase on top of PR to remove compare and validate results here
def compare_and_validate_results(float_model, q_model, module_swap_list, data):
ob_dict = compare_model_stub(float_model, q_model, module_swap_list, data)
self.assertEqual(len(ob_dict), 1)
for k, v in ob_dict.items():
self.assertTrue(len(v["float"]) == len(v["quantized"]))
for i, val in enumerate(v["quantized"]):
self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
linear_data = self.calib_data[0][0]
module_swap_list = [nn.Linear]
model_list = [AnnotatedTwoLayerLinearModel()]
for model in model_list:
model.eval()
if hasattr(model, "fuse_model"):
model.fuse_model()
q_model = quantize(model, test_only_eval_fn, [self.calib_data])
compare_and_validate_results(model, q_model, module_swap_list, linear_data)
@override_qengines
def test_compare_model_stub_submodule_static(self):
r"""Compare the output of static quantized submodule and its float shadow module"""
qengine = torch.backends.quantized.engine
model = ModelWithSubModules().eval()
q_model = quantize(model, test_only_eval_fn, [self.img_data_2d])
module_swap_list = [SubModule, nn.Conv2d]
ob_dict = compare_model_stub(
model, q_model, module_swap_list, self.img_data_2d[0][0]
)
# Since conv is not quantized, we do not insert a shadow module
        # mod1 contains a conv that is quantized, so we insert a shadow module
self.assertTrue(isinstance(q_model.mod1, Shadow))
self.assertFalse(isinstance(q_model.conv, Shadow))
@override_qengines
def test_compare_model_stub_functional_static(self):
r"""Compare the output of static quantized functional layer and its float shadow module"""
qengine = torch.backends.quantized.engine
model = ModelWithFunctionals().eval()
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
q_model = prepare(model, inplace=False)
q_model(self.img_data_2d[0][0])
q_model = convert(q_model)
module_swap_list = [nnq.FloatFunctional]
ob_dict = compare_model_stub(
model, q_model, module_swap_list, self.img_data_2d[0][0]
)
self.assertEqual(len(ob_dict), 6)
self.assertTrue(isinstance(q_model.mycat, Shadow))
self.assertTrue(isinstance(q_model.myadd, Shadow))
self.assertTrue(isinstance(q_model.mymul, Shadow))
self.assertTrue(isinstance(q_model.myadd_relu, Shadow))
self.assertTrue(isinstance(q_model.my_scalar_add, Shadow))
self.assertTrue(isinstance(q_model.my_scalar_mul, Shadow))
for k, v in ob_dict.items():
self.assertTrue(len(v["float"]) == len(v["quantized"]))
for i, val in enumerate(v["quantized"]):
self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
@override_qengines
def test_compare_model_stub_linear_dynamic(self):
r"""Compare the output of dynamic quantized linear layer and its float shadow module"""
qengine = torch.backends.quantized.engine
def compare_and_validate_results(float_model, q_model, module_swap_list, data):
ob_dict = compare_model_stub(float_model, q_model, module_swap_list, data)
self.assertEqual(len(ob_dict), 1)
for k, v in ob_dict.items():
self.assertTrue(len(v["float"]) == len(v["quantized"]))
for i, val in enumerate(v["quantized"]):
self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
linear_data = self.calib_data[0][0]
model_list = [SingleLayerLinearDynamicModel(qengine)]
module_swap_list = [nn.Linear, nn.LSTM]
for model in model_list:
model.eval()
if hasattr(model, "fuse_model"):
model.fuse_model()
q_model = quantize_dynamic(model)
compare_and_validate_results(model, q_model, module_swap_list, linear_data)
@override_qengines
def test_compare_model_stub_lstm_dynamic(self):
r"""Compare the output of dynamic quantized LSTM layer and its float shadow module"""
qengine = torch.backends.quantized.engine
def compare_and_validate_results(
float_model, q_model, module_swap_list, input, hidden
):
ob_dict = compare_model_stub(
float_model, q_model, module_swap_list, input, hidden
)
self.assertEqual(len(ob_dict), 1)
for k, v in ob_dict.items():
self.assertTrue(len(v["float"]) == len(v["quantized"]))
for i, val in enumerate(v["quantized"]):
self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
lstm_input = torch.rand((1, 1, 2))
lstm_hidden = (torch.rand(1, 1, 2), torch.rand(1, 1, 2))
model_list = [LSTMwithHiddenDynamicModel(qengine)]
module_swap_list = [nn.Linear, nn.LSTM]
for model in model_list:
model.eval()
if hasattr(model, "fuse_model"):
model.fuse_model()
q_model = quantize_dynamic(model)
compare_and_validate_results(
model, q_model, module_swap_list, lstm_input, lstm_hidden
)
@override_qengines
def test_compare_model_outputs_conv_static(self):
r"""Compare the output of conv layer in stataic quantized model and corresponding
output of conv layer in float model
"""
qengine = torch.backends.quantized.engine
def compare_and_validate_results(float_model, q_model, data):
act_compare_dict = compare_model_outputs(float_model, q_model, data)
expected_act_compare_dict_keys = {"conv.stats", "quant.stats"}
self.assertTrue(act_compare_dict.keys() == expected_act_compare_dict_keys)
for k, v in act_compare_dict.items():
self.assertTrue(v["float"][0].shape == v["quantized"][0].shape)
model_list = [AnnotatedConvModel(qengine), AnnotatedConvBnReLUModel(qengine)]
for model in model_list:
model.eval()
if hasattr(model, "fuse_model"):
model.fuse_model()
q_model = quantize(model, test_only_eval_fn, [self.img_data_2d])
compare_and_validate_results(model, q_model, self.img_data_2d[0][0])
@override_qengines
def test_compare_model_outputs_linear_static(self):
r"""Compare the output of linear layer in static quantized model and corresponding
        output of linear layer in float model
"""
qengine = torch.backends.quantized.engine
def compare_and_validate_results(float_model, q_model, data):
act_compare_dict = compare_model_outputs(float_model, q_model, data)
expected_act_compare_dict_keys = {"fc1.quant.stats", "fc1.module.stats"}
self.assertTrue(act_compare_dict.keys() == expected_act_compare_dict_keys)
for k, v in act_compare_dict.items():
self.assertTrue(len(v["float"]) == len(v["quantized"]))
for i, val in enumerate(v["quantized"]):
self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
linear_data = self.calib_data[0][0]
model_list = [AnnotatedSingleLayerLinearModel(qengine)]
for model in model_list:
model.eval()
if hasattr(model, "fuse_model"):
model.fuse_model()
q_model = quantize(model, test_only_eval_fn, [self.calib_data])
compare_and_validate_results(model, q_model, linear_data)
@override_qengines
def test_compare_model_outputs_functional_static(self):
r"""Compare the output of functional layer in static quantized model and corresponding
        output of functional layer in float model
"""
qengine = torch.backends.quantized.engine
model = ModelWithFunctionals().eval()
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
q_model = prepare(model, inplace=False)
q_model(self.img_data_2d[0][0])
q_model = convert(q_model)
act_compare_dict = compare_model_outputs(model, q_model, self.img_data_2d[0][0])
self.assertEqual(len(act_compare_dict), 5)
expected_act_compare_dict_keys = {
"mycat.stats",
"myadd.stats",
"mymul.stats",
"myadd_relu.stats",
"quant.stats",
}
self.assertTrue(act_compare_dict.keys() == expected_act_compare_dict_keys)
for k, v in act_compare_dict.items():
self.assertTrue(len(v["float"]) == len(v["quantized"]))
for i, val in enumerate(v["quantized"]):
self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
@override_qengines
def test_compare_model_outputs_linear_dynamic(self):
r"""Compare the output of linear layer in dynamic quantized model and corresponding
        output of linear layer in float model
"""
qengine = torch.backends.quantized.engine
def compare_and_validate_results(float_model, q_model, data):
act_compare_dict = compare_model_outputs(float_model, q_model, data)
expected_act_compare_dict_keys = {"fc1.stats"}
self.assertTrue(act_compare_dict.keys() == expected_act_compare_dict_keys)
for k, v in act_compare_dict.items():
self.assertTrue(len(v["float"]) == len(v["quantized"]))
for i, val in enumerate(v["quantized"]):
self.assertTrue(v["float"][i].shape == v["quantized"][i].shape)
linear_data = self.calib_data[0][0]
model_list = [SingleLayerLinearDynamicModel(qengine)]
for model in model_list:
model.eval()
if hasattr(model, "fuse_model"):
model.fuse_model()
q_model = quantize_dynamic(model)
compare_and_validate_results(model, q_model, linear_data)
@override_qengines
def test_compare_model_outputs_lstm_dynamic(self):
r"""Compare the output of LSTM layer in dynamic quantized model and corresponding
        output of LSTM layer in float model
"""
qengine = torch.backends.quantized.engine
def compare_and_validate_results(float_model, q_model, input, hidden):
act_compare_dict = compare_model_outputs(
float_model, q_model, input, hidden
)
expected_act_compare_dict_keys = {"lstm.stats"}
self.assertTrue(act_compare_dict.keys() == expected_act_compare_dict_keys)
for k, v in act_compare_dict.items():
self.assertTrue(len(v["float"]) == len(v["quantized"]))
for i, val in enumerate(v["quantized"]):
self.assertTrue(len(v["float"][i]) == len(v["quantized"][i]))
if i == 0:
self.assertTrue(v["float"][i][0].shape == v["quantized"][i][0].shape)
else:
self.assertTrue(
v["float"][i][0].shape == v["quantized"][i][0].shape
)
self.assertTrue(
v["float"][i][1].shape == v["quantized"][i][1].shape
)
lstm_input = torch.rand((1, 1, 2))
lstm_hidden = (torch.rand(1, 1, 2), torch.rand(1, 1, 2))
model_list = [LSTMwithHiddenDynamicModel(qengine)]
for model in model_list:
model.eval()
if hasattr(model, "fuse_model"):
model.fuse_model()
q_model = quantize_dynamic(model)
compare_and_validate_results(model, q_model, lstm_input, lstm_hidden)
@override_qengines
def test_output_logger(self):
r"""Compare output from OutputLogger with the expected results"""
x = torch.rand(2, 2)
y = torch.rand(2, 1)
l = []
l.append(x)
l.append(y)
logger = OutputLogger()
logger.forward(x)
logger.forward(y)
self.assertEqual(l, logger.stats["tensor_val"])
@override_qengines
def test_shadow_logger(self):
r"""Compare output from ShawdowLogger with the expected results"""
a_float = torch.rand(2, 2)
a_quantized = torch.rand(2, 2)
b_float = torch.rand(3, 2, 2)
b_quantized = torch.rand(3, 2, 2)
logger = ShadowLogger()
logger.forward(a_float, a_quantized)
logger.forward(b_float, b_quantized)
self.assertEqual(len(logger.stats["float"]), 2)
self.assertEqual(len(logger.stats["quantized"]), 2)
@skip_if_no_torchvision
def _test_vision_model(self, float_model):
float_model.to('cpu')
float_model.eval()
float_model.fuse_model()
float_model.qconfig = torch.quantization.default_qconfig
img_data = [(torch.rand(2, 3, 224, 224, dtype=torch.float), torch.randint(0, 1, (2,), dtype=torch.long)) for _ in range(2)]
qmodel = quantize(float_model, torch.quantization.default_eval_fn, [img_data], inplace=False)
wt_compare_dict = compare_weights(float_model.state_dict(), qmodel.state_dict())
def compute_error(x, y):
Ps = torch.norm(x)
Pn = torch.norm(x - y)
return 20 * torch.log10(Ps / Pn)
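        # compute_error above returns the signal-to-quantization-noise ratio in
        # dB, i.e. 20 * log10(||x|| / ||x - y||); larger values mean the
        # quantized activations track the float activations more closely.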
data = img_data[0][0]
        # Takes in a floating point and a quantized model as well as input data, and returns a dict with keys
# corresponding to the quantized module names and each entry being a dictionary with two keys 'float' and
# 'quantized', containing the activations of floating point and quantized model at matching locations.
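        # For example (key names are illustrative and depend on the model):
        #   act_compare_dict["features.0.stats"]["float"][0]      # float activation
        #   act_compare_dict["features.0.stats"]["quantized"][0]  # quantized activation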
act_compare_dict = compare_model_outputs(float_model, qmodel, data)
for key in act_compare_dict:
compute_error(act_compare_dict[key]['float'][0], act_compare_dict[key]['quantized'][0].dequantize())
prepare_model_outputs(float_model, qmodel)
for data in img_data:
float_model(data[0])
qmodel(data[0])
# get_matching_activations finds the matching activations between the floating point and quantized
# modules and returns a dict whose keys are the quantized module names, with each entry being a
# dictionary with two keys 'float' and 'quantized' containing the matching floating point and
# quantized activations logged by the loggers.
act_compare_dict = get_matching_activations(float_model, qmodel)
@skip_if_no_torchvision
@unittest.skipIf(IS_ARM64, "Not working on arm right now")
def test_mobilenet_v2(self):
from torchvision.models.quantization import mobilenet_v2
self._test_vision_model(mobilenet_v2(pretrained=True, quantize=False))
@skip_if_no_torchvision
@unittest.skipIf(IS_ARM64, "Not working on arm right now")
def test_mobilenet_v3(self):
from torchvision.models.quantization import mobilenet_v3_large
self._test_vision_model(mobilenet_v3_large(pretrained=True, quantize=False))
|
pytorch-master
|
test/quantization/eager/test_numeric_suite_eager.py
|
# Owner(s): ["oncall: quantization"]
import copy
import math
import torch
import torch.nn as nn
import torch.backends.mkldnn
from torch.nn import Conv2d, BatchNorm2d, ReLU, init
from torch.nn.intrinsic.qat import ConvBn2d, ConvBnReLU2d
from torch.nn.modules.utils import _pair
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
import torch.nn.qat as nnqat
import torch.nn.intrinsic.qat as nniqat
import torch.nn.qat.dynamic as nnqatd
from torch.ao.quantization import (
prepare,
convert,
prepare_qat,
quantize_qat,
QuantStub,
DeQuantStub,
default_qconfig,
default_qat_qconfig,
default_embedding_qat_qconfig,
default_symmetric_qnnpack_qat_qconfig,
get_default_qat_qconfig,
FixedQParamsFakeQuantize,
FusedMovingAvgObsFakeQuantize,
get_embedding_qat_module_mappings,
get_embedding_static_quant_module_mappings,
NoopObserver,
)
from torch.ao.quantization.qconfig import qconfig_equals
from torch.testing._internal.common_quantization import (
DeFusedEmbeddingBagLinear,
QuantizationTestCase,
QuantStubModel,
ManualLinearQATModel,
ManualDropoutQATModel,
ManualLinearDynamicQATModel,
ManualConvLinearQATModel,
ManualConvLinearSymmQATModel,
ManualEmbeddingBagLinear,
TwoLayerLinearModel,
test_only_eval_fn,
test_only_train_fn,
)
from torch.testing._internal.common_quantized import (
override_quantized_engine,
supported_qengines,
override_qengines,
)
from torch.testing._internal.common_utils import skipIfNoXNNPACK
from hypothesis import given
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
from functools import reduce
class _ReferenceConvBnNd(torch.nn.Conv2d, torch.nn.modules.conv._ConvNd):
"""
Conv-BN fusion implemented with explicit folding. Useful
to verify numerical equivalency with non-folded version.
"""
def __init__(self,
# ConvNd args
in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding,
groups,
bias,
padding_mode,
# BatchNormNd args
# num_features: out_channels
eps=1e-05, momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False,
qconfig=None):
nn.modules.conv._ConvNd.__init__(self, in_channels, out_channels, kernel_size,
stride, padding, dilation, transposed,
output_padding, groups, False, padding_mode)
assert qconfig, 'qconfig must be provided for QAT module'
self.qconfig = qconfig
self.eps = eps
self.momentum = momentum
self.freeze_bn = freeze_bn if self.training else True
self.num_features = out_channels
self.gamma = nn.Parameter(torch.empty(out_channels))
self.beta = nn.Parameter(torch.empty(out_channels))
self.affine = True
self.track_running_stats = True
self.register_buffer('running_mean', torch.zeros(out_channels))
self.register_buffer('running_var', torch.ones(out_channels))
self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
self.activation_post_process = self.qconfig.activation()
self.weight_fake_quant = self.qconfig.weight()
if bias:
self.bias = nn.Parameter(torch.empty(out_channels))
else:
self.register_parameter('bias', None)
self.reset_bn_parameters()
def reset_running_stats(self):
self.running_mean.zero_()
self.running_var.fill_(1)
self.num_batches_tracked.zero_()
def reset_bn_parameters(self):
self.reset_running_stats()
init.uniform_(self.gamma)
init.zeros_(self.beta)
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def reset_parameters(self):
super(_ReferenceConvBnNd, self).reset_parameters()
# A hack to avoid resetting the BN parameters before they are defined
# (reset_parameters() is called from the _ConvNd constructor before gamma/beta exist)
if hasattr(self, 'gamma'):
self.reset_bn_parameters()
def update_bn_stats(self):
self.freeze_bn = False
return self
def freeze_bn_stats(self):
self.freeze_bn = True
return self
def _forward(self, input):
# exponential_average_factor is set to self.momentum
# (when it is available) only so that it gets updated
# in the ONNX graph when this node is exported to ONNX.
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and not self.freeze_bn and self.track_running_stats:
# TODO: if statement only here to tell the jit to skip emitting this when it is None
if self.num_batches_tracked is not None:
self.num_batches_tracked += 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
# we use running statistics from the previous batch, so this is an
# approximation of the approach mentioned in the whitepaper, but we only
# need to do one convolution in this case instead of two
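# Folding sketch (standard Conv-BN algebra, not specific to this test):
#   bn(conv(x, W)) = gamma * (conv(x, W) - mu) / sqrt(var + eps) + beta
#                  = conv(x, W * gamma / sqrt(var + eps)) + (beta - gamma * mu / sqrt(var + eps))
# so scaling the weight by gamma / running_std lets a single convolution reproduce the
# BN output, with the remaining shift and batch-statistics correction handled below.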
running_std = torch.sqrt(self.running_var + self.eps)
scale_factor = self.gamma / running_std
scaled_weight = self.weight * scale_factor.reshape([-1, 1, 1, 1])
if self.bias is not None:
zero_bias = torch.zeros_like(self.bias)
else:
zero_bias = torch.zeros(self.out_channels, device=scaled_weight.device)
conv = self._conv_forward(input, self.weight_fake_quant(scaled_weight), zero_bias)
if self.training and not self.freeze_bn:
# recovering original conv to get original batch_mean and batch_var
if self.bias is not None:
conv_orig = conv / scale_factor.reshape([1, -1, 1, 1]) + self.bias.reshape([1, -1, 1, 1])
else:
conv_orig = conv / scale_factor.reshape([1, -1, 1, 1])
batch_mean = torch.mean(conv_orig, dim=[0, 2, 3])
batch_var = torch.var(conv_orig, dim=[0, 2, 3], unbiased=False)
n = float(conv_orig.numel() / conv_orig.size()[1])
unbiased_batch_var = batch_var * (n / (n - 1))
batch_rstd = torch.ones_like(batch_var, memory_format=torch.contiguous_format) / torch.sqrt(batch_var + self.eps)
conv = (self.gamma * batch_rstd).reshape([1, -1, 1, 1]) * conv_orig + \
(self.beta - self.gamma * batch_rstd * batch_mean).reshape([1, -1, 1, 1])
self.running_mean = exponential_average_factor * batch_mean.detach() + \
(1 - exponential_average_factor) * self.running_mean
self.running_var = exponential_average_factor * unbiased_batch_var.detach() + \
(1 - exponential_average_factor) * self.running_var
else:
if self.bias is None:
conv = conv + (self.beta - self.gamma * self.running_mean /
running_std).reshape([1, -1, 1, 1])
else:
conv = conv + (self.gamma * (self.bias - self.running_mean) / running_std + self.beta).reshape([1, -1, 1, 1])
return conv
def extra_repr(self):
# TODO(jerryzh): extend
return super(_ReferenceConvBnNd, self).extra_repr()
def forward(self, input):
return self.activation_post_process(self._forward(input))
@classmethod
def from_float(cls, mod, qconfig=None):
r"""Create a qat module from a float module or qparams_dict
Args: `mod` a float module, either produced by torch.ao.quantization utilities
or directly from user
"""
assert type(mod) == cls._FLOAT_MODULE, 'qat.' + cls.__name__ + '.from_float only works for ' + \
cls._FLOAT_MODULE.__name__
if not qconfig:
assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
assert mod.qconfig, 'Input float module must have a valid qconfig'
qconfig = mod.qconfig
conv, bn = mod[0], mod[1]
qat_convbn = cls(conv.in_channels, conv.out_channels, conv.kernel_size,
conv.stride, conv.padding, conv.dilation,
conv.groups, conv.bias is not None,
conv.padding_mode,
bn.eps, bn.momentum,
False,
qconfig)
qat_convbn.weight = conv.weight
qat_convbn.bias = conv.bias
qat_convbn.gamma = bn.weight
qat_convbn.beta = bn.bias
qat_convbn.running_mean = bn.running_mean
qat_convbn.running_var = bn.running_var
qat_convbn.num_batches_tracked = bn.num_batches_tracked
return qat_convbn
class _ReferenceConvBn2d(_ReferenceConvBnNd, nn.Conv2d):
_FLOAT_MODULE = torch.nn.intrinsic.ConvBn2d
def __init__(self,
# ConvNd args
in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=None,
padding_mode='zeros',
# BatchNorm2d args
# num_features: out_channels
eps=1e-05, momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False,
qconfig=None):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
_ReferenceConvBnNd.__init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, False, _pair(0), groups, bias, padding_mode,
eps, momentum, freeze_bn, qconfig)
class TestQuantizeEagerQAT(QuantizationTestCase):
def setUp(self):
super().setUp()
self.embed_linear_data_train = [[torch.randint(0, 10, (12, 12), dtype=torch.long),
torch.randn((12, 1), dtype=torch.float)]
for _ in range(2)]
self.embed_data = [[torch.randint(0, 10, (12, 1))]]
def test_manual(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = ManualLinearQATModel(qengine)
model = prepare_qat(model)
self.checkObservers(model)
test_only_train_fn(model, self.train_data)
model = convert(model)
def checkQuantized(model):
self.assertEqual(type(model.fc1), nnq.Linear)
self.assertEqual(type(model.fc2), nnq.Linear)
test_only_eval_fn(model, self.calib_data)
self.checkScriptable(model, self.calib_data)
self.checkNoQconfig(model)
checkQuantized(model)
model = quantize_qat(ManualLinearQATModel(qengine), test_only_train_fn,
[self.train_data])
checkQuantized(model)
def test_dropout(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = ManualDropoutQATModel(qengine)
model = prepare_qat(model)
self.checkObservers(model)
test_only_train_fn(model, self.train_data)
model = convert(model)
def checkQuantized(model):
self.assertEqual(type(model.fc1), nnq.Linear)
self.assertEqual(type(model.dropout), nnq.Dropout)
test_only_eval_fn(model, self.calib_data)
self.checkScriptable(model, self.calib_data)
self.checkNoQconfig(model)
checkQuantized(model)
model = quantize_qat(ManualDropoutQATModel(qengine), test_only_train_fn,
[self.train_data])
checkQuantized(model)
def test_eval_only_fake_quant(self):
r"""Using FakeQuant in evaluation only mode,
this is useful for estimating accuracy loss when we quantize the
network
"""
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = ManualLinearQATModel(qengine)
model = prepare_qat(model)
self.checkObservers(model)
model.eval()
test_only_eval_fn(model, self.calib_data)
def test_conv_linear(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = ManualConvLinearQATModel()
model = prepare_qat(model)
self.checkObservers(model)
test_only_train_fn(model, self.img_data_2d_train)
model = convert(model)
def checkQuantized(model):
self.assertEqual(type(model.conv), nnq.Conv2d)
self.assertEqual(type(model.fc1), nnq.Linear)
self.assertEqual(type(model.fc2), nnq.Linear)
test_only_eval_fn(model, self.img_data_2d)
self.checkScriptable(model, self.img_data_2d)
self.checkNoQconfig(model)
checkQuantized(model)
model = ManualConvLinearQATModel()
model = quantize_qat(model, test_only_train_fn, [self.img_data_2d_train])
checkQuantized(model)
@skipIfNoXNNPACK
def test_conv_linear_symm(self):
r"""Same as test_conv_linear but with Symmetric quantization.
Supported only with qengine=qnnpack, which uses symmetric
kernels from the XNNPACK library."""
for qengine in supported_qengines:
if qengine != 'qnnpack':
continue
with override_quantized_engine(qengine):
model = ManualConvLinearSymmQATModel()
model = prepare_qat(model)
self.checkObservers(model)
test_only_train_fn(model, self.img_data_2d_train)
model = convert(model)
def checkQuantized(model):
self.assertEqual(type(model.conv), nnq.Conv2d)
self.assertEqual(type(model.fc1), nnq.Linear)
self.assertEqual(type(model.fc2), nnq.Linear)
test_only_eval_fn(model, self.img_data_2d)
self.checkScriptable(model, self.img_data_2d)
self.checkNoQconfig(model)
checkQuantized(model)
model = ManualConvLinearSymmQATModel()
model = quantize_qat(model, test_only_train_fn, [self.img_data_2d_train])
checkQuantized(model)
def test_dynamic_qat_linear(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
# Dynamic QAT without memoryless observers should fail
with self.assertRaisesRegex(ValueError,
"Dynamic QAT requires a memoryless observer." +
"This means a MovingAverage observer with averaging constant equal to 1"
):
model = ManualLinearDynamicQATModel(default_qat_qconfig)
model = prepare_qat(model, mapping={torch.nn.Linear: nnqatd.Linear})
model = ManualLinearDynamicQATModel()
model = prepare_qat(model, mapping={torch.nn.Linear: nnqatd.Linear})
self.assertEqual(type(model.fc1), nnqatd.Linear)
self.assertEqual(type(model.fc2), nnqatd.Linear)
self.checkObservers(model)
test_only_train_fn(model, self.train_data)
model = convert(model, mapping={nnqatd.Linear: nnqd.Linear})
self.assertEqual(type(model.fc1), nnqd.Linear)
self.assertEqual(type(model.fc2), nnqd.Linear)
test_only_eval_fn(model, self.calib_data)
self.checkScriptable(model, self.calib_data)
self.checkNoQconfig(model)
def test_defused_embedding_bag_linear(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = DeFusedEmbeddingBagLinear().train()
model = prepare_qat(model, mapping=get_embedding_qat_module_mappings())
self.checkObservers(model)
test_only_train_fn(model, self.embed_linear_data_train)
# make sure activation_post_process is inserted after Linear.
self.assertEqual(type(model.linear.activation_post_process), FusedMovingAvgObsFakeQuantize)
# make sure that Embedding has a noop for activation.
self.assertEqual(type(model.emb.activation_post_process), NoopObserver)
# make sure that FakeQuant zero_points are correct dtype
self.assertEqual(model.emb.weight_fake_quant.zero_point.dtype, torch.float32)
self.assertEqual(model.linear.weight_fake_quant.zero_point.dtype, torch.int32)
model = convert(model, mapping=get_embedding_static_quant_module_mappings())
def checkQuantized(model):
# make sure Embedding is now a QuantizedEmbedding
self.assertEqual(type(model.emb), nn.quantized.Embedding)
# make sure Linear is now a QuantizedLinear
self.assertEqual(type(model.linear), nn.quantized.Linear)
test_only_eval_fn(model, self.embed_data)
self.checkScriptable(model, self.embed_data)
self.checkNoQconfig(model)
checkQuantized(model)
def test_embedding_bag_linear(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = ManualEmbeddingBagLinear().train()
model = prepare_qat(model, mapping=get_embedding_qat_module_mappings())
self.checkObservers(model)
test_only_train_fn(model, self.embed_linear_data_train)
# make sure no activation_post_process is inserted for EmbeddingBag
self.assertFalse(hasattr(model, "activation_post_process"))
# make sure that FakeQuant zero_points are correct dtype
self.assertEqual(model.emb.weight_fake_quant.zero_point.dtype, torch.float32)
self.assertEqual(model.linear.weight_fake_quant.zero_point.dtype, torch.int32)
model = convert(model, mapping=get_embedding_static_quant_module_mappings())
def checkQuantized(model):
# Make sure EmbeddingBag is now a quantized EmbeddingBag.
self.assertEqual(type(model.emb), nn.quantized.EmbeddingBag)
# Also test that Linear has been quantized.
self.assertEqual(type(model.linear), nnq.Linear)
test_only_eval_fn(model, self.embed_data)
self.checkScriptable(model, self.embed_data)
self.checkNoQconfig(model)
checkQuantized(model)
model = ManualEmbeddingBagLinear()
def test_train_save_load_eval(self):
r"""Test QAT flow of creating a model, doing QAT and saving the quantized state_dict
During eval, we first call prepare_qat and conver on the model and then load the state_dict
and compare results against original model
"""
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = TwoLayerLinearModel()
model = torch.ao.quantization.QuantWrapper(model)
model.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine)
model = prepare_qat(model)
fq_state_dict = model.state_dict()
test_only_train_fn(model, self.train_data)
model = convert(model)
quant_state_dict = model.state_dict()
x = torch.rand(2, 5, dtype=torch.float)
ref = model(x)
# Create model again for eval. Check result using quantized state_dict
model = TwoLayerLinearModel()
model = torch.ao.quantization.QuantWrapper(model)
model.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine)
torch.ao.quantization.prepare_qat(model, inplace=True)
new_state_dict = model.state_dict()
# Check to make sure the model after prepare_qat has the same state_dict as original.
self.assertEqual(set(fq_state_dict.keys()), set(new_state_dict.keys()))
torch.ao.quantization.convert(model, inplace=True)
model.eval()
model.load_state_dict(quant_state_dict)
out = model(x)
self.assertEqual(ref, out)
# Check model created using prepare has same state dict as quantized state_dict
model = TwoLayerLinearModel()
model.eval()
model = torch.ao.quantization.QuantWrapper(model)
model.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
torch.ao.quantization.prepare(model, inplace=True)
torch.ao.quantization.convert(model, inplace=True)
self.assertEqual(set(model.state_dict().keys()), set(quant_state_dict.keys()))
model.eval()
model.load_state_dict(quant_state_dict)
out = model(x)
self.assertEqual(ref, out)
@override_qengines
def test_forward_hooks_preserved(self):
r"""Test QAT on preserving pre forward and post forward hooks of original model
"""
qengine = torch.backends.quantized.engine
model = QuantStubModel()
counter = {
'pre_forwards': 0,
'forwards': 0,
}
def fw_pre_hook(h_module, input):
counter['pre_forwards'] += 1
def fw_hook(h_module, input, output):
counter['forwards'] += 1
model.fc.register_forward_pre_hook(fw_pre_hook)
model.fc.register_forward_hook(fw_hook)
model.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine)
model = prepare_qat(model)
def checkHooksIsPresent(model, before_convert=True):
forward_hooks = 1
if before_convert:
self.assertEqual(len(model.quant._forward_hooks.values()), 1,
"Quantization observer hook has disappeared")
forward_hooks = 2
self.assertObjectIn(fw_pre_hook, model.fc._forward_pre_hooks.values())
self.assertObjectIn(fw_hook, model.fc._forward_hooks.values())
self.assertEqual(len(model.fc._forward_pre_hooks.values()), 1,
"Extra pre forward hooks have appeared on a layer")
self.assertEqual(len(model.fc._forward_hooks.values()), forward_hooks,
"Extra post forward hooks have appeared on a layer")
checkHooksIsPresent(model, True)
x = torch.rand(2, 5, dtype=torch.float)
model(x)
torch.ao.quantization.convert(model, inplace=True)
checkHooksIsPresent(model, False)
def test_add_scalar_uses_input_qparams(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = torch.ao.quantization.QuantStub()
self.ff = torch.nn.quantized.FloatFunctional()
def forward(self, x):
x = self.quant(x)
x = self.ff.add_scalar(x, 1.0)
return x
m = M()
m.qconfig = torch.ao.quantization.default_qconfig
mp = torch.ao.quantization.prepare_qat(m)
mp(torch.randn(4, 4))
mq = torch.ao.quantization.convert(mp)
res = mq(torch.randn(4, 4))
eps = 1e-5
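# add_scalar derives the output quantization parameters from the input, so the output's
# q_scale is expected to match the QuantStub's scale; the check below verifies this.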
self.assertTrue(torch.abs(mq.quant.scale - res.q_scale()) < eps)
def test_mul_scalar_uses_input_qparams(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = torch.ao.quantization.QuantStub()
self.ff = torch.nn.quantized.FloatFunctional()
def forward(self, x):
x = self.quant(x)
x = self.ff.mul_scalar(x, 2.0)
return x
m = M()
m.qconfig = torch.ao.quantization.default_qconfig
mp = torch.ao.quantization.prepare_qat(m)
mp(torch.randn(4, 4))
mq = torch.ao.quantization.convert(mp)
res = mq(torch.randn(4, 4))
eps = 1e-5
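# mul_scalar also derives the output quantization parameters from the input: for a scalar
# of 2.0 the output scale should be roughly twice the input scale, as checked below.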
self.assertTrue(torch.abs(mq.quant.scale * 2 - res.q_scale()) < eps)
def test_qat_embedding_bag_errors(self):
default_qat_qconfig = get_default_qat_qconfig(torch.backends.quantized.engine)
# Test constructor parameters checks here.
with self.assertRaisesRegex(AssertionError,
"qconfig must be provided for QAT module"):
nnqat.EmbeddingBag(10, 5, qconfig=None)
with self.assertRaisesRegex(AssertionError,
"Embedding Bag weights requires a qscheme of " +
"torch.per_channel_affine_float_qparams"):
nnqat.EmbeddingBag(10, 5, qconfig=default_qat_qconfig)
# Test from_float checks here.
embed = nn.Embedding(10, 5)
with self.assertRaisesRegex(AssertionError,
"qat.EmbeddingBag.from_float only works for EmbeddingBag"):
nnqat.EmbeddingBag.from_float(embed)
embed_bag = nn.EmbeddingBag(10, 5)
with self.assertRaisesRegex(AssertionError,
"Input float module must have qconfig defined"):
nnqat.EmbeddingBag.from_float(embed_bag)
embed_bag.qconfig = None
with self.assertRaisesRegex(AssertionError,
"Input float module must have a valid qconfig"):
nnqat.EmbeddingBag.from_float(embed_bag)
embed_bag.qconfig = default_qat_qconfig
with self.assertRaisesRegex(AssertionError,
"Embedding Bag weights requires a qscheme of " +
"torch.per_channel_affine_float_qparams"):
nnqat.EmbeddingBag.from_float(embed_bag)
def test_embedding_qat_qconfig_equal(self):
# Embedding QAT uses a NoopObserver class for the activation
# and a FakeQuant for the weight; make sure that qconfig comparison
# works properly for a mix of partial functions and classes in the
# qconfig.
model = ManualEmbeddingBagLinear().train()
model = prepare_qat(model)
self.assertTrue(qconfig_equals(model.emb.qconfig,
default_embedding_qat_qconfig))
class TestQuantizeEagerQATNumerics(QuantizationTestCase):
def _test_activation_convert_numerics_impl(self, Act, data):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.act = Act()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.act(x)
x = self.dequant(x)
return x
m = M().train()
m.qconfig = default_qat_qconfig
m = prepare_qat(m)
before_convert = m(data)
m = convert(m)
after_convert = m(data)
self.assertEqual(before_convert, after_convert)
def test_fixed_qparam_ops(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.sigmoid = torch.nn.Sigmoid()
self.hardsigmoid = torch.nn.Hardsigmoid()
self.tanh = torch.nn.Tanh()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.sigmoid(x)
x = self.hardsigmoid(x)
x = self.tanh(x)
x = self.dequant(x)
return x
m = M().train()
m.qconfig = default_qat_qconfig
m = prepare_qat(m)
for attr in ['sigmoid', 'hardsigmoid', 'tanh']:
self.assertEqual(type(getattr(m, attr).activation_post_process), FixedQParamsFakeQuantize)
data = torch.randn(1, 3, 2, 4)
before_convert = m(data)
m = convert(m)
after_convert = m(data)
self.assertEqual(before_convert, after_convert)
# make sure activation post process is removed
for attr in ['sigmoid', 'hardsigmoid', 'tanh']:
# verify fake quant module is removed
self.assertFalse(hasattr(getattr(m, attr), 'activation_post_process'))
# verify that hooks are removed
self.assertTrue(len(getattr(m, attr)._forward_hooks.items()) == 0)
# make sure no fake quantize module is inserted for eval mode
def checkNoFQModule(m):
for attr in ['sigmoid', 'hardsigmoid', 'tanh']:
self.assertFalse(hasattr(getattr(m, attr), "activation_post_process"))
self.assertTrue(len(getattr(m, attr)._forward_hooks.items()) == 0)
m = M().eval()
m.qconfig = default_qconfig
m = prepare(m)
checkNoFQModule(m)
m = convert(m)
checkNoFQModule(m)
def test_leaky_relu(self):
data = torch.randn(1, 3, 2, 4)
self._test_activation_convert_numerics_impl(nn.LeakyReLU, data)
def test_relu(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(x)
return x
m = M().train()
m.qconfig = default_qconfig
m = prepare_qat(m)
# make sure no activation_post_process is inserted for relu
self.assertFalse(hasattr(m, "activation_post_process"))
m = convert(m)
# make sure ReLU module is not changed
self.assertEqual(type(m.relu), nn.ReLU)
@given(batch_size=st.integers(2, 4),
input_channels_per_group=st.sampled_from([2, 3, 4]),
height=st.integers(5, 10),
width=st.integers(5, 10),
output_channels_per_group=st.sampled_from([2, 3]),
groups=st.integers(1, 3),
kernel_h=st.integers(1, 3),
kernel_w=st.integers(1, 3),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
dilation=st.integers(1, 1),
padding_mode=st.sampled_from(['zeros', 'circular']),
use_relu=st.booleans(),
eps=st.sampled_from([1e-5, 1e-4, 1e-3]),
momentum=st.sampled_from([0.1, 0.2, 0.3]),
freeze_bn=st.booleans())
def test_conv_bn_relu(
self,
batch_size,
input_channels_per_group,
height,
width,
output_channels_per_group,
groups,
kernel_h,
kernel_w,
stride_h,
stride_w,
pad_h,
pad_w,
dilation,
padding_mode,
use_relu,
eps,
momentum,
freeze_bn
):
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
dilation_h = dilation_w = dilation
conv_op = Conv2d(
input_channels,
output_channels,
(kernel_h, kernel_w),
(stride_h, stride_w),
(pad_h, pad_w),
(dilation_h, dilation_w),
groups,
False, # No bias
padding_mode
).to(dtype=torch.double)
bn_op = BatchNorm2d(output_channels, eps, momentum).to(dtype=torch.double)
relu_op = ReLU()
cls = ConvBnReLU2d if use_relu else ConvBn2d
qat_op = cls(
input_channels,
output_channels,
(kernel_h, kernel_w),
(stride_h, stride_w),
(pad_h, pad_w),
(dilation_h, dilation_w),
groups,
None, # bias
padding_mode,
eps,
momentum,
freeze_bn=True,
qconfig=default_qat_qconfig
).to(dtype=torch.double)
qat_op.apply(torch.ao.quantization.disable_fake_quant)
if freeze_bn:
qat_op.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
else:
qat_op.apply(torch.nn.intrinsic.qat.update_bn_stats)
# align inputs and internal parameters
input = torch.randn(batch_size, input_channels, height, width, dtype=torch.double, requires_grad=True)
conv_op.weight = torch.nn.Parameter(qat_op.weight.detach())
bn_op.running_mean = qat_op.bn.running_mean.clone()
bn_op.running_var = qat_op.bn.running_var.clone()
bn_op.weight = torch.nn.Parameter(qat_op.bn.weight.detach())
bn_op.bias = torch.nn.Parameter(qat_op.bn.bias.detach())
def compose(functions):
# functions are reversed for natural reading order
return reduce(lambda f, g: lambda x: f(g(x)), functions[::-1], lambda x: x)
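# e.g. compose([conv_op, bn_op, relu_op])(x) is equivalent to relu_op(bn_op(conv_op(x))),
# i.e. the functions are applied left to right.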
if not use_relu:
def relu_op(x):
return x
if freeze_bn:
def ref_op(x):
x = conv_op(x)
x = (x - bn_op.running_mean.reshape([1, -1, 1, 1])) * \
(bn_op.weight / torch.sqrt(bn_op.running_var + bn_op.eps)) \
.reshape([1, -1, 1, 1]) + bn_op.bias.reshape([1, -1, 1, 1])
x = relu_op(x)
return x
else:
ref_op = compose([conv_op, bn_op, relu_op])
input_clone = input.clone().detach().requires_grad_()
for i in range(2):
result_ref = ref_op(input)
result_actual = qat_op(input_clone)
self.assertEqual(result_ref, result_actual)
# backward
dout = torch.randn(result_ref.size(), dtype=torch.double)
loss = (result_ref - dout).sum()
loss.backward()
input_grad_ref = input.grad.cpu()
weight_grad_ref = conv_op.weight.grad.cpu()
gamma_grad_ref = bn_op.weight.grad.cpu()
beta_grad_ref = bn_op.bias.grad.cpu()
running_mean_ref = bn_op.running_mean
running_var_ref = bn_op.running_var
num_batches_tracked_ref = bn_op.num_batches_tracked
loss = (result_actual - dout).sum()
loss.backward()
input_grad_actual = input_clone.grad.cpu()
weight_grad_actual = qat_op.weight.grad.cpu()
gamma_grad_actual = qat_op.bn.weight.grad.cpu()
beta_grad_actual = qat_op.bn.bias.grad.cpu()
running_mean_actual = qat_op.bn.running_mean
running_var_actual = qat_op.bn.running_var
num_batches_tracked_actual = qat_op.bn.num_batches_tracked
precision = 1e-10
self.assertEqual(input_grad_ref, input_grad_actual, atol=precision, rtol=0)
self.assertEqual(weight_grad_ref, weight_grad_actual, atol=precision, rtol=0)
self.assertEqual(gamma_grad_ref, gamma_grad_actual, atol=precision, rtol=0)
self.assertEqual(beta_grad_ref, beta_grad_actual, atol=precision, rtol=0)
self.assertEqual(num_batches_tracked_ref, num_batches_tracked_actual, atol=precision, rtol=0)
self.assertEqual(running_mean_ref, running_mean_actual, atol=precision, rtol=0)
self.assertEqual(running_var_ref, running_var_actual, atol=precision, rtol=0)
@given(batch_size=st.integers(2, 4),
input_channels_per_group=st.sampled_from([2, 3, 4]),
height=st.integers(5, 10),
width=st.integers(5, 10),
output_channels_per_group=st.sampled_from([2, 3]),
groups=st.integers(1, 3),
kernel_h=st.integers(1, 3),
kernel_w=st.integers(1, 3),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
dilation=st.integers(1, 1),
padding_mode=st.sampled_from(['zeros', 'circular']),
eps=st.sampled_from([1e-5, 1e-4, 1e-3]),
momentum=st.sampled_from([0.1, 0.2, 0.3]),
freeze_bn=st.booleans(),
bias=st.booleans())
def test_conv_bn_folded_vs_unfolded(
self,
batch_size,
input_channels_per_group,
height,
width,
output_channels_per_group,
groups,
kernel_h,
kernel_w,
stride_h,
stride_w,
pad_h,
pad_w,
dilation,
padding_mode,
eps,
momentum,
freeze_bn,
bias,
):
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
dilation_h = dilation_w = dilation
qat_op = ConvBn2d(
input_channels,
output_channels,
(kernel_h, kernel_w),
(stride_h, stride_w),
(pad_h, pad_w),
(dilation_h, dilation_w),
groups,
bias, # bias
padding_mode,
eps,
momentum,
freeze_bn=freeze_bn,
qconfig=default_qat_qconfig
).to(dtype=torch.double)
qat_ref_op = _ReferenceConvBn2d(
input_channels,
output_channels,
(kernel_h, kernel_w),
(stride_h, stride_w),
(pad_h, pad_w),
(dilation_h, dilation_w),
groups,
bias, # bias
padding_mode,
eps,
momentum,
freeze_bn=freeze_bn,
qconfig=default_qat_qconfig
).to(dtype=torch.double)
qat_op.apply(torch.ao.quantization.disable_fake_quant)
qat_ref_op.apply(torch.ao.quantization.disable_fake_quant)
# align inputs and internal parameters
qat_ref_op.weight = torch.nn.Parameter(qat_op.weight.detach().clone())
qat_ref_op.running_mean = qat_op.bn.running_mean.clone()
qat_ref_op.running_var = qat_op.bn.running_var.clone()
qat_ref_op.gamma = torch.nn.Parameter(qat_op.bn.weight.detach().clone())
qat_ref_op.beta = torch.nn.Parameter(qat_op.bn.bias.detach().clone())
if qat_op.bias is not None:
qat_ref_op.bias = torch.nn.Parameter(qat_op.bias.detach().clone())
lr = 0.01
qat_op_optim = torch.optim.SGD(qat_op.parameters(), lr=lr)
qat_ref_op_optim = torch.optim.SGD(qat_ref_op.parameters(), lr=lr)
for i in range(5):
# make sure that calling model.train() does not override the
# bn freeze setting
qat_op.train()
qat_ref_op.train()
qat_op_optim.zero_grad()
qat_ref_op_optim.zero_grad()
input = torch.randn(batch_size, input_channels, height, width, dtype=torch.double, requires_grad=True)
input_clone = input.clone().detach().requires_grad_()
if i > 2:
qat_op.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
qat_ref_op.freeze_bn_stats()
if i > 3:
qat_op.apply(torch.ao.quantization.disable_observer)
qat_ref_op.apply(torch.ao.quantization.disable_observer)
result_ref = qat_ref_op(input)
result_actual = qat_op(input_clone)
self.assertEqual(result_ref, result_actual)
# backward
dout = torch.randn(result_ref.size(), dtype=torch.double) + 10.0
loss = (result_ref - dout).sum()
loss.backward()
input_grad_ref = input.grad.cpu()
weight_grad_ref = qat_ref_op.weight.grad.cpu()
gamma_grad_ref = qat_ref_op.gamma.grad.cpu()
beta_grad_ref = qat_ref_op.beta.grad.cpu()
running_mean_ref = qat_ref_op.running_mean
running_var_ref = qat_ref_op.running_var
num_batches_tracked_ref = qat_ref_op.num_batches_tracked
loss = (result_actual - dout).sum()
loss.backward()
input_grad_actual = input_clone.grad.cpu()
weight_grad_actual = qat_op.weight.grad.cpu()
gamma_grad_actual = qat_op.bn.weight.grad.cpu()
beta_grad_actual = qat_op.bn.bias.grad.cpu()
running_mean_actual = qat_op.bn.running_mean
running_var_actual = qat_op.bn.running_var
num_batches_tracked_actual = qat_op.bn.num_batches_tracked
precision = 1e-5
self.assertEqual(input_grad_ref, input_grad_actual, atol=precision, rtol=0)
self.assertEqual(weight_grad_ref, weight_grad_actual, atol=precision, rtol=0)
self.assertEqual(gamma_grad_ref, gamma_grad_actual, atol=precision, rtol=0)
self.assertEqual(beta_grad_ref, beta_grad_actual, atol=precision, rtol=0)
self.assertEqual(num_batches_tracked_ref, num_batches_tracked_actual, atol=precision, rtol=0)
self.assertEqual(running_mean_ref, running_mean_actual, atol=precision, rtol=0)
self.assertEqual(running_var_ref, running_var_actual, atol=precision, rtol=0)
qat_op_optim.step()
qat_ref_op_optim.step()
@override_qengines
def test_linear_bn_numerics(self):
qengine = torch.backends.quantized.engine
m_ref = nn.Sequential(
nn.Linear(4, 4),
nn.BatchNorm1d(4),
)
m_ref_copy = copy.deepcopy(m_ref)
m_ref_copy = torch.ao.quantization.fuse_modules_qat(m_ref_copy, [['0', '1']])
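# Note: fusing ['0', '1'] (Linear + BatchNorm1d) is expected to yield a
# torch.nn.intrinsic.LinearBn1d fused module at index 0, which from_float below consumes.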
qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine)
m_ref_copy[0].qconfig = qconfig
m = nniqat.LinearBn1d.from_float(m_ref_copy[0])
# without fake_quants, fused QAT module should match fp32 module
m.apply(torch.quantization.disable_fake_quant)
data = torch.randn(4, 4)
r1 = m_ref(data)
r2 = m(data)
self.assertTrue(torch.allclose(r1, r2))
@skipIfNoXNNPACK
@override_qengines
def test_linear_bn_symm_numerics(self):
qengine = torch.backends.quantized.engine
if qengine != "qnnpack":
return # Only qnnpack support symmetric quantization
m_ref = nn.Sequential(
nn.Linear(4, 4),
nn.BatchNorm1d(4),
)
m_ref_copy = copy.deepcopy(m_ref)
m_ref_copy = torch.ao.quantization.fuse_modules_qat(m_ref_copy, [['0', '1']])
qconfig = default_symmetric_qnnpack_qat_qconfig
m_ref_copy[0].qconfig = qconfig
m = nniqat.LinearBn1d.from_float(m_ref_copy[0])
# without fake_quants, fused QAT module should match fp32 module
m.apply(torch.quantization.disable_fake_quant)
data = torch.randn(4, 4)
r1 = m_ref(data)
r2 = m(data)
self.assertTrue(torch.allclose(r1, r2))
@override_qengines
def test_linear_bn_workflow(self):
qengine = torch.backends.quantized.engine
m = nn.Sequential(
QuantStub(),
nn.Linear(4, 4),
nn.BatchNorm1d(4),
)
data = torch.randn(4, 4)
m.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine)
m = torch.ao.quantization.fuse_modules_qat(m, [['1', '2']])
mp = prepare_qat(m)
mp(data)
mq = convert(mp)
self.assertTrue(type(mq[1]) == nnq.Linear)
self.assertTrue(type(mq[2]) == nn.Identity)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_quantization.py TESTNAME\n\n"
"instead.")
|
pytorch-master
|
test/quantization/eager/test_quantize_eager_qat.py
|
# Owner(s): ["oncall: quantization"]
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.nn.utils.rnn import PackedSequence
from torch.ao.quantization import (
quantize,
prepare,
convert,
prepare_qat,
quantize_dynamic,
QuantWrapper,
QuantStub,
DeQuantStub,
default_qconfig,
default_dynamic_qconfig,
per_channel_dynamic_qconfig,
float16_dynamic_qconfig,
float_qparams_weight_only_qconfig,
float_qparams_weight_only_qconfig_4bit,
PerChannelMinMaxObserver,
default_dynamic_quant_observer,
QConfig,
)
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
AnnotatedSingleLayerLinearModel,
QuantStubModel,
ModelWithFunctionals,
SingleLayerLinearDynamicModel,
TwoLayerLinearModel,
NestedModel,
ResNetBase,
RNNDynamicModel,
RNNCellDynamicModel,
ActivationsTestModel,
NormalizationTestModel,
test_only_eval_fn,
prepare_dynamic,
convert_dynamic,
skipIfNoFBGEMM,
EmbeddingBagModule,
EmbeddingModule,
EmbeddingWithStaticLinear,
LinearReluLinearModel,
)
# annotated models
from torch.testing._internal.common_quantization import (
AnnotatedTwoLayerLinearModel,
AnnotatedNestedModel,
AnnotatedSubNestedModel,
AnnotatedCustomConfigNestedModel,
AnnotatedSkipQuantModel,
)
from torch.testing._internal.common_quantized import (
override_quantized_engine,
supported_qengines,
override_qengines,
)
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import skipIfNoCaffe2
from hypothesis import given
from hypothesis import strategies as st
import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
# Standard library
from typing import Tuple
import io
import unittest
import numpy as np
class TestQuantizeEagerOps(QuantizationTestCase):
@override_qengines
def _test_reference_module_impl(self,
float_module_class,
quantized_module_class,
extra_module_kwargs,
input_size):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = float_module_class(**extra_module_kwargs)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.dequant(x)
return x
class RefM(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = float_module_class(**extra_module_kwargs)
self.quant1 = QuantStub()
self.dequant1 = DeQuantStub()
self.quant2 = QuantStub()
self.dequant2 = DeQuantStub()
def forward(self, x):
x = self.quant1(x)
x = self.dequant1(x)
x = self.conv(x)
x = self.quant2(x)
x = self.dequant2(x)
return x
qengine = torch.backends.quantized.engine
if qengine not in supported_qengines or qengine == 'qnnpack':
return # qnnpack does not support nnq.ConvTranspose3d
data = torch.randn(*input_size, dtype=torch.float)
original_m = M()
original_ref_m = RefM()
original_ref_m.conv.weight = torch.nn.Parameter(original_m.conv.weight.detach())
original_ref_m.conv.bias = torch.nn.Parameter(original_m.conv.bias.detach())
original_m.qconfig = torch.quantization.default_qconfig
m = prepare(original_m)
# calibration
m(data)
m = convert(m)
# check if the module is properly quantized
self.assertEqual(type(m.quant), nnq.Quantize)
self.assertEqual(type(m.conv), quantized_module_class)
self.assertEqual(type(m.dequant), nnq.DeQuantize)
res = m(data)
# quantize the reference model
original_ref_m.eval()
original_ref_m.qconfig = torch.quantization.default_qconfig
ref_m = prepare(original_ref_m)
ref_m(data)
ref_m = convert(ref_m, is_reference=True)
ref_res = ref_m(data)
self.assertEqual(res, ref_res)
def test_conv_1d(self):
self._test_reference_module_impl(
nn.Conv1d,
nnq.Conv1d,
{'in_channels': 1, 'out_channels': 1, 'kernel_size': 1},
(16, 1, 1)
)
def test_conv_2d(self):
self._test_reference_module_impl(
nn.Conv2d,
nnq.Conv2d,
{'in_channels': 1, 'out_channels': 1, 'kernel_size': 1},
(16, 1, 10, 10)
)
def test_conv_3d(self):
self._test_reference_module_impl(
nn.Conv3d,
nnq.Conv3d,
{'in_channels': 1, 'out_channels': 1, 'kernel_size': 1},
(16, 1, 10, 10, 10)
)
def test_conv_transpose_1d(self):
self._test_reference_module_impl(
nn.ConvTranspose1d,
nnq.ConvTranspose1d,
{'in_channels': 1, 'out_channels': 1, 'kernel_size': 1},
(16, 1, 1)
)
def test_conv_transpose_2d(self):
self._test_reference_module_impl(
nn.ConvTranspose2d,
nnq.ConvTranspose2d,
{'in_channels': 1, 'out_channels': 1, 'kernel_size': 1},
(16, 1, 10, 10)
)
def test_conv_transpose_3d(self):
self._test_reference_module_impl(
nn.ConvTranspose3d,
nnq.ConvTranspose3d,
{'in_channels': 1, 'out_channels': 1, 'kernel_size': 1},
(16, 1, 10, 10, 10)
)
def test_linear(self):
self._test_reference_module_impl(
nn.Linear,
nnq.Linear,
{'in_features': 5, 'out_features': 10},
(16, 5)
)
@override_qengines
def test_int16_reference_module(self):
class RefM(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.ConvTranspose2d(1, 1, 1)
self.quant1 = QuantStub()
self.dequant1 = DeQuantStub()
self.quant2 = QuantStub()
self.dequant2 = DeQuantStub()
def forward(self, x):
x = self.quant1(x)
x = self.dequant1(x)
x = self.conv(x)
x = self.quant2(x)
x = self.dequant2(x)
return x
input_size = (16, 1, 10, 10)
data = torch.randn(*input_size, dtype=torch.float)
original_ref_m = RefM()
rand_w = torch.randn_like(original_ref_m.conv.weight)
rand_b = torch.randn_like(original_ref_m.conv.bias)
original_ref_m.conv.weight = torch.nn.Parameter(rand_w, requires_grad=False)
original_ref_m.conv.bias = torch.nn.Parameter(rand_b, requires_grad=False)
qengine = torch.backends.quantized.engine
if qengine not in supported_qengines:
return
from torch.ao.quantization.observer import MovingAverageMinMaxObserver
weight_obs = MovingAverageMinMaxObserver.with_args(
dtype=torch.qint32,
# set qmin and qmax to represent qint16
quant_min=-1 * (2 ** 15),
quant_max=(2 ** 15) - 1,
qscheme=torch.per_tensor_symmetric,
)
act_obs = MovingAverageMinMaxObserver.with_args(
dtype=torch.qint32,
quant_min=-1 * (2 ** 15),
quant_max=(2 ** 15) - 1,
)
custom_qconfig = QConfig(activation=act_obs, weight=weight_obs)
# quantize the reference model
original_ref_m.eval()
original_ref_m.qconfig = custom_qconfig
ref_m = prepare(original_ref_m)
# calibration
ref_m(torch.randn(*input_size, dtype=torch.float))
ref_m = convert(ref_m, is_reference=True)
myobs = MovingAverageMinMaxObserver(averaging_constant=0.5,
dtype=torch.qint32,
# set qmin and qmax to represent qint16
quant_min=-1 * (2 ** 15),
quant_max=(2 ** 15) - 1,
qscheme=torch.per_tensor_symmetric,
)
result = myobs(rand_w)
qparams = myobs.calculate_qparams()
self.assertEqual(ref_m.conv.weight_scale, qparams[0])
def _test_activation_op_impl(
self, float_module_class, quantized_module_class, extra_module_kwargs):
""" Implementation for testing common activation ops like leaky relu
Args:
extra_module_kwargs: keyword args to instantiate the float module
"""
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.activation_op = float_module_class(**extra_module_kwargs)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.activation_op(x)
x = self.dequant(x)
return x
m = M().eval()
m.qconfig = default_qconfig
m = prepare(m)
self.checkObservers(m)
m = convert(m)
self.assertEqual(type(m.activation_op), quantized_module_class)
def test_leaky_relu(self):
self._test_activation_op_impl(nn.LeakyReLU, nnq.LeakyReLU, {'negative_slope': 0.1, 'inplace': False})
def test_relu(self):
self._test_activation_op_impl(nn.ReLU, nn.ReLU, {'inplace': False})
# Histogram Observers are slow, so have no-deadline to ensure test doesn't time out
@given(train_mode=st.booleans())
def test_functional_module(self, train_mode):
model = ModelWithFunctionals()
x = torch.rand(10, 1, dtype=torch.float)
xq = torch.quantize_per_tensor(x, 0.01, 30, torch.quint8)
self.checkScriptable(model, [[x]], check_save_load=True)
if train_mode:
model.qconfig = torch.ao.quantization.get_default_qat_qconfig('fbgemm')
model = prepare_qat(model)
else:
model.qconfig = torch.ao.quantization.get_default_qconfig('qnnpack')
model = prepare(model)
# Check if observers and quant/dequant nodes are inserted
self.checkNoPrepModules(model)
self.checkObservers(model)
# Calibrate
model(xq.dequantize())
model = convert(model)
def checkQuantized(model):
self.checkNoPrepModules(model)
self.assertEqual(type(model.myadd), torch.nn.quantized.QFunctional)
self.assertEqual(type(model.mycat), torch.nn.quantized.QFunctional)
self.assertEqual(type(model.myadd_relu), torch.nn.quantized.QFunctional)
self.checkNoQconfig(model)
checkQuantized(model)
self.checkScriptable(model, [[xq]], check_save_load=True)
class TestQuantizeEagerPTQStatic(QuantizationTestCase):
def test_single_layer(self):
r"""Quantize SingleLayerLinearModel which has one Linear module, make sure it is swapped
to nnq.Linear which is the quantized version of the module
"""
for qengine in supported_qengines:
with override_quantized_engine(qengine):
qconfig = torch.ao.quantization.get_default_qconfig(qengine)
model = AnnotatedSingleLayerLinearModel(qengine)
model.qconfig = qconfig
model = prepare(model)
# Check if observers and quant/dequant nodes are inserted
self.checkNoPrepModules(model)
self.checkHasPrepModules(model.fc1)
self.checkObservers(model)
test_only_eval_fn(model, self.calib_data)
model = convert(model)
def checkQuantized(model):
self.checkNoPrepModules(model)
self.checkHasPrepModules(model.fc1)
self.checkWrappedQuantizedLinear(model.fc1)
test_only_eval_fn(model, self.calib_data)
self.checkScriptable(model, self.calib_data)
self.checkNoQconfig(model)
checkQuantized(model)
# test one line API - out of place version
base = AnnotatedSingleLayerLinearModel(qengine)
base.qconfig = qconfig
keys_before = set(list(base.state_dict().keys()))
model = quantize(base, test_only_eval_fn, [self.calib_data])
checkQuantized(model)
keys_after = set(list(base.state_dict().keys()))
self.assertEqual(keys_before, keys_after) # simple check that nothing changed
# in-place version
model = AnnotatedSingleLayerLinearModel(qengine)
model.qconfig = qconfig
quantize(model, test_only_eval_fn, [self.calib_data], inplace=True)
checkQuantized(model)
@skipIfNoFBGEMM
def test_two_layers(self):
r"""TwoLayerLinearModel has two Linear modules but we only quantize the second one
`fc2`, and `fc1`is not quantized
"""
with override_quantized_engine('fbgemm'):
model = AnnotatedTwoLayerLinearModel()
model = prepare(model)
self.checkNoPrepModules(model)
self.checkObservers(model)
self.checkNoPrepModules(model.fc1)
self.checkHasPrepModules(model.fc2)
test_only_eval_fn(model, self.calib_data)
model = convert(model)
def checkQuantized(model):
self.checkNoPrepModules(model)
self.checkNoPrepModules(model.fc1)
self.checkHasPrepModules(model.fc2)
self.assertEqual(type(model.fc1), torch.nn.Linear)
self.checkWrappedQuantizedLinear(model.fc2)
test_only_eval_fn(model, self.calib_data)
self.checkScriptable(model, self.calib_data)
self.checkNoQconfig(model)
checkQuantized(model)
# test one line API
model = quantize(AnnotatedTwoLayerLinearModel(), test_only_eval_fn,
[self.calib_data])
checkQuantized(model)
def test_nested1(self):
r"""Test quantization for nested model, top level 'fc3' and
'fc1' of submodule 'sub2', 'sub2.fc2' is not quantized
"""
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = AnnotatedNestedModel(qengine)
def checkPrepModules(model, before_calib=False):
if before_calib:
self.checkObservers(model)
self.checkNoPrepModules(model)
self.checkNoPrepModules(model.sub1)
self.checkNoPrepModules(model.sub1.fc)
self.checkNoPrepModules(model.sub1.relu)
self.checkNoPrepModules(model.sub2)
self.checkHasPrepModules(model.sub2.fc1)
self.checkNoPrepModules(model.sub2.fc2)
self.checkHasPrepModules(model.fc3)
model = prepare(model)
checkPrepModules(model, True)
test_only_eval_fn(model, self.calib_data)
model = convert(model)
def checkQuantized(model):
checkPrepModules(model)
self.checkLinear(model.sub1.fc)
self.checkWrappedQuantizedLinear(model.fc3)
self.checkWrappedQuantizedLinear(model.sub2.fc1)
self.checkLinear(model.sub2.fc2)
test_only_eval_fn(model, self.calib_data)
self.checkScriptable(model, self.calib_data)
self.checkNoQconfig(model)
checkQuantized(model)
# test one line API
model = quantize(AnnotatedNestedModel(qengine), test_only_eval_fn,
[self.calib_data])
checkQuantized(model)
@skipIfNoFBGEMM
def test_nested2(self):
model = AnnotatedSubNestedModel()
model = prepare(model)
def checkPrepModules(model, before_calib=False):
if before_calib:
self.checkObservers(model)
self.checkNoPrepModules(model)
self.checkNoPrepModules(model.sub1)
self.checkNoPrepModules(model.sub1.fc)
self.checkNoPrepModules(model.sub1.relu)
self.checkHasPrepModules(model.sub2)
self.checkNoPrepModules(model.sub2.module.fc1)
self.checkNoPrepModules(model.sub2.module.fc2)
self.checkHasPrepModules(model.fc3)
checkPrepModules(model, True)
test_only_eval_fn(model, self.calib_data)
model = convert(model)
def checkQuantized(model):
checkPrepModules(model)
self.checkLinear(model.sub1.fc)
self.assertEqual(type(model.sub1.relu), torch.nn.ReLU)
self.checkQuantizedLinear(model.sub2.module.fc1)
self.checkQuantizedLinear(model.sub2.module.fc2)
self.checkWrappedQuantizedLinear(model.fc3)
test_only_eval_fn(model, self.calib_data)
self.checkScriptable(model, self.calib_data)
self.checkNoQconfig(model)
checkQuantized(model)
# test one line API
model = quantize(AnnotatedSubNestedModel(), test_only_eval_fn,
[self.calib_data])
checkQuantized(model)
def test_nested3(self):
r"""More complicated nested test case with child qconfig overrides
parent qconfig
"""
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = AnnotatedCustomConfigNestedModel()
model = prepare(model)
def checkPrepModules(model, before_calib=False):
if before_calib:
self.checkObservers(model)
self.checkNoPrepModules(model)
self.checkNoPrepModules(model.sub1)
self.checkNoPrepModules(model.sub1.fc)
self.checkNoPrepModules(model.sub1.relu)
self.checkNoPrepModules(model.sub2)
self.checkHasPrepModules(model.sub2.fc1)
self.checkHasPrepModules(model.sub2.fc2)
self.checkHasPrepModules(model.fc3)
checkPrepModules(model, True)
test_only_eval_fn(model, self.calib_data)
model = convert(model)
def checkQuantized(model):
checkPrepModules(model)
self.checkWrappedQuantizedLinear(model.sub2.fc1)
self.checkWrappedQuantizedLinear(model.sub2.fc2)
self.checkWrappedQuantizedLinear(model.fc3)
test_only_eval_fn(model, self.calib_data)
self.checkScriptable(model, self.calib_data)
self.checkNoQconfig(model)
checkQuantized(model)
# test one line API
model = quantize(AnnotatedCustomConfigNestedModel(), test_only_eval_fn,
[self.calib_data])
checkQuantized(model)
def test_skip_quant(self):
r"""The case when we want to skip quantizing some layers
"""
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = AnnotatedSkipQuantModel(qengine)
model = prepare(model)
self.checkObservers(model)
test_only_eval_fn(model, self.calib_data)
model = convert(model)
def checkQuantized(model):
self.checkLinear(model.fc)
self.checkQuantDequant(model.sub)
self.checkQuantizedLinear(model.sub.module.fc1)
self.checkQuantizedLinear(model.sub.module.fc2)
self.assertEqual(type(model.sub.module.relu1), nn.ReLU)
self.assertEqual(type(model.sub.module.relu2), nn.ReLU)
self.checkScriptable(model, self.calib_data)
self.checkNoQconfig(model)
checkQuantized(model)
# test one line API
model = quantize(AnnotatedSkipQuantModel(qengine), test_only_eval_fn, [self.calib_data])
checkQuantized(model)
@skipIfNoFBGEMM
def test_manual(self):
r"""User inserts QuantStub and DeQuantStub in model code
and call the quantization utility functions.
"""
model = QuantStubModel()
# propagate the qconfig of parents to children, model is changed
# inplace
model = prepare(model)
self.checkObservers(model)
test_only_eval_fn(model, self.calib_data)
model = convert(model)
def checkQuantized(model):
self.assertEqual(type(model.fc), nnq.Linear)
test_only_eval_fn(model, self.calib_data)
self.checkScriptable(model, self.calib_data)
self.checkNoQconfig(model)
checkQuantized(model)
# test one line API
model = quantize(QuantStubModel(), test_only_eval_fn, [self.calib_data])
checkQuantized(model)
def test_resnet_base(self):
r"""Test quantization for bottleneck topology used in resnet/resnext
and add coverage for conversion of average pool and float functional
"""
for qengine in supported_qengines:
with override_quantized_engine(qengine):
qconfig = torch.ao.quantization.get_default_qconfig(qengine)
model = ResNetBase().float().eval()
model.fuse_model()
model = QuantWrapper(model)
model.qconfig = qconfig
model = prepare(model)
self.checkObservers(model)
test_only_eval_fn(model, self.img_data_2d)
model = convert(model)
def checkQuantized(model):
self.assertEqual(type(model.module.conv1), nn.intrinsic.quantized.ConvReLU2d)
self.assertEqual(type(model.module.myop), nn.quantized.QFunctional)
self.assertEqual(type(model.module.avgpool), nn.AdaptiveAvgPool2d)
self.assertEqual(type(model.module.fc), nnq.Linear)
test_only_eval_fn(model, self.img_data_2d)
self.checkNoQconfig(model)
checkQuantized(model)
@skipIfNoFBGEMM
def test_normalization(self):
r"""
Test quantization of normalization layers
"""
model = NormalizationTestModel()
model.qconfig = torch.ao.quantization.get_default_qconfig('fbgemm')
prepare(model, inplace=True)
self.checkObservers(model)
test_only_eval_fn(model, self.calib_data)
model = convert(model)
def checkQuantized(model):
self.checkNoPrepModules(model.layer_norm)
self.checkNoPrepModules(model.group_norm)
self.checkNoPrepModules(model.instance_norm1d)
self.checkNoPrepModules(model.instance_norm2d)
self.checkNoPrepModules(model.instance_norm3d)
self.assertEqual(type(model.layer_norm), nnq.LayerNorm)
self.assertEqual(type(model.group_norm), nnq.GroupNorm)
self.assertEqual(type(model.instance_norm1d), nnq.InstanceNorm1d)
self.assertEqual(type(model.instance_norm2d), nnq.InstanceNorm2d)
self.assertEqual(type(model.instance_norm3d), nnq.InstanceNorm3d)
test_only_eval_fn(model, self.calib_data)
self.checkScriptable(model, self.calib_data)
self.checkNoQconfig(model)
checkQuantized(model)
model_oneline = quantize(
NormalizationTestModel(), test_only_eval_fn, [self.calib_data])
checkQuantized(model_oneline)
def test_save_load_state_dict(self):
r"""Test PTQ flow of creating a model and quantizing it and saving the quantized state_dict
Load the quantized state_dict for eval and compare results against original model
"""
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = TwoLayerLinearModel()
model = torch.ao.quantization.QuantWrapper(model)
model.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
model = prepare(model)
# calibrate
test_only_eval_fn(model, self.calib_data)
model = convert(model)
x = torch.rand(2, 5, dtype=torch.float)
ref = model(x)
quant_state_dict = model.state_dict()
# Create model again for eval
model = TwoLayerLinearModel()
model = torch.ao.quantization.QuantWrapper(model)
model.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
model = prepare(model)
model = convert(model)
new_state_dict = model.state_dict()
# Check to make sure the state dict keys match original model after convert.
self.assertEqual(set(new_state_dict.keys()), set(quant_state_dict.keys()))
model.load_state_dict(quant_state_dict)
out = model(x)
self.assertEqual(ref, out)
@skipIfNoFBGEMM
def test_activations(self):
r"""
Test quantization of activations
"""
model = ActivationsTestModel()
model.qconfig = torch.ao.quantization.get_default_qconfig('fbgemm')
prepare(model, inplace=True)
self.checkObservers(model)
test_only_eval_fn(model, self.calib_data)
model = convert(model)
def checkQuantized(model):
self.checkNoPrepModules(model.hardswish)
self.assertEqual(type(model.hardswish), nnq.Hardswish)
self.assertEqual(type(model.elu), nnq.ELU)
test_only_eval_fn(model, self.calib_data)
self.checkScriptable(model, self.calib_data)
self.checkNoQconfig(model)
checkQuantized(model)
# test one line API
model_oneline = quantize(ActivationsTestModel(), test_only_eval_fn,
[self.calib_data])
checkQuantized(model_oneline)
@override_qengines
def test_forward_hooks_preserved(self):
r"""Test post-training static quantization on preserving
pre forward and post forward hooks of original model
"""
qengine = torch.backends.quantized.engine
model = QuantStubModel()
counter = {
'pre_forwards': 0,
'forwards': 0,
}
def fw_pre_hook(h_module, input):
counter['pre_forwards'] += 1
def fw_hook(h_module, input, output):
counter['forwards'] += 1
model.fc.register_forward_pre_hook(fw_pre_hook)
model.fc.register_forward_hook(fw_hook)
model.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
model = prepare(model)
def checkHooksIsPresent(model, before_convert=True):
num_fwd_hooks = 1
if before_convert:
self.assertEqual(len(model.quant._forward_hooks.values()), 1,
"Quantization observer hook has disappeared")
num_fwd_hooks = 2
self.assertObjectIn(fw_pre_hook, model.fc._forward_pre_hooks.values())
self.assertObjectIn(fw_hook, model.fc._forward_hooks.values())
self.assertEqual(len(model.fc._forward_pre_hooks.values()), 1,
"Extra pre forward hooks have appeared on a layer")
# During static quantization, non-stub layers are given a quantization observer hook too
self.assertEqual(len(model.fc._forward_hooks.values()), num_fwd_hooks,
"Extra post forward hooks have appeared on a layer")
# Implicitly check that fw_hook goes after _observer_forward_hook
self.assertEqual(list(model.fc._forward_hooks.values())[-1], fw_hook,
"fw_hook is not the last entry of the hooks list")
checkHooksIsPresent(model, True)
test_only_eval_fn(model, self.calib_data)
torch.ao.quantization.convert(model, inplace=True)
checkHooksIsPresent(model, False)
@skipIfNoFBGEMM
def test_quantized_embedding(self):
r""" Test the post-training quantization flow, serialization and scripting
of embedding modules
"""
for qconfig in [float_qparams_weight_only_qconfig, float_qparams_weight_only_qconfig_4bit]:
model = EmbeddingModule().eval()
indices = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8, 6, 6, 9, 1, 6, 8, 8, 3, 2, 3, 6, 3, 6, 5, 7, 0, 8, 4, 6, 5, 8, 2, 3])
weights = torch.randn(10, 12, dtype=torch.float32)
model.qconfig = qconfig
prepare(model, inplace=True)
convert(model, inplace=True)
self.assertTrue('QuantizedEmbedding' in str(model))
self.assertEqual(type(model.emb), torch.nn.quantized.Embedding)
self.checkScriptable(model, [[indices]], check_save_load=True)
idx = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
x = torch.randn(2, 4)
model = EmbeddingWithStaticLinear().eval()
prepare(model, inplace=True)
convert(model, inplace=True)
self.assertTrue('QuantizedEmbedding' in str(model))
self.assertTrue('QuantizedLinear' in str(model))
self.checkQuantizedLinear(model.fc)
model(idx, offsets, x)
@skipIfNoFBGEMM
def test_dequant_stub(self):
m = QuantStubModel().eval()
prepare(m, inplace=True)
self.checkObservers(m)
convert(m, inplace=True)
self.assertEqual(type(m.quant), nnq.Quantize)
self.assertEqual(type(m.fc), nnq.Linear)
self.assertEqual(type(m.dequant), nnq.DeQuantize)
# check DeQuantStub is not swapped when it doesn't have a qconfig
m2 = QuantStubModel().eval()
m2.dequant.qconfig = None
prepare(m2, inplace=True)
self.checkObservers(m2)
convert(m2, inplace=True)
self.assertEqual(type(m2.quant), nnq.Quantize)
self.assertEqual(type(m2.fc), nnq.Linear)
self.assertEqual(type(m2.dequant), DeQuantStub)
def test_quantized_embedding_bag(self):
r""" Test the post-training quantization flow, serialization and scripting
of embedding_bag modules
"""
indices = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8, 6, 6, 9, 1, 6, 8, 8, 3, 2, 3, 6, 3, 6, 5, 7, 0, 8, 4, 6, 5, 8, 2, 3])
offsets = torch.tensor([0, 19, 20, 28, 28, 32])
weights = torch.randn(10, 12, dtype=torch.float32)
for dtype in [torch.quint8, torch.quint4x2]:
model = EmbeddingBagModule().eval()
float_qparams_observer = PerChannelMinMaxObserver.with_args(dtype=dtype,
qscheme=torch.per_channel_affine_float_qparams,
ch_axis=0)
float_qparams_qconfig = QConfig(activation=default_dynamic_quant_observer,
weight=float_qparams_observer)
model.qconfig = float_qparams_qconfig
prepare(model, inplace=True)
quantized_model = convert(model)
per_sample_weights = torch.from_numpy(np.random.uniform(
low=0.01, high=0.5, size=[len(indices)]).astype(np.float32))
# Test to make sure module is quantized correctly.
self.assertTrue('QuantizedEmbeddingBag' in str(quantized_model))
self.checkDynamicQuantizedModule(quantized_model.emb, torch.nn.quantized.EmbeddingBag, torch.quint8)
self.checkScriptable(quantized_model, [[indices, offsets, per_sample_weights]], check_save_load=True)
class EmbeddingBagWithLinear(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12,
include_last_offset=True, scale_grad_by_freq=False, mode='sum')
self.fc = torch.nn.Linear(5, 5)
def forward(self, indices, offsets, per_sample_weights, linear_in):
return self.emb(indices, offsets, per_sample_weights), self.fc(linear_in)
# Test quantization of embedding_bag layer only
model2 = EmbeddingBagWithLinear().eval()
model2.emb.qconfig = float_qparams_qconfig
prepare(model2, inplace=True)
quantized_model = convert(model2)
self.assertTrue('QuantizedEmbeddingBag' in str(quantized_model))
self.checkLinear(model2.fc)
self.checkDynamicQuantizedModule(quantized_model.emb, torch.nn.quantized.EmbeddingBag, torch.quint8)
@skipIfNoFBGEMM
def test_custom_module_class(self):
class CustomModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
return self.conv(x)
class ObservedCustomModule(torch.nn.Module):
def __init__(self, conv):
super().__init__()
self.conv = conv
def forward(self, x):
return self.conv(x)
@classmethod
def from_float(cls, float_module):
assert hasattr(float_module, 'qconfig')
observed = cls(float_module.conv)
observed.qconfig = float_module.qconfig
return observed
class QuantizedCustomModule(torch.nn.Module):
def __init__(self, conv):
super().__init__()
self.conv = conv
def forward(self, x):
return self.conv(x)
@classmethod
def from_observed(cls, observed_module):
assert hasattr(observed_module, 'qconfig')
assert hasattr(observed_module, 'activation_post_process')
observed_module.conv.activation_post_process = \
observed_module.activation_post_process
quantized = cls(nnq.Conv2d.from_float(observed_module.conv))
return quantized
class Sub(torch.nn.Module):
def __init__(self):
super().__init__()
self.custom = CustomModule()
def forward(self, x):
return self.custom(x)
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = QuantStub()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.sub = Sub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.sub(x)
x = self.dequant(x)
return x
class RefM(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = QuantStub()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv1(x)
x = self.conv2(x)
x = self.dequant(x)
return x
data = torch.randn(1, 1, 1, 1)
# instantiate M and RefM and align the parameters
original_m = M()
original_ref_m = RefM()
original_ref_m.conv1.weight = torch.nn.Parameter(original_m.conv.weight.detach())
original_ref_m.conv1.bias = torch.nn.Parameter(original_m.conv.bias.detach())
original_ref_m.conv2.weight = torch.nn.Parameter(original_m.sub.custom.conv.weight.detach())
original_ref_m.conv2.bias = torch.nn.Parameter(original_m.sub.custom.conv.bias.detach())
original_m.qconfig = default_qconfig
prepare_custom_config_dict = {
"float_to_observed_custom_module_class": {
CustomModule: ObservedCustomModule
}
}
convert_custom_config_dict = {
"observed_to_quantized_custom_module_class": {
ObservedCustomModule: QuantizedCustomModule
}
}
m = prepare(
original_m,
prepare_custom_config_dict=prepare_custom_config_dict)
self.checkObservers(m, None, prepare_custom_config_dict)
# calibration
m(data)
# all activation observers are inserted in the top level module
# check converted/quantized model
m = convert(
m,
convert_custom_config_dict=convert_custom_config_dict)
# check if the module is properly quantized
self.assertEqual(type(m.quant), nnq.Quantize)
self.assertEqual(type(m.conv), nnq.Conv2d)
self.assertEqual(type(m.sub), Sub)
self.assertEqual(type(m.sub.custom), QuantizedCustomModule)
self.assertEqual(type(m.sub.custom.conv), nnq.Conv2d)
self.assertEqual(type(m.dequant), nnq.DeQuantize)
res = m(data)
# quantize the reference model
original_ref_m.eval()
original_ref_m.qconfig = default_qconfig
ref_m = prepare(original_ref_m)
ref_m(data)
ref_m = convert(ref_m)
ref_res = ref_m(data)
self.assertEqual(res, ref_res)
@skipIfNoFBGEMM
def test_convtranspose_per_channel_fails_early(self):
r"""
        Verifies that attempting to quantize a ConvTranspose module with per-channel
weight observers fails in the prepare step, as opposed to the convert step.
"""
m = torch.nn.Sequential(torch.nn.ConvTranspose2d(1, 1, 1))
m.qconfig = torch.ao.quantization.get_default_qconfig('fbgemm')
with self.assertRaises(AssertionError) as context:
mp = torch.ao.quantization.prepare(m)
self.assertTrue(
str(context.exception) ==
'Per channel weight observer is not supported yet for ConvTranspose{n}d.')
@skipIfNoFBGEMM
def test_convtranspose_per_channel_qconfig_none(self):
r"""
Verifies that having qconfig==None for conv transpose does not crash
"""
m = torch.nn.Sequential(torch.nn.ConvTranspose2d(1, 1, 1))
m.qconfig = torch.ao.quantization.get_default_qconfig('fbgemm')
m[0].qconfig = None
mp = torch.ao.quantization.prepare(m)
@skipIfNoFBGEMM
def test_quantwrapper_attaches_qconfig_to_dequant(self):
qconfig = torch.ao.quantization.default_qconfig
m = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
for i in range(len(m)):
m[i].qconfig = qconfig
m[i] = torch.ao.quantization.QuantWrapper(m[i])
mp = torch.ao.quantization.prepare(m)
mq = torch.ao.quantization.convert(mp)
self.assertTrue(isinstance(mq[0].dequant, nnq.DeQuantize))
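# A minimal illustrative sketch of the eager-mode static PTQ flow that the tests in the
# class above exercise, shown end to end on a hypothetical toy module (assumes a
# quantization-capable backend such as fbgemm or qnnpack is available).
class _StaticPTQSketchModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.quant = QuantStub()         # marks where fp32 activations get quantized
        self.fc = torch.nn.Linear(5, 5)
        self.dequant = DeQuantStub()     # marks where int8 activations get dequantized
    def forward(self, x):
        return self.dequant(self.fc(self.quant(x)))
def _static_ptq_flow_sketch():
    model = _StaticPTQSketchModel().eval()
    model.qconfig = torch.ao.quantization.default_qconfig
    prepared = prepare(model)            # inserts observers
    prepared(torch.randn(4, 5))          # calibration pass records activation statistics
    quantized = convert(prepared)        # swaps to nnq.Quantize / nnq.Linear / nnq.DeQuantize
    return quantized(torch.randn(4, 5))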
@skipIfNoFBGEMM
class TestQuantizeEagerPTQDynamic(QuantizationTestCase):
def test_single_layer(self):
r"""Dynamic Quantize SingleLayerLinearDynamicModel which has one Linear module,
make sure it is swapped to nnqd.Linear which is the quantized version of
the module
"""
for dtype in [torch.qint8, torch.float16]:
model = SingleLayerLinearDynamicModel().eval()
qconfig = float16_dynamic_qconfig if dtype == torch.float16 else default_dynamic_qconfig
qconfig_dict = {
'fc1': qconfig
}
prepare_dynamic(model, qconfig_dict)
convert_dynamic(model)
def checkQuantized(model):
self.checkDynamicQuantizedLinear(model.fc1, dtype)
self.checkScriptable(model, self.calib_data, check_save_load=True)
self.checkNoQconfig(model)
checkQuantized(model)
# test one line API - out of place version
base = SingleLayerLinearDynamicModel()
keys_before = set(list(base.state_dict().keys()))
model = quantize_dynamic(base, qconfig_dict)
checkQuantized(model)
keys_after = set(list(base.state_dict().keys()))
self.assertEqual(keys_before, keys_after) # simple check that nothing changed
# in-place version
model = SingleLayerLinearDynamicModel()
quantize_dynamic(model, qconfig_dict, inplace=True)
checkQuantized(model)
# Test set qconfig
model = SingleLayerLinearDynamicModel()
quantize_dynamic(model, set([nn.Linear]), inplace=True, dtype=dtype)
checkQuantized(model)
def test_two_layers(self):
r"""TwoLayerLinearModel has two Linear modules but we only quantize the second one
`fc2`, and `fc1`is not quantized
"""
for dtype in [torch.qint8, torch.float16]:
model = TwoLayerLinearModel().eval()
qconfig = float16_dynamic_qconfig if dtype == torch.float16 else default_dynamic_qconfig
qconfig_dict = {
'fc2': qconfig
}
prepare_dynamic(model, qconfig_dict)
convert_dynamic(model)
def checkQuantized(model):
self.assertEqual(type(model.fc1), torch.nn.Linear)
self.checkDynamicQuantizedLinear(model.fc2, dtype=dtype)
self.checkScriptable(model, self.calib_data, check_save_load=True)
self.checkNoQconfig(model)
checkQuantized(model)
# test one line API
model = quantize_dynamic(TwoLayerLinearModel().eval(), qconfig_dict)
checkQuantized(model)
# Test set API
model = quantize_dynamic(TwoLayerLinearModel().eval(), {'fc2'}, dtype=dtype)
checkQuantized(model)
def test_nested1(self):
r"""Test quantization for nested model, top level 'fc3' and
'fc1' of submodule 'sub2', 'sub2.fc2' is not quantized
"""
for dtype in [torch.qint8, torch.float16]:
model = NestedModel().eval()
qconfig = float16_dynamic_qconfig if dtype == torch.float16 else default_dynamic_qconfig
qconfig_dict = {
'fc3': qconfig,
'sub2.fc1': qconfig
}
prepare_dynamic(model, qconfig_dict)
convert_dynamic(model)
def checkQuantized(model):
self.checkLinear(model.sub1.fc)
self.checkDynamicQuantizedLinear(model.fc3, dtype=dtype)
self.checkDynamicQuantizedLinear(model.sub2.fc1, dtype=dtype)
self.checkLinear(model.sub2.fc2)
self.checkScriptable(model, self.calib_data, check_save_load=True)
self.checkNoQconfig(model)
checkQuantized(model)
# test one line API
model = quantize_dynamic(NestedModel().eval(), qconfig_dict)
checkQuantized(model)
model = quantize_dynamic(NestedModel().eval(), {'fc3', 'sub2.fc1'}, dtype=dtype)
checkQuantized(model)
def test_nested2(self):
r"""Another test case for quantized, we will quantize all submodules
of submodule sub2
"""
for dtype in [torch.qint8, torch.float16]:
model = NestedModel().eval()
qconfig = float16_dynamic_qconfig if dtype == torch.float16 else default_dynamic_qconfig
qconfig_dict = {
'fc3': qconfig,
'sub2': qconfig
}
prepare_dynamic(model, qconfig_dict)
convert_dynamic(model)
def checkQuantized(model):
self.checkLinear(model.sub1.fc)
self.assertEqual(type(model.sub1.relu), torch.nn.ReLU)
self.checkDynamicQuantizedLinear(model.sub2.fc1, dtype=dtype)
self.checkDynamicQuantizedLinear(model.sub2.fc2, dtype=dtype)
self.checkDynamicQuantizedLinear(model.fc3, dtype=dtype)
self.checkScriptable(model, self.calib_data, check_save_load=True)
self.checkNoQconfig(model)
checkQuantized(model)
# test one line API
model = quantize_dynamic(NestedModel().eval(), qconfig_dict, dtype=dtype)
checkQuantized(model)
# Test set API
model = quantize_dynamic(NestedModel().eval(), {'fc3', 'sub2'}, dtype=dtype)
checkQuantized(model)
def test_nested3(self):
r"""More complicated nested test case with child qconfig overrides
parent qconfig
"""
for dtype in [torch.qint8, torch.float16]:
model = NestedModel().eval()
qconfig = float16_dynamic_qconfig if dtype == torch.float16 else default_dynamic_qconfig
qconfig_dynamic_dict = {
'fc3': qconfig,
'sub2': qconfig,
'sub2.fc1': qconfig
}
prepare_dynamic(model, qconfig_dynamic_dict)
convert_dynamic(model)
def checkQuantized(model):
self.checkDynamicQuantizedLinear(model.sub2.fc1, dtype=dtype)
self.checkDynamicQuantizedLinear(model.sub2.fc2, dtype=dtype)
self.checkDynamicQuantizedLinear(model.fc3, dtype=dtype)
self.checkScriptable(model, self.calib_data, check_save_load=True)
self.checkNoQconfig(model)
checkQuantized(model)
# test one line API
model = quantize_dynamic(NestedModel().eval(), qconfig_dynamic_dict)
checkQuantized(model)
# Test set API
model = quantize_dynamic(NestedModel().eval(), {'fc3', 'sub2', 'sub2.fc1'}, dtype=dtype)
checkQuantized(model)
def test_type_match_rule(self):
r"""Test quantization for nested model, top level 'fc3' and
'fc1' of submodule 'sub2', All 'torch.nn.Linear' modules are quantized
"""
for dtype in [torch.qint8, torch.float16]:
model = NestedModel().eval()
qconfig = float16_dynamic_qconfig if dtype == torch.float16 else default_dynamic_qconfig
qconfig_dict = {
'fc3': None,
'sub2.fc1': None,
torch.nn.Linear: qconfig
}
prepare_dynamic(model, qconfig_dict)
test_only_eval_fn(model, self.calib_data)
convert_dynamic(model)
def checkQuantized(model):
self.checkDynamicQuantizedLinear(model.sub1.fc, dtype=dtype)
self.checkLinear(model.fc3)
self.checkLinear(model.sub2.fc1)
self.checkDynamicQuantizedLinear(model.sub2.fc2, dtype=dtype)
test_only_eval_fn(model, self.calib_data)
self.checkScriptable(model, self.calib_data, check_save_load=True)
self.checkNoQconfig(model)
checkQuantized(model)
# test one line API
model = quantize_dynamic(NestedModel().eval(), qconfig_dict, dtype=dtype)
checkQuantized(model)
def test_per_channel_linear_quantize(self):
r"""Test quantization for per_channel dynamic quantization
"""
model = NestedModel().eval()
qconfig_dict = {
torch.nn.Linear: per_channel_dynamic_qconfig
}
prepare_dynamic(model, qconfig_dict)
test_only_eval_fn(model, self.calib_data)
convert_dynamic(model)
def checkQuantized(model):
self.checkDynamicQuantizedLinear(model.sub1.fc, dtype=torch.qint8)
self.checkDynamicQuantizedLinear(model.fc3, dtype=torch.qint8)
self.checkDynamicQuantizedLinear(model.sub2.fc1, dtype=torch.qint8)
self.checkDynamicQuantizedLinear(model.sub2.fc2, dtype=torch.qint8)
test_only_eval_fn(model, self.calib_data)
self.checkScriptable(model, self.calib_data, check_save_load=True)
self.checkNoQconfig(model)
checkQuantized(model)
# test one line API
model = quantize_dynamic(NestedModel().eval(), qconfig_dict)
checkQuantized(model)
def test_linear_relu_fusion(self):
dtype = torch.qint8
model = LinearReluLinearModel().eval()
qconfig = default_dynamic_qconfig
qconfig_dict = {'' : qconfig}
torch.ao.quantization.fuse_modules(model, [['fc1', 'relu']], inplace=True)
prepare_dynamic(model, qconfig_dict)
convert_dynamic(model)
def checkQuantized(model):
self.checkDynamicQuantizedLinearRelu(model.fc1, dtype)
self.checkDynamicQuantizedLinear(model.fc2, dtype)
self.checkScriptable(model, self.calib_data, check_save_load=True)
self.checkNoQconfig(model)
checkQuantized(model)
@given(qconfig=st.sampled_from([per_channel_dynamic_qconfig, default_dynamic_qconfig]),
dtype=st.sampled_from([torch.qint8, torch.float16]))
def test_quantized_rnn(self, qconfig, dtype):
r"""Test dynamic quantization, scriptability and serialization for dynamic quantized lstm modules on int8 and fp16
"""
niter = 10
x = torch.tensor([[100, -155],
[-155, 100],
[100, -155]], dtype=torch.float).unsqueeze(0).repeat(niter, 1, 1)
qconfig_dict = {
torch.nn.LSTM : qconfig,
torch.nn.GRU: qconfig
}
def checkQuantized(model, module_type):
mod_type_map = {'LSTM': torch.nn.quantized.dynamic.LSTM,
'GRU': torch.nn.quantized.dynamic.GRU}
mod_repr_map = {'LSTM': 'DynamicQuantizedLSTM',
'GRU': 'DynamicQuantizedGRU'}
self.assertTrue(mod_repr_map[module_type] in str(model_quantized))
self.checkDynamicQuantizedModule(model_quantized.mod, mod_type_map[module_type], dtype)
for module_type in ['LSTM', 'GRU']:
model = RNNDynamicModel(module_type).eval()
if dtype == torch.float16:
model_quantized = quantize_dynamic(model=model, dtype=dtype)
else:
model_quantized = quantize_dynamic(model=model, qconfig_spec=qconfig_dict, dtype=dtype)
checkQuantized(model_quantized, module_type)
self.checkScriptable(model_quantized, [[x]], check_save_load=True)
class ScriptWrapperPackedLSTM(torch.nn.Module):
def __init__(self, cell):
super(ScriptWrapperPackedLSTM, self).__init__()
self.cell = cell
def forward(self, x: PackedSequence) -> Tuple[PackedSequence, Tuple[torch.Tensor, torch.Tensor]]:
return self.cell(x)
class ScriptWrapperPackedGRU(torch.nn.Module):
def __init__(self, cell):
super(ScriptWrapperPackedGRU, self).__init__()
self.cell = cell
def forward(self, x: PackedSequence) -> Tuple[PackedSequence, torch.Tensor]:
return self.cell(x)
script_wrapper_map = {'LSTM': ScriptWrapperPackedLSTM,
'GRU': ScriptWrapperPackedGRU}
packed_input = torch.nn.utils.rnn.pack_padded_sequence(x, torch.tensor([10, 5, 2]))
model_with_packed_input = script_wrapper_map[module_type](model_quantized.mod)
model_with_packed_input(packed_input)
scripted = torch.jit.script(model_with_packed_input)
scripted(packed_input)
# We cannot trace with input dtype being a packed sequence
self._checkScriptable(model_with_packed_input, scripted, [[packed_input]], True)
@given(qconfig=st.sampled_from([per_channel_dynamic_qconfig, default_dynamic_qconfig]),
dtype=st.sampled_from([torch.qint8, torch.float16]))
def test_quantized_rnn_cell(self, qconfig, dtype):
r"""Test dynamic quantization, scriptability and serialization for dynamic quantized rnn cell modules on int8 and fp16
"""
qconfig_dict = {
torch.nn.LSTMCell : qconfig,
torch.nn.GRUCell : qconfig,
torch.nn.RNNCell : qconfig
}
for module_type in ['LSTMCell', 'GRUCell', 'RNNTanh', 'RNNReLU']:
model = RNNCellDynamicModel(module_type).eval()
x = torch.tensor([[100, -155],
[-155, 100],
[100, -155]], dtype=torch.float)
            # fp16 dynamic quant is not supported for qnnpack
            if torch.backends.quantized.engine == 'qnnpack' and dtype == torch.float16:
                continue
if dtype == torch.float16:
model_quantized = quantize_dynamic(model=model, dtype=dtype)
else:
model_quantized = quantize_dynamic(model=model, qconfig_spec=qconfig_dict, dtype=dtype)
def checkQuantized(model, module_type):
mod_type_map = {'LSTMCell': torch.nn.quantized.dynamic.LSTMCell,
'GRUCell': torch.nn.quantized.dynamic.GRUCell,
'RNNTanh': torch.nn.quantized.dynamic.RNNCell,
'RNNReLU': torch.nn.quantized.dynamic.RNNCell}
mod_repr_map = {'LSTMCell': 'DynamicQuantizedLSTMCell',
'GRUCell': 'DynamicQuantizedGRUCell',
'RNNTanh': 'DynamicQuantizedRNNCell',
'RNNReLU': 'DynamicQuantizedRNNCell'}
self.assertTrue(mod_repr_map[module_type] in str(model_quantized))
self.checkDynamicQuantizedModule(model_quantized.mod, mod_type_map[module_type], dtype)
self.checkNoQconfig(model)
# Smoke test extra reprs
checkQuantized(model_quantized, module_type)
self.checkScriptable(model_quantized, [[x]], check_save_load=True)
def test_forward_hooks_preserved(self):
r"""Test post-training dynamic quantization on preserving
pre forward and post forward hooks of original model
"""
for dtype in [torch.qint8, torch.float16]:
model = SingleLayerLinearDynamicModel().eval()
qconfig = float16_dynamic_qconfig if dtype == torch.float16 else default_dynamic_qconfig
qconfig_dict = {
'fc1': qconfig
}
convert_dynamic(model)
counter = {
'pre_forwards': 0,
'forwards': 0,
}
def fw_pre_hook(h_module, input):
counter['pre_forwards'] += 1
def fw_hook(h_module, input, output):
counter['forwards'] += 1
model.fc1.register_forward_pre_hook(fw_pre_hook)
model.fc1.register_forward_hook(fw_hook)
prepare_dynamic(model, qconfig_dict)
def checkHooksIsPresent(model):
self.assertObjectIn(fw_pre_hook, model.fc1._forward_pre_hooks.values())
self.assertObjectIn(fw_hook, model.fc1._forward_hooks.values())
self.assertEqual(len(model.fc1._forward_pre_hooks.values()), 1,
"Extra pre forward hooks have appeared on a layer")
self.assertEqual(len(model.fc1._forward_hooks.values()), 1,
"Extra post forward hooks have appeared on a layer")
checkHooksIsPresent(model)
test_only_eval_fn(model, self.calib_data)
convert_dynamic(model)
checkHooksIsPresent(model)
@skipIfNoFBGEMM
def test_embedding_ops_dynamic(self):
class EmbeddingBagWithLinear(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12,
include_last_offset=True, scale_grad_by_freq=False, mode='sum')
self.fc = torch.nn.Linear(5, 5)
def forward(self, indices, offsets, linear_in):
return self.emb(indices, offsets), self.fc(linear_in)
model = EmbeddingBagWithLinear().eval()
qconfig_dict = {
torch.nn.EmbeddingBag : float_qparams_weight_only_qconfig,
torch.nn.Linear: default_dynamic_qconfig
}
indices = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8, 6, 6, 9, 1, 6, 8, 8, 3, 2, 3, 6, 3, 6, 5, 7, 0, 8, 4, 6, 5, 8, 2, 3])
offsets = torch.tensor([0, 19, 20, 28, 28, 32])
q_model = quantize_dynamic(model, qconfig_dict)
q_model(indices, offsets, torch.randn(5, 5))
self.assertTrue('QuantizedEmbedding' in str(q_model))
self.assertTrue('DynamicQuantizedLinear' in str(q_model))
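# A minimal sketch of the dynamic-quantization path exercised by the class above:
# quantize_dynamic swaps the listed module types for their dynamically quantized
# counterparts (weights are quantized ahead of time, activations on the fly).  The
# Sequential model here is a hypothetical stand-in for the test models used above.
def _dynamic_ptq_flow_sketch():
    float_model = nn.Sequential(nn.Linear(5, 8), nn.ReLU(), nn.Linear(8, 2)).eval()
    q_model = quantize_dynamic(float_model, {nn.Linear}, dtype=torch.qint8)
    # q_model[0] and q_model[2] are now torch.nn.quantized.dynamic.Linear modules
    return q_model(torch.randn(3, 5))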
class TestQuantizeEagerONNXExport(JitTestCase):
def _test_lower_graph_impl(self, model, data):
model.qconfig = torch.ao.quantization.default_qconfig
model = torch.ao.quantization.prepare(model)
model = torch.ao.quantization.convert(model)
outputs = model(data)
input_names = ["x"]
def export_to_onnx(model, input, input_names):
traced = torch.jit.trace(model, input)
buf = io.BytesIO()
torch.jit.save(traced, buf)
buf.seek(0)
model = torch.jit.load(buf)
f = io.BytesIO()
torch.onnx.export(model, input, f, input_names=input_names,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
opset_version=9)
        export_to_onnx(model, data, input_names)
@skipIfNoFBGEMM
@skipIfNoCaffe2
def test_lower_graph_linear(self):
model = torch.ao.quantization.QuantWrapper(torch.nn.Linear(5, 10, bias=True)).to(dtype=torch.float)
data_numpy = np.random.rand(1, 2, 5).astype(np.float32)
data = torch.from_numpy(data_numpy).to(dtype=torch.float)
self._test_lower_graph_impl(model, data)
@skipIfNoFBGEMM
@skipIfNoCaffe2
def test_lower_graph_conv2d(self):
model = torch.ao.quantization.QuantWrapper(torch.nn.Conv2d(3, 5, 2, bias=True)).to(dtype=torch.float)
data_numpy = np.random.rand(1, 3, 6, 6).astype(np.float32)
data = torch.from_numpy(data_numpy).to(dtype=torch.float)
self._test_lower_graph_impl(model, data)
@skipIfNoFBGEMM
@unittest.skip("onnx opset9 does not support quantize_per_tensor and caffe2 \
does not support conv3d")
def test_lower_graph_conv3d(self):
model = torch.ao.quantization.QuantWrapper(torch.nn.Conv3d(3, 5, 2, bias=True)).to(dtype=torch.float)
data_numpy = np.random.rand(1, 3, 6, 6, 6).astype(np.float32)
data = torch.from_numpy(data_numpy).to(dtype=torch.float)
self._test_lower_graph_impl(model, data)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_quantization.py TESTNAME\n\n"
"instead.")
|
pytorch-master
|
test/quantization/eager/test_quantize_eager_ptq.py
|
# Owner(s): ["oncall: quantization"]
import torch
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.ao.quantization.fuse_modules import fuse_modules
import torch.ao.quantization._equalize as _equalize
import copy
class TestEqualizeEager(QuantizationTestCase):
def checkChannelsEqualized(self, tensor1, tensor2, output_axis, input_axis):
''' Checks the channel ranges of tensor1, tensor2 are the same,
which is an indication that equalization has been applied correctly
'''
output_channel_tensor1 = _equalize.channel_range(tensor1, output_axis)
input_channel_tensor2 = _equalize.channel_range(tensor2, input_axis)
        # ensure that the output channel ranges of tensor1 are the same as
        # the input channel ranges of tensor2
self.assertEqual(output_channel_tensor1, input_channel_tensor2)
def getModule(self, model, name):
        ''' Given the name of a submodule of a model, return that submodule
'''
curr = model
name = name.split('.')
for subname in name:
curr = curr._modules[subname]
return curr
def test_cross_layer_equalization(self):
''' applies _equalize.cross_layer_equalization on two modules and checks
to make sure channels ranges are equivalent
'''
module1 = nn.Conv2d(3, 4, 2)
module2 = nn.Linear(4, 4)
module1_output_channel_axis = 0
module2_input_channel_axis = 1
_equalize.cross_layer_equalization(module1, module2)
mod_tensor1, mod_tensor2 = module1.weight, module2.weight
self.checkChannelsEqualized(mod_tensor1, mod_tensor2, module1_output_channel_axis, module2_input_channel_axis)
def test_converged(self):
        ''' Sanity checks that _equalize.converged works:
        identical modules should return True,
        modules with a large difference in weights should return False
'''
module1 = nn.Linear(3, 3)
module2 = nn.Linear(3, 3)
module1.weight = nn.parameter.Parameter(torch.ones(module1.weight.size()))
module2.weight = nn.parameter.Parameter(torch.zeros(module1.weight.size()))
# input is a dictionary
dictionary_1 = {'linear1': module1}
dictionary_2 = {'linear1': module2}
self.assertTrue(_equalize.converged(dictionary_1, dictionary_1, 1e-6))
self.assertFalse(_equalize.converged(dictionary_1, dictionary_2, 1e-6))
def test_equalize(self):
        ''' First checks whether _equalize.equalize can handle multiple
        module pairs as input,
then checks correctness of the function by ensuring the equalized
and unequalized versions of the model yield the same output
given the same input
'''
class ChainModule(nn.Module):
def __init__(self):
super(ChainModule, self).__init__()
self.linear1 = nn.Linear(3, 4)
self.linear2 = nn.Linear(4, 5)
self.linear3 = nn.Linear(5, 6)
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
x = self.linear3(x)
return x
chain1 = ChainModule()
chain2 = copy.deepcopy(chain1)
_equalize.equalize(chain1, [['linear1', 'linear2'], ['linear2', 'linear3']], 1e-6)
linear1 = self.getModule(chain1, 'linear1')
linear2 = self.getModule(chain1, 'linear2')
linear3 = self.getModule(chain1, 'linear3')
self.checkChannelsEqualized(linear1.weight, linear2.weight, 0, 1)
self.checkChannelsEqualized(linear2.weight, linear3.weight, 0, 1)
input = torch.randn(20, 3)
self.assertEqual(chain1(input), chain2(input))
def test_equalize_fused_convrelu(self):
''' Checks to see if eager mode equalization supports fused
ConvReLU2d models
A model with 3 ConvReLU2d is constructed. Next, the conv2d and relu
layers are fused together and adjacent conv2d layers have cross-layer
equalization applied. Finally, we ensure that the channels have been
equalized and that the equalized and unequalized versions of the model
yield the same output given the same input
'''
class M(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 3, 1).to(dtype=torch.float)
self.relu1 = nn.ReLU(inplace=False).to(dtype=torch.float)
self.conv2 = nn.Conv2d(3, 3, 1).to(dtype=torch.float)
self.relu2 = nn.ReLU(inplace=False).to(dtype=torch.float)
self.conv3 = nn.Conv2d(3, 3, 1).to(dtype=torch.float)
self.relu3 = nn.ReLU(inplace=False).to(dtype=torch.float)
def forward(self, x):
x = self.conv1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.relu2(x)
x = self.conv3(x)
x = self.relu3(x)
return x
model = M()
fused_model1 = fuse_modules(model, [['conv1', 'relu1'], ['conv2', 'relu2'], ['conv3', 'relu3']])
fused_model2 = copy.deepcopy(fused_model1)
_equalize.equalize(fused_model1, [['conv1', 'conv2'], ['conv2', 'conv3']], 1e-6)
conv1 = self.getModule(fused_model1, 'conv1')[0]
conv2 = self.getModule(fused_model1, 'conv2')[0]
conv3 = self.getModule(fused_model1, 'conv3')[0]
self.checkChannelsEqualized(conv1.weight, conv2.weight, 0, 1)
self.checkChannelsEqualized(conv2.weight, conv3.weight, 0, 1)
input = torch.randn(3, 3, 1, 1)
self.assertEqual(fused_model1(input), fused_model2(input))
self.assertEqual(fused_model1(input), model(input))
def test_equalize_fused_linearrelu(self):
''' Checks to see if eager mode equalization supports fused
LinearReLU models
A model with 3 LinearReLU is constructed. Next, the linear and relu
layers are fused together and adjacent linear layers have cross-layer
equalization applied. Finally, we ensure that the channels have been
equalized and that the equalized and unequalized versions of the model
yield the same output given the same input
'''
class M(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(3, 4)
self.relu1 = nn.ReLU(inplace=False).to(dtype=torch.float)
self.linear2 = nn.Linear(4, 5)
self.relu2 = nn.ReLU(inplace=False).to(dtype=torch.float)
self.linear3 = nn.Linear(5, 6)
self.relu3 = nn.ReLU(inplace=False).to(dtype=torch.float)
def forward(self, x):
x = self.linear1(x)
x = self.relu1(x)
x = self.linear2(x)
x = self.relu2(x)
x = self.linear3(x)
x = self.relu3(x)
return x
model = M()
fused_model1 = fuse_modules(model, [['linear1', 'relu1'], ['linear2', 'relu2'], ['linear3', 'relu3']])
fused_model2 = copy.deepcopy(fused_model1)
_equalize.equalize(fused_model1, [['linear1', 'linear2'], ['linear2', 'linear3']], 1e-6)
linear1 = self.getModule(fused_model1, 'linear1')[0]
linear2 = self.getModule(fused_model1, 'linear2')[0]
linear3 = self.getModule(fused_model1, 'linear3')[0]
self.checkChannelsEqualized(linear1.weight, linear2.weight, 0, 1)
self.checkChannelsEqualized(linear2.weight, linear3.weight, 0, 1)
input = torch.randn(20, 3)
self.assertEqual(fused_model1(input), fused_model2(input))
self.assertEqual(fused_model1(input), model(input))
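# A hand-written sketch of the cross-layer equalization idea checked above (an
# illustrative re-derivation, not the _equalize implementation): output channel i of
# the first layer is scaled by 1/s_i and input channel i of the second layer by s_i,
# with s_i = sqrt(range1_i / range2_i), so both per-channel ranges end up at
# sqrt(range1_i * range2_i) while the composed function (through a ReLU) is unchanged.
# The per-channel "range" is taken here as the max absolute weight for simplicity.
def _cross_layer_equalization_sketch(linear1, linear2):
    with torch.no_grad():
        range1 = linear1.weight.abs().amax(dim=1)   # per output channel of linear1
        range2 = linear2.weight.abs().amax(dim=0)   # per input channel of linear2
        scale = torch.sqrt(range1 / range2)
        linear1.weight.mul_(1.0 / scale.unsqueeze(1))
        if linear1.bias is not None:
            linear1.bias.mul_(1.0 / scale)
        linear2.weight.mul_(scale.unsqueeze(0))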
|
pytorch-master
|
test/quantization/eager/test_equalize_eager.py
|
pytorch-master
|
test/quantization/eager/__init__.py
|
|
# Owner(s): ["oncall: quantization"]
import torch
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
ModelMultipleOps,
ModelMultipleOpsNoAvgPool,
)
from torch.testing._internal.common_quantized import (
override_quantized_engine,
supported_qengines,
)
class TestModelNumericsEager(QuantizationTestCase):
def test_float_quant_compare_per_tensor(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
torch.manual_seed(42)
my_model = ModelMultipleOps().to(torch.float32)
my_model.eval()
calib_data = torch.rand(1024, 3, 15, 15, dtype=torch.float32)
eval_data = torch.rand(1, 3, 15, 15, dtype=torch.float32)
out_ref = my_model(eval_data)
qModel = torch.ao.quantization.QuantWrapper(my_model)
qModel.eval()
qModel.qconfig = torch.ao.quantization.default_qconfig
torch.ao.quantization.fuse_modules(qModel.module, [['conv1', 'bn1', 'relu1']], inplace=True)
torch.ao.quantization.prepare(qModel, inplace=True)
qModel(calib_data)
torch.ao.quantization.convert(qModel, inplace=True)
out_q = qModel(eval_data)
SQNRdB = 20 * torch.log10(torch.norm(out_ref) / torch.norm(out_ref - out_q))
# Quantized model output should be close to floating point model output numerically
                # Setting target SQNR to 30 dB, i.e. the quantization error power is at most
                # 1e-3 of the reference output power (error norm roughly 3% of the output norm)
self.assertGreater(SQNRdB, 30, msg='Quantized model numerics diverge from float, expect SQNR > 30 dB')
def test_float_quant_compare_per_channel(self):
# Test for per-channel Quant
torch.manual_seed(67)
my_model = ModelMultipleOps().to(torch.float32)
my_model.eval()
calib_data = torch.rand(2048, 3, 15, 15, dtype=torch.float32)
eval_data = torch.rand(10, 3, 15, 15, dtype=torch.float32)
out_ref = my_model(eval_data)
q_model = torch.ao.quantization.QuantWrapper(my_model)
q_model.eval()
q_model.qconfig = torch.ao.quantization.default_per_channel_qconfig
torch.ao.quantization.fuse_modules(q_model.module, [['conv1', 'bn1', 'relu1']], inplace=True)
torch.ao.quantization.prepare(q_model)
q_model(calib_data)
torch.ao.quantization.convert(q_model)
out_q = q_model(eval_data)
SQNRdB = 20 * torch.log10(torch.norm(out_ref) / torch.norm(out_ref - out_q))
# Quantized model output should be close to floating point model output numerically
# Setting target SQNR to be 35 dB
self.assertGreater(SQNRdB, 35, msg='Quantized model numerics diverge from float, expect SQNR > 35 dB')
def test_fake_quant_true_quant_compare(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
torch.manual_seed(67)
my_model = ModelMultipleOpsNoAvgPool().to(torch.float32)
calib_data = torch.rand(2048, 3, 15, 15, dtype=torch.float32)
eval_data = torch.rand(10, 3, 15, 15, dtype=torch.float32)
my_model.eval()
out_ref = my_model(eval_data)
fq_model = torch.ao.quantization.QuantWrapper(my_model)
fq_model.train()
fq_model.qconfig = torch.ao.quantization.default_qat_qconfig
torch.ao.quantization.fuse_modules_qat(fq_model.module, [['conv1', 'bn1', 'relu1']], inplace=True)
torch.ao.quantization.prepare_qat(fq_model)
fq_model.eval()
fq_model.apply(torch.ao.quantization.disable_fake_quant)
fq_model.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
fq_model(calib_data)
fq_model.apply(torch.ao.quantization.enable_fake_quant)
fq_model.apply(torch.ao.quantization.disable_observer)
out_fq = fq_model(eval_data)
SQNRdB = 20 * torch.log10(torch.norm(out_ref) / torch.norm(out_ref - out_fq))
# Quantized model output should be close to floating point model output numerically
# Setting target SQNR to be 35 dB
self.assertGreater(SQNRdB, 35, msg='Quantized model numerics diverge from float, expect SQNR > 35 dB')
torch.ao.quantization.convert(fq_model)
out_q = fq_model(eval_data)
SQNRdB = 20 * torch.log10(torch.norm(out_fq) / (torch.norm(out_fq - out_q) + 1e-10))
self.assertGreater(SQNRdB, 60, msg='Fake quant and true quant numerics diverge, expect SQNR > 60 dB')
# Test to compare weight only quantized model numerics and
# activation only quantized model numerics with float
def test_weight_only_activation_only_fakequant(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
torch.manual_seed(67)
calib_data = torch.rand(2048, 3, 15, 15, dtype=torch.float32)
eval_data = torch.rand(10, 3, 15, 15, dtype=torch.float32)
qconfigset = set([torch.ao.quantization.default_weight_only_qconfig,
torch.ao.quantization.default_activation_only_qconfig])
SQNRTarget = [35, 45]
for idx, qconfig in enumerate(qconfigset):
my_model = ModelMultipleOpsNoAvgPool().to(torch.float32)
my_model.eval()
out_ref = my_model(eval_data)
fq_model = torch.ao.quantization.QuantWrapper(my_model)
fq_model.train()
fq_model.qconfig = qconfig
torch.ao.quantization.fuse_modules_qat(fq_model.module, [['conv1', 'bn1', 'relu1']], inplace=True)
torch.ao.quantization.prepare_qat(fq_model)
fq_model.eval()
fq_model.apply(torch.ao.quantization.disable_fake_quant)
fq_model.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
fq_model(calib_data)
fq_model.apply(torch.ao.quantization.enable_fake_quant)
fq_model.apply(torch.ao.quantization.disable_observer)
out_fq = fq_model(eval_data)
SQNRdB = 20 * torch.log10(torch.norm(out_ref) / torch.norm(out_ref - out_fq))
self.assertGreater(SQNRdB, SQNRTarget[idx], msg='Quantized model numerics diverge from float')
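# A small helper sketch making the SQNR bookkeeping above explicit:
# SQNR(dB) = 20 * log10(||ref|| / ||ref - out||), so 30 dB means the error norm is
# about 3.2% of the reference norm (error power 1e-3 of signal power) and 60 dB
# corresponds to roughly 0.1% error norm.
def _sqnr_db_sketch(ref, out):
    return 20 * torch.log10(torch.norm(ref) / torch.norm(ref - out))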
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_quantization.py TESTNAME\n\n"
"instead.")
|
pytorch-master
|
test/quantization/eager/test_model_numerics.py
|
# Owner(s): ["oncall: quantization"]
import torch
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.ao.quantization import default_qconfig
from torch.ao.quantization import QuantWrapper
import torch.ao.ns._numeric_suite as ns
from torch.ao.quantization._correct_bias import (
_supported_modules,
_supported_modules_quantized,
bias_correction,
get_module,
get_param,
parent_child_names
)
import copy
class TestBiasCorrectionEager(QuantizationTestCase):
def compute_sqnr(self, x, y):
Ps = torch.norm(x)
Pn = torch.norm(x - y)
return 20 * torch.log10(Ps / Pn)
def correct_artificial_bias_quantize(self, float_model, img_data):
        ''' Adds an artificial bias to a quantized submodule and checks that bias
        correction brings it back close to the float model's bias.
'''
artificial_model = copy.deepcopy(float_model)
artificial_model.qconfig = default_qconfig
torch.ao.quantization.prepare(artificial_model, inplace=True)
for data in img_data:
artificial_model(data[0])
torch.ao.quantization.convert(artificial_model, inplace=True)
# manually changing bias
for name, submodule in artificial_model.named_modules():
if type(submodule) in _supported_modules:
x = get_param(submodule, 'bias')
weight = get_param(submodule, 'weight')
if x is not None:
submodule.set_weight_bias(weight, x.data * 3)
bias_correction(float_model, artificial_model, img_data, target_modules=_supported_modules_quantized)
        # Trim off the shadow modules
for name, submodule in artificial_model.named_modules():
if isinstance(submodule, ns.Shadow):
parent_name, child_name = parent_child_names(name)
parent = get_module(artificial_model, parent_name)
parent._modules[child_name] = submodule.orig_module
for name, artificial_submodule in artificial_model.named_modules():
if type(artificial_submodule) in _supported_modules_quantized:
submodule = get_module(float_model, name)
float_bias = get_param(submodule, 'bias')
artificial_bias = get_param(artificial_submodule, 'bias')
self.assertTrue(self.compute_sqnr(float_bias, artificial_bias) > 30,
"Correcting quantized bias produced too much noise, sqnr score too low")
@skipIfNoFBGEMM
def test_linear_chain(self):
class LinearChain(nn.Module):
def __init__(self):
super(LinearChain, self).__init__()
self.linear1 = nn.Linear(3, 4)
self.linear2 = nn.Linear(4, 5)
self.linear3 = nn.Linear(5, 6)
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
x = self.linear3(x)
return x
float_model = QuantWrapper(LinearChain())
img_data = [(torch.rand(10, 3, dtype=torch.float), torch.randint(0, 1, (2,), dtype=torch.long))
for _ in range(50)]
self.correct_artificial_bias_quantize(float_model, img_data)
@skipIfNoFBGEMM
def test_conv_chain(self):
class ConvChain(nn.Module):
def __init__(self):
super(ConvChain, self).__init__()
self.conv2d1 = nn.Conv2d(3, 4, 5, 5)
self.conv2d2 = nn.Conv2d(4, 5, 5, 5)
self.conv2d3 = nn.Conv2d(5, 6, 5, 5)
def forward(self, x):
x = self.conv2d1(x)
x = self.conv2d2(x)
x = self.conv2d3(x)
return x
float_model = QuantWrapper(ConvChain())
img_data = [(torch.rand(10, 3, 125, 125, dtype=torch.float), torch.randint(0, 1, (2,), dtype=torch.long))
for _ in range(50)]
self.correct_artificial_bias_quantize(float_model, img_data)
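# An illustrative sketch of the bias-correction idea exercised above (not the
# torch.ao.quantization._correct_bias implementation): estimate the expected
# per-output-channel error of an approximated layer against its float reference on
# calibration data, then subtract it from the bias so the mean output matches again.
# Plain nn.Linear modules stand in for the quantized submodules that bias_correction()
# actually handles.
def _bias_correction_sketch(ref_linear, approx_linear, calib_inputs):
    with torch.no_grad():
        expected_error = torch.stack(
            [(approx_linear(x) - ref_linear(x)).mean(dim=0) for x in calib_inputs]
        ).mean(dim=0)
        approx_linear.bias.sub_(expected_error)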
|
pytorch-master
|
test/quantization/eager/test_bias_correction_eager.py
|
# Owner(s): ["oncall: quantization"]
import copy
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.qat as nniqat
from torch.ao.quantization import (
quantize,
prepare,
convert,
prepare_qat,
quantize_qat,
fuse_modules,
fuse_modules_qat,
QConfig,
default_qconfig,
default_qat_qconfig,
)
from torch.testing._internal.common_quantization import (
QuantizationTestCase,
ModelForFusion,
ModelWithSequentialFusion,
ModelForLinearBNFusion,
ModelForFusionWithBias,
ModelForConvTransposeBNFusion,
test_only_eval_fn,
test_only_train_fn,
skipIfNoFBGEMM,
)
from torch.testing._internal.common_quantized import (
override_quantized_engine,
supported_qengines,
)
@skipIfNoFBGEMM
class TestFuseEager(QuantizationTestCase):
def test_fuse_module_train(self):
model = ModelForFusion(default_qat_qconfig).train()
# Test step by step fusion
model = fuse_modules_qat(model, ['conv1', 'bn1', 'relu1'])
model = fuse_modules_qat(model, ['sub1.conv', 'sub1.bn'])
self.assertEqual(type(model.conv1), nni.ConvBnReLU2d,
msg="Fused Conv + BN + Relu first layer")
self.assertEqual(type(model.bn1), torch.nn.Identity,
msg="Fused Conv + BN + Relu (skipped BN)")
self.assertEqual(type(model.relu1), torch.nn.Identity,
msg="Fused Conv + BN + Relu (skipped Relu)")
self.assertEqual(type(model.sub1.conv), nni.ConvBn2d,
msg="Fused submodule Conv + BN")
self.assertEqual(type(model.sub1.bn), torch.nn.Identity,
msg="Fused submodule Conv + BN (skipped BN)")
self.assertEqual(type(model.sub2.conv), torch.nn.Conv2d,
msg="Non-fused submodule Conv")
self.assertEqual(type(model.sub2.relu), torch.nn.ReLU,
msg="Non-fused submodule ReLU")
model = prepare_qat(model)
self.checkObservers(model)
def checkQAT(model):
self.assertEqual(type(model.conv1), nniqat.ConvBnReLU2d)
self.assertEqual(type(model.bn1), nn.Identity)
self.assertEqual(type(model.relu1), nn.Identity)
self.assertEqual(type(model.sub1.conv), nniqat.ConvBn2d)
self.assertEqual(type(model.sub1.bn), nn.Identity)
self.assertEqual(type(model.sub2.conv), nn.Conv2d)
self.assertEqual(type(model.sub2.relu), nn.ReLU)
checkQAT(model)
test_only_train_fn(model, self.img_data_1d_train)
model = convert(model)
def checkQuantized(model):
self.assertEqual(type(model.conv1), nniq.ConvReLU2d)
self.assertEqual(type(model.bn1), nn.Identity)
self.assertEqual(type(model.relu1), nn.Identity)
self.assertEqual(type(model.sub1.conv), nnq.Conv2d)
self.assertEqual(type(model.sub1.bn), nn.Identity)
self.assertEqual(type(model.sub2.conv), nn.Conv2d)
self.assertEqual(type(model.sub2.relu), nn.ReLU)
test_only_eval_fn(model, self.img_data_1d)
self.checkNoQconfig(model)
with self.assertRaisesRegex(RuntimeError, "Could not run 'aten::native_batch_norm' with arguments from the 'QuantizedCPU'"):
checkQuantized(model)
model = ModelForFusion(default_qat_qconfig).train()
model = fuse_modules_qat(
model,
[['conv1', 'bn1', 'relu1'],
['sub1.conv', 'sub1.bn']])
model = quantize_qat(model, test_only_train_fn, [self.img_data_1d_train])
with self.assertRaisesRegex(RuntimeError, "Could not run 'aten::native_batch_norm' with arguments from the 'QuantizedCPU'"):
checkQuantized(model)
def test_fuse_module_eval(self):
model = ModelForFusion(default_qconfig)
model.eval()
model = fuse_modules(
model,
[['conv3', 'bn3', 'relu4'],
['conv1', 'bn1', 'relu1'],
['conv2', 'relu2'],
['bn2', 'relu3'],
['sub1.conv', 'sub1.bn']])
self.assertEqual(type(model.conv1), nni.ConvReLU2d,
msg="Fused Conv + BN + Relu first layer (BN is folded)")
self.assertEqual(type(model.conv1[0]), nn.Conv2d,
msg="Fused Conv + BN + Relu (Conv + folded BN only)")
self.assertEqual(type(model.conv1[1]), nn.ReLU,
msg="Fused Conv + BN + Relu second layer (Relu only)")
self.assertEqual(type(model.bn1), nn.Identity,
msg="Fused Conv + BN + Relu second layer (Skipped BN)")
self.assertEqual(type(model.relu1), nn.Identity,
msg="Fused Conv + BN + Relu second layer (Skipped Relu)")
self.assertEqual(type(model.conv2), nni.ConvReLU3d,
msg="Fused Conv + BN + Relu first layer (BN is folded)")
        self.assertEqual(type(model.bn2), nni.BNReLU3d,
                         msg="Fused BN + Relu first layer (Relu is folded)")
self.assertEqual(type(model.relu3), nn.Identity,
msg="Fused BN + Relu second layer (Skipped Relu)")
self.assertEqual(type(model.conv2[0]), nn.Conv3d,
msg="Fused Conv + BN + Relu (Conv + folded BN only)")
self.assertEqual(type(model.conv2[1]), nn.ReLU,
msg="Fused Conv + BN + Relu second layer (Relu only)")
self.assertEqual(type(model.relu2), nn.Identity,
msg="Fused Conv + BN + Relu second layer (Skipped Relu)")
self.assertEqual(type(model.conv3), nni.ConvReLU1d,
msg="Fused Conv + Relu for Conv1d (folded BN)")
self.assertEqual(type(model.conv3[0]), nn.Conv1d,
msg="Fused Conv + Relu for Conv1d ")
self.assertEqual(type(model.conv3[1]), nn.ReLU,
msg="Fused Conv + Relu for Conv1d")
self.assertEqual(type(model.bn3), nn.Identity,
msg="Fused Conv + BN + Relu for Conv1d (Skipped BN)")
self.assertEqual(type(model.sub1.conv), nn.Conv2d,
msg="Fused submodule Conv + folded BN")
self.assertEqual(type(model.sub1.bn), nn.Identity,
msg="Fused submodule (skipped BN)")
self.assertEqual(type(model.sub2.conv), nn.Conv2d,
msg="Non-fused submodule Conv")
self.assertEqual(type(model.sub2.relu), torch.nn.ReLU,
msg="Non-fused submodule ReLU")
model = prepare(model)
self.checkObservers(model)
test_only_eval_fn(model, self.img_data_1d)
model = convert(model)
def checkQuantized(model):
self.assertEqual(type(model.conv3), nniq.ConvReLU1d)
self.assertEqual(type(model.conv1), nniq.ConvReLU2d)
self.assertEqual(type(model.bn1), nn.Identity)
self.assertEqual(type(model.relu1), nn.Identity)
self.assertEqual(type(model.sub1.conv), nnq.Conv2d)
self.assertEqual(type(model.sub1.bn), nn.Identity)
self.assertEqual(type(model.sub2.conv), nn.Conv2d)
self.assertEqual(type(model.sub2.relu), nn.ReLU)
self.assertEqual(type(model.bn2), nniq.BNReLU3d)
test_only_eval_fn(model, self.img_data_1d)
self.checkNoQconfig(model)
checkQuantized(model)
model = ModelForFusion(default_qconfig).eval()
model = fuse_modules(
model,
[['conv1', 'bn1', 'relu1'],
['conv2', 'relu2'],
['bn2', 'relu3'],
['sub1.conv', 'sub1.bn'],
['conv3', 'bn3', 'relu4']])
model = quantize(model, test_only_eval_fn, [self.img_data_1d])
checkQuantized(model)
def test_fusion_sequential_model_train(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = ModelWithSequentialFusion().train()
model.to(torch.float)
fuse_modules_qat(
model, [['conv1', 'relu1'] ,
['features.0.0', 'features.0.1', 'features.0.2'],
['features.1.0', 'features.1.1', 'features.1.2'],
['features.2.0', 'features.2.1', 'features.2.2'],
['classifier.0', 'classifier.1']],
inplace=True)
self.assertEqual(type(model.conv1), nni.ConvReLU2d,
msg="Fused Conv + Relu: nni.ConvReLU2d")
self.assertEqual(type(model.conv1[0]), nn.Conv2d,
msg="Fused Conv + Relu: Conv2d")
self.assertEqual(type(model.conv1[1]), nn.ReLU,
msg="Fused Conv + Relu: Relu")
self.assertEqual(type(model.relu1), nn.Identity,
msg="Fused Conv + Relu: Identity")
for i in range(3):
self.assertEqual(type(model.features[i][0]), nni.ConvBnReLU2d,
msg="Fused submodule Conv + folded BN")
self.assertEqual(type(model.features[i][1]), nn.Identity,
msg="Fused submodule (skipped BN)")
self.assertEqual(type(model.features[i][2]), nn.Identity,
msg="Non-fused submodule Conv")
self.assertEqual(type(model.classifier[0]), nni.LinearReLU)
self.assertEqual(type(model.classifier[1]), nn.Identity)
model.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine)
prepare_qat(model, inplace=True)
self.checkObservers(model)
model(self.img_data_2d[0][0])
def checkQAT(model):
self.assertEqual(type(model.conv1), nniqat.ConvReLU2d)
self.assertEqual(type(model.relu1), nn.Identity)
for i in range(3):
self.assertEqual(type(model.features[i][0]), nniqat.ConvBnReLU2d,
msg="Fused submodule Conv + folded BN")
self.assertEqual(type(model.features[i][1]), nn.Identity,
msg="Fused submodule (skipped BN)")
self.assertEqual(type(model.features[i][2]), nn.Identity,
msg="Non-fused submodule Conv")
self.assertEqual(type(model.classifier[0]), nniqat.LinearReLU)
self.assertEqual(type(model.classifier[1]), nn.Identity)
checkQAT(model)
model(self.img_data_2d[1][0])
convert(model, inplace=True)
model(self.img_data_2d[1][0])
self.checkModelWithSequentialQuantized(model)
def test_fusion_sequential_model_eval(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = ModelWithSequentialFusion().eval()
model.to(torch.float)
fuse_modules(
model,
[['conv1', 'relu1'],
['features.0.0', 'features.0.1', 'features.0.2'],
['features.1.0', 'features.1.1', 'features.1.2'],
['features.2.0', 'features.2.1', 'features.2.2'],
['classifier.0', 'classifier.1']],
inplace=True)
self.assertEqual(type(model.conv1), nni.ConvReLU2d,
msg="Fused Conv + Relu: nni.ConvReLU2d")
self.assertEqual(type(model.conv1[0]), nn.Conv2d,
msg="Fused Conv + Relu: Conv2d")
self.assertEqual(type(model.conv1[1]), nn.ReLU,
msg="Fused Conv + Relu: Relu")
self.assertEqual(type(model.relu1), nn.Identity,
msg="Fused Conv + Relu: Identity")
for i in range(3):
self.assertEqual(type(model.features[i][0]), nni.ConvReLU2d,
msg="Fused submodule Conv + folded BN")
self.assertEqual(type(model.features[i][1]), nn.Identity,
msg="Fused submodule (skipped BN)")
self.assertEqual(type(model.features[i][2]), nn.Identity,
msg="Non-fused submodule Conv")
self.assertEqual(type(model.classifier[0]), nni.LinearReLU)
self.assertEqual(type(model.classifier[1]), nn.Identity)
model.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
prepare(model, inplace=True)
self.checkObservers(model)
model(self.img_data_2d[0][0])
convert(model, inplace=True)
model(self.img_data_2d[1][0])
self.checkModelWithSequentialQuantized(model)
def checkModelWithSequentialQuantized(self, model):
self.assertEqual(type(model.conv1), nniq.ConvReLU2d)
self.assertEqual(type(model.relu1), nn.Identity)
for i in range(3):
self.assertEqual(type(model.features[i][0]), nniq.ConvReLU2d)
self.assertEqual(type(model.features[i][1]), nn.Identity)
self.assertEqual(type(model.features[i][2]), nn.Identity)
self.assertEqual(type(model.classifier[0]), nniq.LinearReLU)
self.assertEqual(type(model.classifier[1]), nn.Identity)
def test_fusion_conv_with_bias(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model_orig = ModelForFusionWithBias().train()
# reference model
model_ref = copy.deepcopy(model_orig)
# output with no fusion.
out_ref = model_ref(self.img_data_2d[0][0])
# fused model
model_orig.qconfig = QConfig(activation=torch.nn.Identity,
weight=torch.nn.Identity)
model = fuse_modules_qat(
model_orig,
[["conv1", "bn1", "relu1"],
["conv2", "bn2"]])
prep_model = prepare_qat(model, inplace=False)
# output with fusion but no observers.
out_fused = prep_model(self.img_data_2d[0][0])
self.assertEqual(out_ref, out_fused)
def checkBN(bn_ref, bn):
self.assertEqual(bn_ref.weight, bn.weight)
self.assertEqual(bn_ref.bias, bn.bias)
self.assertEqual(bn_ref.running_mean, bn.running_mean)
self.assertEqual(bn_ref.running_var, bn.running_var)
checkBN(model_ref.bn1, prep_model.conv1.bn)
checkBN(model_ref.bn2, prep_model.conv2.bn)
model.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
prepare_qat(model, inplace=True)
model(self.img_data_2d[0][0])
def checkQAT(model):
self.assertEqual(type(model.conv1), nniqat.ConvBnReLU2d)
self.assertEqual(type(model.bn1), nn.Identity)
self.assertEqual(type(model.relu1), nn.Identity)
self.assertEqual(type(model.conv2), nniqat.ConvBn2d)
self.assertEqual(type(model.bn2), nn.Identity)
checkQAT(model)
def test_fusion_linear_bn_eval(self):
model = ModelForLinearBNFusion().train()
inp1 = torch.randn(8, 20)
inp2 = torch.randn(8, 20)
# Get some interesting values into the running mean and variance.
model(inp1)
model.eval()
golden = model(inp2)
model = fuse_modules(model, [["fc", "bn"]])
self.assertEqual(type(model.bn), nn.Identity)
self.assertEqual(golden, model(inp2))
def test_fusion_convtranspose_bn_eval(self):
model = ModelForConvTransposeBNFusion().train()
inp1 = torch.randn(8, 3, 16)
inp2 = torch.randn(8, 3, 16)
# Get some interesting values into the running mean and variance.
model(inp1)
model.eval()
golden = model(inp2)
model = fuse_modules(model, [["conv1", "bn1"], ["conv2", "bn2"], ["conv3", "bn3"]])
self.assertEqual(type(model.bn1), nn.Identity)
self.assertEqual(type(model.bn2), nn.Identity)
self.assertEqual(type(model.bn3), nn.Identity)
self.assertEqual(golden, model(inp2))
def test_forward_hooks_preserved(self):
r"""Test case that checks whether forward pre hooks of the first module and
post forward hooks of the last module in modules list passed to fusion function preserved.
(e.g. before fusion: [nn.Conv2d (with pre forward hooks), nn.BatchNorm2d, nn.ReLU (with post forward hooks)]
after fusion: [nni.ConvBnReLU2d (with pre and post hooks), nn.Identity, nn.Identity])
"""
model = ModelForFusion(default_qat_qconfig).train()
counter = {
'pre_forwards': 0,
'forwards': 0,
}
fused = False
def fw_pre_hook(fused_module_class, h_module, input):
if fused:
self.assertEqual(type(h_module), fused_module_class,
"After fusion owner of the first module's forward pre hook is not a fused module")
counter['pre_forwards'] += 1
def fw_hook(fused_module_class, h_module, input, output):
if fused:
self.assertEqual(type(h_module), fused_module_class,
"After fusion owner of the last module's forward hook is not a fused module")
counter['forwards'] += 1
        # Register two pre forward hooks and two post forward hooks, so each counter is expected to increase by two per inference
model.conv1.register_forward_pre_hook(lambda *args: fw_pre_hook(nni.ConvBnReLU2d, *args))
model.sub1.conv.register_forward_pre_hook(lambda *args: fw_pre_hook(nni.ConvBn2d, *args))
model.relu1.register_forward_hook(lambda *args: fw_hook(nni.ConvBnReLU2d, *args))
model.sub1.bn.register_forward_hook(lambda *args: fw_hook(nni.ConvBn2d, *args))
test_only_eval_fn(model, self.img_data_1d)
self.assertEqual(counter['pre_forwards'], 2 * len(self.img_data_1d))
self.assertEqual(counter['forwards'], 2 * len(self.img_data_1d))
model = fuse_modules_qat(model, ['conv1', 'bn1', 'relu1'])
model = fuse_modules_qat(model, ['sub1.conv', 'sub1.bn'])
fused = True
before_fusion_pre_count = counter['pre_forwards']
before_fusion_post_count = counter['forwards']
test_only_eval_fn(model, self.img_data_1d)
self.assertEqual(counter['pre_forwards'] - before_fusion_pre_count, 2 * len(self.img_data_1d))
self.assertEqual(counter['forwards'] - before_fusion_post_count, 2 * len(self.img_data_1d))
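# A minimal sketch of the eval-mode Conv+BN folding that fuse_modules relies on in the
# tests above (an illustrative re-derivation, not the library code), assuming an affine
# BatchNorm2d with tracked running statistics:
#   W' = W * gamma / sqrt(running_var + eps)   (scaled per output channel)
#   b' = (b - running_mean) * gamma / sqrt(running_var + eps) + beta
def _fold_conv_bn_sketch(conv, bn):
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    fused = nn.Conv2d(conv.in_channels, conv.out_channels, conv.kernel_size,
                      stride=conv.stride, padding=conv.padding, bias=True)
    fused.weight.data = conv.weight.data * scale.reshape(-1, 1, 1, 1)
    conv_bias = conv.bias.data if conv.bias is not None else torch.zeros_like(bn.running_mean)
    fused.bias.data = (conv_bias - bn.running_mean) * scale + bn.bias
    return fused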
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_quantization.py TESTNAME\n\n"
"instead.")
|
pytorch-master
|
test/quantization/eager/test_fuse_eager.py
|