From 0d56dfb2c8bb0feb72e6839ca684af96cd086cbe Mon Sep 17 00:00:00 2001 From: Kyunggeun Lee Date: Wed, 31 Jan 2024 12:30:29 -0800 Subject: [PATCH 1/4] Add A/B test directory Signed-off-by: Kyunggeun Lee --- .../test/python/experimental/v2/__init__.py | 36 +++++++++++++++++++ .../experimental/v2/ab_test/__init__.py | 36 +++++++++++++++++++ .../v2/{ => ab_test}/test_quantsim_config_.py | 2 +- .../test_quantsim_export.py} | 3 +- .../test_quantsim_logits.py} | 2 +- .../experimental/v2/models_/__init__.py | 36 +++++++++++++++++++ .../python/experimental/v2/nn/__init__.py | 36 +++++++++++++++++++ .../experimental/v2/quantizers/__init__.py | 36 +++++++++++++++++++ 8 files changed, 183 insertions(+), 4 deletions(-) create mode 100644 TrainingExtensions/torch/test/python/experimental/v2/__init__.py create mode 100644 TrainingExtensions/torch/test/python/experimental/v2/ab_test/__init__.py rename TrainingExtensions/torch/test/python/experimental/v2/{ => ab_test}/test_quantsim_config_.py (99%) rename TrainingExtensions/torch/test/python/experimental/v2/{test_quantsim_v1_export.py => ab_test/test_quantsim_export.py} (99%) rename TrainingExtensions/torch/test/python/experimental/v2/{test_quantsim_v1_consistency.py => ab_test/test_quantsim_logits.py} (99%) create mode 100644 TrainingExtensions/torch/test/python/experimental/v2/models_/__init__.py create mode 100644 TrainingExtensions/torch/test/python/experimental/v2/nn/__init__.py create mode 100644 TrainingExtensions/torch/test/python/experimental/v2/quantizers/__init__.py diff --git a/TrainingExtensions/torch/test/python/experimental/v2/__init__.py b/TrainingExtensions/torch/test/python/experimental/v2/__init__.py new file mode 100644 index 00000000000..e2bd9499cbc --- /dev/null +++ b/TrainingExtensions/torch/test/python/experimental/v2/__init__.py @@ -0,0 +1,36 @@ +# -*- mode: python -*- +# ============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# +# @@-COPYRIGHT-END-@@ +# ============================================================================= diff --git a/TrainingExtensions/torch/test/python/experimental/v2/ab_test/__init__.py b/TrainingExtensions/torch/test/python/experimental/v2/ab_test/__init__.py new file mode 100644 index 00000000000..e2bd9499cbc --- /dev/null +++ b/TrainingExtensions/torch/test/python/experimental/v2/ab_test/__init__.py @@ -0,0 +1,36 @@ +# -*- mode: python -*- +# ============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# +# @@-COPYRIGHT-END-@@ +# ============================================================================= diff --git a/TrainingExtensions/torch/test/python/experimental/v2/test_quantsim_config_.py b/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_config_.py similarity index 99% rename from TrainingExtensions/torch/test/python/experimental/v2/test_quantsim_config_.py rename to TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_config_.py index 09ea762a225..12921fded74 100644 --- a/TrainingExtensions/torch/test/python/experimental/v2/test_quantsim_config_.py +++ b/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_config_.py @@ -57,7 +57,7 @@ from aimet_torch.experimental.v2.quantization.quantizers.affine import QuantizeDequantize from aimet_torch.experimental.v2.quantization.quantizers.float import FloatQuantizeDequantize -from models_.models_to_test import SingleResidual, QuantSimTinyModel, MultiInput, SingleResidualWithModuleAdd, \ +from ..models_.models_to_test import SingleResidual, QuantSimTinyModel, MultiInput, SingleResidualWithModuleAdd, \ SingleResidualWithAvgPool, ModelWithBertCustomLayerNormGelu diff --git a/TrainingExtensions/torch/test/python/experimental/v2/test_quantsim_v1_export.py b/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_export.py similarity index 99% rename from TrainingExtensions/torch/test/python/experimental/v2/test_quantsim_v1_export.py rename to TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_export.py index c41b1b8666f..05cd1448fde 100644 --- a/TrainingExtensions/torch/test/python/experimental/v2/test_quantsim_v1_export.py +++ b/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_export.py @@ -41,7 +41,6 @@ import os import json -# from aimet_torch.experimental.v2.quantization.wrappers.quantization_mixin import _QuantizationMixin import aimet_torch.experimental.v2.nn as aimet_nn from aimet_torch.experimental.v2.nn.fake_quant import FakeQuantizationMixin from aimet_torch.experimental.v2.quantization.quantizers.affine import QuantizeDequantize @@ -50,7 +49,7 @@ from aimet_torch import onnx_utils from aimet_torch.quantsim import QuantizationSimModel, OnnxExportApiArgs -from models_.models_to_test import ( +from ..models_.models_to_test import ( SimpleConditional, ModelWithTwoInputs, ModelWith5Output, diff --git a/TrainingExtensions/torch/test/python/experimental/v2/test_quantsim_v1_consistency.py b/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_logits.py similarity index 99% rename from TrainingExtensions/torch/test/python/experimental/v2/test_quantsim_v1_consistency.py rename to TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_logits.py index 6cf7ecd1470..7a22370bc68 100644 --- a/TrainingExtensions/torch/test/python/experimental/v2/test_quantsim_v1_consistency.py +++ b/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_logits.py @@ -42,7 +42,7 @@ import pytest import torch -from models_ import models_to_test +from ..models_ import models_to_test from aimet_common.defs import QuantScheme diff --git a/TrainingExtensions/torch/test/python/experimental/v2/models_/__init__.py b/TrainingExtensions/torch/test/python/experimental/v2/models_/__init__.py new file mode 100644 index 00000000000..e2bd9499cbc --- /dev/null +++ b/TrainingExtensions/torch/test/python/experimental/v2/models_/__init__.py @@ 
-0,0 +1,36 @@ +# -*- mode: python -*- +# ============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# SPDX-License-Identifier: BSD-3-Clause +# +# @@-COPYRIGHT-END-@@ +# ============================================================================= diff --git a/TrainingExtensions/torch/test/python/experimental/v2/nn/__init__.py b/TrainingExtensions/torch/test/python/experimental/v2/nn/__init__.py new file mode 100644 index 00000000000..e2bd9499cbc --- /dev/null +++ b/TrainingExtensions/torch/test/python/experimental/v2/nn/__init__.py @@ -0,0 +1,36 @@ +# -*- mode: python -*- +# ============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# SPDX-License-Identifier: BSD-3-Clause +# +# @@-COPYRIGHT-END-@@ +# ============================================================================= diff --git a/TrainingExtensions/torch/test/python/experimental/v2/quantizers/__init__.py b/TrainingExtensions/torch/test/python/experimental/v2/quantizers/__init__.py new file mode 100644 index 00000000000..e2bd9499cbc --- /dev/null +++ b/TrainingExtensions/torch/test/python/experimental/v2/quantizers/__init__.py @@ -0,0 +1,36 @@ +# -*- mode: python -*- +# ============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# +# @@-COPYRIGHT-END-@@ +# ============================================================================= From 477d29f4ca0c724c315f6c99eda684deb530e980 Mon Sep 17 00:00:00 2001 From: Kyunggeun Lee Date: Wed, 31 Jan 2024 14:50:14 -0800 Subject: [PATCH 2/4] Adjust test criteria to tolerate off-by-one precision error Signed-off-by: Kyunggeun Lee --- .../v2/ab_test/test_quantsim_export.py | 17 ----- .../v2/ab_test/test_quantsim_logits.py | 63 ++++++++++++------- .../experimental/v2/models_/models_to_test.py | 17 +++++ 3 files changed, 58 insertions(+), 39 deletions(-) diff --git a/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_export.py b/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_export.py index 05cd1448fde..75d4800ddcb 100644 --- a/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_export.py +++ b/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_export.py @@ -53,7 +53,6 @@ SimpleConditional, ModelWithTwoInputs, ModelWith5Output, - ModuleWith5Output, SoftMaxAvgPoolModel, ) @@ -246,22 +245,6 @@ def test_multi_output_onnx_op(self): model = ModelWith5Output() dummy_input = torch.randn(1, 3, 224, 224) sim_model = copy.deepcopy(model) - - @FakeQuantizationMixin.implements(ModuleWith5Output) - class FakeQuantizationMixinWithDisabledOutput(FakeQuantizationMixin, ModuleWith5Output): - def __quant_init__(self): - super().__quant_init__() - self.output_quantizers = torch.nn.ModuleList([None, None, None, None, None]) - - def quantized_forward(self, input): - if self.input_quantizers[0]: - input = self.input_quantizers[0](input) - outputs = super().forward(input) - return tuple( - quantizer(out) if quantizer else out - for out, quantizer in zip(outputs, self.output_quantizers) - ) - sim_model.cust = FakeQuantizationMixin.from_module(sim_model.cust) sim_model.cust.input_quantizers[0] = QuantizeDequantize((1,), bitwidth=8, diff --git a/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_logits.py b/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_logits.py index 7a22370bc68..e94276680d4 100644 --- a/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_logits.py +++ b/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_logits.py @@ -41,6 +41,8 @@ import tempfile import pytest import torch +import random +import numpy as np from ..models_ import models_to_test @@ -189,12 +191,21 @@ def config_path(request): yield temp_config_path +def set_seed(seed): + torch.manual_seed(seed) + np.random.seed(seed) + random.seed(seed) + + @pytest.mark.skip("Skip tests until v2 implementation is done") @pytest.mark.parametrize('quant_scheme', [QuantScheme.post_training_tf, - QuantScheme.post_training_percentile, - QuantScheme.training_range_learning_with_tf_init]) -class TestCompareV1QuantsimAndV2Quantsim: + # QuantScheme.post_training_percentile, # TODO: not implemented + # QuantScheme.training_range_learning_with_tf_init, # TODO: not implemented + ]) +@pytest.mark.parametrize('seed', range(3)) +class TestQuantsimLogits: @staticmethod + @torch.no_grad() def check_qsim_logit_consistency(config, quant_scheme, model, dummy_input): with tempfile.TemporaryDirectory() as temp_dir: config_path = os.path.join(temp_dir, "quantsim_config.json") @@ -211,43 +222,48 @@ def check_qsim_logit_consistency(config, quant_scheme, model, dummy_input): default_output_bw=16, config_file=config_path) - 
v1_sim.compute_encodings(lambda sim_model, _: sim_model(dummy_input), + if isinstance(dummy_input, torch.Tensor): + dummy_input = (dummy_input,) + + v1_sim.compute_encodings(lambda sim_model, _: sim_model(*dummy_input), forward_pass_callback_args=None) - v2_sim.compute_encodings(lambda sim_model, _: sim_model(dummy_input), + v2_sim.compute_encodings(lambda sim_model, _: sim_model(*dummy_input), forward_pass_callback_args=None) - v1_logits = v1_sim.model(dummy_input) - v2_logits = v2_sim.model(dummy_input) + v1_logits = v1_sim.model(*dummy_input) + v2_logits = v2_sim.model(*dummy_input) if isinstance(v1_logits, list): assert len(v1_logits) == len(v2_logits) - for idx in range(len(v1_logits)): - assert torch.allclose(v1_logits[idx], v2_logits[idx]) + for v1_logit, v2_logit in zip(v1_logits, v2_logits): + tick = (v1_logit.max() - v1_logit.min()) / (2**16 - 1) # Tolerate off-by-one precision error + assert torch.allclose(v1_logit, v2_logit, rtol=1e-3, atol=tick) else: - assert torch.allclose(v1_logits, v2_logits) + tick = (v1_logits.max() - v1_logits.min()) / (2**16 - 1) # Tolerate off-by-one precision error + assert torch.allclose(v1_logits, v2_logits, rtol=1e-3, atol=tick) - @pytest.mark.parametrize('model_and_input_shape', [(models_to_test.SingleResidual, (1, 3, 32, 32)), + @pytest.mark.parametrize('model_cls,input_shape', [(models_to_test.SingleResidual, (1, 3, 32, 32)), (models_to_test.SoftMaxAvgPoolModel, (1, 4, 256, 512)), (models_to_test.QuantSimTinyModel, (1, 3, 32, 32))]) - def test_default_config(self, model_and_input_shape, quant_scheme): - model_cls, input_shape = model_and_input_shape + def test_default_config(self, model_cls, input_shape, quant_scheme, seed): + set_seed(seed) model = model_cls() dummy_input = torch.randn(input_shape) self.check_qsim_logit_consistency(CONFIG_DEFAULT, quant_scheme, model, dummy_input) - @pytest.mark.parametrize('model_and_input_shape', [(models_to_test.SingleResidual, (1, 3, 32, 32)), + @pytest.mark.parametrize('model_cls,input_shape', [(models_to_test.SingleResidual, (1, 3, 32, 32)), (models_to_test.QuantSimTinyModel, (1, 3, 32, 32))]) - def test_param_quant(self, model_and_input_shape, quant_scheme): - model_cls, input_shape = model_and_input_shape + def test_param_quant(self, model_cls, input_shape, quant_scheme, seed): + set_seed(seed) model = model_cls() dummy_input = torch.randn(input_shape) self.check_qsim_logit_consistency(CONFIG_PARAM_QUANT, quant_scheme, model, dummy_input) - @pytest.mark.parametrize('model_and_input_shape', [(models_to_test.SingleResidual, (1, 3, 32, 32)), + @pytest.mark.parametrize('model_cls,input_shape', [(models_to_test.SingleResidual, (1, 3, 32, 32)), (models_to_test.QuantSimTinyModel, (1, 3, 32, 32))]) - def test_op_specific_quant(self, model_and_input_shape, quant_scheme): - model_cls, input_shape = model_and_input_shape + def test_op_specific_quant(self, model_cls, input_shape, quant_scheme, seed): + set_seed(seed) model = model_cls() dummy_input = torch.randn(input_shape) # Check per-tensor quantization for conv op @@ -256,17 +272,20 @@ def test_op_specific_quant(self, model_and_input_shape, quant_scheme): # Check per-channel quantization for conv op self.check_qsim_logit_consistency(CONFIG_OP_SPECIFIC_QUANT_PER_CHANNEL, quant_scheme, model, dummy_input) - def test_supergroup(self, quant_scheme): + def test_supergroup(self, quant_scheme, seed): + set_seed(seed) model = models_to_test.QuantSimTinyModel() dummy_input = torch.randn(1, 3, 32, 32) self.check_qsim_logit_consistency(CONFIG_SUPERGROUP, quant_scheme, 
model, dummy_input) - def test_multi_input(self, quant_scheme): + def test_multi_input(self, quant_scheme, seed): + set_seed(seed) model = models_to_test.MultiInput() dummy_input = (torch.rand(1, 3, 32, 32), torch.rand(1, 3, 20, 20)) self.check_qsim_logit_consistency(CONFIG_DEFAULT, quant_scheme, model, dummy_input) - def test_multi_output(self, quant_scheme): + def test_multi_output(self, quant_scheme, seed): + set_seed(seed) model = models_to_test.ModelWith5Output() dummy_input = torch.randn(1, 3, 224, 224) self.check_qsim_logit_consistency(CONFIG_DEFAULT, quant_scheme, model, dummy_input) diff --git a/TrainingExtensions/torch/test/python/experimental/v2/models_/models_to_test.py b/TrainingExtensions/torch/test/python/experimental/v2/models_/models_to_test.py index 96be5153cdd..85c15bf7798 100644 --- a/TrainingExtensions/torch/test/python/experimental/v2/models_/models_to_test.py +++ b/TrainingExtensions/torch/test/python/experimental/v2/models_/models_to_test.py @@ -40,6 +40,7 @@ from torch import nn from aimet_torch import elementwise_ops +from aimet_torch.experimental.v2.nn.fake_quant import FakeQuantizationMixin class SimpleConditional(torch.nn.Module): @@ -395,3 +396,19 @@ def forward(self, *inputs): x = x.view(x.size(0), -1) x = self.fc(x) return x + + +@FakeQuantizationMixin.implements(ModuleWith5Output) +class FakeQuantizationModuleWith5Output(FakeQuantizationMixin, ModuleWith5Output): + def __quant_init__(self): + super().__quant_init__() + self.output_quantizers = torch.nn.ModuleList([None, None, None, None, None]) + + def quantized_forward(self, input): + if self.input_quantizers[0]: + input = self.input_quantizers[0](input) + outputs = super().forward(input) + return tuple( + quantizer(out) if quantizer else out + for out, quantizer in zip(outputs, self.output_quantizers) + ) From 457b0d062faf39a98266eda84e72897e96ad1145 Mon Sep 17 00:00:00 2001 From: Kyunggeun Lee Date: Wed, 31 Jan 2024 15:04:45 -0800 Subject: [PATCH 3/4] Make quantsim pass logit-based A/B test Signed-off-by: Kyunggeun Lee --- .../experimental/v2/nn/fake_quant.py | 19 +++++++++++++++++++ .../experimental/v2/nn/quant_base.py | 2 -- .../v2/ab_test/test_quantsim_logits.py | 1 - 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/TrainingExtensions/torch/src/python/aimet_torch/experimental/v2/nn/fake_quant.py b/TrainingExtensions/torch/src/python/aimet_torch/experimental/v2/nn/fake_quant.py index 4937db60e30..eb90e513a47 100644 --- a/TrainingExtensions/torch/src/python/aimet_torch/experimental/v2/nn/fake_quant.py +++ b/TrainingExtensions/torch/src/python/aimet_torch/experimental/v2/nn/fake_quant.py @@ -36,6 +36,8 @@ # ============================================================================= """Fake-quantized modules""" +import contextlib +import itertools from collections import OrderedDict from typing import Type, Optional, Tuple, List, Dict @@ -46,6 +48,7 @@ from aimet_torch.experimental.v2.nn.quant_base import BaseQuantizationMixin from aimet_torch.experimental.v2.quantization.quantizers import QuantizerBase +from aimet_torch.experimental.v2.utils import patch_attr import aimet_torch.elementwise_ops as aimet_ops @@ -72,6 +75,22 @@ class FakeQuantizationMixin(BaseQuantizationMixin): # pylint: disable=abstract-m cls_to_qcls = OrderedDict() # original class -> quantized class qcls_to_cls = OrderedDict() # quantized class -> original class + @contextlib.contextmanager + def compute_encodings(self): + def no_op(input: Tensor): # pylint: disable=redefined-builtin + return input + + with contextlib.ExitStack() as stack: + for quantizer in itertools.chain(self.input_quantizers, self.output_quantizers): + if not quantizer: + continue + # Set input/output quantizers into pass-through mode during compute_encodings + # NOTE: This behavior is for backward-compatibility with V1 quantsim. + stack.enter_context(patch_attr(quantizer, 'forward', no_op)) + + with super().compute_encodings(): + yield + def export_input_encodings(self) -> List[List[Dict]]: """ Returns a list of input encodings, each represented as a List of Dicts diff --git a/TrainingExtensions/torch/src/python/aimet_torch/experimental/v2/nn/quant_base.py b/TrainingExtensions/torch/src/python/aimet_torch/experimental/v2/nn/quant_base.py index f4632e5ba8d..f9f547ff994 100644 --- a/TrainingExtensions/torch/src/python/aimet_torch/experimental/v2/nn/quant_base.py +++ b/TrainingExtensions/torch/src/python/aimet_torch/experimental/v2/nn/quant_base.py @@ -110,8 +110,6 @@ def _compute_param_encodings(self, overwrite: bool): def compute_encodings(self): """ Observe inputs and update quantization parameters based on the input statistics. - During ``compute_encodings`` is enabled, the input/output quantizers will forward perform - dynamic quantization using the batch statistics. """ self._compute_param_encodings(overwrite=True) diff --git a/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_logits.py b/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_logits.py index e94276680d4..b6aac9e1465 100644 --- a/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_logits.py +++ b/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_logits.py @@ -197,7 +197,6 @@ def set_seed(seed): random.seed(seed) -@pytest.mark.skip("Skip tests until v2 implementation is done") @pytest.mark.parametrize('quant_scheme', [QuantScheme.post_training_tf, # QuantScheme.post_training_percentile, # TODO: not implemented # QuantScheme.training_range_learning_with_tf_init, # TODO: not implemented From 41a0b94a30f4f6dcc59281c4541cf36050735625 Mon Sep 17 00:00:00 2001 From: Kyunggeun Lee Date: Wed, 31 Jan 2024 20:17:58 -0800 Subject: [PATCH 4/4] Add A/B test against V1 range learning quantizer Signed-off-by: Kyunggeun Lee --- .../torch/src/python/aimet_torch/quantsim.py | 7 ++++++- .../python/experimental/v2/ab_test/test_quantsim_logits.py | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/TrainingExtensions/torch/src/python/aimet_torch/quantsim.py b/TrainingExtensions/torch/src/python/aimet_torch/quantsim.py index e960c021576..f4c8c7af91c 100644 --- a/TrainingExtensions/torch/src/python/aimet_torch/quantsim.py +++ b/TrainingExtensions/torch/src/python/aimet_torch/quantsim.py @@ -765,7 +765,12 @@ def replace_wrappers_for_quantize_dequantize(self): """ if self._quant_scheme == QuantScheme.training_range_learning_with_tf_init or self._quant_scheme == \ QuantScheme.training_range_learning_with_tf_enhanced_init: - device = utils.get_device(self.model) + try: + device = utils.get_device(self.model) + except StopIteration: + # Model doesn't have any parameters. + # Set device to cpu by default.
+ device = torch.device('cpu') self._replace_quantization_wrapper(self.model, device) diff --git a/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_logits.py b/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_logits.py index b6aac9e1465..69c4326481e 100644 --- a/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_logits.py +++ b/TrainingExtensions/torch/test/python/experimental/v2/ab_test/test_quantsim_logits.py @@ -198,6 +198,7 @@ def set_seed(seed): @pytest.mark.parametrize('quant_scheme', [QuantScheme.post_training_tf, + QuantScheme.training_range_learning_with_tf_init, # QuantScheme.post_training_percentile, # TODO: not implemented # QuantScheme.training_range_learning_with_tf_init, # TODO: not implemented ]) @@ -288,4 +289,3 @@ def test_multi_output(self, quant_scheme, seed): model = models_to_test.ModelWith5Output() dummy_input = torch.randn(1, 3, 224, 224) self.check_qsim_logit_consistency(CONFIG_DEFAULT, quant_scheme, model, dummy_input) -
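
Note on the tolerance criterion in PATCH 2/4 (a standalone sketch, not part of any patch): the tests budget an absolute tolerance of one quantization step, tick = (max - min) / (2**16 - 1), because two correct fake-quantization implementations may round a borderline value to adjacent points of the 16-bit grid. The fake_quantize helper below is hypothetical, written only to illustrate that reasoning; it is not AIMET's API.

import torch

def fake_quantize(x: torch.Tensor, bitwidth: int = 16) -> torch.Tensor:
    # Hypothetical affine fake-quantizer: snap x onto a (2**bitwidth - 1)-step
    # grid spanning [x.min(), x.max()], then map back to real values.
    qmax = 2 ** bitwidth - 1
    scale = (x.max() - x.min()) / qmax              # one grid step ("tick")
    offset = torch.round(x.min() / scale)
    x_q = torch.clamp(torch.round(x / scale) - offset, 0, qmax)
    return (x_q + offset) * scale

x = torch.randn(100)
v1_logits = fake_quantize(x)
# A second implementation whose rounding differs on half-way values lands at
# most one grid point away, so the comparison allows exactly one tick of atol.
v2_logits = fake_quantize(x + 1e-7)
tick = ((v1_logits.max() - v1_logits.min()) / (2 ** 16 - 1)).item()
assert torch.allclose(v1_logits, v2_logits, rtol=1e-3, atol=tick)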
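
Note on PATCH 3/4 (a standalone sketch, not part of any patch): during compute_encodings, the mixin patches every input/output quantizer's forward into a no-op so that calibration observes unquantized activations, matching V1 quantsim behavior. The patch_attr below is an assumed equivalent of aimet_torch.experimental.v2.utils.patch_attr, and ToyQuantizer is illustrative only.

import contextlib
import torch

@contextlib.contextmanager
def patch_attr(obj, attr, new_value):
    # Assumed behavior: temporarily override an attribute, restoring it on exit.
    old_value = getattr(obj, attr)
    try:
        setattr(obj, attr, new_value)
        yield
    finally:
        setattr(obj, attr, old_value)

class ToyQuantizer(torch.nn.Module):
    # Stand-in for a real quantizer: rounds its input to integers.
    def forward(self, x):
        return torch.round(x)

quantizer = ToyQuantizer()
x = torch.tensor([0.4, 1.6])

with patch_attr(quantizer, 'forward', lambda t: t):
    calibration_out = quantizer(x)  # pass-through: calibration sees raw values
normal_out = quantizer(x)           # quantization behavior restored on exit

assert torch.equal(calibration_out, x)
assert torch.equal(normal_out, torch.tensor([0., 2.]))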
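
Note on the device fallback in PATCH 4/4 (a standalone sketch, not part of any patch): utils.get_device conventionally reads the device of the model's first parameter, so a parameterless model exhausts the parameters() iterator and raises StopIteration. The get_device below is a hypothetical equivalent, shown only to motivate the try/except and the CPU default.

import torch

def get_device(model: torch.nn.Module) -> torch.device:
    # Hypothetical equivalent of aimet_torch.utils.get_device: take the device
    # of the first parameter; next() raises StopIteration if there is none.
    return next(model.parameters()).device

class ParamlessModel(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x)  # no learnable parameters anywhere in the module

try:
    device = get_device(ParamlessModel())
except StopIteration:
    device = torch.device('cpu')  # the fallback that PATCH 4/4 adds
assert device == torch.device('cpu')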