Commit 683d95a: fix tests
ofirgo committed Jan 1, 2025 (1 parent: 42be1cb)
Showing 8 changed files with 14 additions and 93 deletions.
File 1 of 8 (path not captured):

@@ -45,7 +45,7 @@ def get_target_platform_capabilities(fw_name: str,
     if target_platform_name == DEFAULT_TP_MODEL:
         return get_tp_model_imx500_v1()
 
-    assert target_platform_version == 'v1', \
+    assert target_platform_version == 'v1' or target_platform_version is None, \
         "The usage of get_target_platform_capabilities API is supported only with the default TPC ('v1')."
 
     if target_platform_name == IMX500_TP_MODEL:
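This hunk relaxes the version check: leaving target_platform_version unset (None) now also resolves to the default 'v1' TPC instead of tripping the assert. A minimal usage sketch, assuming MCT's public get_target_platform_capabilities entry point and the 'imx500' platform name; both are assumptions inferred from identifiers in the diff, not verified against this revision:

    import model_compression_toolkit as mct

    # After this change, passing target_platform_version=None is accepted and
    # falls through to the default 'v1' TPC. The 'imx500' platform string is
    # an assumption based on the IMX500_TP_MODEL constant above.
    tpc = mct.get_target_platform_capabilities(fw_name='pytorch',
                                               target_platform_name='imx500',
                                               target_platform_version=None)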
File 2 of 8 (path not captured):

@@ -178,7 +178,6 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.OPSET_GET_ITEM.value, qc_options=no_quantization_config))
     operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.OPSET_RESHAPE.value, qc_options=no_quantization_config))
     operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.OPSET_UNSQUEEZE.value, qc_options=no_quantization_config))
-    operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.OPSET_BATCH_NORM.value, qc_options=no_quantization_config))
     operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.OPSET_SIZE.value, qc_options=no_quantization_config))
     operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.OPSET_PERMUTE.value, qc_options=no_quantization_config))
     operator_set.append(schema.OperatorsSet(name=schema.OperatorSetNames.OPSET_TRANSPOSE.value, qc_options=no_quantization_config))
File 3 of 8 (path not captured):

@@ -34,6 +34,7 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, OpQuantizationConfig
+from tests.common_tests.helpers.tpcs_for_tests.v1.tp_model import generate_tp_model
 
 tp = mct.target_platform
 
@@ -62,60 +63,9 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
     return eight_bits, mixed_precision_cfg_list, default_config
 
 
-def generate_tp_model(default_config: OpQuantizationConfig,
-                      base_config: OpQuantizationConfig,
-                      mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformModel:
-    default_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple(
-        [default_config]))
-
-    mixed_precision_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple(mixed_precision_cfg_list),
-                                                                             base_config=base_config)
-
-    operator_set, fusing_patterns = [], []
-
-    operator_set.append(schema.OperatorsSet(name="NoQuantization",
-                                            qc_options=default_configuration_options
-                                            .clone_and_edit(enable_activation_quantization=False)
-                                            .clone_and_edit_weight_attribute(enable_weights_quantization=False)))
-
-    conv = schema.OperatorsSet(name="Conv", qc_options=mixed_precision_configuration_options)
-    fc = schema.OperatorsSet(name="FullyConnected", qc_options=mixed_precision_configuration_options)
-
-    any_relu = schema.OperatorsSet(name="AnyReLU")
-    add = schema.OperatorsSet(name="Add")
-    sub = schema.OperatorsSet(name="Sub")
-    mul = schema.OperatorsSet(name="Mul")
-    div = schema.OperatorsSet(name="Div")
-    prelu = schema.OperatorsSet(name="PReLU")
-    swish = schema.OperatorsSet(name="Swish")
-    sigmoid = schema.OperatorsSet(name="Sigmoid")
-    tanh = schema.OperatorsSet(name="Tanh")
-
-    operator_set.extend([conv, fc, any_relu, add, sub, mul, div, prelu, swish, sigmoid, tanh])
-
-    activations_after_conv_to_fuse = schema.OperatorSetConcat(operators_set=(any_relu, swish, prelu, sigmoid, tanh))
-    activations_after_fc_to_fuse = schema.OperatorSetConcat(operators_set=(any_relu, swish, sigmoid))
-    any_binary = schema.OperatorSetConcat(operators_set=(add, sub, mul, div))
-
-    fusing_patterns.append(schema.Fusing(operator_groups=(conv, activations_after_conv_to_fuse)))
-    fusing_patterns.append(schema.Fusing(operator_groups=(fc, activations_after_fc_to_fuse)))
-    fusing_patterns.append(schema.Fusing(operator_groups=(any_binary, any_relu)))
-
-    generated_tpc = schema.TargetPlatformModel(
-        default_qco=default_configuration_options,
-        tpc_minor_version=None,
-        tpc_patch_version=None,
-        tpc_platform_type=None,
-        operator_set=tuple(operator_set),
-        fusing_patterns=tuple(fusing_patterns),
-        add_metadata=False, name=name)
-    return generated_tpc
-
-
-def get_int8_tpc(edit_weights_params_dict={}, edit_act_params_dict={}) -> tp.TargetPlatformCapabilities:
+def get_int8_tpc(edit_weights_params_dict={}, edit_act_params_dict={}) -> tp.TargetPlatformModel:
     default_tp_model = get_tp_model(edit_weights_params_dict, edit_act_params_dict)
-    return generate_keras_tpc(name='int8_tpc', tp_model=default_tp_model)
+    return default_tp_model
 
 
 def generate_keras_tpc(name: str, tp_model: schema.TargetPlatformModel):
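The local generate_tp_model copy is deleted in favor of the shared helper imported from tests.common_tests.helpers.tpcs_for_tests.v1.tp_model, and get_int8_tpc now returns the TP model itself rather than wrapping it with generate_keras_tpc. A short sketch of the new call pattern, using only names that appear in this diff (the SYMMETRIC variant is taken from the exporter test below):

    from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod

    # The return value is now the TargetPlatformModel itself, so tests pass it
    # straight to MCT instead of a framework-specific TPC wrapper.
    tp_model = get_int8_tpc(
        edit_weights_params_dict={'weights_quantization_method': QuantizationMethod.SYMMETRIC})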
File 4 of 8 (path not captured):

@@ -18,8 +18,7 @@
 import tests.keras_tests.exporter_tests.constants as constants
 from model_compression_toolkit.core.keras.constants import KERNEL
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
-from tests.common_tests.helpers.tpcs_for_tests.v1_pot.tp_model import get_tp_model as get_tp_model_pot
-from tests.common_tests.helpers.tpcs_for_tests.v1.tp_model import get_tp_model as get_tp_model_symmetric
+from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tp_model import get_int8_tpc
 from tests.keras_tests.exporter_tests.tflite_int8.tflite_int8_exporter_base_test import TFLiteINT8ExporterBaseTest
 from tests.keras_tests.utils import get_layers_from_model_by_type
 
@@ -35,7 +34,7 @@ def get_model(self):
         return self.get_one_layer_model(layers.Conv2D(6, 5))
 
     def get_tpc(self):
-        return get_tp_model_symmetric()
+        return get_int8_tpc(edit_weights_params_dict={'weights_quantization_method': QuantizationMethod.SYMMETRIC})
 
     def run_checks(self):
         # Fetch quantized weights from int8 model tensors
@@ -69,7 +68,7 @@ def __init__(self):
         self.weights_diff_tolerance = 0
 
     def get_tpc(self):
-        return get_tp_model_pot()
+        return get_int8_tpc(edit_weights_params_dict={'weights_quantization_method': QuantizationMethod.POWER_OF_TWO})
 
     def run_checks(self):
         super(TestConv2DPOTTFLiteINT8Exporter, self).run_checks()
File 5 of 8 (path not captured):

@@ -24,7 +24,6 @@
 
 import model_compression_toolkit as mct
 import tests.keras_tests.exporter_tests.constants as constants
-from tests.common_tests.helpers.tpcs_for_tests.v1.tp_model import get_tp_model
 from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tp_model import get_int8_tpc
 
 
@@ -74,7 +73,7 @@ def get_input_shape(self):
         return [(16, 16, 3)]
 
     def get_tpc(self):
-        return get_tp_model()
+        return get_int8_tpc()
 
     def __get_repr_dataset(self):
         for _ in range(1):
File 6 of 8 (path not captured):

@@ -153,7 +153,7 @@ def get_tpc(self):
                                                  mp_bitwidth_candidates_list=[(8, 8), (8, 4), (8, 2),
                                                                               (4, 8), (4, 4), (4, 2),
                                                                               (2, 8), (2, 4), (2, 2)],
-                                                 custom_opsets=['Concat']),
+                                                 custom_opsets=['Concat']),
                                  test_name='mixed_precision_activation_model',
                                  tpc_name='mixed_precision_activation_pytorch_test')
File 7 of 8: tests/pytorch_tests/model_tests/feature_models/qat_test.py (3 changes: 2 additions & 1 deletion)

@@ -316,7 +316,8 @@ def get_tpc(self):
     def run_test(self):
         self._gen_fixed_input()
         model_float = self.create_networks()
-        config = mct.core.CoreConfig(mct.core.QuantizationConfig(shift_negative_activation_correction=False))
+        config = mct.core.CoreConfig(mct.core.QuantizationConfig(shift_negative_activation_correction=False,
+                                                                 custom_tpc_opset_to_layer={"Input": ([DummyPlaceHolder],)}))
         ru = mct.core.ResourceUtilization(weights_memory=50, activation_memory=40)
         qat_ready_model, quantization_info = mct.qat.pytorch_quantization_aware_training_init_experimental(model_float,
                                                                                                            self.representative_data_gen_experimental,
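The QAT test now supplies the opset-to-layer mapping directly through QuantizationConfig instead of relying on a custom TPC. A sketch of the mapping shape in isolation; DummyPlaceHolder is the test helper type referenced in the diff, and its import path here is an assumption:

    import model_compression_toolkit as mct
    # Assumed import path for the placeholder node type used by MCT's PyTorch reader:
    from model_compression_toolkit.core.pytorch.reader.node_holders import DummyPlaceHolder

    # custom_tpc_opset_to_layer maps an opset name ("Input") to a tuple whose
    # first element is the list of layer types attached to that opset, exactly
    # as in the added line above.
    config = mct.core.CoreConfig(
        mct.core.QuantizationConfig(shift_negative_activation_correction=False,
                                    custom_tpc_opset_to_layer={"Input": ([DummyPlaceHolder],)}))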
File 8 of 8: tests/pytorch_tests/tpc_pytorch.py (33 changes: 3 additions & 30 deletions)

@@ -33,33 +33,6 @@ def get_weights_quantization_disabled_pytorch_tpc(name):
     return get_pytorch_test_tpc_dict(tp, name, name)
 
 
-def get_mp_activation_pytorch_tpc_dict(tpc_model, test_name, tpc_name, custom_opsets_to_layer={}):
-    # TODO: this is a light implementation of get_mp_activation_pytorch_tpc_dict.
-    #  The full implementation needs to be adjusted and reactivated once custom layer opset
-    #  support is implemented in the TPC. It should still return a TP model (!) but use a
-    #  custom opset-to-operators mapping so MCT can construct the desired TPC.
-
-    return {test_name: tpc_model}
-
-# def get_mp_activation_pytorch_tpc_dict(tpc_model, test_name, tpc_name, custom_opsets_to_layer={}):
-#     op_sets_to_layer_add = {
-#         "Input": [DummyPlaceHolder],
-#     }
-#
-#     op_sets_to_layer_add.update(custom_opsets_to_layer)
-#
-#     # We assume a standard TP model with standard operator-set names;
-#     # otherwise the TPC must be generated per test rather than with this generic function.
-#     attr_mapping = {'Conv': {KERNEL_ATTR: DefaultDict(default_value=PYTORCH_KERNEL),
-#                              BIAS_ATTR: DefaultDict(default_value=BIAS)},
-#                     'FullyConnected': {KERNEL_ATTR: DefaultDict(default_value=PYTORCH_KERNEL),
-#                                        BIAS_ATTR: DefaultDict(default_value=BIAS)}}
-#
-#     attach2pytorch = AttachTpcToPytorch()
-#     tpc = attach2pytorch.attach(tpc_model)
-#     return {
-#         test_name: generate_test_tpc(name=tpc_name,
-#                                      tp_model=tpc_model,
-#                                      base_tpc=tpc,
-#                                      op_sets_to_layer_add=op_sets_to_layer_add,
-#                                      attr_mapping=attr_mapping),
-#     }
+def get_mp_activation_pytorch_tpc_dict(tpc_model, test_name, tpc_name):
+    # This is a legacy helper kept to preserve test usability.
+    return {test_name: tpc_model}
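With the commented-out full implementation removed, the helper only keys the prepared TP model by test name; any opset customization now has to happen when the TP model itself is built (for example via custom_tpc_opset_to_layer, as in the QAT test above). A minimal sketch of the remaining call pattern, with my_tp_model as a hypothetical model prepared by the test:

    # The helper is now a thin dict wrapper; tpc_name is accepted but unused
    # by the body shown above.
    tpc_dict = get_mp_activation_pytorch_tpc_dict(
        tpc_model=my_tp_model,  # hypothetical TP model built by the test
        test_name='mixed_precision_activation_model',
        tpc_name='mixed_precision_activation_pytorch_test')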
