diff --git a/model_compression_toolkit/__init__.py b/model_compression_toolkit/__init__.py index 4d45628ef..723328b9b 100644 --- a/model_compression_toolkit/__init__.py +++ b/model_compression_toolkit/__init__.py @@ -14,7 +14,6 @@ # ============================================================================== from model_compression_toolkit.defaultdict import DefaultDict -from model_compression_toolkit.target_platform_capabilities import target_platform from model_compression_toolkit.target_platform_capabilities.tpc_models.get_target_platform_capabilities import get_target_platform_capabilities from model_compression_toolkit import core from model_compression_toolkit.logger import set_log_folder diff --git a/model_compression_toolkit/core/common/framework_info.py b/model_compression_toolkit/core/common/framework_info.py index 790773d8b..c80ecf4e3 100644 --- a/model_compression_toolkit/core/common/framework_info.py +++ b/model_compression_toolkit/core/common/framework_info.py @@ -18,10 +18,8 @@ from enum import Enum from typing import Dict, Any, List - - +from mct_quantizers import QuantizationMethod from model_compression_toolkit.defaultdict import DefaultDict -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod class ChannelAxis(Enum): diff --git a/model_compression_toolkit/core/common/fusion/layer_fusing.py b/model_compression_toolkit/core/common/fusion/layer_fusing.py index e76aad1a3..1f2981eb3 100644 --- a/model_compression_toolkit/core/common/fusion/layer_fusing.py +++ b/model_compression_toolkit/core/common/fusion/layer_fusing.py @@ -16,7 +16,8 @@ from typing import Any, List from model_compression_toolkit.core.common.graph.base_graph import Graph from model_compression_toolkit.core.common.graph.base_node import BaseNode -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \ + FrameworkQuantizationCapabilities from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.layer_filter_params import LayerFilterParams diff --git a/model_compression_toolkit/core/common/graph/base_graph.py b/model_compression_toolkit/core/common/graph/base_graph.py index 2b64787b2..580d5103c 100644 --- a/model_compression_toolkit/core/common/graph/base_graph.py +++ b/model_compression_toolkit/core/common/graph/base_graph.py @@ -32,8 +32,9 @@ from model_compression_toolkit.core.common.pruning.pruning_section import PruningSection from model_compression_toolkit.core.common.user_info import UserInformation from model_compression_toolkit.logger import Logger -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import \ - FrameworkQuantizationCapabilities, LayerFilterParams +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \ + FrameworkQuantizationCapabilities OutTensor = namedtuple('OutTensor', 'node node_out_index') diff --git a/model_compression_toolkit/core/common/graph/base_node.py b/model_compression_toolkit/core/common/graph/base_node.py index c246fb147..23b1410e2 100644 --- a/model_compression_toolkit/core/common/graph/base_node.py +++ 
b/model_compression_toolkit/core/common/graph/base_node.py @@ -25,7 +25,9 @@ from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import QuantizationConfigOptions, \ OpQuantizationConfig from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import max_input_activation_n_bits -from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities, LayerFilterParams +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \ + FrameworkQuantizationCapabilities class BaseNode: diff --git a/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py b/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py index f0308408b..6a5203886 100644 --- a/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py +++ b/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py @@ -17,7 +17,8 @@ from model_compression_toolkit.core import ResourceUtilization, FrameworkInfo from model_compression_toolkit.core.common import Graph from model_compression_toolkit.logger import Logger -from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \ + FrameworkQuantizationCapabilities def filter_candidates_for_mixed_precision(graph: Graph, diff --git a/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py b/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py index b37067bc9..576a95386 100644 --- a/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +++ b/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py @@ -25,9 +25,10 @@ from model_compression_toolkit.core.common.framework_implementation import FrameworkImplementation from model_compression_toolkit.core.common.graph.edge import EDGE_SINK_INDEX from model_compression_toolkit.core.graph_prep_runner import graph_preparation_runner -from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import QuantizationConfigOptions from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.ru_methods import calc_graph_cuts +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \ + FrameworkQuantizationCapabilities def compute_resource_utilization_data(in_model: Any, diff --git a/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py b/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py index 7ecdedd78..c0632c664 100644 --- a/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py +++ b/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py @@ -24,7 +24,8 @@ from model_compression_toolkit.core.common.pruning.pruning_framework_implementation 
import PruningFrameworkImplementation from model_compression_toolkit.core.common.pruning.mask.per_simd_group_mask import PerSIMDGroupMask from model_compression_toolkit.logger import Logger -from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \ + FrameworkQuantizationCapabilities class GreedyMaskCalculator: diff --git a/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py b/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py index 2cbf47af5..93b7574d7 100644 --- a/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py +++ b/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py @@ -23,7 +23,6 @@ from model_compression_toolkit.core.common.pruning.memory_calculator import MemoryCalculator from model_compression_toolkit.core.common.pruning.pruning_framework_implementation import PruningFrameworkImplementation from model_compression_toolkit.logger import Logger -from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities class MaskIndicator(Enum): """ diff --git a/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py b/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py index 79c03336d..d763463fe 100644 --- a/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py +++ b/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py @@ -23,7 +23,6 @@ from model_compression_toolkit.core.common.pruning.memory_calculator import MemoryCalculator from model_compression_toolkit.core.common.pruning.pruning_framework_implementation import PruningFrameworkImplementation from model_compression_toolkit.logger import Logger -from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities class PerSIMDGroupMask: def __init__(self, diff --git a/model_compression_toolkit/core/common/pruning/pruner.py b/model_compression_toolkit/core/common/pruning/pruner.py index 8e2de586a..f2b16a794 100644 --- a/model_compression_toolkit/core/common/pruning/pruner.py +++ b/model_compression_toolkit/core/common/pruning/pruner.py @@ -29,7 +29,9 @@ from model_compression_toolkit.core.common.pruning.pruning_info import PruningInfo, \ unroll_simd_scores_to_per_channel_scores from model_compression_toolkit.logger import Logger -from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import \ + FrameworkQuantizationCapabilities + class Pruner: """ diff --git a/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py b/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py index 76ef891cc..beba61067 100644 --- a/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py +++ b/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py @@ -15,8 +15,7 @@ import copy from typing import List -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod - +from mct_quantizers import QuantizationMethod from model_compression_toolkit.core.common import Graph, BaseNode from model_compression_toolkit.constants import FLOAT_BITWIDTH from 
model_compression_toolkit.core.common.quantization.candidate_node_quantization_config import \ diff --git a/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py b/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py index 6318fb268..ab45a9891 100644 --- a/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py +++ b/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py @@ -16,8 +16,8 @@ from collections.abc import Callable from functools import partial +from mct_quantizers import QuantizationMethod from model_compression_toolkit.logger import Logger -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod from model_compression_toolkit.core.common.quantization.quantizers.lut_kmeans_quantizer import lut_kmeans_quantizer from model_compression_toolkit.core.common.quantization.quantizers.uniform_quantizers import power_of_two_quantizer, \ symmetric_quantizer, uniform_quantizer diff --git a/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py b/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py index 84b790906..88eb152b6 100644 --- a/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py +++ b/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py @@ -16,8 +16,8 @@ from collections.abc import Callable from functools import partial +from mct_quantizers import QuantizationMethod from model_compression_toolkit.logger import Logger -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod from model_compression_toolkit.core.common.quantization.quantization_params_generation.lut_kmeans_params import \ lut_kmeans_tensor, lut_kmeans_histogram from model_compression_toolkit.core.common.quantization.quantization_params_generation.symmetric_selection import \ diff --git a/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py b/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py index 061711b13..69ad9de78 100644 --- a/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py +++ b/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py @@ -16,11 +16,11 @@ from typing import Tuple, Callable, List, Iterable, Optional import numpy as np import model_compression_toolkit.core.common.quantization.quantization_config as qc +from mct_quantizers import QuantizationMethod from model_compression_toolkit.core.common.hessian import HessianScoresRequest, HessianMode, HessianScoresGranularity, \ HessianInfoService from model_compression_toolkit.core.common.similarity_analyzer import compute_mse, compute_mae, compute_lp_norm from model_compression_toolkit.logger import Logger -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod from model_compression_toolkit.constants import FLOAT_32, NUM_QPARAM_HESSIAN_SAMPLES from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import uniform_quantize_tensor, \ reshape_tensor_for_per_channel_search diff --git a/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py 
b/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py index 2d2241424..787b70388 100644 --- a/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py +++ b/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py @@ -16,6 +16,7 @@ from typing import Union, Tuple, Dict import model_compression_toolkit.core.common.quantization.quantization_config as qc +from mct_quantizers import QuantizationMethod from model_compression_toolkit.constants import MIN_THRESHOLD, THRESHOLD, NUM_QPARAM_HESSIAN_SAMPLES, SIGNED from model_compression_toolkit.core.common.hessian import HessianInfoService from model_compression_toolkit.core.common.quantization.quantization_params_generation.qparams_search import \ @@ -23,7 +24,6 @@ from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import max_power_of_two, get_tensor_max from model_compression_toolkit.core.common.quantization.quantization_params_generation.error_functions import \ get_threshold_selection_tensor_error_function, get_threshold_selection_histogram_error_function -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod from model_compression_toolkit.core.common.similarity_analyzer import compute_mse from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import quantize_tensor diff --git a/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py b/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py index ca3d7c733..052074d3c 100644 --- a/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py +++ b/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py @@ -15,7 +15,7 @@ import numpy as np from typing import Dict, Union -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import Signedness from model_compression_toolkit.core.common.collectors.statistics_collector import BaseStatsCollector from model_compression_toolkit.core.common.quantization import quantization_params_generation diff --git a/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py b/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py index 73cb1077d..524bd863b 100644 --- a/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py +++ b/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py @@ -25,7 +25,7 @@ qparams_symmetric_selection_histogram_search, kl_qparams_symmetric_selection_histogram_search from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import \ get_tensor_max -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from model_compression_toolkit.core.common.similarity_analyzer import compute_mse from 
model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import quantize_tensor diff --git a/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py b/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py index 624c6bd9f..d67834092 100644 --- a/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py +++ b/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py @@ -24,7 +24,7 @@ get_threshold_selection_tensor_error_function, get_threshold_selection_histogram_error_function from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import get_tensor_max, \ get_tensor_min -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from model_compression_toolkit.core.common.similarity_analyzer import compute_mse from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import uniform_quantize_tensor diff --git a/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py b/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py index d83e9e96f..7359cdf1c 100644 --- a/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py +++ b/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py @@ -33,9 +33,10 @@ from model_compression_toolkit.core.common.quantization.quantization_fn_selection import \ get_weights_quantization_fn from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import max_input_activation_n_bits -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OpQuantizationConfig, \ QuantizationConfigOptions +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \ + FrameworkQuantizationCapabilities def set_quantization_configuration_to_graph(graph: Graph, diff --git a/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py b/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py index 256501365..1f17263a4 100644 --- a/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py +++ b/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py @@ -26,7 +26,7 @@ from model_compression_toolkit.core.common.graph.base_graph import Graph from model_compression_toolkit.core.common.graph.base_node import BaseNode from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import AttributeQuantizationConfig diff --git a/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py b/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py index 1ce52016b..f49af59e7 100644 --- a/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py +++ 
b/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py @@ -22,7 +22,7 @@ from model_compression_toolkit.core.common.graph.base_graph import Graph from model_compression_toolkit.core.common.graph.graph_matchers import EdgeMatcher, NodeOperationMatcher from model_compression_toolkit.core.common.graph.base_node import BaseNode -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from model_compression_toolkit.constants import THRESHOLD, RANGE_MIN, RANGE_MAX from model_compression_toolkit.logger import Logger diff --git a/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py b/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py index 1db9fce20..7909cf1a6 100644 --- a/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py +++ b/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py @@ -22,7 +22,7 @@ from model_compression_toolkit.core.common import FrameworkInfo, Graph, BaseNode from model_compression_toolkit.constants import THRESHOLD, SIGNED, SHIFT_NEGATIVE_NON_LINEAR_NUM_BITS from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import AttributeQuantizationConfig from model_compression_toolkit.core.common.quantization.set_node_quantization_config import create_node_activation_qc, \ set_quantization_configs_to_node diff --git a/model_compression_toolkit/core/graph_prep_runner.py b/model_compression_toolkit/core/graph_prep_runner.py index 3f9027330..78d543f15 100644 --- a/model_compression_toolkit/core/graph_prep_runner.py +++ b/model_compression_toolkit/core/graph_prep_runner.py @@ -29,8 +29,9 @@ from model_compression_toolkit.core.common.substitutions.apply_substitutions import substitute from model_compression_toolkit.core.common.substitutions.linear_collapsing_substitution import \ linear_collapsing_substitute -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities from model_compression_toolkit.core.common.visualization.tensorboard_writer import TensorboardWriter +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \ + FrameworkQuantizationCapabilities def graph_preparation_runner(in_model: Any, diff --git a/model_compression_toolkit/core/keras/default_framework_info.py b/model_compression_toolkit/core/keras/default_framework_info.py index d26efed71..7cc9990f0 100644 --- a/model_compression_toolkit/core/keras/default_framework_info.py +++ b/model_compression_toolkit/core/keras/default_framework_info.py @@ -26,7 +26,7 @@ from model_compression_toolkit.defaultdict import DefaultDict from model_compression_toolkit.core.common.framework_info import FrameworkInfo -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from model_compression_toolkit.constants import SOFTMAX_THRESHOLD from model_compression_toolkit.core.keras.constants import SOFTMAX, LINEAR, RELU, SWISH, SIGMOID, IDENTITY, TANH, SELU, \ KERNEL, DEPTHWISE_KERNEL, GELU diff --git 
a/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py b/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py index 704066718..a71af6feb 100644 --- a/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py +++ b/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py @@ -20,8 +20,7 @@ from model_compression_toolkit.core.common.quantization.candidate_node_quantization_config import \ CandidateNodeQuantizationConfig from model_compression_toolkit.logger import Logger -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod -from mct_quantizers import QuantizationTarget +from mct_quantizers import QuantizationTarget, QuantizationMethod from mct_quantizers import mark_quantizer import tensorflow as tf diff --git a/model_compression_toolkit/core/keras/resource_utilization_data_facade.py b/model_compression_toolkit/core/keras/resource_utilization_data_facade.py index 493007d44..87b4a06db 100644 --- a/model_compression_toolkit/core/keras/resource_utilization_data_facade.py +++ b/model_compression_toolkit/core/keras/resource_utilization_data_facade.py @@ -19,8 +19,9 @@ from model_compression_toolkit.logger import Logger from model_compression_toolkit.constants import TENSORFLOW from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities -from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization_data import compute_resource_utilization_data +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ + AttachTpcToKeras from model_compression_toolkit.verify_packages import FOUND_TF if FOUND_TF: @@ -28,8 +29,6 @@ from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation from tensorflow.keras.models import Model - from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ - AttachTpcToKeras from model_compression_toolkit import get_target_platform_capabilities diff --git a/model_compression_toolkit/core/pytorch/default_framework_info.py b/model_compression_toolkit/core/pytorch/default_framework_info.py index f3d965182..93997bb88 100644 --- a/model_compression_toolkit/core/pytorch/default_framework_info.py +++ b/model_compression_toolkit/core/pytorch/default_framework_info.py @@ -19,7 +19,7 @@ from model_compression_toolkit.defaultdict import DefaultDict from model_compression_toolkit.core.common.framework_info import FrameworkInfo -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from model_compression_toolkit.constants import SOFTMAX_THRESHOLD from model_compression_toolkit.core.pytorch.constants import KERNEL from model_compression_toolkit.core.pytorch.quantizer.fake_quant_builder import power_of_two_quantization, \ diff --git a/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py b/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py index 809256a74..4164cf60f 100644 --- 
a/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py +++ b/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py @@ -21,7 +21,7 @@ from model_compression_toolkit.core.common.quantization.candidate_node_quantization_config import \ CandidateNodeQuantizationConfig from model_compression_toolkit.logger import Logger -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from mct_quantizers import QuantizationTarget from mct_quantizers import mark_quantizer diff --git a/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py b/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py index 6feb5e77d..8b0d2c177 100644 --- a/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py +++ b/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py @@ -20,7 +20,7 @@ from model_compression_toolkit.core.common.quantization.candidate_node_quantization_config import \ CandidateNodeQuantizationConfig from model_compression_toolkit.logger import Logger -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from mct_quantizers import QuantizationTarget from mct_quantizers import mark_quantizer diff --git a/model_compression_toolkit/core/runner.py b/model_compression_toolkit/core/runner.py index 1f3b080e6..f3b71668a 100644 --- a/model_compression_toolkit/core/runner.py +++ b/model_compression_toolkit/core/runner.py @@ -44,12 +44,13 @@ from model_compression_toolkit.core.common.mixed_precision.mixed_precision_search_facade import search_bit_width from model_compression_toolkit.core.common.network_editors.edit_network import edit_network_graph from model_compression_toolkit.core.common.quantization.core_config import CoreConfig -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities from model_compression_toolkit.core.common.visualization.final_config_visualizer import \ WeightsFinalBitwidthConfigVisualizer, \ ActivationFinalBitwidthConfigVisualizer from model_compression_toolkit.core.common.visualization.tensorboard_writer import TensorboardWriter, \ finalize_bitwidth_in_tb +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \ + FrameworkQuantizationCapabilities def core_runner(in_model: Any, diff --git a/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py b/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py index 68b464d57..e7d1fa303 100644 --- a/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py +++ b/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py @@ -20,7 +20,7 @@ NodeWeightsQuantizationConfig, NodeActivationQuantizationConfig from model_compression_toolkit.logger import Logger -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from mct_quantizers import QuantizationTarget from mct_quantizers.common.get_quantizers import get_inferable_quantizer_class from mct_quantizers.keras.quantizers import BaseKerasInferableQuantizer diff 
--git a/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py b/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py index 9db12cd2a..e17131b28 100644 --- a/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py +++ b/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py @@ -21,7 +21,7 @@ from model_compression_toolkit.core.common.quantization.node_quantization_config import BaseNodeQuantizationConfig, \ NodeWeightsQuantizationConfig, NodeActivationQuantizationConfig from model_compression_toolkit.logger import Logger -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from mct_quantizers import QuantizationTarget from mct_quantizers.common.get_quantizers import get_inferable_quantizer_class from mct_quantizers import \ diff --git a/model_compression_toolkit/gptq/keras/quantization_facade.py b/model_compression_toolkit/gptq/keras/quantization_facade.py index 2da9b0bc6..a32cf6556 100644 --- a/model_compression_toolkit/gptq/keras/quantization_facade.py +++ b/model_compression_toolkit/gptq/keras/quantization_facade.py @@ -23,6 +23,8 @@ from model_compression_toolkit.logger import Logger from model_compression_toolkit.constants import TENSORFLOW, ACT_HESSIAN_DEFAULT_BATCH_SIZE, GPTQ_HESSIAN_NUM_SAMPLES from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ + AttachTpcToKeras from model_compression_toolkit.verify_packages import FOUND_TF from model_compression_toolkit.core.common.user_info import UserInformation from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig, GPTQHessianScoresConfig, \ @@ -33,7 +35,6 @@ from model_compression_toolkit.core.runner import core_runner from model_compression_toolkit.gptq.runner import gptq_runner from model_compression_toolkit.core.analyzer import analyzer_model_quantization -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities from model_compression_toolkit.metadata import create_model_metadata @@ -48,8 +49,6 @@ from model_compression_toolkit.exporter.model_wrapper import get_exportable_keras_model from model_compression_toolkit import get_target_platform_capabilities from mct_quantizers.keras.metadata import add_metadata - from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ - AttachTpcToKeras # As from TF2.9 optimizers package is changed if version.parse(tf.__version__) < version.parse("2.9"): diff --git a/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py b/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py index e3fb198fe..4900e795e 100644 --- a/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py +++ b/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py @@ -18,7 +18,7 @@ from model_compression_toolkit.gptq import RoundingType from model_compression_toolkit.core.common import max_power_of_two -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import 
QuantizationMethod from mct_quantizers import QuantizationTarget from model_compression_toolkit.gptq.common.gptq_constants import PTQ_THRESHOLD, SCALE_PTQ, \ SOFT_ROUNDING_GAMMA, SOFT_ROUNDING_ZETA, AUXVAR diff --git a/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py b/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py index 2d5f8d7b9..0445e9d1a 100644 --- a/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py +++ b/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py @@ -18,7 +18,7 @@ from model_compression_toolkit.gptq import RoundingType from model_compression_toolkit.trainable_infrastructure.common.constants import FQ_MIN, FQ_MAX -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from mct_quantizers import QuantizationTarget from model_compression_toolkit.gptq.common.gptq_constants import \ SOFT_ROUNDING_GAMMA, SOFT_ROUNDING_ZETA, AUXVAR diff --git a/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py b/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py index 2ed2d6d71..c39721a60 100644 --- a/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py +++ b/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py @@ -19,7 +19,7 @@ import tensorflow as tf from model_compression_toolkit.gptq import RoundingType -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from mct_quantizers import QuantizationTarget from model_compression_toolkit.gptq.common.gptq_constants import AUXVAR, PTQ_THRESHOLD from model_compression_toolkit.gptq.keras.quantizer import quant_utils as qutils diff --git a/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py b/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py index 4724c2036..a4a48f7e2 100644 --- a/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py +++ b/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py @@ -18,7 +18,7 @@ import numpy as np from model_compression_toolkit.core.common import max_power_of_two -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from mct_quantizers import QuantizationTarget, PytorchQuantizationWrapper from model_compression_toolkit.gptq.common.gptq_config import RoundingType from model_compression_toolkit.gptq.pytorch.quantizer.base_pytorch_gptq_quantizer import \ diff --git a/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py b/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py index 8d68fb465..9f63ecffa 100644 --- a/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py +++ b/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py @@ -18,7 +18,7 @@ import numpy as np from model_compression_toolkit.trainable_infrastructure.common.constants import FQ_MIN, FQ_MAX -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from 
mct_quantizers import QuantizationTarget, PytorchQuantizationWrapper from model_compression_toolkit.gptq.common.gptq_config import RoundingType from model_compression_toolkit.gptq.pytorch.quantizer.base_pytorch_gptq_quantizer import \ diff --git a/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py b/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py index 38e2041e0..79ed406c5 100644 --- a/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py +++ b/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py @@ -18,7 +18,7 @@ import numpy as np from model_compression_toolkit.defaultdict import DefaultDict -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from mct_quantizers import QuantizationTarget, PytorchQuantizationWrapper from model_compression_toolkit.gptq.common.gptq_config import RoundingType from model_compression_toolkit.gptq.pytorch.quantizer.base_pytorch_gptq_quantizer import \ diff --git a/model_compression_toolkit/metadata.py b/model_compression_toolkit/metadata.py index f223c5f29..c200d2036 100644 --- a/model_compression_toolkit/metadata.py +++ b/model_compression_toolkit/metadata.py @@ -18,7 +18,8 @@ from model_compression_toolkit.constants import OPERATORS_SCHEDULING, FUSED_NODES_MAPPING, CUTS, MAX_CUT, OP_ORDER, \ OP_RECORD, SHAPE, NODE_OUTPUT_INDEX, NODE_NAME, TOTAL_SIZE, MEM_ELEMENTS from model_compression_toolkit.core.common.graph.memory_graph.compute_graph_max_cut import SchedulerInfo -from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \ + FrameworkQuantizationCapabilities def create_model_metadata(fqc: FrameworkQuantizationCapabilities, diff --git a/model_compression_toolkit/pruning/keras/pruning_facade.py b/model_compression_toolkit/pruning/keras/pruning_facade.py index beda9c3f2..b6e3cdc0b 100644 --- a/model_compression_toolkit/pruning/keras/pruning_facade.py +++ b/model_compression_toolkit/pruning/keras/pruning_facade.py @@ -18,6 +18,8 @@ from model_compression_toolkit import get_target_platform_capabilities from model_compression_toolkit.constants import TENSORFLOW from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ + AttachTpcToKeras from model_compression_toolkit.verify_packages import FOUND_TF from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization from model_compression_toolkit.core.common.pruning.pruner import Pruner @@ -26,7 +28,6 @@ from model_compression_toolkit.core.common.quantization.set_node_quantization_config import set_quantization_configuration_to_graph from model_compression_toolkit.core.graph_prep_runner import read_model_to_graph from model_compression_toolkit.logger import Logger -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities from model_compression_toolkit.core.common.quantization.quantization_config import DEFAULTCONFIG from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL 
@@ -35,8 +36,6 @@ from model_compression_toolkit.core.keras.pruning.pruning_keras_implementation import PruningKerasImplementation from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO from tensorflow.keras.models import Model - from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ - AttachTpcToKeras DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL) diff --git a/model_compression_toolkit/ptq/keras/quantization_facade.py b/model_compression_toolkit/ptq/keras/quantization_facade.py index 3f0e960b1..3c93b8f37 100644 --- a/model_compression_toolkit/ptq/keras/quantization_facade.py +++ b/model_compression_toolkit/ptq/keras/quantization_facade.py @@ -23,11 +23,12 @@ from model_compression_toolkit.logger import Logger from model_compression_toolkit.constants import TENSORFLOW from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ + AttachTpcToKeras from model_compression_toolkit.verify_packages import FOUND_TF from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \ MixedPrecisionQuantizationConfig -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities from model_compression_toolkit.core.runner import core_runner from model_compression_toolkit.ptq.runner import ptq_runner from model_compression_toolkit.metadata import create_model_metadata @@ -42,8 +43,6 @@ from model_compression_toolkit import get_target_platform_capabilities from mct_quantizers.keras.metadata import add_metadata - from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ - AttachTpcToKeras DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL) diff --git a/model_compression_toolkit/qat/keras/README.md b/model_compression_toolkit/qat/keras/README.md index f1cf6593e..42c096bea 100644 --- a/model_compression_toolkit/qat/keras/README.md +++ b/model_compression_toolkit/qat/keras/README.md @@ -72,7 +72,7 @@ import tensorflow as tf from mct_quantizers.keras.quantizers import ActivationUniformInferableQuantizer from model_compression_toolkit.trainable_infrastructure import TrainingMethod, TrainableQuantizerActivationConfig -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from mct_quantizers import mark_quantizer, QuantizationTarget from model_compression_toolkit.qat.keras.quantizer.base_keras_qat_weight_quantizer import BaseKerasQATWeightTrainableQuantizer diff --git a/model_compression_toolkit/qat/keras/quantization_facade.py b/model_compression_toolkit/qat/keras/quantization_facade.py index a38600132..9480c018f 100644 --- a/model_compression_toolkit/qat/keras/quantization_facade.py +++ b/model_compression_toolkit/qat/keras/quantization_facade.py @@ -20,12 +20,13 @@ from model_compression_toolkit.core.common.visualization.tensorboard_writer import init_tensorboard_writer from model_compression_toolkit.logger import Logger from 
model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ + AttachTpcToKeras from model_compression_toolkit.verify_packages import FOUND_TF from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \ MixedPrecisionQuantizationConfig from mct_quantizers import KerasActivationQuantizationHolder -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities from model_compression_toolkit.core.runner import core_runner from model_compression_toolkit.ptq.runner import ptq_runner @@ -55,8 +56,6 @@ from model_compression_toolkit.qat.keras.quantizer.quantization_builder import quantization_builder, \ get_activation_quantizer_holder from model_compression_toolkit.qat.common.qat_config import QATConfig - from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ - AttachTpcToKeras DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL) diff --git a/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py b/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py index 07413b85c..5966bb146 100644 --- a/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py +++ b/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py @@ -21,7 +21,7 @@ from model_compression_toolkit.trainable_infrastructure import TrainingMethod -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper from mct_quantizers import QuantizationTarget, mark_quantizer from model_compression_toolkit.qat.common import THRESHOLD_TENSOR diff --git a/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py b/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py index d41f04c19..80f03d6d5 100644 --- a/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py +++ b/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py @@ -22,7 +22,7 @@ from model_compression_toolkit.trainable_infrastructure import TrainingMethod -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper from mct_quantizers import QuantizationTarget, mark_quantizer from model_compression_toolkit.qat.common import THRESHOLD_TENSOR diff --git a/model_compression_toolkit/qat/pytorch/quantizer/DQA/dqa_uniform.py b/model_compression_toolkit/qat/pytorch/quantizer/DQA/dqa_uniform.py index 931fd53b5..f87dbad22 100755 --- a/model_compression_toolkit/qat/pytorch/quantizer/DQA/dqa_uniform.py +++ b/model_compression_toolkit/qat/pytorch/quantizer/DQA/dqa_uniform.py @@ -20,7 +20,7 @@ from mct_quantizers.pytorch.quantizers import WeightsUniformInferableQuantizer from torch import Tensor -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from 
mct_quantizers import QuantizationMethod from mct_quantizers import QuantizationTarget, PytorchQuantizationWrapper, mark_quantizer from model_compression_toolkit.constants import RANGE_MAX, RANGE_MIN from model_compression_toolkit.trainable_infrastructure import TrainingMethod diff --git a/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py b/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py index 7a4f91ec0..250f6d872 100644 --- a/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py +++ b/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py @@ -18,7 +18,7 @@ import torch import torch.nn as nn -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from mct_quantizers import PytorchQuantizationWrapper from model_compression_toolkit.qat.common import THRESHOLD_TENSOR from model_compression_toolkit import constants as C diff --git a/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py b/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py index 20bd84b56..24acaf30e 100644 --- a/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py +++ b/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py @@ -28,7 +28,7 @@ from model_compression_toolkit.trainable_infrastructure.common.base_trainable_quantizer import VariableGroup from model_compression_toolkit.trainable_infrastructure.common.trainable_quantizer_config import \ TrainableQuantizerWeightsConfig -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from model_compression_toolkit.core.pytorch.utils import to_torch_tensor from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import fix_range_to_include_zero from model_compression_toolkit.qat.pytorch.quantizer.base_pytorch_qat_weight_quantizer import BasePytorchQATWeightTrainableQuantizer diff --git a/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py b/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py index 117a1ca6b..945ad65b5 100644 --- a/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py +++ b/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py @@ -18,7 +18,7 @@ import torch import torch.nn as nn -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from mct_quantizers import PytorchQuantizationWrapper from model_compression_toolkit.qat.common import THRESHOLD_TENSOR from model_compression_toolkit import constants as C diff --git a/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py b/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py index 8e4675841..069ce3290 100644 --- a/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py +++ b/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py @@ -20,7 +20,7 @@ from model_compression_toolkit.constants import RANGE_MAX, RANGE_MIN from model_compression_toolkit.trainable_infrastructure.common.constants import FQ_MIN, FQ_MAX -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from mct_quantizers import QuantizationTarget, 
PytorchQuantizationWrapper from model_compression_toolkit.qat.pytorch.quantizer.base_pytorch_qat_weight_quantizer import BasePytorchQATWeightTrainableQuantizer diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py index b3ee5dff4..02e60eb6a 100644 --- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py +++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py @@ -13,11 +13,11 @@ # limitations under the License. # ============================================================================== -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.current_tpc import get_current_tpc -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.framework_quantization_capabilities import FrameworkQuantizationCapabilities +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.current_tpc import get_current_tpc +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import FrameworkQuantizationCapabilities from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.layer_filter_params import \ LayerFilterParams -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.operations_to_layers import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.operations_to_layers import \ OperationsToLayers, OperationsSetToLayers diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2fw.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2fw.py index f3505c5ef..f2ab071ad 100644 --- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2fw.py +++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2fw.py @@ -3,10 +3,10 @@ from model_compression_toolkit.logger import Logger from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \ OperatorsSet -from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities, \ - OperationsSetToLayers from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import \ + FrameworkQuantizationCapabilities, OperationsSetToLayers class AttachTpcToFramework: diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2keras.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2keras.py index 2f1157868..e4f022ec7 100644 --- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2keras.py +++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2keras.py @@ -16,6 +16,9 @@ import tensorflow as tf from packaging import version +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2fw import \ + 
AttachTpcToFramework from model_compression_toolkit.verify_packages import FOUND_SONY_CUSTOM_LAYERS if FOUND_SONY_CUSTOM_LAYERS: @@ -34,9 +37,6 @@ from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS, \ BIAS_ATTR, KERAS_KERNEL, KERAS_DEPTHWISE_KERNEL from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OperatorSetNames -from model_compression_toolkit.target_platform_capabilities.target_platform import LayerFilterParams -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2fw import \ - AttachTpcToFramework class AttachTpcToKeras(AttachTpcToFramework): diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2pytorch.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2pytorch.py index fe94ea2a4..e7347cd81 100644 --- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2pytorch.py +++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2pytorch.py @@ -28,8 +28,8 @@ from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, PYTORCH_KERNEL, BIAS, \ BIAS_ATTR from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OperatorSetNames -from model_compression_toolkit.target_platform_capabilities.target_platform import LayerFilterParams, Eq -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2fw import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2fw import \ AttachTpcToFramework diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities.py index e5af3d2e7..ecc200bd5 100644 --- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities.py +++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities.py @@ -21,15 +21,15 @@ from model_compression_toolkit.logger import Logger from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import \ get_config_options_by_operators_set, get_default_op_quantization_config, get_opset_by_name -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.operations_to_layers import \ - OperationsToLayers, OperationsSetToLayers -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.framework_quantization_capabilities_component import FrameworkQuantizationCapabilitiesComponent +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.operations_to_layers import OperationsToLayers, \ + OperationsSetToLayers +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities_component import \ + FrameworkQuantizationCapabilitiesComponent from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.layer_filter_params import LayerFilterParams from 
diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities_component.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities_component.py
index 6341a0e69..54011d7f9 100644
--- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities_component.py
+++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities_component.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 # ==============================================================================
 
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.current_tpc import _current_tpc
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.current_tpc import _current_tpc
 
 
 class FrameworkQuantizationCapabilitiesComponent:
diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/layer_filter_params.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/layer_filter_params.py
index 30e032635..7335d1099 100644
--- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/layer_filter_params.py
+++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/layer_filter_params.py
@@ -14,7 +14,6 @@
 # ==============================================================================
 from typing import Any
 
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import AttributeFilter
 
 
 class LayerFilterParams:
diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/operations_to_layers.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/operations_to_layers.py
index be1f57190..6b81a10a7 100644
--- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/operations_to_layers.py
+++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/operations_to_layers.py
@@ -18,8 +18,8 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import \
     get_config_options_by_operators_set, is_opset_in_model
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.current_tpc import _current_tpc
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.framework_quantization_capabilities_component import FrameworkQuantizationCapabilitiesComponent
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.current_tpc import _current_tpc
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities_component import FrameworkQuantizationCapabilitiesComponent
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OperatorsSetBase, OperatorSetGroup
 from model_compression_toolkit import DefaultDict
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py
index b4e6f273d..de6e953de 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py
@@ -16,14 +16,13 @@
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     IMX500_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
-
 
 def get_tpc() -> TargetPlatformCapabilities:
     """
@@ -60,7 +59,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -69,7 +68,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -77,7 +76,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -92,7 +91,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -106,7 +105,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     linear_eight_bits = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
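The tpc.py hunks above are purely mechanical: every tp.QuantizationMethod.X becomes QuantizationMethod.X from mct_quantizers, and the module-level tp alias is dropped; no field values change. The resulting call shape, restated standalone (values copied from the kernel config in the hunk above):

    import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
    from mct_quantizers import QuantizationMethod

    # Same kernel config as in the hunk above, spelled with the relocated enum.
    kernel_base_config = schema.AttributeQuantizationConfig(
        weights_quantization_method=QuantizationMethod.SYMMETRIC,
        weights_n_bits=8,
        weights_per_channel_threshold=True,
        enable_weights_quantization=True,
        lut_values_bitwidth=None)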
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py
index 979febe5d..93bc46c1e 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py
@@ -22,7 +22,6 @@
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -55,7 +54,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -63,7 +62,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=True,
@@ -71,7 +70,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -88,7 +87,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -100,7 +99,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define an 8-bit config for linear operations quantization, that include a kernel and bias attributes.
     linear_eight_bits = schema.OpQuantizationConfig(
-        activation_quantization_method=tp.QuantizationMethod.UNIFORM,
+        activation_quantization_method=QuantizationMethod.UNIFORM,
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
         activation_n_bits=8,
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py
index 8e00a048d..7d4a6048f 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py
@@ -21,7 +21,6 @@
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -54,7 +53,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -62,7 +61,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -70,7 +69,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -85,7 +84,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -97,7 +96,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define an 8-bit config for linear operations quantization, that include a kernel and bias attributes.
     linear_eight_bits = schema.OpQuantizationConfig(
-        activation_quantization_method=tp.QuantizationMethod.UNIFORM,
+        activation_quantization_method=QuantizationMethod.UNIFORM,
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
         activation_n_bits=8,
diff --git a/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py b/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py
index 7f586db37..afb312fe9 100644
--- a/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py
+++ b/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py
@@ -15,8 +15,7 @@
 from typing import Union, Any
 
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
-from mct_quantizers import QuantizationTarget
+from mct_quantizers import QuantizationTarget, QuantizationMethod
 from mct_quantizers.common.constants \
     import QUANTIZATION_TARGET, QUANTIZATION_METHOD, QUANTIZER_ID
 from mct_quantizers.common.get_all_subclasses \
diff --git a/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py b/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py
index 3eec72942..18bb612ae 100644
--- a/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py
+++ b/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py
@@ -14,7 +14,8 @@
 # ==============================================================================
 from abc import ABC
 from typing import Dict, List
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+
+from mct_quantizers import QuantizationMethod
 
 
 class TrainableQuantizerCandidateConfig:
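For the trainable infrastructure the change also collapses two imports into one, since mct_quantizers exports both QuantizationTarget and QuantizationMethod. A hedged sketch of how a quantizer is tagged after the move (the mark_quantizer decorator itself is unchanged; the class below is a hypothetical stand-in for a real trainable quantizer):

    from mct_quantizers import QuantizationTarget, QuantizationMethod, mark_quantizer

    # Both enums now come from the same package, so one import suffices.
    @mark_quantizer(quantization_target=QuantizationTarget.Weights,
                    quantization_method=[QuantizationMethod.SYMMETRIC],
                    identifier=None)
    class SketchWeightQuantizer:  # hypothetical; real quantizers subclass the trainable base classes
        pass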
diff --git a/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/symmetric_lsq.py b/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/symmetric_lsq.py
index 1be5f2a94..f21f96721 100644
--- a/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/symmetric_lsq.py
+++ b/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/symmetric_lsq.py
@@ -23,9 +23,8 @@
 from model_compression_toolkit.trainable_infrastructure import TrainingMethod
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper
-from mct_quantizers import QuantizationTarget, mark_quantizer
+from mct_quantizers import QuantizationTarget, mark_quantizer, QuantizationMethod
 from model_compression_toolkit.qat.common import THRESHOLD_TENSOR
 from model_compression_toolkit import constants as C
 
 
diff --git a/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py b/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py
index e0725cea3..0d2beafdd 100644
--- a/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py
+++ b/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py
@@ -19,7 +19,7 @@
 
 import numpy as np
 
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.trainable_infrastructure.common.trainable_quantizer_config import \
     TrainableQuantizerActivationConfig, TrainableQuantizerWeightsConfig
 from mct_quantizers.common import constants as C
diff --git a/model_compression_toolkit/xquant/common/model_folding_utils.py b/model_compression_toolkit/xquant/common/model_folding_utils.py
index 8e923379e..63af169a9 100644
--- a/model_compression_toolkit/xquant/common/model_folding_utils.py
+++ b/model_compression_toolkit/xquant/common/model_folding_utils.py
@@ -23,7 +23,8 @@
 from typing import Any, Callable
 
 from model_compression_toolkit.core.common import Graph
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import \
+    FrameworkQuantizationCapabilities
 
 
 class ModelFoldingUtils:
diff --git a/model_compression_toolkit/xquant/keras/keras_report_utils.py b/model_compression_toolkit/xquant/keras/keras_report_utils.py
index 73c249b6f..42ba4652e 100644
--- a/model_compression_toolkit/xquant/keras/keras_report_utils.py
+++ b/model_compression_toolkit/xquant/keras/keras_report_utils.py
@@ -17,6 +17,8 @@
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
+    AttachTpcToKeras
 from model_compression_toolkit.xquant.common.framework_report_utils import FrameworkReportUtils
 from model_compression_toolkit.xquant.common.model_folding_utils import ModelFoldingUtils
 from model_compression_toolkit.xquant.common.similarity_calculator import SimilarityCalculator
@@ -27,8 +29,6 @@
 from model_compression_toolkit.xquant.keras.tensorboard_utils import KerasTensorboardUtils
 from mct_quantizers.keras.metadata import get_metadata
 from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
-    AttachTpcToKeras
 
 
 class KerasReportUtils(FrameworkReportUtils):
diff --git a/tests/common_tests/helpers/generate_test_tpc.py b/tests/common_tests/helpers/generate_test_tpc.py
index 2b6276514..bb95c2e31 100644
--- a/tests/common_tests/helpers/generate_test_tpc.py
+++ b/tests/common_tests/helpers/generate_test_tpc.py
@@ -16,16 +16,18 @@
 from typing import Dict, List, Any
 
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import FLOAT_BITWIDTH, ACTIVATION_N_BITS_ATTRIBUTE, \
     SUPPORTED_INPUT_ACTIVATION_NBITS_ATTRIBUTE
 from model_compression_toolkit.target_platform_capabilities.constants import OPS_SET_LIST, KERNEL_ATTR, BIAS_ATTR, \
     WEIGHTS_N_BITS
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import Signedness, OpQuantizationConfig, \
     QuantizationConfigOptions
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import \
+    FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs, generate_tpc
 import model_compression_toolkit as mct
 
-tp = mct.target_platform
 
 DEFAULT_WEIGHT_ATTR_CONFIG = 'default_weight_attr_config'
 KERNEL_BASE_CONFIG = 'kernel_base_config'
@@ -168,7 +170,7 @@ def generate_custom_test_tpc(name: str,
 
 def generate_test_fqc(name: str,
                       tpc: schema.TargetPlatformCapabilities,
-                      base_fqc: tp.FrameworkQuantizationCapabilities,
+                      base_fqc: FrameworkQuantizationCapabilities,
                       op_sets_to_layer_add: Dict[str, List[Any]] = None,
                       op_sets_to_layer_drop: Dict[str, List[Any]] = None,
                       attr_mapping: Dict[str, Dict] = {}):
@@ -189,20 +191,20 @@ def generate_test_fqc(name: str,
     # Remove empty op sets
     merged_dict = {op_set_name: layers for op_set_name, layers in merged_dict.items() if len(layers) > 0}
 
-    fqc = tp.FrameworkQuantizationCapabilities(tpc)
+    fqc = FrameworkQuantizationCapabilities(tpc)
 
     with fqc:
         for op_set_name, layers in merged_dict.items():
             am = attr_mapping.get(op_set_name)
-            tp.OperationsSetToLayers(op_set_name, layers, attr_mapping=am)
+            OperationsSetToLayers(op_set_name, layers, attr_mapping=am)
 
     return fqc
 
 
 def generate_test_attr_configs(default_cfg_nbits: int = 8,
-                               default_cfg_quantizatiom_method: tp.QuantizationMethod = tp.QuantizationMethod.POWER_OF_TWO,
+                               default_cfg_quantizatiom_method: QuantizationMethod = QuantizationMethod.POWER_OF_TWO,
                                kernel_cfg_nbits: int = 8,
-                               kernel_cfg_quantizatiom_method: tp.QuantizationMethod = tp.QuantizationMethod.POWER_OF_TWO,
+                               kernel_cfg_quantizatiom_method: QuantizationMethod = QuantizationMethod.POWER_OF_TWO,
                                enable_kernel_weights_quantization: bool = True,
                                kernel_lut_values_bitwidth: int = None):
     default_weight_attr_config = schema.AttributeQuantizationConfig(
@@ -220,7 +222,7 @@ def generate_test_attr_configs(default_cfg_nbits: int = 8,
         lut_values_bitwidth=kernel_lut_values_bitwidth)
 
     bias_config = schema.AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -236,7 +238,7 @@ def generate_test_op_qc(default_weight_attr_config: schema.AttributeQuantization
                         bias_config: schema.AttributeQuantizationConfig,
                         enable_activation_quantization: bool = True,
                         activation_n_bits: int = 8,
-                        activation_quantization_method: tp.QuantizationMethod = tp.QuantizationMethod.POWER_OF_TWO):
+                        activation_quantization_method: QuantizationMethod = QuantizationMethod.POWER_OF_TWO):
     return schema.OpQuantizationConfig(enable_activation_quantization=enable_activation_quantization,
                                        default_weight_attr_config=default_weight_attr_config,
                                        attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config,
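Note that the helper keeps its public parameter names, including the pre-existing quantizatiom misspelling, so call sites stay source-compatible; only the enum's origin changes. A sketch of driving the helper after the patch (the **-expansion pattern is the one test_tpc.py uses later in this diff):

    from mct_quantizers import QuantizationMethod
    from tests.common_tests.helpers.generate_test_tpc import (
        generate_test_attr_configs, generate_test_op_qc)

    attr_cfgs = generate_test_attr_configs(
        kernel_cfg_quantizatiom_method=QuantizationMethod.SYMMETRIC)  # sic: upstream spelling
    op_qc = generate_test_op_qc(**attr_cfgs)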
diff --git a/tests/common_tests/helpers/prep_graph_for_func_test.py b/tests/common_tests/helpers/prep_graph_for_func_test.py
index bbb5d76a7..d77158615 100644
--- a/tests/common_tests/helpers/prep_graph_for_func_test.py
+++ b/tests/common_tests/helpers/prep_graph_for_func_test.py
@@ -29,8 +29,6 @@
 
 import model_compression_toolkit as mct
 
-tp = mct.target_platform
-
 
 def prepare_graph_with_configs(in_model,
                                fw_impl,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py
index 89f9735f0..3c6e8fcfe 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py
@@ -22,7 +22,6 @@
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -60,7 +59,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -69,7 +68,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -77,7 +76,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -92,7 +91,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -106,7 +105,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     linear_eight_bits = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py
index 40cd8414e..89aac31ee 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py
@@ -23,7 +23,6 @@
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -56,7 +55,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -64,7 +63,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -72,7 +71,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -87,7 +86,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -101,7 +100,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     linear_eight_bits = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -118,11 +117,11 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     # to quantize the operations' activations using LUT.
     four_bits_lut = linear_eight_bits.clone_and_edit(
         attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: 4,
-                                    WEIGHTS_QUANTIZATION_METHOD: tp.QuantizationMethod.LUT_SYM_QUANTIZER}},
+                                    WEIGHTS_QUANTIZATION_METHOD: QuantizationMethod.LUT_SYM_QUANTIZER}},
         simd_size=linear_eight_bits.simd_size * 2)
     two_bits_lut = linear_eight_bits.clone_and_edit(
         attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: 2,
-                                    WEIGHTS_QUANTIZATION_METHOD: tp.QuantizationMethod.LUT_SYM_QUANTIZER}},
+                                    WEIGHTS_QUANTIZATION_METHOD: QuantizationMethod.LUT_SYM_QUANTIZER}},
         simd_size=linear_eight_bits.simd_size * 4)
 
     mixed_precision_cfg_list = [linear_eight_bits, four_bits_lut, two_bits_lut]
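The LUT variants apply the same one-token substitution inside clone_and_edit. Restated standalone under the assumption that base_cfg stands in for the linear_eight_bits config built earlier in the file (constant names as in the hunk; their import location is assumed):

    from mct_quantizers import QuantizationMethod
    from model_compression_toolkit.target_platform_capabilities.constants import (
        KERNEL_ATTR, WEIGHTS_N_BITS, WEIGHTS_QUANTIZATION_METHOD)

    # Derive a 4-bit LUT kernel config from an 8-bit base config, as the hunk does.
    four_bits_lut = base_cfg.clone_and_edit(
        attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: 4,
                                    WEIGHTS_QUANTIZATION_METHOD: QuantizationMethod.LUT_SYM_QUANTIZER}},
        simd_size=base_cfg.simd_size * 2)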
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py
index e16be42b8..18ce2262e 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py
@@ -23,7 +23,6 @@
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -56,7 +55,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -64,7 +63,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -72,7 +71,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -87,7 +86,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -99,7 +98,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define an 8-bit config for linear operations quantization, that include a kernel and bias attributes.
     linear_eight_bits = schema.OpQuantizationConfig(
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
         activation_n_bits=8,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py
index 5fd88882a..f2dcfb4d0 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py
@@ -23,7 +23,6 @@
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -63,7 +62,7 @@ def get_op_quantization_configs() -> \
 
     # define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -72,7 +71,7 @@ def get_op_quantization_configs() -> \
 
     # define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -80,7 +79,7 @@ def get_op_quantization_configs() -> \
 
     # define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -95,7 +94,7 @@ def get_op_quantization_configs() -> \
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -109,7 +108,7 @@ def get_op_quantization_configs() -> \
     linear_eight_bits = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py
index 583781f7f..20d79d9fd 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py
@@ -23,7 +23,6 @@
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -58,7 +57,7 @@ def get_op_quantization_configs() -> \
 
     # We define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -66,7 +65,7 @@ def get_op_quantization_configs() -> \
 
     # define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -74,7 +73,7 @@ def get_op_quantization_configs() -> \
 
     # We define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -89,7 +88,7 @@ def get_op_quantization_configs() -> \
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -103,7 +102,7 @@ def get_op_quantization_configs() -> \
     linear_eight_bits = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -120,11 +119,11 @@ def get_op_quantization_configs() -> \
     # to quantize the operations' activations using LUT.
     four_bits_lut = linear_eight_bits.clone_and_edit(
         attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: 4,
-                                    WEIGHTS_QUANTIZATION_METHOD: tp.QuantizationMethod.LUT_SYM_QUANTIZER}},
+                                    WEIGHTS_QUANTIZATION_METHOD: QuantizationMethod.LUT_SYM_QUANTIZER}},
         simd_size=linear_eight_bits.simd_size * 2)
     two_bits_lut = linear_eight_bits.clone_and_edit(
         attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: 2,
-                                    WEIGHTS_QUANTIZATION_METHOD: tp.QuantizationMethod.LUT_SYM_QUANTIZER}},
+                                    WEIGHTS_QUANTIZATION_METHOD: QuantizationMethod.LUT_SYM_QUANTIZER}},
         simd_size=linear_eight_bits.simd_size * 4)
 
     mixed_precision_cfg_list = [linear_eight_bits, four_bits_lut, two_bits_lut]
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py
index 75c15d2b7..f49cbfd94 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py
@@ -23,7 +23,6 @@
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -63,7 +62,7 @@ def get_op_quantization_configs() -> \
 
     # define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -72,7 +71,7 @@ def get_op_quantization_configs() -> \
 
     # define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -80,7 +79,7 @@ def get_op_quantization_configs() -> \
 
     # define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -95,7 +94,7 @@ def get_op_quantization_configs() -> \
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -109,7 +108,7 @@ def get_op_quantization_configs() -> \
     linear_eight_bits = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -166,7 +165,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
     const_config = default_config.clone_and_edit(
         default_weight_attr_config=default_config.default_weight_attr_config.clone_and_edit(
             enable_weights_quantization=True, weights_per_channel_threshold=True,
-            weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO))
+            weights_quantization_method=QuantizationMethod.POWER_OF_TWO))
     const_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([const_config]))
 
     # 16 bits inputs and outputs. Currently, only defined for consts since they are used in operators that
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py
index 631c82513..2c41ad629 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py
@@ -23,7 +23,6 @@
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -58,7 +57,7 @@ def get_op_quantization_configs() -> \
 
     # We define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -66,7 +65,7 @@ def get_op_quantization_configs() -> \
 
     # define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -74,7 +73,7 @@ def get_op_quantization_configs() -> \
 
     # We define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -89,7 +88,7 @@ def get_op_quantization_configs() -> \
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -103,7 +102,7 @@ def get_op_quantization_configs() -> \
     linear_eight_bits = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -120,11 +119,11 @@ def get_op_quantization_configs() -> \
     # to quantize the operations' activations using LUT.
     four_bits_lut = linear_eight_bits.clone_and_edit(
         attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: 4,
-                                    WEIGHTS_QUANTIZATION_METHOD: tp.QuantizationMethod.LUT_SYM_QUANTIZER}},
+                                    WEIGHTS_QUANTIZATION_METHOD: QuantizationMethod.LUT_SYM_QUANTIZER}},
         simd_size=linear_eight_bits.simd_size * 2)
     two_bits_lut = linear_eight_bits.clone_and_edit(
         attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: 2,
-                                    WEIGHTS_QUANTIZATION_METHOD: tp.QuantizationMethod.LUT_SYM_QUANTIZER}},
+                                    WEIGHTS_QUANTIZATION_METHOD: QuantizationMethod.LUT_SYM_QUANTIZER}},
         simd_size=linear_eight_bits.simd_size * 4)
 
     mixed_precision_cfg_list = [linear_eight_bits, four_bits_lut, two_bits_lut]
@@ -164,7 +163,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
     const_config = default_config.clone_and_edit(
         default_weight_attr_config=default_config.default_weight_attr_config.clone_and_edit(
             enable_weights_quantization=True, weights_per_channel_threshold=True,
-            weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO))
+            weights_quantization_method=QuantizationMethod.POWER_OF_TWO))
     const_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([const_config]))
 
     # 16 bits inputs and outputs. Currently, only defined for consts since they are used in operators that
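In the v3/v3_lut generate_tpc functions the nested clone_and_edit for constant quantization receives the same treatment. The edited call, restated standalone (default_config stands in for the function's argument):

    from mct_quantizers import QuantizationMethod

    # Const-quantization edit from the hunks above, with the relocated enum.
    const_config = default_config.clone_and_edit(
        default_weight_attr_config=default_config.default_weight_attr_config.clone_and_edit(
            enable_weights_quantization=True, weights_per_channel_threshold=True,
            weights_quantization_method=QuantizationMethod.POWER_OF_TWO))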
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py
index 9c410c503..5d30ce04b 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py
@@ -23,7 +23,6 @@
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 def get_tpc() -> TargetPlatformCapabilities:
     """
@@ -62,7 +61,7 @@ def get_op_quantization_configs() -> \
 
     # define a default quantization config for all non-specified weights attributes.
    default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -71,7 +70,7 @@ def get_op_quantization_configs() -> \
 
     # define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -79,7 +78,7 @@ def get_op_quantization_configs() -> \
 
     # define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -94,7 +93,7 @@ def get_op_quantization_configs() -> \
     eight_bits_default = OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -108,7 +107,7 @@ def get_op_quantization_configs() -> \
     linear_eight_bits = OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -173,7 +172,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
     const_config = default_config.clone_and_edit(
         default_weight_attr_config=default_config.default_weight_attr_config.clone_and_edit(
             enable_weights_quantization=True, weights_per_channel_threshold=True,
-            weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO))
+            weights_quantization_method=QuantizationMethod.POWER_OF_TWO))
     const_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([const_config]))
 
     # 16 bits inputs and outputs. Currently, only defined for consts since they are used in operators that
@@ -191,7 +190,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
         supported_input_activation_n_bits=(8, 16),
         default_weight_attr_config=default_config.default_weight_attr_config.clone_and_edit(
             enable_weights_quantization=True, weights_per_channel_threshold=False,
-            weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO)
+            weights_quantization_method=QuantizationMethod.POWER_OF_TWO)
     )
     const_config_input16_output16_per_tensor = const_config_input16_per_tensor.clone_and_edit(
         activation_n_bits=16, signedness=Signedness.SIGNED)
diff --git a/tests/common_tests/test_tpc.py b/tests/common_tests/test_tpc.py
index 9802152a1..e4a5645d1 100644
--- a/tests/common_tests/test_tpc.py
+++ b/tests/common_tests/test_tpc.py
@@ -26,7 +26,6 @@
     export_target_platform_model
 from tests.common_tests.helpers.generate_test_tpc import generate_test_attr_configs, generate_test_op_qc
 
-tp = mct.target_platform
 
 TEST_QC = generate_test_op_qc(**generate_test_attr_configs())
 TEST_QCO = schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC]))
diff --git a/tests/external_tests/keras_tests/models_tests/test_networks_runner.py b/tests/external_tests/keras_tests/models_tests/test_networks_runner.py
index fcfbe836b..f542d3eab 100644
--- a/tests/external_tests/keras_tests/models_tests/test_networks_runner.py
+++ b/tests/external_tests/keras_tests/models_tests/test_networks_runner.py
@@ -31,7 +31,6 @@
 keras = tf.keras
 layers = keras.layers
 
-tp = mct.target_platform
 
 QUANTIZATION_CONFIG = mct.core.QuantizationConfig(activation_error_method=mct.core.QuantizationErrorMethod.MSE,
                                                   weights_error_method=mct.core.QuantizationErrorMethod.MSE,
diff --git a/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tpc.py b/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tpc.py
index bdac7a0c0..3b09d9712 100644
--- a/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tpc.py
+++ b/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tpc.py
@@ -36,7 +36,6 @@
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OpQuantizationConfig
 from tests.common_tests.helpers.tpcs_for_tests.v1.tpc import generate_tpc
 
-tp = mct.target_platform
 
 
 def get_tpc(edit_weights_params_dict, edit_act_params_dict) -> TargetPlatformCapabilities:
@@ -63,16 +62,16 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     return eight_bits, mixed_precision_cfg_list, default_config
 
 
-def get_int8_tpc(edit_weights_params_dict={}, edit_act_params_dict={}) -> tp.TargetPlatformCapabilities:
+def get_int8_tpc(edit_weights_params_dict={}, edit_act_params_dict={}) -> TargetPlatformCapabilities:
     default_tpc = get_tpc(edit_weights_params_dict, edit_act_params_dict)
     return default_tpc
 
 
 def generate_keras_tpc(name: str, tpc: schema.TargetPlatformCapabilities):
-    keras_tpc = tp.FrameworkQuantizationCapabilities(tpc)
+    keras_tpc = FrameworkQuantizationCapabilities(tpc)
 
     with keras_tpc:
-        tp.OperationsSetToLayers("NoQuantization", [Reshape,
+        OperationsSetToLayers("NoQuantization", [Reshape,
                                                     tf.reshape,
                                                     Permute,
                                                     tf.transpose,
@@ -92,7 +91,7 @@ def generate_keras_tpc(name: str, tpc: schema.TargetPlatformCapabilities):
                                                     tf.nn.top_k,
                                                     tf.__operators__.getitem,
                                                     tf.compat.v1.shape])
-        tp.OperationsSetToLayers("Conv",
+        OperationsSetToLayers("Conv",
                                  [Conv2D,
                                   DepthwiseConv2D,
                                   Conv2DTranspose,
@@ -104,22 +103,22 @@ def generate_keras_tpc(name: str, tpc: schema.TargetPlatformCapabilities):
                                                DepthwiseConv2D: KERAS_DEPTHWISE_KERNEL,
                                                tf.nn.depthwise_conv2d: KERAS_DEPTHWISE_KERNEL}, default_value=KERAS_KERNEL),
                                                BIAS_ATTR: DefaultDict(default_value=BIAS)})
-        tp.OperationsSetToLayers("FullyConnected", [Dense],
+        OperationsSetToLayers("FullyConnected", [Dense],
                                  attr_mapping={KERNEL_ATTR: DefaultDict(default_value=KERAS_KERNEL),
                                                BIAS_ATTR: DefaultDict(default_value=BIAS)})
-        tp.OperationsSetToLayers("AnyReLU", [tf.nn.relu,
+        OperationsSetToLayers("AnyReLU", [tf.nn.relu,
                                              tf.nn.relu6,
                                              tf.nn.leaky_relu,
                                              ReLU,
                                              LeakyReLU,
-                                             tp.LayerFilterParams(Activation, activation="relu"),
-                                             tp.LayerFilterParams(Activation, activation="leaky_relu")])
-        tp.OperationsSetToLayers("Add", [tf.add, Add])
-        tp.OperationsSetToLayers("Sub", [tf.subtract, Subtract])
-        tp.OperationsSetToLayers("Mul", [tf.math.multiply, Multiply])
-        tp.OperationsSetToLayers("Div", [tf.math.divide])
-        tp.OperationsSetToLayers("PReLU", [PReLU])
-        tp.OperationsSetToLayers("Swish", [tf.nn.swish, tp.LayerFilterParams(Activation, activation="swish")])
-        tp.OperationsSetToLayers("Sigmoid", [tf.nn.sigmoid, tp.LayerFilterParams(Activation, activation="sigmoid")])
-        tp.OperationsSetToLayers("Tanh", [tf.nn.tanh, tp.LayerFilterParams(Activation, activation="tanh")])
+                                             LayerFilterParams(Activation, activation="relu"),
+                                             LayerFilterParams(Activation, activation="leaky_relu")])
+        OperationsSetToLayers("Add", [tf.add, Add])
+        OperationsSetToLayers("Sub", [tf.subtract, Subtract])
+        OperationsSetToLayers("Mul", [tf.math.multiply, Multiply])
+        OperationsSetToLayers("Div", [tf.math.divide])
+        OperationsSetToLayers("PReLU", [PReLU])
+        OperationsSetToLayers("Swish", [tf.nn.swish, LayerFilterParams(Activation, activation="swish")])
+        OperationsSetToLayers("Sigmoid", [tf.nn.sigmoid, LayerFilterParams(Activation, activation="sigmoid")])
+        OperationsSetToLayers("Tanh", [tf.nn.tanh, LayerFilterParams(Activation, activation="tanh")])
     return keras_tpc
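With the tp alias gone, the exporter-test TPC uses the mapping DSL by its bare names. A condensed sketch of the with-block pattern from the hunk above (tpc stands in for a schema.TargetPlatformCapabilities instance; only two of the operator sets are shown):

    import tensorflow as tf
    from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import (
        LayerFilterParams, OperationsSetToLayers)
    from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import (
        FrameworkQuantizationCapabilities)

    keras_tpc = FrameworkQuantizationCapabilities(tpc)
    with keras_tpc:
        # Same call shape as the hunk above, without the removed tp. prefix.
        OperationsSetToLayers("Add", [tf.add])
        OperationsSetToLayers("Swish", [tf.nn.swish,
                                        LayerFilterParams(tf.keras.layers.Activation, activation="swish")])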
diff --git a/tests/keras_tests/exporter_tests/tflite_int8/networks/conv2d_test.py b/tests/keras_tests/exporter_tests/tflite_int8/networks/conv2d_test.py
index 918314119..882552bc8 100644
--- a/tests/keras_tests/exporter_tests/tflite_int8/networks/conv2d_test.py
+++ b/tests/keras_tests/exporter_tests/tflite_int8/networks/conv2d_test.py
@@ -16,8 +16,8 @@
 import numpy as np
 
 import tests.keras_tests.exporter_tests.constants as constants
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core.keras.constants import KERNEL
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tpc import get_int8_tpc
 from tests.keras_tests.exporter_tests.tflite_int8.tflite_int8_exporter_base_test import TFLiteINT8ExporterBaseTest
 from tests.keras_tests.utils import get_layers_from_model_by_type
diff --git a/tests/keras_tests/exporter_tests/tflite_int8/networks/mobilenetv2_test.py b/tests/keras_tests/exporter_tests/tflite_int8/networks/mobilenetv2_test.py
index c425ce3f9..f4ce5fd27 100644
--- a/tests/keras_tests/exporter_tests/tflite_int8/networks/mobilenetv2_test.py
+++ b/tests/keras_tests/exporter_tests/tflite_int8/networks/mobilenetv2_test.py
@@ -18,7 +18,7 @@
 from keras.applications import MobileNetV2
 
 import tests.keras_tests.exporter_tests.constants as constants
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tpc import get_int8_tpc
 from tests.keras_tests.exporter_tests.tflite_int8.tflite_int8_exporter_base_test import TFLiteINT8ExporterBaseTest
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/bn_attributes_quantization_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/bn_attributes_quantization_test.py
index f342b1a49..ff161d818 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/bn_attributes_quantization_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/bn_attributes_quantization_test.py
@@ -30,8 +30,6 @@
 keras = tf.keras
 layers = keras.layers
 
-tp = mct.target_platform
-
 
 def _generate_bn_quantized_tpm(quantize_linear):
     attr_cfgs_dict = generate_test_attr_configs()
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/bn_folding_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/bn_folding_test.py
index 616a2d4ea..7f71c7590 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/bn_folding_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/bn_folding_test.py
@@ -28,7 +28,6 @@
 keras = tf.keras
 layers = keras.layers
 
-tp = mct.target_platform
 
 
 def update_kernel_for_bn_folding_fn(conv_layer: layers.Conv2D,
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/const_quantization_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/const_quantization_test.py
index d63d97350..24e9e85e9 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/const_quantization_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/const_quantization_test.py
@@ -32,7 +32,6 @@
 keras = tf.keras
 layers = keras.layers
 
-tp = mct.target_platform
 
 
 def create_const_quant_tpc(qmethod):
@@ -73,7 +72,7 @@ class ConstQuantizationTest(BaseKerasFeatureNetworkTest):
 
     def __init__(self, unit_test, layer, const, is_list_input=False, input_reverse_order=False, use_kwargs=False,
                  error_method: mct.core.QuantizationErrorMethod = mct.core.QuantizationErrorMethod.MSE,
-                 qmethod: tp.QuantizationMethod = tp.QuantizationMethod.POWER_OF_TWO,
+                 qmethod: QuantizationMethod = QuantizationMethod.POWER_OF_TWO,
                  input_shape=(32, 32, 16)):
         super(ConstQuantizationTest, self).__init__(unit_test=unit_test, input_shape=input_shape)
         self.layer = layer
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/const_representation_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/const_representation_test.py
index 7d662f3ed..fedd80908 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/const_representation_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/const_representation_test.py
@@ -26,7 +26,6 @@
 keras = tf.keras
 layers = keras.layers
 
-tp = mct.target_platform
 
 
 class ConstRepresentationTest(BaseKerasFeatureNetworkTest):
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/gptq/gptq_conv.py b/tests/keras_tests/feature_networks_tests/feature_networks/gptq/gptq_conv.py
index 3367bc7a7..469a71bd5 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/gptq/gptq_conv.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/gptq/gptq_conv.py
@@ -21,7 +21,6 @@
 keras = tf.keras
 layers = keras.layers
 
-tp = mct.target_platform
 
 
 def build_model(in_input_shape: List[int], group: int = 1, dilation_rate=(1, 1)) -> keras.Model:
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/gptq/gptq_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/gptq/gptq_test.py
index 487297ced..e4e16feaf 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/gptq/gptq_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/gptq/gptq_test.py
@@ -18,11 +18,11 @@
 import tensorflow as tf
 
 import model_compression_toolkit as mct
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit import DefaultDict
 from model_compression_toolkit.constants import GPTQ_HESSIAN_NUM_SAMPLES
 from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig, RoundingType, GradientPTQConfig, \
     GPTQHessianScoresConfig, GradualActivationQuantizationConfig
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.core.common.user_info import UserInformation
 from model_compression_toolkit.gptq.common.gptq_constants import QUANT_PARAM_LEARNING_STR, MAX_LSB_STR
 from model_compression_toolkit.gptq.keras.gptq_loss import multiple_tensors_mse_loss
@@ -32,7 +32,6 @@
 keras = tf.keras
 layers = keras.layers
 
-tp = mct.target_platform
 
 
 def build_model(in_input_shape: List[int]) -> keras.Model:
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/linear_collapsing_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/linear_collapsing_test.py
index 70d5e18ae..636d7502a 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/linear_collapsing_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/linear_collapsing_test.py
@@ -31,7 +31,6 @@
 keras = tf.keras
 layers = keras.layers
 
-tp = mct.target_platform
 
 
 class BaseConv2DCollapsingTest(BaseKerasFeatureNetworkTest, ABC):
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/lut_quantizer.py b/tests/keras_tests/feature_networks_tests/feature_networks/lut_quantizer.py
index 03c27db77..b3d24daa3 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/lut_quantizer.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/lut_quantizer.py
@@ -34,7 +34,6 @@
 keras = tf.keras
 layers = keras.layers
 
-tp = mct.target_platform
 
 
 def get_uniform_weights(kernel, in_channels, out_channels):
@@ -59,7 +58,7 @@ def __init__(self, unit_test, weights_n_bits: int = 3, is_symmetric=False):
         super().__init__(unit_test, num_calibration_iter=5, val_batch_size=32)
 
     def get_tpc(self):
-        qmethod = tp.QuantizationMethod.LUT_SYM_QUANTIZER if self.is_symmetric else tp.QuantizationMethod.LUT_POT_QUANTIZER
+        qmethod = QuantizationMethod.LUT_SYM_QUANTIZER if self.is_symmetric else QuantizationMethod.LUT_POT_QUANTIZER
         tpc = generate_test_tpc({'weights_n_bits': self.weights_n_bits,
                                  'weights_quantization_method': qmethod})
         return generate_keras_tpc(name='lut_quantizer_test', tpc=tpc)
@@ -69,7 +68,7 @@ def get_debug_config(self):
                    network_editor=[EditRule(filter=NodeNameFilter(self.node_to_change_name),
                                             action=ChangeCandidatesWeightsQuantizationMethod(
                                                 weights_quantization_method=
-                                                mct.target_platform.QuantizationMethod.POWER_OF_TWO,
+                                                mct.QuantizationMethod.POWER_OF_TWO,
                                                 attr_name=KERNEL))])
 
     def get_input_shapes(self):
@@ -105,7 +104,7 @@ def __init__(self, unit_test, activation_n_bits: int = 3):
         super().__init__(unit_test, num_calibration_iter=5, val_batch_size=32)
 
     def get_tpc(self):
-        tpc = generate_test_tpc({'activation_quantization_method': tp.QuantizationMethod.LUT_POT_QUANTIZER,
+        tpc = generate_test_tpc({'activation_quantization_method': QuantizationMethod.LUT_POT_QUANTIZER,
                                  'activation_n_bits': self.activation_n_bits})
         return generate_keras_tpc(name='lut_quantizer_test', tpc=tpc)
generate_test_tpc({'activation_quantization_method': QuantizationMethod.LUT_POT_QUANTIZER, 'activation_n_bits': self.activation_n_bits}) return generate_keras_tpc(name='lut_quantizer_test', tpc=tpc) diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/metadata_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/metadata_test.py index 5089c1d97..9c0d01b0c 100644 --- a/tests/keras_tests/feature_networks_tests/feature_networks/metadata_test.py +++ b/tests/keras_tests/feature_networks_tests/feature_networks/metadata_test.py @@ -26,7 +26,6 @@ keras = tf.keras layers = keras.layers -tp = mct.target_platform class MetadataTest(BaseKerasFeatureNetworkTest): diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision/requires_mixed_precision_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision/requires_mixed_precision_test.py index 75e9c5516..f9062ba12 100644 --- a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision/requires_mixed_precision_test.py +++ b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision/requires_mixed_precision_test.py @@ -19,7 +19,7 @@ from packaging import version from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras if version.parse(tf.__version__) >= version.parse("2.13"): diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py index cb9402468..15d2f71f6 100644 --- a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py +++ b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py @@ -39,7 +39,6 @@ keras = tf.keras layers = keras.layers -tp = mct.target_platform def get_base_mp_nbits_candidates(): @@ -567,7 +566,7 @@ def get_resource_utilization(self): def get_core_config(self): return CoreConfig(quantization_config=QuantizationConfig( custom_tpc_opset_to_layer={"Softmax": CustomOpsetLayers([layers.Softmax, tf.nn.softmax, softmax, - tp.LayerFilterParams(layers.Activation, activation=SOFTMAX)]), + LayerFilterParams(layers.Activation, activation=SOFTMAX)]), "Input": CustomOpsetLayers([layers.InputLayer])})) def get_tpc(self): diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/edit_qc_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/edit_qc_test.py index a3dfbf5f0..78fd3446c 100644 --- a/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/edit_qc_test.py +++ b/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/edit_qc_test.py @@ -16,6 +16,7 @@ import tensorflow as tf from tqdm import tqdm +from mct_quantizers import QuantizationMethod from model_compression_toolkit.core import DebugConfig from model_compression_toolkit.core.common.mixed_precision.bit_width_setter import set_bit_widths from model_compression_toolkit.core.common.mixed_precision.mixed_precision_search_facade import search_bit_width @@ -34,10 +35,9 @@ from model_compression_toolkit.core.common.substitutions.apply_substitutions import substitute from model_compression_toolkit.core.graph_prep_runner import 
graph_preparation_runner from model_compression_toolkit.core.keras.constants import KERNEL -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod keras = tf.keras diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/node_filter_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/node_filter_test.py index 4e11c29d0..afcd50a06 100644 --- a/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/node_filter_test.py +++ b/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/node_filter_test.py @@ -31,7 +31,6 @@ keras = tf.keras layers = keras.layers -tp = mct.target_platform def get_uniform_weights(kernel, in_channels, out_channels): @@ -58,7 +57,7 @@ def __init__(self, unit_test, activation_n_bits: int = 3, weights_n_bits: int = def get_tpc(self): tpc = generate_test_tpc({ - 'weights_quantization_method': tp.QuantizationMethod.POWER_OF_TWO, + 'weights_quantization_method': QuantizationMethod.POWER_OF_TWO, 'activation_n_bits': 16, 'weights_n_bits': 16}) return generate_keras_tpc(name="scope_filter_test", tpc=tpc) @@ -140,7 +139,7 @@ def __init__(self, unit_test, activation_n_bits: int = 3, weights_n_bits: int = def get_tpc(self): tpc = generate_test_tpc({ - 'weights_quantization_method': tp.QuantizationMethod.POWER_OF_TWO, + 'weights_quantization_method': QuantizationMethod.POWER_OF_TWO, 'activation_n_bits': 16, 'weights_n_bits': 16}) return generate_keras_tpc(name="name_filter_test", tpc=tpc) @@ -205,14 +204,14 @@ def __init__(self, unit_test, activation_n_bits: int = 3, weights_n_bits: int = super().__init__(unit_test ) def weights_params_fn(self): - return get_weights_quantization_params_fn(tp.QuantizationMethod.POWER_OF_TWO) + return get_weights_quantization_params_fn(QuantizationMethod.POWER_OF_TWO) def activations_params_fn(self): - return get_activation_quantization_params_fn(tp.QuantizationMethod.POWER_OF_TWO) + return get_activation_quantization_params_fn(QuantizationMethod.POWER_OF_TWO) def get_tpc(self): tpc = generate_test_tpc({ - 'weights_quantization_method': tp.QuantizationMethod.POWER_OF_TWO, + 'weights_quantization_method': QuantizationMethod.POWER_OF_TWO, 'activation_n_bits': 16, 'weights_n_bits': 16}) return generate_keras_tpc(name="type_filter_test", tpc=tpc) diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/qat/qat_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/qat/qat_test.py index fd27723ae..6fd65e2f9 100644 --- a/tests/keras_tests/feature_networks_tests/feature_networks/qat/qat_test.py +++ b/tests/keras_tests/feature_networks_tests/feature_networks/qat/qat_test.py @@ -45,8 +45,8 @@ class QuantizationAwareTrainingTest(BaseKerasFeatureNetworkTest): def __init__(self, unit_test, layer, weight_bits=2, activation_bits=4, finalize=False, - weights_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO, - activation_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO, + weights_quantization_method=mct.QuantizationMethod.POWER_OF_TWO, + activation_quantization_method=mct.QuantizationMethod.POWER_OF_TWO, 
test_loading=False): self.layer = layer self.weight_bits = weight_bits @@ -163,8 +163,8 @@ def compare(self, quantized_model, float_model, loaded_model, input_x=None, quan class QATWrappersTest(BaseKerasFeatureNetworkTest): def __init__(self, unit_test, layer, weight_bits=2, activation_bits=4, finalize=True, - weights_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO, - activation_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO, + weights_quantization_method=mct.QuantizationMethod.POWER_OF_TWO, + activation_quantization_method=mct.QuantizationMethod.POWER_OF_TWO, training_method=TrainingMethod.STE, per_channel=True, test_loading=False): diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/residual_collapsing_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/residual_collapsing_test.py index 150511b39..26325f5c6 100644 --- a/tests/keras_tests/feature_networks_tests/feature_networks/residual_collapsing_test.py +++ b/tests/keras_tests/feature_networks_tests/feature_networks/residual_collapsing_test.py @@ -22,7 +22,6 @@ keras = tf.keras layers = keras.layers -tp = mct.target_platform class BaseResidualCollapsingTest(BaseKerasFeatureNetworkTest): diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py index 44fb8de79..f5c47fb7e 100644 --- a/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py +++ b/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py @@ -20,6 +20,7 @@ import tensorflow as tf import model_compression_toolkit as mct +from mct_quantizers import QuantizationMethod from model_compression_toolkit import get_target_platform_capabilities from model_compression_toolkit.constants import TENSORFLOW from model_compression_toolkit.core import CoreConfig, QuantizationConfig, DEFAULTCONFIG, FrameworkInfo, DebugConfig @@ -37,9 +38,7 @@ from model_compression_toolkit.core.runner import core_runner from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod -from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc @@ -51,7 +50,6 @@ keras = tf.keras layers = keras.layers -tp = mct.target_platform class BaseSecondMomentTest(BaseKerasFeatureNetworkTest, ABC): diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/symmetric_threshold_selection_activation_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/symmetric_threshold_selection_activation_test.py index 93e1940eb..0801c6158 100644 --- a/tests/keras_tests/feature_networks_tests/feature_networks/symmetric_threshold_selection_activation_test.py +++ 
b/tests/keras_tests/feature_networks_tests/feature_networks/symmetric_threshold_selection_activation_test.py @@ -24,7 +24,6 @@ import model_compression_toolkit as mct from tests.keras_tests.utils import get_layers_from_model_by_type -tp = mct.target_platform keras = tf.keras layers = keras.layers @@ -39,7 +38,7 @@ def generate_inputs(self): def get_tpc(self): tpc = generate_test_tpc({ - 'activation_quantization_method': tp.QuantizationMethod.SYMMETRIC, + 'activation_quantization_method': QuantizationMethod.SYMMETRIC, 'activation_n_bits': 8}) return generate_keras_tpc(name="symmetric_threshold_test", tpc=tpc) diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/test_kmeans_quantizer.py b/tests/keras_tests/feature_networks_tests/feature_networks/test_kmeans_quantizer.py index c96d414dc..9af69bbf4 100644 --- a/tests/keras_tests/feature_networks_tests/feature_networks/test_kmeans_quantizer.py +++ b/tests/keras_tests/feature_networks_tests/feature_networks/test_kmeans_quantizer.py @@ -15,7 +15,7 @@ import unittest -from model_compression_toolkit import target_platform +from mct_quantizers import QuantizationMethod from model_compression_toolkit.core.common.network_editors.node_filters import NodeNameFilter from model_compression_toolkit.core.common.network_editors.actions import EditRule, ChangeCandidatesWeightsQuantConfigAttr from model_compression_toolkit.core.common.quantization.quantizers.uniform_quantizers import power_of_two_quantizer @@ -53,7 +53,7 @@ class KmeansQuantizerTestBase(BaseKerasFeatureNetworkTest): def __init__(self, unit_test, - quantization_method: target_platform.QuantizationMethod.LUT_POT_QUANTIZER, + quantization_method: QuantizationMethod.LUT_POT_QUANTIZER, weight_fn=get_uniform_weights, weights_n_bits: int = 3): @@ -94,7 +94,7 @@ def get_debug_config(self): return mct.core.DebugConfig(network_editor=[EditRule(filter=NodeNameFilter(self.node_to_change_name), action=ChangeCandidatesWeightsQuantConfigAttr( attr_name=KERNEL, - weights_quantization_method=target_platform.QuantizationMethod.POWER_OF_TWO)), + weights_quantization_method=QuantizationMethod.POWER_OF_TWO)), EditRule(filter=NodeNameFilter(self.node_to_change_name), action=ChangeCandidatesWeightsQuantConfigAttr( attr_name=KERNEL, @@ -120,7 +120,7 @@ class KmeansQuantizerTest(KmeansQuantizerTestBase): def __init__(self, unit_test, - quantization_method: target_platform.QuantizationMethod.LUT_POT_QUANTIZER, + quantization_method: QuantizationMethod.LUT_POT_QUANTIZER, weights_n_bits: int = 3): super().__init__(unit_test, quantization_method, get_uniform_weights, weights_n_bits) @@ -141,7 +141,7 @@ class KmeansQuantizerNotPerChannelTest(KmeansQuantizerTestBase): def __init__(self, unit_test, - quantization_method: target_platform.QuantizationMethod.LUT_POT_QUANTIZER, + quantization_method: QuantizationMethod.LUT_POT_QUANTIZER, weights_n_bits: int = 3): super().__init__(unit_test, quantization_method, get_uniform_weights, weights_n_bits) @@ -163,7 +163,7 @@ class KmeansQuantizerTestManyClasses(KmeansQuantizerTestBase): This test checks the chosen quantization method is different that symmetric uniform ''' - def __init__(self, unit_test, quantization_method: target_platform.QuantizationMethod.LUT_POT_QUANTIZER, weights_n_bits: int = 8): + def __init__(self, unit_test, quantization_method: QuantizationMethod.LUT_POT_QUANTIZER, weights_n_bits: int = 8): super().__init__(unit_test, quantization_method, get_uniform_weights, weights_n_bits) def compare(self, quantized_model, float_model, input_x=None, 
quantization_info=None): @@ -180,7 +180,7 @@ class KmeansQuantizerTestZeroWeights(KmeansQuantizerTestBase): ''' def __init__(self, unit_test, - quantization_method: target_platform.QuantizationMethod.LUT_POT_QUANTIZER, + quantization_method: QuantizationMethod.LUT_POT_QUANTIZER, weights_n_bits: int = 3): super().__init__(unit_test, quantization_method, get_zero_as_weights, weights_n_bits) diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/tpc_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/tpc_test.py index 4904890f9..cadfbf076 100644 --- a/tests/keras_tests/feature_networks_tests/feature_networks/tpc_test.py +++ b/tests/keras_tests/feature_networks_tests/feature_networks/tpc_test.py @@ -22,7 +22,6 @@ keras = tf.keras layers = keras.layers -tp = mct.target_platform class TpcTest(BaseKerasFeatureNetworkTest): diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/uniform_range_selection_activation_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/uniform_range_selection_activation_test.py index 9b54fc546..cc43072d0 100644 --- a/tests/keras_tests/feature_networks_tests/feature_networks/uniform_range_selection_activation_test.py +++ b/tests/keras_tests/feature_networks_tests/feature_networks/uniform_range_selection_activation_test.py @@ -27,7 +27,6 @@ keras = tf.keras layers = keras.layers -tp = mct.target_platform class UniformRangeSelectionActivationTest(BaseKerasFeatureNetworkTest): @@ -43,7 +42,7 @@ def get_quantization_config(self): def get_tpc(self): tpc = generate_test_tpc({ - 'activation_quantization_method': tp.QuantizationMethod.UNIFORM, + 'activation_quantization_method': QuantizationMethod.UNIFORM, 'activation_n_bits': 8}) return generate_keras_tpc(name="uniform_range_test", tpc=tpc) diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py b/tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py index 18f7113db..303972294 100644 --- a/tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py +++ b/tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py @@ -39,7 +39,6 @@ keras = tf.keras layers = keras.layers -tp = mct.target_platform class MixedPrecisionBaseTest(BaseKerasFeatureNetworkTest): diff --git a/tests/keras_tests/feature_networks_tests/test_features_runner.py b/tests/keras_tests/feature_networks_tests/test_features_runner.py index e34525ac1..3d9988897 100644 --- a/tests/keras_tests/feature_networks_tests/test_features_runner.py +++ b/tests/keras_tests/feature_networks_tests/test_features_runner.py @@ -22,11 +22,11 @@ from sklearn.metrics.pairwise import distance_metrics from tensorflow.keras.layers import PReLU, ELU +from mct_quantizers import QuantizationMethod from model_compression_toolkit.core import QuantizationErrorMethod from model_compression_toolkit.core.common.mixed_precision.distance_weighting import MpDistanceWeighting from model_compression_toolkit.core.common.network_editors import NodeTypeFilter, NodeNameFilter from model_compression_toolkit.gptq.keras.gptq_loss import sample_layer_attention_loss -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod from model_compression_toolkit.gptq import RoundingType from model_compression_toolkit.target_platform_capabilities import constants as C from tests.keras_tests.feature_networks_tests.feature_networks.activation_bias_correction_test import \ 
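For reference, every hunk in this change applies the same mechanical migration: the module-level alias `tp = mct.target_platform` is deleted, `QuantizationMethod` is imported from `mct_quantizers` instead of the old `target_platform` package, and the `targetplatform2framework` helpers (`LayerFilterParams`, `OperationsSetToLayers`, `FrameworkQuantizationCapabilities`, `AttachTpcToKeras`) drop the intermediate `target_platform` segment from their import paths. A minimal sketch of the post-patch pattern, not itself part of the patch, assuming (as updated call sites such as `mct.QuantizationMethod.POWER_OF_TWO` imply) that `model_compression_toolkit` re-exports the enum at top level:

import model_compression_toolkit as mct
from mct_quantizers import QuantizationMethod
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
    AttachTpcToKeras

# Enum members are now referenced from mct_quantizers directly ...
weights_method = QuantizationMethod.POWER_OF_TWO
# ... or through the assumed top-level re-export that the updated tests use:
activation_method = mct.QuantizationMethod.SYMMETRIC
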
diff --git a/tests/keras_tests/function_tests/test_activation_quantization_holder_gptq.py b/tests/keras_tests/function_tests/test_activation_quantization_holder_gptq.py index 8385b4cf1..aae53f6c4 100644 --- a/tests/keras_tests/function_tests/test_activation_quantization_holder_gptq.py +++ b/tests/keras_tests/function_tests/test_activation_quantization_holder_gptq.py @@ -11,12 +11,11 @@ from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO from model_compression_toolkit.gptq.keras.gptq_keras_implementation import GPTQKerasImplemantation from model_compression_toolkit.gptq.keras.gptq_training import KerasGPTQTrainer -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_quantization_parameters -tp = mct.target_platform def basic_model(input_shape): diff --git a/tests/keras_tests/function_tests/test_activation_weights_composition_substitution.py b/tests/keras_tests/function_tests/test_activation_weights_composition_substitution.py index bc6fd3c4a..b3a729a34 100644 --- a/tests/keras_tests/function_tests/test_activation_weights_composition_substitution.py +++ b/tests/keras_tests/function_tests/test_activation_weights_composition_substitution.py @@ -21,7 +21,7 @@ import tensorflow as tf from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs @@ -50,7 +50,6 @@ import model_compression_toolkit as mct from tests.keras_tests.tpc_keras import get_tpc_with_activation_mp_keras -tp = mct.target_platform INPUT_SHAPE = (8, 8, 3) diff --git a/tests/keras_tests/function_tests/test_cfg_candidates_filter.py b/tests/keras_tests/function_tests/test_cfg_candidates_filter.py index a01269ddf..272b0683e 100644 --- a/tests/keras_tests/function_tests/test_cfg_candidates_filter.py +++ b/tests/keras_tests/function_tests/test_cfg_candidates_filter.py @@ -27,12 +27,11 @@ from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation from model_compression_toolkit.core.common.fusion.layer_fusing import fusion -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras from tests.common_tests.helpers.generate_test_tpc import generate_test_attr_configs, generate_test_op_qc from tests.keras_tests.tpc_keras import get_tpc_with_activation_mp_keras -tp = mct.target_platform def get_full_bitwidth_candidates(): diff --git a/tests/keras_tests/function_tests/test_custom_layer.py b/tests/keras_tests/function_tests/test_custom_layer.py index 49287ebd0..420d558ee 100644 --- 
a/tests/keras_tests/function_tests/test_custom_layer.py +++ b/tests/keras_tests/function_tests/test_custom_layer.py @@ -22,8 +22,8 @@ from model_compression_toolkit.core import CoreConfig, QuantizationConfig from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import Signedness from model_compression_toolkit.target_platform_capabilities.constants import BIAS_ATTR, KERNEL_ATTR -from model_compression_toolkit.target_platform_capabilities.target_platform import LayerFilterParams from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams from tests.common_tests.helpers.generate_test_tpc import generate_test_attr_configs, DEFAULT_WEIGHT_ATTR_CONFIG, \ KERNEL_BASE_CONFIG, BIAS_CONFIG @@ -65,9 +65,8 @@ def get_tpc(): Returns: FrameworkQuantizationCapabilities object """ - tp = mct.target_platform attr_cfg = generate_test_attr_configs(kernel_lut_values_bitwidth=0) - base_cfg = schema.OpQuantizationConfig(activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO, + base_cfg = schema.OpQuantizationConfig(activation_quantization_method=QuantizationMethod.POWER_OF_TWO, enable_activation_quantization=True, activation_n_bits=32, supported_input_activation_n_bits=32, diff --git a/tests/keras_tests/function_tests/test_get_gptq_config.py b/tests/keras_tests/function_tests/test_get_gptq_config.py index fb3515050..a87f30c5a 100644 --- a/tests/keras_tests/function_tests/test_get_gptq_config.py +++ b/tests/keras_tests/function_tests/test_get_gptq_config.py @@ -18,13 +18,13 @@ import numpy as np import model_compression_toolkit as mct +from mct_quantizers import QuantizationMethod from model_compression_toolkit.gptq import get_keras_gptq_config, keras_gradient_post_training_quantization, GradientPTQConfig, RoundingType from model_compression_toolkit.core import QuantizationConfig, QuantizationErrorMethod, CoreConfig from model_compression_toolkit import DefaultDict import tensorflow as tf -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod from model_compression_toolkit.gptq.common.gptq_config import GPTQHessianScoresConfig from model_compression_toolkit.gptq.common.gptq_constants import QUANT_PARAM_LEARNING_STR, MAX_LSB_STR from model_compression_toolkit.gptq.keras.gptq_loss import multiple_tensors_mse_loss diff --git a/tests/keras_tests/function_tests/test_gptq_soft_quantizer.py b/tests/keras_tests/function_tests/test_gptq_soft_quantizer.py index a5c2134e1..e224d62b7 100644 --- a/tests/keras_tests/function_tests/test_gptq_soft_quantizer.py +++ b/tests/keras_tests/function_tests/test_gptq_soft_quantizer.py @@ -20,8 +20,8 @@ from tensorflow.keras.layers import Conv2D, Input import numpy as np import model_compression_toolkit as mct +from mct_quantizers import QuantizationMethod from model_compression_toolkit.constants import THRESHOLD, MIN_THRESHOLD -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod from model_compression_toolkit.core.keras.constants import KERNEL from model_compression_toolkit.gptq.keras.quantizer.soft_rounding.symmetric_soft_quantizer import \ SymmetricSoftRoundingGPTQ @@ -30,7 +30,6 @@ from tests.keras_tests.utils import get_layers_from_model_by_type -tp = mct.target_platform def model_test(input_shape, num_channels=3, kernel_size=1): diff --git 
a/tests/keras_tests/function_tests/test_graph_max_cut.py b/tests/keras_tests/function_tests/test_graph_max_cut.py index 4a42402bc..780b2e3b4 100644 --- a/tests/keras_tests/function_tests/test_graph_max_cut.py +++ b/tests/keras_tests/function_tests/test_graph_max_cut.py @@ -26,7 +26,6 @@ from model_compression_toolkit.core.keras.reader.reader import model_reader import model_compression_toolkit as mct -tp = mct.target_platform def simple_model(input_shape): diff --git a/tests/keras_tests/function_tests/test_hessian_info_calculator.py b/tests/keras_tests/function_tests/test_hessian_info_calculator.py index ff07cb44d..058938a67 100644 --- a/tests/keras_tests/function_tests/test_hessian_info_calculator.py +++ b/tests/keras_tests/function_tests/test_hessian_info_calculator.py @@ -28,12 +28,11 @@ from model_compression_toolkit.core.keras.data_util import data_gen_to_dataloader from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_configs -tp = mct.target_platform def basic_model(input_shape, layer): diff --git a/tests/keras_tests/function_tests/test_hessian_service.py b/tests/keras_tests/function_tests/test_hessian_service.py index a651674e5..3b33b32c5 100644 --- a/tests/keras_tests/function_tests/test_hessian_service.py +++ b/tests/keras_tests/function_tests/test_hessian_service.py @@ -25,7 +25,7 @@ from model_compression_toolkit.core.keras.data_util import data_gen_to_dataloader from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_configs diff --git a/tests/keras_tests/function_tests/test_hmse_error_method.py b/tests/keras_tests/function_tests/test_hmse_error_method.py index d64dbb5b7..6e068e575 100644 --- a/tests/keras_tests/function_tests/test_hmse_error_method.py +++ b/tests/keras_tests/function_tests/test_hmse_error_method.py @@ -32,7 +32,7 @@ from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, KERAS_KERNEL, BIAS from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import AttributeQuantizationConfig from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras from 
model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO @@ -42,7 +42,6 @@ from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_configs -tp = mct.target_platform def model_gen(): @@ -137,44 +136,44 @@ def _run_node_verification(node_type): _run_node_verification(layers.Dense) def test_pot_threshold_selection_hmse_per_channel(self): - self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO, per_channel=True) + self._setup_with_args(quant_method=mct.QuantizationMethod.POWER_OF_TWO, per_channel=True) calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset, hessian_info_service=self.his, num_hessian_samples=1) self._verify_params_calculation_execution(THRESHOLD) def test_pot_threshold_selection_hmse_per_tensor(self): - self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO, per_channel=False) + self._setup_with_args(quant_method=mct.QuantizationMethod.POWER_OF_TWO, per_channel=False) calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset, hessian_info_service=self.his, num_hessian_samples=1) self._verify_params_calculation_execution(THRESHOLD) def test_symmetric_threshold_selection_hmse_per_channel(self): - self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.SYMMETRIC, per_channel=True) + self._setup_with_args(quant_method=mct.QuantizationMethod.SYMMETRIC, per_channel=True) calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset, hessian_info_service=self.his, num_hessian_samples=1) self._verify_params_calculation_execution(THRESHOLD) def test_symmetric_threshold_selection_hmse_per_tensor(self): - self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.SYMMETRIC, per_channel=False) + self._setup_with_args(quant_method=mct.QuantizationMethod.SYMMETRIC, per_channel=False) calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset, hessian_info_service=self.his, num_hessian_samples=1) self._verify_params_calculation_execution(THRESHOLD) def test_usniform_threshold_selection_hmse_per_channel(self): - self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.UNIFORM, per_channel=True) + self._setup_with_args(quant_method=mct.QuantizationMethod.UNIFORM, per_channel=True) calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset, hessian_info_service=self.his, num_hessian_samples=1) self._verify_params_calculation_execution(RANGE_MAX) def test_uniform_threshold_selection_hmse_per_tensor(self): - self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.UNIFORM, per_channel=False) + self._setup_with_args(quant_method=mct.QuantizationMethod.UNIFORM, per_channel=False) calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset, hessian_info_service=self.his, num_hessian_samples=1) self._verify_params_calculation_execution(RANGE_MAX) def test_threshold_selection_hmse_no_gptq(self): with self.assertRaises(ValueError) as e: - self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.SYMMETRIC, per_channel=True, + 
self._setup_with_args(quant_method=mct.QuantizationMethod.SYMMETRIC, per_channel=True, running_gptq=False) self.assertTrue('The HMSE error method for parameters selection is only supported when running GPTQ ' 'optimization due to long execution time that is not suitable for basic PTQ.' in @@ -201,7 +200,7 @@ def _generate_bn_quantization_tpc(quant_method, per_channel): return tpc - self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.SYMMETRIC, per_channel=True, + self._setup_with_args(quant_method=mct.QuantizationMethod.SYMMETRIC, per_channel=True, tpc_fn=_generate_bn_quantization_tpc, model_gen_fn=no_bn_fusion_model_gen) calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset, hessian_info_service=self.his, num_hessian_samples=1) diff --git a/tests/keras_tests/function_tests/test_kl_error_quantization_configurations.py b/tests/keras_tests/function_tests/test_kl_error_quantization_configurations.py index 4a6a4a018..52d0bbbe3 100644 --- a/tests/keras_tests/function_tests/test_kl_error_quantization_configurations.py +++ b/tests/keras_tests/function_tests/test_kl_error_quantization_configurations.py @@ -38,9 +38,9 @@ def test_run_quantization_config_mbv1(self): def representative_data_gen(): yield [x] - quantizer_methods = [mct.target_platform.QuantizationMethod.POWER_OF_TWO, - mct.target_platform.QuantizationMethod.SYMMETRIC, - mct.target_platform.QuantizationMethod.UNIFORM] + quantizer_methods = [mct.QuantizationMethod.POWER_OF_TWO, + mct.QuantizationMethod.SYMMETRIC, + mct.QuantizationMethod.UNIFORM] quantization_error_methods = [mct.core.QuantizationErrorMethod.KL] relu_bound_to_power_of_2 = [True, False] diff --git a/tests/keras_tests/function_tests/test_layer_fusing.py b/tests/keras_tests/function_tests/test_layer_fusing.py index db548b188..9da236bd5 100644 --- a/tests/keras_tests/function_tests/test_layer_fusing.py +++ b/tests/keras_tests/function_tests/test_layer_fusing.py @@ -8,7 +8,7 @@ from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \ get_op_quantization_configs @@ -23,7 +23,6 @@ keras = tf.keras layers = keras.layers activations = keras.activations -tp = mct.target_platform INPUT_SHAPE = (16, 16, 3) @@ -185,8 +184,8 @@ def test_layer_fusing_1(self): qc = QuantizationConfig(custom_tpc_opset_to_layer={"Conv": CustomOpsetLayers([Conv2D]), "AnyReLU": CustomOpsetLayers([tf.nn.relu, - tp.LayerFilterParams(ReLU, negative_slope=0.0), - tp.LayerFilterParams(Activation, activation="relu")])}) + LayerFilterParams(ReLU, negative_slope=0.0), + LayerFilterParams(Activation, activation="relu")])}) fusion_graph = prepare_graph_with_configs(model, KerasImplementation(), DEFAULT_KERAS_INFO, representative_dataset, lambda name, _tp: get_tpc_1(), diff --git a/tests/keras_tests/function_tests/test_quant_config_filtering.py b/tests/keras_tests/function_tests/test_quant_config_filtering.py index 20b4b33aa..519f25b19 100644 --- 
a/tests/keras_tests/function_tests/test_quant_config_filtering.py +++ b/tests/keras_tests/function_tests/test_quant_config_filtering.py @@ -22,7 +22,7 @@ QuantizationConfigOptions from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import \ get_config_options_by_operators_set -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras from tests.common_tests.helpers.generate_test_tpc import generate_custom_test_tpc from tests.common_tests.helpers.tpcs_for_tests.v3.tpc import get_tpc diff --git a/tests/keras_tests/function_tests/test_quantization_configurations.py b/tests/keras_tests/function_tests/test_quantization_configurations.py index 5b44c1c3b..bbd7fccc4 100644 --- a/tests/keras_tests/function_tests/test_quantization_configurations.py +++ b/tests/keras_tests/function_tests/test_quantization_configurations.py @@ -41,9 +41,9 @@ def test_run_quantization_config(self): def representative_data_gen(): yield [x] - quantizer_methods = [mct.target_platform.QuantizationMethod.POWER_OF_TWO, - mct.target_platform.QuantizationMethod.SYMMETRIC, - mct.target_platform.QuantizationMethod.UNIFORM] + quantizer_methods = [mct.QuantizationMethod.POWER_OF_TWO, + mct.QuantizationMethod.SYMMETRIC, + mct.QuantizationMethod.UNIFORM] quantization_error_methods = [mct.core.QuantizationErrorMethod.MSE, mct.core.QuantizationErrorMethod.NOCLIPPING, diff --git a/tests/keras_tests/function_tests/test_sensitivity_eval_non_suppoerted_output.py b/tests/keras_tests/function_tests/test_sensitivity_eval_non_suppoerted_output.py index 7eebd5687..9a775a209 100644 --- a/tests/keras_tests/function_tests/test_sensitivity_eval_non_suppoerted_output.py +++ b/tests/keras_tests/function_tests/test_sensitivity_eval_non_suppoerted_output.py @@ -20,7 +20,7 @@ from model_compression_toolkit.core import MixedPrecisionQuantizationConfig from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_quantization_parameters diff --git a/tests/keras_tests/function_tests/test_sensitivity_metric_interest_points.py b/tests/keras_tests/function_tests/test_sensitivity_metric_interest_points.py index e46b29e33..4774b4ce4 100644 --- a/tests/keras_tests/function_tests/test_sensitivity_metric_interest_points.py +++ b/tests/keras_tests/function_tests/test_sensitivity_metric_interest_points.py @@ -18,6 +18,9 @@ from keras.applications.densenet import DenseNet121 from keras.applications.mobilenet_v2 import MobileNetV2 +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ + AttachTpcToKeras + if tf.__version__ >= "2.13": from keras.src.engine.input_layer import InputLayer from keras.src.layers.core import TFOpLambda @@ -26,8 +29,6 @@ from keras.layers.core import TFOpLambda from 
model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ - AttachTpcToKeras from model_compression_toolkit.constants import AXIS from model_compression_toolkit.core.common.mixed_precision.distance_weighting import MpDistanceWeighting diff --git a/tests/keras_tests/function_tests/test_set_layer_to_bitwidth.py b/tests/keras_tests/function_tests/test_set_layer_to_bitwidth.py index 29c61dc24..b6fd4560e 100644 --- a/tests/keras_tests/function_tests/test_set_layer_to_bitwidth.py +++ b/tests/keras_tests/function_tests/test_set_layer_to_bitwidth.py @@ -21,6 +21,8 @@ from keras.layers import Conv2D from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ + AttachTpcToKeras if tf.__version__ >= "2.13": from keras.src.engine.input_layer import InputLayer @@ -29,8 +31,6 @@ from mct_quantizers import KerasActivationQuantizationHolder from model_compression_toolkit.core import QuantizationConfig -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ - AttachTpcToKeras from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper from model_compression_toolkit.core.common.mixed_precision.set_layer_to_bitwidth import set_layer_to_bitwidth diff --git a/tests/keras_tests/function_tests/test_symmetric_threshold_selection_weights.py b/tests/keras_tests/function_tests/test_symmetric_threshold_selection_weights.py index 745273eb1..c063c6188 100644 --- a/tests/keras_tests/function_tests/test_symmetric_threshold_selection_weights.py +++ b/tests/keras_tests/function_tests/test_symmetric_threshold_selection_weights.py @@ -22,7 +22,7 @@ from model_compression_toolkit.core import QuantizationConfig, QuantizationErrorMethod from model_compression_toolkit.constants import THRESHOLD from model_compression_toolkit.core.keras.constants import KERNEL -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO @@ -58,7 +58,7 @@ def representative_dataset(): def get_tpc(per_channel): tp = generate_test_tpc(edit_params_dict={ - 'weights_quantization_method': mct.target_platform.QuantizationMethod.SYMMETRIC, + 'weights_quantization_method': mct.QuantizationMethod.SYMMETRIC, 'weights_per_channel_threshold': per_channel}) tpc = generate_keras_tpc(name="symmetric_threshold_selection_test", tpc=tp) diff --git a/tests/keras_tests/function_tests/test_uniform_range_selection_weights.py b/tests/keras_tests/function_tests/test_uniform_range_selection_weights.py index e7087b2b6..57599b0e7 100644 --- a/tests/keras_tests/function_tests/test_uniform_range_selection_weights.py +++ b/tests/keras_tests/function_tests/test_uniform_range_selection_weights.py @@ -22,7 +22,7 @@ from model_compression_toolkit.core import QuantizationConfig, QuantizationErrorMethod from model_compression_toolkit.constants import RANGE_MIN, RANGE_MAX from 
model_compression_toolkit.core.keras.constants import KERNEL -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO @@ -57,7 +57,7 @@ def representative_dataset(): def get_tpc(per_channel): tp = generate_test_tpc({ - 'weights_quantization_method': mct.target_platform.QuantizationMethod.UNIFORM, + 'weights_quantization_method': mct.QuantizationMethod.UNIFORM, 'weights_per_channel_threshold': per_channel}) tpc = generate_keras_tpc(name="uniform_range_selection_test", tpc=tp) diff --git a/tests/keras_tests/function_tests/test_weights_activation_split_substitution.py b/tests/keras_tests/function_tests/test_weights_activation_split_substitution.py index e3f6020cb..8eb93cd72 100644 --- a/tests/keras_tests/function_tests/test_weights_activation_split_substitution.py +++ b/tests/keras_tests/function_tests/test_weights_activation_split_substitution.py @@ -18,6 +18,8 @@ import unittest from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ + AttachTpcToKeras if tf.__version__ >= "2.13": from keras.src.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Dense, BatchNormalization, ReLU, Input @@ -36,15 +38,12 @@ WeightsActivationSplit from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation from model_compression_toolkit.core.common.substitutions.apply_substitutions import substitute -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ - AttachTpcToKeras from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs import model_compression_toolkit as mct from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_configs from tests.keras_tests.tpc_keras import get_tpc_with_activation_mp_keras -tp = mct.target_platform INPUT_SHAPE = (8, 8, 3) diff --git a/tests/keras_tests/non_parallel_tests/test_keras_tpc.py b/tests/keras_tests/non_parallel_tests/test_keras_tpc.py index 973a6a6b5..c6e86bdbb 100644 --- a/tests/keras_tests/non_parallel_tests/test_keras_tpc.py +++ b/tests/keras_tests/non_parallel_tests/test_keras_tpc.py @@ -25,6 +25,13 @@ import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema from model_compression_toolkit.defaultdict import DefaultDict from model_compression_toolkit.core.common import BaseNode +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attribute_filter import Greater, \ + Smaller, GreaterEq, Eq, SmallerEq, Contains +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \ + FrameworkQuantizationCapabilities +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.operations_to_layers import \ + OperationsSetToLayers from 
tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs if version.parse(tf.__version__) >= version.parse("2.13"): @@ -36,17 +43,10 @@ import model_compression_toolkit as mct from model_compression_toolkit.constants import TENSORFLOW -from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import \ - LayerFilterParams -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import \ - Greater, \ - Smaller, GreaterEq, Eq, SmallerEq, Contains from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL, IMX500_TP_MODEL, \ QNNPACK_TP_MODEL, TFLITE_TP_MODEL, KERNEL_ATTR, BIAS_ATTR, KERAS_KERNEL, BIAS, WEIGHTS_N_BITS from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation -tp = mct.target_platform TEST_QC = generate_test_op_qc(**generate_test_attr_configs()) TEST_QCO = schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC])) @@ -116,7 +116,7 @@ def test_get_layers_by_op(self): fw_tp = FrameworkQuantizationCapabilities(hm) with fw_tp: opset_layers = [Conv2D, LayerFilterParams(ReLU, max_value=2)] - tp.OperationsSetToLayers('opsetA', opset_layers) + OperationsSetToLayers('opsetA', opset_layers) self.assertEqual(fw_tp.get_layers_by_opset_name('opsetA'), opset_layers) self.assertEqual(fw_tp.get_layers_by_opset(op_obj), opset_layers) self.assertEqual(fw_tp.get_layers_by_opset_name('nonExistingOpsetName'), None) @@ -137,8 +137,8 @@ def test_get_layers_by_opconcat(self): with fw_tp: opset_layers_a = [Conv2D] opset_layers_b = [LayerFilterParams(ReLU, max_value=2)] - tp.OperationsSetToLayers('opsetA', opset_layers_a) - tp.OperationsSetToLayers('opsetB', opset_layers_b) + OperationsSetToLayers('opsetA', opset_layers_a) + OperationsSetToLayers('opsetB', opset_layers_b) self.assertEqual(fw_tp.get_layers_by_opset(op_concat), opset_layers_a + opset_layers_b) @@ -156,8 +156,8 @@ def test_layer_attached_to_multiple_opsets(self): fw_tp = FrameworkQuantizationCapabilities(hm) with self.assertRaises(Exception) as e: with fw_tp: - tp.OperationsSetToLayers('opsetA', [Conv2D]) - tp.OperationsSetToLayers('opsetB', [Conv2D]) + OperationsSetToLayers('opsetA', [Conv2D]) + OperationsSetToLayers('opsetB', [Conv2D]) self.assertEqual('Found layer Conv2D in more than one OperatorsSet', str(e.exception)) def test_filter_layer_attached_to_multiple_opsets(self): @@ -172,8 +172,8 @@ def test_filter_layer_attached_to_multiple_opsets(self): fw_tp = FrameworkQuantizationCapabilities(hm) with self.assertRaises(Exception) as e: with fw_tp: - tp.OperationsSetToLayers('opsetA', [LayerFilterParams(Activation, activation="relu")]) - tp.OperationsSetToLayers('opsetB', [LayerFilterParams(Activation, activation="relu")]) + OperationsSetToLayers('opsetA', [LayerFilterParams(Activation, activation="relu")]) + OperationsSetToLayers('opsetB', [LayerFilterParams(Activation, activation="relu")]) self.assertEqual('Found layer Activation(activation=relu) in more than one OperatorsSet', str(e.exception)) def test_qco_by_keras_layer(self): @@ -200,13 +200,13 @@ def test_qco_by_keras_layer(self): add_metadata=False, name='test') - tpc_keras = tp.FrameworkQuantizationCapabilities(tpm) + tpc_keras = FrameworkQuantizationCapabilities(tpm) with tpc_keras: - tp.OperationsSetToLayers("conv", [Conv2D], + 
OperationsSetToLayers("conv", [Conv2D], attr_mapping={KERNEL_ATTR: DefaultDict(default_value=KERAS_KERNEL), BIAS_ATTR: DefaultDict(default_value=BIAS)}) - tp.OperationsSetToLayers("tanh", [tf.nn.tanh]) - tp.OperationsSetToLayers("relu", [LayerFilterParams(Activation, activation="relu")]) + OperationsSetToLayers("tanh", [tf.nn.tanh]) + OperationsSetToLayers("relu", [LayerFilterParams(Activation, activation="relu")]) conv_node = get_node(Conv2D(1, 1)) tanh_node = get_node(tf.nn.tanh) @@ -234,7 +234,7 @@ def test_qco_by_keras_layer(self): # tpc_platform_type=None, # operator_set=tuple([schema.OperatorsSet(name="opA")]), # add_metadata=False) - # hm_keras = tp.FrameworkQuantizationCapabilities(hm) + # hm_keras = FrameworkQuantizationCapabilities(hm) # with self.assertRaises(Exception) as e: # with hm_keras: # tp.OperationsSetToLayers("conv", [Conv2D]) @@ -259,11 +259,11 @@ def test_keras_fusing_patterns(self): fusing_patterns=tuple(fusing_patterns), add_metadata=False) - hm_keras = tp.FrameworkQuantizationCapabilities(hm) + hm_keras = FrameworkQuantizationCapabilities(hm) with hm_keras: - tp.OperationsSetToLayers("opA", [Conv2D]) - tp.OperationsSetToLayers("opB", [tf.nn.tanh]) - tp.OperationsSetToLayers("opC", [LayerFilterParams(ReLU, Greater("max_value", 7), negative_slope=0)]) + OperationsSetToLayers("opA", [Conv2D]) + OperationsSetToLayers("opB", [tf.nn.tanh]) + OperationsSetToLayers("opC", [LayerFilterParams(ReLU, Greater("max_value", 7), negative_slope=0)]) fusings = hm_keras.get_fusing_patterns() self.assertEqual(len(fusings), 2) @@ -287,9 +287,9 @@ def test_get_default_op_qc(self): operator_set=tuple([schema.OperatorsSet(name="opA")]), add_metadata=False) - tpc = tp.FrameworkQuantizationCapabilities(tpm) + tpc = FrameworkQuantizationCapabilities(tpm) with tpc: - tp.OperationsSetToLayers("opA", [Conv2D]) + OperationsSetToLayers("opA", [Conv2D]) d_qco = tpc.get_default_op_qc() self.assertEqual(d_qco, TEST_QC) diff --git a/tests/keras_tests/non_parallel_tests/test_lp_search_bitwidth.py b/tests/keras_tests/non_parallel_tests/test_lp_search_bitwidth.py index af560beaf..f4cee33c5 100644 --- a/tests/keras_tests/non_parallel_tests/test_lp_search_bitwidth.py +++ b/tests/keras_tests/non_parallel_tests/test_lp_search_bitwidth.py @@ -39,7 +39,7 @@ from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \ get_op_quantization_configs diff --git a/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py b/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py index abaeda1aa..120ef70a9 100644 --- a/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py +++ b/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py @@ -33,7 +33,7 @@ from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation from model_compression_toolkit.logger import Logger from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers -from 
model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs diff --git a/tests/keras_tests/pruning_tests/test_memory_calculator.py b/tests/keras_tests/pruning_tests/test_memory_calculator.py index 2047c34f8..0d9d56fcc 100644 --- a/tests/keras_tests/pruning_tests/test_memory_calculator.py +++ b/tests/keras_tests/pruning_tests/test_memory_calculator.py @@ -24,7 +24,7 @@ import keras -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \ +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \ AttachTpcToKeras layers = keras.layers diff --git a/tests/keras_tests/tpc_keras.py b/tests/keras_tests/tpc_keras.py index d73e48c0d..ed0f382b9 100644 --- a/tests/keras_tests/tpc_keras.py +++ b/tests/keras_tests/tpc_keras.py @@ -32,12 +32,11 @@ generate_mixed_precision_test_tpc, generate_tpc_with_activation_mp from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc -tp = mct.target_platform def get_tpc(name, weight_bits=8, activation_bits=8, - weights_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO, - activation_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO, + weights_quantization_method=mct.QuantizationMethod.POWER_OF_TWO, + activation_quantization_method=mct.QuantizationMethod.POWER_OF_TWO, per_channel=True): tpc = generate_test_tpc({'weights_n_bits': weight_bits, 'activation_n_bits': activation_bits, diff --git a/tests/keras_tests/trainable_infrastructure_tests/base_keras_trainable_infra_test.py b/tests/keras_tests/trainable_infrastructure_tests/base_keras_trainable_infra_test.py index df4f5df5f..f739052d2 100644 --- a/tests/keras_tests/trainable_infrastructure_tests/base_keras_trainable_infra_test.py +++ b/tests/keras_tests/trainable_infrastructure_tests/base_keras_trainable_infra_test.py @@ -17,8 +17,7 @@ import numpy as np import tensorflow as tf from tensorflow import TensorShape - -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper from mct_quantizers import QuantizationTarget, mark_quantizer from model_compression_toolkit.trainable_infrastructure import BaseKerasTrainableQuantizer diff --git a/tests/keras_tests/trainable_infrastructure_tests/test_keras_trainable_infra_runner.py b/tests/keras_tests/trainable_infrastructure_tests/test_keras_trainable_infra_runner.py index 35366d02c..ef2fd206b 100644 --- a/tests/keras_tests/trainable_infrastructure_tests/test_keras_trainable_infra_runner.py +++ b/tests/keras_tests/trainable_infrastructure_tests/test_keras_trainable_infra_runner.py @@ -16,7 +16,7 @@ import tensorflow as tf from model_compression_toolkit.trainable_infrastructure import TrainingMethod -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from mct_quantizers 
import QuantizationTarget from model_compression_toolkit.qat.keras.quantizer.ste_rounding.symmetric_ste import STEWeightQATQuantizer from model_compression_toolkit.qat.keras.quantizer.ste_rounding.uniform_ste import STEUniformWeightQATQuantizer diff --git a/tests/keras_tests/trainable_infrastructure_tests/trainable_keras/test_keras_base_quantizer.py b/tests/keras_tests/trainable_infrastructure_tests/trainable_keras/test_keras_base_quantizer.py index cdee473c5..252622eeb 100644 --- a/tests/keras_tests/trainable_infrastructure_tests/trainable_keras/test_keras_base_quantizer.py +++ b/tests/keras_tests/trainable_infrastructure_tests/trainable_keras/test_keras_base_quantizer.py @@ -14,7 +14,7 @@ # ============================================================================== from typing import List, Any -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from model_compression_toolkit.trainable_infrastructure import BaseKerasTrainableQuantizer from model_compression_toolkit.trainable_infrastructure.common.base_trainable_quantizer import VariableGroup from model_compression_toolkit.trainable_infrastructure.common.trainable_quantizer_config import \ diff --git a/tests/pytorch_tests/function_tests/get_gptq_config_test.py b/tests/pytorch_tests/function_tests/get_gptq_config_test.py index 8e4f59610..a1e8faf1a 100644 --- a/tests/pytorch_tests/function_tests/get_gptq_config_test.py +++ b/tests/pytorch_tests/function_tests/get_gptq_config_test.py @@ -17,16 +17,15 @@ from torch import nn import model_compression_toolkit as mct +from mct_quantizers import QuantizationMethod from model_compression_toolkit.gptq import get_pytorch_gptq_config, pytorch_gradient_post_training_quantization, RoundingType from model_compression_toolkit.core import CoreConfig, QuantizationConfig, QuantizationErrorMethod from model_compression_toolkit import DefaultDict -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod from model_compression_toolkit.gptq.common.gptq_constants import QUANT_PARAM_LEARNING_STR, MAX_LSB_STR from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest -tp = mct.target_platform class TestModel(nn.Module): diff --git a/tests/pytorch_tests/function_tests/layer_fusing_test.py b/tests/pytorch_tests/function_tests/layer_fusing_test.py index 8065ce500..399b427f8 100644 --- a/tests/pytorch_tests/function_tests/layer_fusing_test.py +++ b/tests/pytorch_tests/function_tests/layer_fusing_test.py @@ -19,10 +19,10 @@ import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema from model_compression_toolkit.core import QuantizationConfig -from model_compression_toolkit.target_platform_capabilities.target_platform import LayerFilterParams from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams from 
model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \ AttachTpcToPytorch from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \ @@ -32,7 +32,6 @@ import model_compression_toolkit as mct -tp = mct.target_platform class BaseLayerFusingTest(BasePytorchTest): diff --git a/tests/pytorch_tests/function_tests/test_fully_quantized_exporter.py b/tests/pytorch_tests/function_tests/test_fully_quantized_exporter.py index 0a9cdf883..3c9b2581c 100644 --- a/tests/pytorch_tests/function_tests/test_fully_quantized_exporter.py +++ b/tests/pytorch_tests/function_tests/test_fully_quantized_exporter.py @@ -27,7 +27,6 @@ import model_compression_toolkit as mct from model_compression_toolkit.core.pytorch.utils import to_torch_tensor -tp = mct.target_platform class TestFullyQuantizedExporter(unittest.TestCase): diff --git a/tests/pytorch_tests/function_tests/test_function_runner.py b/tests/pytorch_tests/function_tests/test_function_runner.py index 0ab7e6214..788c45a34 100644 --- a/tests/pytorch_tests/function_tests/test_function_runner.py +++ b/tests/pytorch_tests/function_tests/test_function_runner.py @@ -14,8 +14,8 @@ # ============================================================================== import unittest +from mct_quantizers import QuantizationMethod from model_compression_toolkit.gptq import RoundingType -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod from tests.pytorch_tests.function_tests.bn_info_collection_test import BNInfoCollectionTest, \ Conv2D2BNInfoCollectionTest, Conv2DBNChainInfoCollectionTest, BNChainInfoCollectionTest, \ BNLayerInfoCollectionTest, INP2BNInfoCollectionTest diff --git a/tests/pytorch_tests/function_tests/test_gptq_soft_quantizer.py b/tests/pytorch_tests/function_tests/test_gptq_soft_quantizer.py index 83a6e7ac0..86a35b5e2 100644 --- a/tests/pytorch_tests/function_tests/test_gptq_soft_quantizer.py +++ b/tests/pytorch_tests/function_tests/test_gptq_soft_quantizer.py @@ -5,8 +5,7 @@ import model_compression_toolkit as mct from model_compression_toolkit.constants import THRESHOLD, MIN_THRESHOLD -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod -from mct_quantizers import PytorchQuantizationWrapper +from mct_quantizers import PytorchQuantizationWrapper, QuantizationMethod from model_compression_toolkit.core.pytorch.constants import KERNEL from model_compression_toolkit.core.pytorch.utils import to_torch_tensor from model_compression_toolkit.gptq.pytorch.quantizer.soft_rounding.symmetric_soft_quantizer import \ @@ -14,7 +13,6 @@ from model_compression_toolkit.trainable_infrastructure import TrainableQuantizerWeightsConfig -tp = mct.target_platform class model_test(torch.nn.Module): diff --git a/tests/pytorch_tests/function_tests/test_pytorch_tpc.py b/tests/pytorch_tests/function_tests/test_pytorch_tpc.py index be89034b3..0b40f03e6 100644 --- a/tests/pytorch_tests/function_tests/test_pytorch_tpc.py +++ b/tests/pytorch_tests/function_tests/test_pytorch_tpc.py @@ -28,18 +28,17 @@ from model_compression_toolkit.defaultdict import DefaultDict from model_compression_toolkit.constants import PYTORCH from model_compression_toolkit.core.common import BaseNode -from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import \ - 
LayerFilterParams -from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import \ - Greater, Smaller, Eq from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL, IMX500_TP_MODEL, \ TFLITE_TP_MODEL, QNNPACK_TP_MODEL, KERNEL_ATTR, WEIGHTS_N_BITS, PYTORCH_KERNEL, BIAS_ATTR, BIAS from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attribute_filter import Greater, \ + Smaller, Eq +from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \ + FrameworkQuantizationCapabilities from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs from tests.pytorch_tests.layer_tests.base_pytorch_layer_test import LayerTestModel -tp = mct.target_platform TEST_QC = generate_test_op_qc(**generate_test_attr_configs()) TEST_QCO = schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC])) @@ -113,7 +112,7 @@ def test_qco_by_pytorch_layer(self): add_metadata=False, name='test') - tpc_pytorch = tp.FrameworkQuantizationCapabilities(tpm) + tpc_pytorch = FrameworkQuantizationCapabilities(tpm) with tpc_pytorch: tp.OperationsSetToLayers("conv", [torch.nn.Conv2d], attr_mapping={KERNEL_ATTR: DefaultDict(default_value=PYTORCH_KERNEL), diff --git a/tests/pytorch_tests/function_tests/test_quantization_configurations.py b/tests/pytorch_tests/function_tests/test_quantization_configurations.py index 4f59c267c..c43c5c67f 100644 --- a/tests/pytorch_tests/function_tests/test_quantization_configurations.py +++ b/tests/pytorch_tests/function_tests/test_quantization_configurations.py @@ -50,9 +50,9 @@ def test_run_quantization_config(self): def representative_data_gen(): yield [x] - quantizer_methods = [mct.target_platform.QuantizationMethod.POWER_OF_TWO, - mct.target_platform.QuantizationMethod.SYMMETRIC, - mct.target_platform.QuantizationMethod.UNIFORM] + quantizer_methods = [mct.QuantizationMethod.POWER_OF_TWO, + mct.QuantizationMethod.SYMMETRIC, + mct.QuantizationMethod.UNIFORM] quantization_error_methods = [mct.core.QuantizationErrorMethod.MSE, mct.core.QuantizationErrorMethod.NOCLIPPING, diff --git a/tests/pytorch_tests/model_tests/feature_models/bn_attributes_quantization_test.py b/tests/pytorch_tests/model_tests/feature_models/bn_attributes_quantization_test.py index 2d78dd778..9e5bb2000 100644 --- a/tests/pytorch_tests/model_tests/feature_models/bn_attributes_quantization_test.py +++ b/tests/pytorch_tests/model_tests/feature_models/bn_attributes_quantization_test.py @@ -30,7 +30,6 @@ from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest from tests.pytorch_tests.utils import get_layers_from_model_by_type -tp = mct.target_platform def _generate_bn_quantized_tpm(quantize_linear): diff --git a/tests/pytorch_tests/model_tests/feature_models/compute_max_cut_test.py b/tests/pytorch_tests/model_tests/feature_models/compute_max_cut_test.py index 406957445..e72c55187 100644 --- a/tests/pytorch_tests/model_tests/feature_models/compute_max_cut_test.py +++ b/tests/pytorch_tests/model_tests/feature_models/compute_max_cut_test.py @@ -21,7 +21,6 @@ from model_compression_toolkit.constants import PYTORCH from mct_quantizers.pytorch.metadata import 
get_metadata -tp = mct.target_platform class MaxCutModel(nn.Module): diff --git a/tests/pytorch_tests/model_tests/feature_models/const_quantization_test.py b/tests/pytorch_tests/model_tests/feature_models/const_quantization_test.py index 56b8cdbab..3de54ff86 100644 --- a/tests/pytorch_tests/model_tests/feature_models/const_quantization_test.py +++ b/tests/pytorch_tests/model_tests/feature_models/const_quantization_test.py @@ -28,9 +28,7 @@ from tests.common_tests.helpers.tensors_compare import cosine_similarity from tests.pytorch_tests.utils import get_layers_from_model_by_type from tests.common_tests.helpers.generate_test_tpc import generate_test_attr_configs, DEFAULT_WEIGHT_ATTR_CONFIG -from mct_quantizers import PytorchQuantizationWrapper - -tp = mct.target_platform +from mct_quantizers import PytorchQuantizationWrapper, QuantizationMethod class ConstQuantizationNet(nn.Module): @@ -231,9 +229,8 @@ def get_core_config(self): {"WeightQuant": CustomOpsetLayers([torch.Tensor.expand, torch.cat])})) def get_tpc(self): - tp = mct.target_platform attr_cfg = generate_test_attr_configs() - base_cfg = schema.OpQuantizationConfig(activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO, + base_cfg = schema.OpQuantizationConfig(activation_quantization_method=QuantizationMethod.POWER_OF_TWO, enable_activation_quantization=True, activation_n_bits=32, supported_input_activation_n_bits=32, @@ -251,7 +248,7 @@ def get_tpc(self): default_weight_attr_config=base_cfg.default_weight_attr_config.clone_and_edit( enable_weights_quantization=True, weights_per_channel_threshold=False, - weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO)) + weights_quantization_method=QuantizationMethod.POWER_OF_TWO)) const_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([const_config])) tpc = schema.TargetPlatformCapabilities( diff --git a/tests/pytorch_tests/model_tests/feature_models/const_representation_test.py b/tests/pytorch_tests/model_tests/feature_models/const_representation_test.py index c128d963b..70dcbb9de 100644 --- a/tests/pytorch_tests/model_tests/feature_models/const_representation_test.py +++ b/tests/pytorch_tests/model_tests/feature_models/const_representation_test.py @@ -22,7 +22,6 @@ from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc from tests.common_tests.helpers.tensors_compare import cosine_similarity -tp = mct.target_platform class ConstRepresentationNet(nn.Module): diff --git a/tests/pytorch_tests/model_tests/feature_models/constant_conv_substitution_test.py b/tests/pytorch_tests/model_tests/feature_models/constant_conv_substitution_test.py index 8558ca28e..9645422f2 100644 --- a/tests/pytorch_tests/model_tests/feature_models/constant_conv_substitution_test.py +++ b/tests/pytorch_tests/model_tests/feature_models/constant_conv_substitution_test.py @@ -22,7 +22,6 @@ from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc import numpy as np -tp = mct.target_platform class BaseConstantConvSubstitutionTest(BasePytorchFeatureNetworkTest): diff --git a/tests/pytorch_tests/model_tests/feature_models/conv2d_replacement_test.py b/tests/pytorch_tests/model_tests/feature_models/conv2d_replacement_test.py index 08f47ace3..d5fbd81db 100644 --- a/tests/pytorch_tests/model_tests/feature_models/conv2d_replacement_test.py +++ b/tests/pytorch_tests/model_tests/feature_models/conv2d_replacement_test.py @@ -21,7 +21,6 @@ from 
model_compression_toolkit.core.common.network_editors.actions import EditRule, ReplaceLayer from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest -tp = mct.target_platform def get_new_weights_for_identity_dw_conv2d_layer(weights={}, activation_quantization_params={}, **kwargs): diff --git a/tests/pytorch_tests/model_tests/feature_models/gptq_test.py b/tests/pytorch_tests/model_tests/feature_models/gptq_test.py index 9349e12f1..19d81240e 100644 --- a/tests/pytorch_tests/model_tests/feature_models/gptq_test.py +++ b/tests/pytorch_tests/model_tests/feature_models/gptq_test.py @@ -18,6 +18,7 @@ import torch.nn as nn import model_compression_toolkit as mct +from mct_quantizers import QuantizationMethod from model_compression_toolkit import DefaultDict from model_compression_toolkit.constants import GPTQ_HESSIAN_NUM_SAMPLES from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, torch_tensor_to_numpy, set_model @@ -25,14 +26,11 @@ GPTQHessianScoresConfig, GradualActivationQuantizationConfig from model_compression_toolkit.gptq.common.gptq_constants import QUANT_PARAM_LEARNING_STR, MAX_LSB_STR from model_compression_toolkit.gptq.pytorch.gptq_loss import multiple_tensors_mse_loss -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest from tests.pytorch_tests.utils import extract_model_weights -tp = mct.target_platform - class TestModel(nn.Module): def __init__(self): diff --git a/tests/pytorch_tests/model_tests/feature_models/linear_collapsing_test.py b/tests/pytorch_tests/model_tests/feature_models/linear_collapsing_test.py index 24c28eb1e..125c0deec 100644 --- a/tests/pytorch_tests/model_tests/feature_models/linear_collapsing_test.py +++ b/tests/pytorch_tests/model_tests/feature_models/linear_collapsing_test.py @@ -22,7 +22,6 @@ from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc from tests.common_tests.helpers.tensors_compare import cosine_similarity -tp = mct.target_platform class BaseConv2DCollapsingTest(BasePytorchFeatureNetworkTest, ABC): diff --git a/tests/pytorch_tests/model_tests/feature_models/lut_quantizer_test.py b/tests/pytorch_tests/model_tests/feature_models/lut_quantizer_test.py index 340920081..9a63ac6a9 100644 --- a/tests/pytorch_tests/model_tests/feature_models/lut_quantizer_test.py +++ b/tests/pytorch_tests/model_tests/feature_models/lut_quantizer_test.py @@ -24,7 +24,6 @@ from tests.pytorch_tests.tpc_pytorch import get_pytorch_test_tpc_dict from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest -tp = mct.target_platform def get_uniform_weights(out_channels, in_channels, kernel): @@ -80,7 +79,7 @@ class LUTWeightsQuantizerTest(BasePytorchTest): We check that the weights have different values for conv1 and conv2, and that conv2 and conv3 have the same values. 
""" - def __init__(self, unit_test, weights_n_bits=4, quant_method=tp.QuantizationMethod.LUT_POT_QUANTIZER): + def __init__(self, unit_test, weights_n_bits=4, quant_method=QuantizationMethod.LUT_POT_QUANTIZER): super().__init__(unit_test) self.weights_n_bits = weights_n_bits self.quant_method = quant_method @@ -134,7 +133,7 @@ def __init__(self, unit_test, activation_n_bits=4): def get_tpc(self): return get_pytorch_test_tpc_dict( tpc=generate_test_tpc({"activation_n_bits": self.activation_n_bits, - "activation_quantization_method": tp.QuantizationMethod.LUT_POT_QUANTIZER}), + "activation_quantization_method": QuantizationMethod.LUT_POT_QUANTIZER}), test_name='lut_quantizer_test', ftp_name='lut_quantizer_pytorch_test') diff --git a/tests/pytorch_tests/model_tests/feature_models/metadata_test.py b/tests/pytorch_tests/model_tests/feature_models/metadata_test.py index f2accc9a4..ad88fdba3 100644 --- a/tests/pytorch_tests/model_tests/feature_models/metadata_test.py +++ b/tests/pytorch_tests/model_tests/feature_models/metadata_test.py @@ -28,8 +28,6 @@ import tempfile import os -tp = mct.target_platform - class DummyNet(nn.Module): def __init__(self): diff --git a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py index adefc81ab..dfc9edb13 100644 --- a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py +++ b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py @@ -24,7 +24,6 @@ from model_compression_toolkit.core.pytorch.reader.node_holders import DummyPlaceHolder from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, PYTORCH_KERNEL, \ BIAS -from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities, OperationsSetToLayers from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OperatorsSet, \ QuantizationConfigOptions from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers diff --git a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py index 03ddf2a73..f09fb5b53 100644 --- a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py +++ b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py @@ -23,8 +23,6 @@ from model_compression_toolkit.core.common.user_info import UserInformation from model_compression_toolkit.core.pytorch.constants import BIAS from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, PYTORCH_KERNEL, BIAS_ATTR -from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities, \ - OperationsSetToLayers from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OperatorsSet, \ QuantizationConfigOptions from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers @@ -35,7 +33,6 @@ from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest import model_compression_toolkit as mct -tp = mct.target_platform """ This test checks the Mixed Precision feature. 
diff --git a/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py b/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py
index 17c55037f..db35f94e0 100644
--- a/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py
@@ -20,7 +20,6 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 
diff --git a/tests/pytorch_tests/model_tests/feature_models/permute_substitution_test.py b/tests/pytorch_tests/model_tests/feature_models/permute_substitution_test.py
index d65b1c156..a1bb5a481 100644
--- a/tests/pytorch_tests/model_tests/feature_models/permute_substitution_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/permute_substitution_test.py
@@ -21,9 +21,6 @@
 
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 
-tp = mct.target_platform
-
-
 class BasePermuteSubstitutionTest(BasePytorchFeatureNetworkTest):
 
     def __init__(self, unit_test):
diff --git a/tests/pytorch_tests/model_tests/feature_models/qat_test.py b/tests/pytorch_tests/model_tests/feature_models/qat_test.py
index c0c697b41..43c5f3b37 100644
--- a/tests/pytorch_tests/model_tests/feature_models/qat_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/qat_test.py
@@ -94,8 +94,8 @@ def repr_datagen():
 
 class QuantizationAwareTrainingTest(BasePytorchFeatureNetworkTest):
 
     def __init__(self, unit_test, weight_bits=2, activation_bits=4,
-                 weights_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO,
-                 activation_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO,
+                 weights_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
+                 activation_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
                  training_method=TrainingMethod.STE,
                  finalize=False, test_loading=False):
diff --git a/tests/pytorch_tests/model_tests/feature_models/relu_replacement_test.py b/tests/pytorch_tests/model_tests/feature_models/relu_replacement_test.py
index 6614ee888..cd8637aec 100644
--- a/tests/pytorch_tests/model_tests/feature_models/relu_replacement_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/relu_replacement_test.py
@@ -21,8 +21,6 @@
 from model_compression_toolkit.core.common.network_editors.node_filters import NodeNameFilter, NodeTypeFilter
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 
-tp = mct.target_platform
-
 
 class Identity(torch.nn.Module):
     """
diff --git a/tests/pytorch_tests/model_tests/feature_models/reshape_substitution_test.py b/tests/pytorch_tests/model_tests/feature_models/reshape_substitution_test.py
index d0b22afd7..1c22ae819 100644
--- a/tests/pytorch_tests/model_tests/feature_models/reshape_substitution_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/reshape_substitution_test.py
@@ -21,9 +21,6 @@
 
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 
-tp = mct.target_platform
-
-
 class BaseReshapeSubstitutionTest(BasePytorchFeatureNetworkTest):
 
     def __init__(self, unit_test):
diff --git a/tests/pytorch_tests/model_tests/feature_models/residual_collapsing_test.py b/tests/pytorch_tests/model_tests/feature_models/residual_collapsing_test.py
index dc69ddf2a..39a30b511 100644
--- a/tests/pytorch_tests/model_tests/feature_models/residual_collapsing_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/residual_collapsing_test.py
@@ -22,8 +22,6 @@
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
 
-tp = mct.target_platform
-
 
 class BaseResidualCollapsingTest(BasePytorchFeatureNetworkTest):
diff --git a/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py b/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py
index 4ed30dbe6..79e5b1878 100644
--- a/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py
@@ -20,13 +20,12 @@
 import torch
 from torch.nn import Module
 
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core import FrameworkInfo, CoreConfig
 from model_compression_toolkit.core.common import Graph
 from model_compression_toolkit.core.common.statistics_correction.apply_second_moment_correction_to_graph import \
     quantized_model_builder_for_second_moment_correction
 from model_compression_toolkit.core.common.visualization.tensorboard_writer import init_tensorboard_writer
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.pytorch.constants import EPSILON_VAL, GAMMA, BETA, MOVING_MEAN, MOVING_VARIANCE
 from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
 from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
@@ -36,6 +35,8 @@
 from model_compression_toolkit.core.runner import core_runner
 from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
+    FrameworkQuantizationCapabilities
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 from tests.pytorch_tests.tpc_pytorch import get_pytorch_test_tpc_dict
diff --git a/tests/pytorch_tests/model_tests/feature_models/symmetric_activation_test.py b/tests/pytorch_tests/model_tests/feature_models/symmetric_activation_test.py
index b91a389e4..3c2139c51 100644
--- a/tests/pytorch_tests/model_tests/feature_models/symmetric_activation_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/symmetric_activation_test.py
@@ -17,8 +17,8 @@
 import torch
 
 import model_compression_toolkit as mct
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import THRESHOLD
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.core.common.user_info import UserInformation
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
diff --git a/tests/pytorch_tests/model_tests/feature_models/tpc_test.py b/tests/pytorch_tests/model_tests/feature_models/tpc_test.py
index aa65d3e66..bdebfb5b9 100644
--- a/tests/pytorch_tests/model_tests/feature_models/tpc_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/tpc_test.py
@@ -27,8 +27,6 @@
 import tempfile
 import os
 
-tp = mct.target_platform
-
 
 class DummyNet(nn.Module):
     def __init__(self):
diff --git a/tests/pytorch_tests/model_tests/feature_models/uniform_activation_test.py b/tests/pytorch_tests/model_tests/feature_models/uniform_activation_test.py
index b55eec63e..57b9e2285 100644
--- a/tests/pytorch_tests/model_tests/feature_models/uniform_activation_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/uniform_activation_test.py
@@ -17,7 +17,7 @@
 import torch
 
 import model_compression_toolkit as mct
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core.common.user_info import UserInformation
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
diff --git a/tests/pytorch_tests/model_tests/test_feature_models_runner.py b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
index 685403b42..c314e5048 100644
--- a/tests/pytorch_tests/model_tests/test_feature_models_runner.py
+++ b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
@@ -26,7 +26,6 @@
 from model_compression_toolkit.gptq.common.gptq_config import RoundingType
 from model_compression_toolkit.gptq.pytorch.gptq_loss import sample_layer_attention_loss
 from model_compression_toolkit.target_platform_capabilities import constants as C
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.trainable_infrastructure import TrainingMethod
 from tests.pytorch_tests.model_tests.feature_models.activation_16bit_test import Activation16BitTest, \
     Activation16BitMixedPrecisionTest
@@ -416,7 +415,7 @@ def test_lut_weights_quantizer(self):
         values.
""" LUTWeightsQuantizerTest(self).run_test() - LUTWeightsQuantizerTest(self, quant_method=mct.target_platform.QuantizationMethod.LUT_SYM_QUANTIZER).run_test() + LUTWeightsQuantizerTest(self, quant_method=mct.QuantizationMethod.LUT_SYM_QUANTIZER).run_test() def test_lut_activation_quantizer(self): """ @@ -732,7 +731,7 @@ def test_qat(self): """ QuantizationAwareTrainingTest(self).run_test() QuantizationAwareTrainingTest(self, finalize=True).run_test() - _method = mct.target_platform.QuantizationMethod.SYMMETRIC + _method = mct.QuantizationMethod.SYMMETRIC QuantizationAwareTrainingTest(self, weights_quantization_method=_method, activation_quantization_method=_method @@ -741,7 +740,7 @@ def test_qat(self): weights_quantization_method=_method, activation_quantization_method=_method, finalize=True).run_test() - _method = mct.target_platform.QuantizationMethod.UNIFORM + _method = mct.QuantizationMethod.UNIFORM QuantizationAwareTrainingTest(self, weights_quantization_method=_method, activation_quantization_method=_method @@ -751,18 +750,18 @@ def test_qat(self): activation_quantization_method=_method, finalize=True).run_test() QuantizationAwareTrainingTest(self, - weights_quantization_method=mct.target_platform.QuantizationMethod.SYMMETRIC, - activation_quantization_method=mct.target_platform.QuantizationMethod.SYMMETRIC, + weights_quantization_method=mct.QuantizationMethod.SYMMETRIC, + activation_quantization_method=mct.QuantizationMethod.SYMMETRIC, training_method=TrainingMethod.LSQ, finalize=True).run_test() QuantizationAwareTrainingTest(self, - weights_quantization_method=mct.target_platform.QuantizationMethod.UNIFORM, - activation_quantization_method=mct.target_platform.QuantizationMethod.UNIFORM, + weights_quantization_method=mct.QuantizationMethod.UNIFORM, + activation_quantization_method=mct.QuantizationMethod.UNIFORM, training_method=TrainingMethod.LSQ, finalize=True).run_test() QuantizationAwareTrainingTest(self, - weights_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO, - activation_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO, + weights_quantization_method=mct.QuantizationMethod.POWER_OF_TWO, + activation_quantization_method=mct.QuantizationMethod.POWER_OF_TWO, training_method=TrainingMethod.LSQ, finalize=True).run_test() QuantizationAwareTrainingQuantizerHolderTest(self).run_test() diff --git a/tests/pytorch_tests/tpc_pytorch.py b/tests/pytorch_tests/tpc_pytorch.py index d4aac9470..c31f60b12 100644 --- a/tests/pytorch_tests/tpc_pytorch.py +++ b/tests/pytorch_tests/tpc_pytorch.py @@ -16,8 +16,6 @@ import model_compression_toolkit as mct from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc -tp = mct.target_platform - def get_pytorch_test_tpc_dict(tpc, test_name, ftp_name): return { diff --git a/tests/pytorch_tests/trainable_infrastructure_tests/base_pytorch_trainable_infra_test.py b/tests/pytorch_tests/trainable_infrastructure_tests/base_pytorch_trainable_infra_test.py index 8baa2fcf4..d1076e6a8 100644 --- a/tests/pytorch_tests/trainable_infrastructure_tests/base_pytorch_trainable_infra_test.py +++ b/tests/pytorch_tests/trainable_infrastructure_tests/base_pytorch_trainable_infra_test.py @@ -19,7 +19,7 @@ import torch import torch.nn as nn -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from mct_quantizers import mark_quantizer, QuantizationTarget, PytorchQuantizationWrapper from 
model_compression_toolkit.trainable_infrastructure.common.trainable_quantizer_config import \ TrainableQuantizerWeightsConfig, TrainableQuantizerActivationConfig diff --git a/tests/pytorch_tests/trainable_infrastructure_tests/test_pytorch_trainable_infra_runner.py b/tests/pytorch_tests/trainable_infrastructure_tests/test_pytorch_trainable_infra_runner.py index e62dd06d7..a95ee4644 100644 --- a/tests/pytorch_tests/trainable_infrastructure_tests/test_pytorch_trainable_infra_runner.py +++ b/tests/pytorch_tests/trainable_infrastructure_tests/test_pytorch_trainable_infra_runner.py @@ -16,7 +16,7 @@ import unittest -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from mct_quantizers import QuantizationTarget from model_compression_toolkit.qat.pytorch.quantizer.ste_rounding.symmetric_ste import STEWeightQATQuantizer from model_compression_toolkit.trainable_infrastructure import TrainingMethod diff --git a/tests/pytorch_tests/trainable_infrastructure_tests/trainable_pytorch/test_pytorch_base_quantizer.py b/tests/pytorch_tests/trainable_infrastructure_tests/trainable_pytorch/test_pytorch_base_quantizer.py index 461080e86..19edf364c 100644 --- a/tests/pytorch_tests/trainable_infrastructure_tests/trainable_pytorch/test_pytorch_base_quantizer.py +++ b/tests/pytorch_tests/trainable_infrastructure_tests/trainable_pytorch/test_pytorch_base_quantizer.py @@ -24,7 +24,7 @@ BasePytorchTrainableQuantizer from tests.pytorch_tests.trainable_infrastructure_tests.base_pytorch_trainable_infra_test import \ BasePytorchInfrastructureTest, ZeroWeightsQuantizer, ZeroActivationsQuantizer -from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod +from mct_quantizers import QuantizationMethod from model_compression_toolkit.trainable_infrastructure.pytorch.activation_quantizers import ( STESymmetricActivationTrainableQuantizer, STEUniformActivationTrainableQuantizer)
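Editor's note (not part of the diff): every hunk above applies the same mechanical migration away from the removed model_compression_toolkit.target_platform_capabilities.target_platform namespace. A minimal before/after sketch of the import surface, using only module paths and symbols that appear in the hunks above; the variable names are illustrative, not from the PR:

# Before this PR (old import surface, deleted by the hunks above):
#     import model_compression_toolkit as mct
#     tp = mct.target_platform
#     from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
#     method = tp.QuantizationMethod.POWER_OF_TWO

# After this PR:
import model_compression_toolkit as mct
from mct_quantizers import QuantizationMethod  # the enum now comes from mct_quantizers
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
    FrameworkQuantizationCapabilities  # moved out of the deleted target_platform package

# Both spellings appear in the migrated tests:
method = QuantizationMethod.POWER_OF_TWO
same_method = mct.QuantizationMethod.POWER_OF_TWO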