From 7811bb7a93de104dd97022d65cf1c40a7be04c51 Mon Sep 17 00:00:00 2001 From: elad-c Date: Wed, 25 Dec 2024 09:47:44 +0200 Subject: [PATCH] Remove max tensor functions from coverage. Add TODO for future PR. --- .../core/common/graph/memory_graph/max_cut_astar.py | 3 +++ .../resource_utilization_tools/resource_utilization_data.py | 1 + .../mixed_precision/resource_utilization_tools/ru_methods.py | 1 + .../feature_models/mixed_precision_activation_test.py | 2 ++ 4 files changed, 7 insertions(+) diff --git a/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py b/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py index 539e1e5f8..819898e4c 100644 --- a/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +++ b/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py @@ -154,6 +154,9 @@ def solve(self, estimate_factor: float, iter_limit: int = 500) -> Tuple[List[Bas cut_route = routes[next_cut] if next_cut == self.target_cut: + # TODO maxcut: Why do we filter the cuts (cut_route) but not the max cut size (cut_cost)? + # This is a mismatch between max_cut and max(cuts). + # Also, unfiltered cut_route seems perfect, including input and output tensor sizes of current op. 
return self._remove_dummys_from_path(cut_route[0].op_order), cut_cost,\ list(set([self._remove_dummys_from_cut(self.clean_memory_for_next_step(c)) for c in cut_route])) diff --git a/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py b/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py index 1eb511439..9048203a0 100644 --- a/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +++ b/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py @@ -179,6 +179,7 @@ def compute_activation_output_maxcut_sizes(graph: Graph) -> Tuple[np.ndarray, np return activation_outputs_bytes, activation_outputs +# pragma: no cover def compute_activation_output_sizes(graph: Graph) -> Tuple[np.ndarray, np.ndarray]: """ Computes an array of the respective output tensor size and an array of the output tensor size in bytes for diff --git a/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py b/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py index 34587372b..61e46c235 100644 --- a/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +++ b/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py @@ -169,6 +169,7 @@ def activation_maxcut_size_utilization(mp_cfg: List[int], return np.array(activation_cut_memory) +# pragma: no cover def activation_output_size_utilization(mp_cfg: List[int], graph: Graph, fw_info: FrameworkInfo, diff --git a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py index 04ac0e95e..8b10ac7e2 100644 --- 
a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py +++ b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py @@ -112,6 +112,7 @@ def compare(self, quantized_models, float_model, input_x=None, quantization_info class MixedPrecisionActivationSearch4BitFunctional(MixedPrecisionActivationBaseTest): def __init__(self, unit_test): super().__init__(unit_test) + # TODO maxcut: verify expected_config change is reasonable (was [1, 4, 4, 1]) self.expected_config = [2, 5, 5, 1] def get_resource_utilization(self): @@ -127,6 +128,7 @@ def compare(self, quantized_models, float_model, input_x=None, quantization_info class MixedPrecisionActivationMultipleInputs(MixedPrecisionActivationBaseTest): def __init__(self, unit_test): super().__init__(unit_test) + # TODO maxcut: verify expected_config change is reasonable (was all zeros) self.expected_config = [0, 0, 0, 0, 0, 0, 1, 0, 1] # expected config for this test. self.num_calibration_iter = 3 self.val_batch_size = 2