Commit 7811bb7
Remove max tensor functions from coverage.
Add TODO for future PR.
elad-c committed Dec 25, 2024
1 parent 54ce883 commit 7811bb7
Showing 4 changed files with 7 additions and 0 deletions.
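
For context on what "remove from coverage" means here: under coverage.py's default exclusion rules, a line containing `# pragma: no cover` is omitted from the coverage report, and when the marker is placed on a line that opens a block (such as a `def`), the whole block is excluded. A minimal sketch with illustrative names (not MCT code):

def legacy_output_size(sizes):  # pragma: no cover
    # Excluded from the coverage report: the pragma sits on the line that
    # introduces the block, so coverage.py skips the whole function body.
    return sum(sizes)

def maxcut_size(cuts):
    # Measured by coverage as usual.
    return max(sum(cut) for cut in cuts)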
@@ -154,6 +154,9 @@ def solve(self, estimate_factor: float, iter_limit: int = 500) -> Tuple[List[Bas
             cut_route = routes[next_cut]
 
             if next_cut == self.target_cut:
+                # TODO maxcut: Why do we filter the cuts (cut_route) but not the max cut size (cut_cost)?
+                #  This is a mismatch between max_cut and max(cuts).
+                #  Also, the unfiltered cut_route seems correct, including the input and output tensor sizes of the current op.
                 return self._remove_dummys_from_path(cut_route[0].op_order), cut_cost,\
                        list(set([self._remove_dummys_from_cut(self.clean_memory_for_next_step(c)) for c in cut_route]))

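A toy illustration of the mismatch the TODO above points at (hypothetical numbers and helper, not the MCT API): the cost is taken from the unfiltered route, while the returned cuts are filtered, so the maximum over the returned cuts can disagree with the reported cost.

raw_cuts = [10, 7, 12, 9]        # per-step memory along the route, dummy tensors included

def remove_dummies(cut_size):    # hypothetical stand-in for _remove_dummys_from_cut
    return cut_size - 2          # pretend dummy tensors account for 2 units

cut_cost = max(raw_cuts)                               # 12, computed on the unfiltered route
filtered_cuts = [remove_dummies(c) for c in raw_cuts]
print(cut_cost, max(filtered_cuts))                    # 12 vs 10: max_cut != max(cuts)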
@@ -179,6 +179,7 @@ def compute_activation_output_maxcut_sizes(graph: Graph) -> Tuple[np.ndarray, np
     return activation_outputs_bytes, activation_outputs
 
 
+# pragma: no cover
 def compute_activation_output_sizes(graph: Graph) -> Tuple[np.ndarray, np.ndarray]:
     """
     Computes an array of the respective output tensor size and an array of the output tensor size in bytes for
@@ -169,6 +169,7 @@ def activation_maxcut_size_utilization(mp_cfg: List[int],
     return np.array(activation_cut_memory)
 
 
+# pragma: no cover
 def activation_output_size_utilization(mp_cfg: List[int],
                                        graph: Graph,
                                        fw_info: FrameworkInfo,
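The two functions above compute activation memory for a mixed-precision configuration in different ways. Roughly, as a hedged sketch with made-up sizes (not the MCT implementation): the output-size metric scores each node by its own output tensor, while the maxcut metric scores each cut by the tensors that are alive at the same time.

import numpy as np

# Hypothetical activation sizes in bytes per node and per bit-width candidate.
act_bytes = {"conv1": {4: 512, 8: 1024},
             "conv2": {4: 256, 8: 512}}

def output_size_utilization(cfg):
    # One entry per node: the size of that node's own output tensor.
    return np.array([act_bytes[node][bits] for node, bits in cfg.items()])

def maxcut_size_utilization(cfg, cuts):
    # One entry per cut: the total size of tensors that are live simultaneously,
    # so peak activation memory is the maximum over cuts.
    return np.array([sum(act_bytes[node][cfg[node]] for node in cut) for cut in cuts])

cfg = {"conv1": 8, "conv2": 4}
print(output_size_utilization(cfg))                                   # [1024  256]
print(maxcut_size_utilization(cfg, [{"conv1"}, {"conv1", "conv2"}]))  # [1024 1280]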
@@ -112,6 +112,7 @@ def compare(self, quantized_models, float_model, input_x=None, quantization_info
 class MixedPrecisionActivationSearch4BitFunctional(MixedPrecisionActivationBaseTest):
     def __init__(self, unit_test):
         super().__init__(unit_test)
+        # TODO maxcut: verify expected_config change is reasonable (was [1, 4, 4, 1])
         self.expected_config = [2, 5, 5, 1]
 
     def get_resource_utilization(self):
@@ -127,6 +128,7 @@ def compare(self, quantized_models, float_model, input_x=None, quantization_info
 class MixedPrecisionActivationMultipleInputs(MixedPrecisionActivationBaseTest):
     def __init__(self, unit_test):
         super().__init__(unit_test)
+        # TODO maxcut: verify expected_config change is reasonable (was all zeros)
         self.expected_config = [0, 0, 0, 0, 0, 0, 1, 0, 1]  # expected config for this test.
         self.num_calibration_iter = 3
         self.val_batch_size = 2
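For readers unfamiliar with these tests: expected_config is a list of candidate indices, one per configurable layer, and switching the activation metric to maxcut can legitimately move the search to different candidates. A hedged sketch of how such indices are typically decoded (the candidate list below is made up):

# Hypothetical (weights_bits, activation_bits) candidates, ordered as the search sees them.
candidates = [(8, 8), (8, 4), (4, 8), (4, 4), (2, 8), (2, 4)]

expected_config = [2, 5, 5, 1]  # one candidate index per configurable layer
chosen = [candidates[i] for i in expected_config]
print(chosen)  # [(4, 8), (2, 4), (2, 4), (8, 4)]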
