diff --git a/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py b/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py index 819898e4c..cfab0ce04 100644 --- a/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py +++ b/model_compression_toolkit/core/common/graph/memory_graph/max_cut_astar.py @@ -181,7 +181,8 @@ def solve(self, estimate_factor: float, iter_limit: int = 500) -> Tuple[List[Bas cost = self.accumulate(cut_cost, c.memory_size()) if c not in open_list: self._update_expanded_node(c, cost, cut_route, open_list, costs, routes) - elif self.ordering(cost, costs[c]): + # TODO maxcut: this isn't covered in the coverage test. check if needed and remove no cover + elif self.ordering(cost, costs[c]): # pragma: no cover # If we already saw this cut during the search with a larger cost, then we want to update the order # of the schedule in the cut # Remove call - removes the cut with the same memory elements but different ordering from open @@ -190,7 +191,8 @@ def solve(self, estimate_factor: float, iter_limit: int = 500) -> Tuple[List[Bas self._update_expanded_node(c, cost, cut_route, open_list, costs, routes) # Halt or No Solution - return None, 0, None + # TODO maxcut: this isn't covered in the coverage test. check if needed and remove no cover + return None, 0, None # pragma: no cover @staticmethod def _update_expanded_node(cut: Cut, cost: float, route: List[Cut], open_list: List[Cut], @@ -351,7 +353,8 @@ def ordering(cost_1, cost_2) -> bool: Returns: True if the first cost is smaller than the second one, else otherwise. """ - return cost_1 < cost_2 + # TODO maxcut: this isn't covered in the coverage test. check if needed and remove no cover + return cost_1 < cost_2 # pragma: no cover def estimate(self, cut: Cut, estimate_factor: float) -> float: """ @@ -379,9 +382,10 @@ def get_init_estimate_factor(memory_graph: MemoryGraph) -> float: Returns: An initial estimate value. """ - l_bound = memory_graph.memory_lbound_single_op - u_bound = 2 * sum([t.total_size for t in memory_graph.b_nodes]) - l_bound - return (u_bound + l_bound) / 2 + # TODO maxcut: this isn't covered in the coverage test. check if needed and remove no cover + l_bound = memory_graph.memory_lbound_single_op # pragma: no cover + u_bound = 2 * sum([t.total_size for t in memory_graph.b_nodes]) - l_bound # pragma: no cover + return (u_bound + l_bound) / 2 # pragma: no cover @staticmethod def _remove_dummys_from_path(path: List[BaseNode]) -> List[BaseNode]: diff --git a/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py b/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py index df87b057f..a647a2cc5 100644 --- a/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py +++ b/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py @@ -179,6 +179,7 @@ def compute_activation_output_maxcut_sizes(graph: Graph) -> Tuple[np.ndarray, np return activation_outputs_bytes, activation_outputs +# TODO maxcut: add test for this function and remove no cover def compute_activation_output_sizes(graph: Graph) -> Tuple[np.ndarray, np.ndarray]: # pragma: no cover """ Computes an array of the respective output tensor size and an array of the output tensor size in bytes for diff --git a/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py b/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py index f361d06a5..609109649 100644 --- a/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py +++ b/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/ru_methods.py @@ -169,6 +169,7 @@ def activation_maxcut_size_utilization(mp_cfg: List[int], return np.array(activation_cut_memory) +# TODO maxcut: add test for this function and remove no cover def activation_output_size_utilization(mp_cfg: List[int], graph: Graph, fw_info: FrameworkInfo, diff --git a/model_compression_toolkit/core/keras/data_util.py b/model_compression_toolkit/core/keras/data_util.py index f1fba0ef3..daa5bb267 100644 --- a/model_compression_toolkit/core/keras/data_util.py +++ b/model_compression_toolkit/core/keras/data_util.py @@ -58,6 +58,7 @@ def gen(): return gen + class TFDatasetFromGenerator: """ TensorFlow dataset from a data generator function, batched to a specified size. @@ -70,7 +71,7 @@ def __init__(self, data_gen_fn: Callable[[], Generator]): """ inputs = next(data_gen_fn()) if not isinstance(inputs, list): - raise TypeError(f'Data generator is expected to yield a list of tensors, got {type(inputs)}') + raise TypeError(f'Data generator is expected to yield a list of tensors, got {type(inputs)}') # pragma: no cover self.orig_batch_size = inputs[0].shape[0] self._size = None @@ -78,7 +79,6 @@ def __init__(self, data_gen_fn: Callable[[], Generator]): output_signature = get_tensor_spec(inputs, ignore_batch_dim=True) self.dataset = tf.data.Dataset.from_generator(flat_gen_fn(data_gen_fn), output_signature=output_signature) - def __iter__(self): return iter(self.dataset) @@ -89,7 +89,6 @@ def __len__(self): return self._size - class FixedTFDataset: """ Fixed dataset containing samples from a generator, stored in memory. @@ -103,7 +102,7 @@ def __init__(self, data_gen_fn: Callable[[], Generator], n_samples: int = None): """ inputs = next(data_gen_fn()) if not isinstance(inputs, list): - raise TypeError(f'Data generator is expected to yield a list of tensors, got {type(inputs)}') + raise TypeError(f'Data generator is expected to yield a list of tensors, got {type(inputs)}') # pragma: no cover self.orig_batch_size = inputs[0].shape[0] samples = [] @@ -131,7 +130,7 @@ class FixedSampleInfoDataset: def __init__(self, samples: Sequence, sample_info: Sequence): if not all(len(info) == len(samples) for info in sample_info): - raise ValueError('Sample and additional info lengths must match') + raise ValueError('Sample and additional info lengths must match') # pragma: no cover self.samples = samples self.sample_info = sample_info diff --git a/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py b/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py index 085082a0b..7635cb78f 100644 --- a/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py +++ b/model_compression_toolkit/core/keras/graph_substitutions/substitutions/conv_funcs_to_layer.py @@ -20,7 +20,7 @@ if version.parse(tf.__version__) >= version.parse("2.13"): from keras.src.layers.core import TFOpLambda from keras.src.layers import Conv2D, DepthwiseConv2D -else: +else: # pragma: no cover from keras.layers.core import TFOpLambda from keras.layers import Conv2D, DepthwiseConv2D from model_compression_toolkit.logger import Logger diff --git a/model_compression_toolkit/core/pytorch/reader/graph_builders.py b/model_compression_toolkit/core/pytorch/reader/graph_builders.py index 7fcf474e9..564f44180 100644 --- a/model_compression_toolkit/core/pytorch/reader/graph_builders.py +++ b/model_compression_toolkit/core/pytorch/reader/graph_builders.py @@ -110,7 +110,7 @@ def _extract_torch_layer_data(node_module: torch.nn.Module) -> Tuple[Any, Dict[s """ node_type = type(node_module) if not isinstance(node_module, torch.nn.Module): - Logger.error(f"Expected an instance of torch.nn.Module for node {node_module.name}, but got {node_type}") + Logger.error(f"Expected an instance of torch.nn.Module for node {node_module.name}, but got {node_type}") # pragma: no cover # Extract the instance framework_attr (i.e. the arguments the class instance was initialized with). "fullargspec" # is a list of the layer's attribute names, that will be used as keys of the framework_attr dictionary. We the # values from the layer instance. @@ -221,16 +221,16 @@ def nodes_builder(model: GraphModule, elif hasattr(torch.Tensor, node.target): node_type = getattr(torch.Tensor, node.target) else: - Logger.critical(f"The call method '{node.target}' in {node} is not supported.") + Logger.critical(f"The call method '{node.target}' in {node} is not supported.") # pragma: no cover elif node.op == GET_ATTR: # Node holding a constant -> add to consts_dict so can add them later to weights of next node. if node.target in consts_dict: - Logger.critical('A constant weight appears to have been recorded multiple times.') + Logger.critical('A constant weight appears to have been recorded multiple times.') # pragma: no cover consts_dict[node] = model_parameters_and_buffers[node.target] continue else: - Logger.critical(f'Encountered an unsupported node type in node: {node.name}.') + Logger.critical(f'Encountered an unsupported node type in node: {node.name}.') # pragma: no cover # Add constants to weights dictionary. if node.op != PLACEHOLDER: