Skip to content

Commit

Permalink
add no covers
Browse files Browse the repository at this point in the history
  • Loading branch information
elad-c committed Dec 25, 2024
1 parent 9027a4c commit 8ddbc8b
Show file tree
Hide file tree
Showing 6 changed files with 21 additions and 16 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,8 @@ def solve(self, estimate_factor: float, iter_limit: int = 500) -> Tuple[List[Bas
cost = self.accumulate(cut_cost, c.memory_size())
if c not in open_list:
self._update_expanded_node(c, cost, cut_route, open_list, costs, routes)
elif self.ordering(cost, costs[c]):
# TODO maxcut: this isn't covered in the coverage test. check if needed and remove no cover
elif self.ordering(cost, costs[c]): # pragma: no cover
# If we already saw this cut during the search with a larger cost, then we want to update the order
# of the schedule in the cut
# Remove call - removes the cut with the same memory elements but different ordering from open
Expand All @@ -190,7 +191,8 @@ def solve(self, estimate_factor: float, iter_limit: int = 500) -> Tuple[List[Bas
self._update_expanded_node(c, cost, cut_route, open_list, costs, routes)

# Halt or No Solution
return None, 0, None
# TODO maxcut: this isn't covered in the coverage test. check if needed and remove no cover
return None, 0, None # pragma: no cover

@staticmethod
def _update_expanded_node(cut: Cut, cost: float, route: List[Cut], open_list: List[Cut],
Expand Down Expand Up @@ -351,7 +353,8 @@ def ordering(cost_1, cost_2) -> bool:
Returns: True if the first cost is smaller than the second one, else otherwise.
"""
return cost_1 < cost_2
# TODO maxcut: this isn't covered in the coverage test. check if needed and remove no cover
return cost_1 < cost_2 # pragma: no cover

def estimate(self, cut: Cut, estimate_factor: float) -> float:
"""
Expand Down Expand Up @@ -379,9 +382,10 @@ def get_init_estimate_factor(memory_graph: MemoryGraph) -> float:
Returns: An initial estimate value.
"""
l_bound = memory_graph.memory_lbound_single_op
u_bound = 2 * sum([t.total_size for t in memory_graph.b_nodes]) - l_bound
return (u_bound + l_bound) / 2
# TODO maxcut: this isn't covered in the coverage test. check if needed and remove no cover
l_bound = memory_graph.memory_lbound_single_op # pragma: no cover
u_bound = 2 * sum([t.total_size for t in memory_graph.b_nodes]) - l_bound # pragma: no cover
return (u_bound + l_bound) / 2 # pragma: no cover

@staticmethod
def _remove_dummys_from_path(path: List[BaseNode]) -> List[BaseNode]:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -179,6 +179,7 @@ def compute_activation_output_maxcut_sizes(graph: Graph) -> Tuple[np.ndarray, np
return activation_outputs_bytes, activation_outputs


# TODO maxcut: add test for this function and remove no cover
def compute_activation_output_sizes(graph: Graph) -> Tuple[np.ndarray, np.ndarray]: # pragma: no cover
"""
Computes an array of the respective output tensor size and an array of the output tensor size in bytes for
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -169,6 +169,7 @@ def activation_maxcut_size_utilization(mp_cfg: List[int],
return np.array(activation_cut_memory)


# TODO maxcut: add test for this function and remove no cover
def activation_output_size_utilization(mp_cfg: List[int],
graph: Graph,
fw_info: FrameworkInfo,
Expand Down
9 changes: 4 additions & 5 deletions model_compression_toolkit/core/keras/data_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ def gen():

return gen


class TFDatasetFromGenerator:
"""
TensorFlow dataset from a data generator function, batched to a specified size.
Expand All @@ -70,15 +71,14 @@ def __init__(self, data_gen_fn: Callable[[], Generator]):
"""
inputs = next(data_gen_fn())
if not isinstance(inputs, list):
raise TypeError(f'Data generator is expected to yield a list of tensors, got {type(inputs)}')
raise TypeError(f'Data generator is expected to yield a list of tensors, got {type(inputs)}') # pragma: no cover
self.orig_batch_size = inputs[0].shape[0]
self._size = None

# TFDatasetFromGenerator flattens the dataset, thus we ignore the batch dimension
output_signature = get_tensor_spec(inputs, ignore_batch_dim=True)
self.dataset = tf.data.Dataset.from_generator(flat_gen_fn(data_gen_fn), output_signature=output_signature)


def __iter__(self):
return iter(self.dataset)

Expand All @@ -89,7 +89,6 @@ def __len__(self):
return self._size



class FixedTFDataset:
"""
Fixed dataset containing samples from a generator, stored in memory.
Expand All @@ -103,7 +102,7 @@ def __init__(self, data_gen_fn: Callable[[], Generator], n_samples: int = None):
"""
inputs = next(data_gen_fn())
if not isinstance(inputs, list):
raise TypeError(f'Data generator is expected to yield a list of tensors, got {type(inputs)}')
raise TypeError(f'Data generator is expected to yield a list of tensors, got {type(inputs)}') # pragma: no cover
self.orig_batch_size = inputs[0].shape[0]

samples = []
Expand Down Expand Up @@ -131,7 +130,7 @@ class FixedSampleInfoDataset:

def __init__(self, samples: Sequence, sample_info: Sequence):
if not all(len(info) == len(samples) for info in sample_info):
raise ValueError('Sample and additional info lengths must match')
raise ValueError('Sample and additional info lengths must match') # pragma: no cover
self.samples = samples
self.sample_info = sample_info

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
if version.parse(tf.__version__) >= version.parse("2.13"):
from keras.src.layers.core import TFOpLambda
from keras.src.layers import Conv2D, DepthwiseConv2D
else:
else: # pragma: no cover
from keras.layers.core import TFOpLambda
from keras.layers import Conv2D, DepthwiseConv2D
from model_compression_toolkit.logger import Logger
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ def _extract_torch_layer_data(node_module: torch.nn.Module) -> Tuple[Any, Dict[s
"""
node_type = type(node_module)
if not isinstance(node_module, torch.nn.Module):
Logger.error(f"Expected an instance of torch.nn.Module for node {node_module.name}, but got {node_type}")
Logger.error(f"Expected an instance of torch.nn.Module for node {node_module.name}, but got {node_type}") # pragma: no cover
# Extract the instance framework_attr (i.e. the arguments the class instance was initialized with). "fullargspec"
# is a list of the layer's attribute names, that will be used as keys of the framework_attr dictionary. We the
# values from the layer instance.
Expand Down Expand Up @@ -221,16 +221,16 @@ def nodes_builder(model: GraphModule,
elif hasattr(torch.Tensor, node.target):
node_type = getattr(torch.Tensor, node.target)
else:
Logger.critical(f"The call method '{node.target}' in {node} is not supported.")
Logger.critical(f"The call method '{node.target}' in {node} is not supported.") # pragma: no cover

elif node.op == GET_ATTR:
# Node holding a constant -> add to consts_dict so can add them later to weights of next node.
if node.target in consts_dict:
Logger.critical('A constant weight appears to have been recorded multiple times.')
Logger.critical('A constant weight appears to have been recorded multiple times.') # pragma: no cover
consts_dict[node] = model_parameters_and_buffers[node.target]
continue
else:
Logger.critical(f'Encountered an unsupported node type in node: {node.name}.')
Logger.critical(f'Encountered an unsupported node type in node: {node.name}.') # pragma: no cover

# Add constants to weights dictionary.
if node.op != PLACEHOLDER:
Expand Down

0 comments on commit 8ddbc8b

Please sign in to comment.