From 1ec9d6592c3f8c58bc3201495b176f296846653f Mon Sep 17 00:00:00 2001
From: Harshita Mangal
Date: Thu, 9 Nov 2023 13:49:28 -0800
Subject: [PATCH 1/2] Fix failing onnx models

Signed-off-by: Harshita Mangal
---
 .../onnx/src/python/aimet_onnx/batch_norm_fold.py      | 3 +++
 .../onnx/src/python/aimet_onnx/meta/connectedgraph.py  | 7 +++++++
 TrainingExtensions/onnx/src/python/aimet_onnx/utils.py | 3 ++-
 3 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/TrainingExtensions/onnx/src/python/aimet_onnx/batch_norm_fold.py b/TrainingExtensions/onnx/src/python/aimet_onnx/batch_norm_fold.py
index 69eb06789ac..a4edc0de38d 100644
--- a/TrainingExtensions/onnx/src/python/aimet_onnx/batch_norm_fold.py
+++ b/TrainingExtensions/onnx/src/python/aimet_onnx/batch_norm_fold.py
@@ -407,6 +407,9 @@ def get_input_output_channels(node: NodeProto, model: ModelProto) -> Tuple[int,
     """
     weight = ParamUtils.get_param(model, node, WEIGHT_INDEX)
     groups = get_node_attribute(node, "group")
+    # If the group attribute does not exist in the node, then the default is 1
+    if not groups:
+        groups = 1
     if node.op_type == "Conv":
         num_in_channels = weight.dims[1] * groups
         num_out_channels = weight.dims[0]
diff --git a/TrainingExtensions/onnx/src/python/aimet_onnx/meta/connectedgraph.py b/TrainingExtensions/onnx/src/python/aimet_onnx/meta/connectedgraph.py
index eccc936834b..aa2f079f998 100644
--- a/TrainingExtensions/onnx/src/python/aimet_onnx/meta/connectedgraph.py
+++ b/TrainingExtensions/onnx/src/python/aimet_onnx/meta/connectedgraph.py
@@ -90,6 +90,7 @@ def __init__(self, model: ModelProto):
         # Maps output to consumer node
         self._input_to_node = {}
         self._get_input_to_node()
+        self._unnamed_op = 0
 
         self.starting_ops = []
         self._branch_count = 0
@@ -216,6 +217,9 @@ def _process_starting_ops(self, op_queue: List):
         """
         input_ops = self._get_input_ops()
         for node in input_ops:
+            if not node.name:
+                node.name = str(node.op_type) + '_unnamed_' + str(self._unnamed_op)
+                self._unnamed_op += 1
             node_name = node.name
             if node_name not in self._ops:
                 op = self._create_ir_op(node)
@@ -364,6 +368,9 @@ def fill_op_product_graph(self):
         # - Index 1 contains the parent node.
         while op_queue:
             child_node, parent_node, connecting_tensor_name = op_queue.pop()
+            if not child_node.name:
+                child_node.name = str(child_node.op_type) + '_unnamed_' + str(self._unnamed_op)
+                self._unnamed_op += 1
             # new module, create op/product and link to parent
             if child_node.name != parent_node.name:
                 self._create_op_if_not_exists(child_node)
diff --git a/TrainingExtensions/onnx/src/python/aimet_onnx/utils.py b/TrainingExtensions/onnx/src/python/aimet_onnx/utils.py
index de2adced49f..54025b1be73 100644
--- a/TrainingExtensions/onnx/src/python/aimet_onnx/utils.py
+++ b/TrainingExtensions/onnx/src/python/aimet_onnx/utils.py
@@ -200,7 +200,8 @@ def make_dummy_input(model: ModelProto, dynamic_size: int = 1) -> Dict[str, np.n
             else:
                 # Else, axis has a fixed dimension size stored in dim.dim_value
                 shape.append(dim.dim_value)
-        input_dict[name] = np.random.randn(*shape).astype(mapping.TENSOR_TYPE_TO_NP_TYPE[dtype])
+        if shape:
+            input_dict[name] = np.random.randn(*shape).astype(mapping.TENSOR_TYPE_TO_NP_TYPE[dtype])
 
     return input_dict

From e7ad50eaf07658fd2d9cb0af40e20785fa17c936 Mon Sep 17 00:00:00 2001
From: Harshita Mangal
Date: Fri, 10 Nov 2023 09:16:18 -0800
Subject: [PATCH 2/2] Fix attribute name

Signed-off-by: Harshita Mangal
---
 .../onnx/src/python/aimet_onnx/adaround/adaround_optimizer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/TrainingExtensions/onnx/src/python/aimet_onnx/adaround/adaround_optimizer.py b/TrainingExtensions/onnx/src/python/aimet_onnx/adaround/adaround_optimizer.py
index 26dee94deef..d75cac0f600 100644
--- a/TrainingExtensions/onnx/src/python/aimet_onnx/adaround/adaround_optimizer.py
+++ b/TrainingExtensions/onnx/src/python/aimet_onnx/adaround/adaround_optimizer.py
@@ -150,7 +150,7 @@ def _optimize_rounding(cls, module: ModuleInfo, quantized_input_name,
                                                          out_data_torch.shape)
 
         attributes = read_attributes_for_op(module)
-        if len(attributes['pad']) > 2:
+        if len(attributes['pads']) > 2:
             logger.info("Skipping the Convolution layer because padding size of 4 is not supported for optimization")
             return
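
Note for reviewers (illustrative, not part of the patch series): the two quirks
addressed in PATCH 1/2 come straight from the ONNX spec, where a node's name and
Conv's "group" attribute are both optional. The sketch below uses only the public
onnx and numpy APIs (the model and tensor names are made up) to build a model that
exercises both cases:

    import numpy as np
    import onnx
    from onnx import TensorProto, helper

    # A Conv node with no name and no "group" attribute, both legal in ONNX
    conv = helper.make_node("Conv", inputs=["x", "w"], outputs=["y"],
                            kernel_shape=[3, 3])

    graph = helper.make_graph(
        nodes=[conv],
        name="unnamed_conv_graph",
        inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 8, 8])],
        outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 8, 6, 6])],
        initializer=[helper.make_tensor(
            "w", TensorProto.FLOAT, [8, 3, 3, 3],
            np.zeros((8, 3, 3, 3), dtype=np.float32).flatten().tolist())],
    )
    model = helper.make_model(graph)
    onnx.checker.check_model(model)  # passes: name and group are both optional

    node = model.graph.node[0]
    print(repr(node.name))                   # '' -> ConnectedGraph must synthesize a name
    print([a.name for a in node.attribute])  # ['kernel_shape'] -> no 'group', default to 1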
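
The new "if shape:" guard in make_dummy_input is needed because numpy misbehaves
for an empty shape list: np.random.randn(*[]) returns a bare Python float rather
than an ndarray, so the chained .astype(...) call raises. A small repro,
independent of AIMET:

    import numpy as np

    shape = []                   # what the dim loop yields for a dimensionless input
    sample = np.random.randn(*shape)
    print(type(sample))          # <class 'float'>, not an ndarray
    try:
        sample.astype(np.float32)
    except AttributeError as err:
        print("fails without the guard:", err)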
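
PATCH 2/2 looks like a one-character change, but it is grounded in the operator
schema: ONNX Conv stores padding under the attribute name "pads" (two entries per
spatial axis), so a lookup of 'pad' can never match, assuming
read_attributes_for_op surfaces the raw ONNX attribute names. For example:

    from onnx import helper

    conv = helper.make_node("Conv", inputs=["x", "w"], outputs=["y"],
                            kernel_shape=[3, 3], pads=[1, 1, 1, 1])
    print([a.name for a in conv.attribute])  # ['kernel_shape', 'pads'], no 'pad'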