Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix failing onnx models #2569

Merged
merged 2 commits into from
Nov 14, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ def _optimize_rounding(cls, module: ModuleInfo, quantized_input_name,
out_data_torch.shape)

attributes = read_attributes_for_op(module)
if len(attributes['pad']) > 2:
if len(attributes['pads']) > 2:
logger.info("Skipping the Convolution layer because padding size of 4 is not supported for optimization")
return

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -407,6 +407,9 @@ def get_input_output_channels(node: NodeProto, model: ModelProto) -> Tuple[int,
"""
weight = ParamUtils.get_param(model, node, WEIGHT_INDEX)
groups = get_node_attribute(node, "group")
    # If the group attribute does not exist in the node, then the default is 1
if not groups:
groups = 1
if node.op_type == "Conv":
num_in_channels = weight.dims[1] * groups
num_out_channels = weight.dims[0]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,7 @@ def __init__(self, model: ModelProto):
# Maps output to consumer node
self._input_to_node = {}
self._get_input_to_node()
self._unnamed_op = 0

self.starting_ops = []
self._branch_count = 0
Expand Down Expand Up @@ -216,6 +217,9 @@ def _process_starting_ops(self, op_queue: List):
"""
input_ops = self._get_input_ops()
for node in input_ops:
if not node.name:
node.name = str(node.op_type) + '_unnamed_' + str(self._unnamed_op)
self._unnamed_op += 1
node_name = node.name
if node_name not in self._ops:
op = self._create_ir_op(node)
Expand Down Expand Up @@ -364,6 +368,9 @@ def fill_op_product_graph(self):
# - Index 1 contains the parent node.
while op_queue:
child_node, parent_node, connecting_tensor_name = op_queue.pop()
if not child_node.name:
child_node.name = str(child_node.op_type) + '_unnamed_' + str(self._unnamed_op)
self._unnamed_op += 1
# new module, create op/product and link to parent
if child_node.name != parent_node.name:
self._create_op_if_not_exists(child_node)
Expand Down
3 changes: 2 additions & 1 deletion TrainingExtensions/onnx/src/python/aimet_onnx/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -200,7 +200,8 @@ def make_dummy_input(model: ModelProto, dynamic_size: int = 1) -> Dict[str, np.n
else:
# Else, axis has a fixed dimension size stored in dim.dim_value
shape.append(dim.dim_value)
input_dict[name] = np.random.randn(*shape).astype(mapping.TENSOR_TYPE_TO_NP_TYPE[dtype])
if shape:
input_dict[name] = np.random.randn(*shape).astype(mapping.TENSOR_TYPE_TO_NP_TYPE[dtype])
return input_dict


Expand Down
Loading