From 1f1a291c9f51a237004436500d03242abe64ebd0 Mon Sep 17 00:00:00 2001 From: idanb Date: Sun, 16 Jun 2024 12:07:48 +0300 Subject: [PATCH] Bug fix in the notebook's coco-evaluation functions. --- .../evaluation_metrics/coco_evaluation.py | 11 +++++----- .../models_pytorch/yolov8/yolov8.py | 22 +++++++++++++++++++ .../pytorch_yolov8n_seg_for_imx500.ipynb | 7 +++--- 3 files changed, 31 insertions(+), 9 deletions(-) diff --git a/tutorials/mct_model_garden/evaluation_metrics/coco_evaluation.py b/tutorials/mct_model_garden/evaluation_metrics/coco_evaluation.py index 218fc6cdd..9bb90c0c0 100644 --- a/tutorials/mct_model_garden/evaluation_metrics/coco_evaluation.py +++ b/tutorials/mct_model_garden/evaluation_metrics/coco_evaluation.py @@ -21,7 +21,6 @@ from typing import List, Dict, Tuple, Callable, Any import random from pycocotools import mask as mask_utils -import torch from tqdm import tqdm from ..models_pytorch.yolov8.yolov8_postprocess import scale_boxes, scale_coords @@ -178,7 +177,7 @@ def format_results(self, outputs: List, img_ids: List, orig_img_dims: List, outp 'score': scores.tolist()[ind] if isinstance(scores.tolist(), list) else scores.tolist() }) - return detections + return detections def load_and_preprocess_image(image_path: str, preprocess: Callable) -> np.ndarray: """ @@ -506,12 +505,13 @@ def evaluate_seg_model(annotation_file, results_file): coco_eval.summarize() -def evaluate_yolov8_segmentation(model, data_dir, data_type='val2017', img_ids_limit=800, output_file='results.json',iou_thresh=0.7, conf=0.001, max_dets=300,mask_thresh=0.55): +def evaluate_yolov8_segmentation(model, model_predict_func, data_dir, data_type='val2017', img_ids_limit=800, output_file='results.json',iou_thresh=0.7, conf=0.001, max_dets=300,mask_thresh=0.55): """ Evaluate YOLOv8 model for instance segmentation on COCO dataset. Parameters: - model: The YOLOv8 model to be evaluated. 
+ - model_predict_func: A function to execute the model prediction - data_dir: The directory containing the COCO dataset. - data_type: The type of dataset to evaluate against (default is 'val2017'). - img_ids_limit: The maximum number of images to evaluate (default is 800). @@ -535,11 +535,10 @@ def evaluate_yolov8_segmentation(model, data_dir, data_type='val2017', img_ids_l # Preprocess the image input_img = load_and_preprocess_image(image_path, yolov8_preprocess_chw_transpose).astype('float32') - input_tensor = torch.from_numpy(input_img).unsqueeze(0) # Add batch dimension # Run the model - with torch.no_grad(): - output = model(input_tensor) + output = model_predict_func(model, input_img) + #run post processing (nms) boxes, scores, classes, masks = postprocess_yolov8_inst_seg(outputs=output, conf=conf, iou_thres=iou_thresh, max_out_dets=max_dets) diff --git a/tutorials/mct_model_garden/models_pytorch/yolov8/yolov8.py b/tutorials/mct_model_garden/models_pytorch/yolov8/yolov8.py index 26622738e..7a6225ae3 100644 --- a/tutorials/mct_model_garden/models_pytorch/yolov8/yolov8.py +++ b/tutorials/mct_model_garden/models_pytorch/yolov8/yolov8.py @@ -498,6 +498,28 @@ def keypoints_model_predict(model: Any, inputs: np.ndarray) -> List: return postprocess_yolov8_keypoints(output_np) +def seg_model_predict(model: Any, + inputs: np.ndarray) -> List: + """ + Perform inference using the provided PyTorch model on the given inputs. + + This function handles moving the inputs to the appropriate torch data type and format, + and returns the outputs. + + Args: + model (Any): The PyTorch model used for inference. + inputs (np.ndarray): Input data to perform inference on. + + Returns: + List: List containing tensors of predictions. 
+ """ + input_tensor = torch.from_numpy(inputs).unsqueeze(0) # Add batch dimension + + # Run the model + with torch.no_grad(): + outputs = model(input_tensor) + + return outputs def yolov8_pytorch(model_yaml: str) -> (nn.Module, Dict): """ diff --git a/tutorials/notebooks/imx500_notebooks/pytorch/pytorch_yolov8n_seg_for_imx500.ipynb b/tutorials/notebooks/imx500_notebooks/pytorch/pytorch_yolov8n_seg_for_imx500.ipynb index d267b3a86..34da15020 100644 --- a/tutorials/notebooks/imx500_notebooks/pytorch/pytorch_yolov8n_seg_for_imx500.ipynb +++ b/tutorials/notebooks/imx500_notebooks/pytorch/pytorch_yolov8n_seg_for_imx500.ipynb @@ -417,8 +417,9 @@ }, "outputs": [], "source": [ + "from tutorials.mct_model_garden.models_pytorch.yolov8.yolov8 import seg_model_predict\n", "from tutorials.mct_model_garden.evaluation_metrics.coco_evaluation import evaluate_yolov8_segmentation\n", - "evaluate_yolov8_segmentation(model, data_dir='coco', data_type='val2017', img_ids_limit=100, output_file='results.json', iou_thresh=0.7, conf=0.001, max_dets=300,mask_thresh=0.55)" + "evaluate_yolov8_segmentation(model, seg_model_predict, data_dir='coco', data_type='val2017', img_ids_limit=100, output_file='results.json', iou_thresh=0.7, conf=0.001, max_dets=300,mask_thresh=0.55)" ] }, { @@ -442,7 +443,7 @@ "outputs": [], "source": [ "from tutorials.mct_model_garden.evaluation_metrics.coco_evaluation import evaluate_yolov8_segmentation\n", - "evaluate_yolov8_segmentation(quant_model, data_dir='coco', data_type='val2017', img_ids_limit=100, output_file='results_quant.json', iou_thresh=0.7, conf=0.001, max_dets=300,mask_thresh=0.55)" + "evaluate_yolov8_segmentation(quant_model, seg_model_predict, data_dir='coco', data_type='val2017', img_ids_limit=100, output_file='results_quant.json', iou_thresh=0.7, conf=0.001, max_dets=300,mask_thresh=0.55)" ] }, { @@ -467,7 +468,7 @@ "outputs": [], "source": [ "from tutorials.mct_model_garden.evaluation_metrics.coco_evaluation import 
evaluate_yolov8_segmentation\n", - "evaluate_yolov8_segmentation(gptq_quant_model, data_dir='coco', data_type='val2017', img_ids_limit=100, output_file='results_g_quant.json', iou_thresh=0.7, conf=0.001, max_dets=300,mask_thresh=0.55)" + "evaluate_yolov8_segmentation(gptq_quant_model, seg_model_predict, data_dir='coco', data_type='val2017', img_ids_limit=100, output_file='results_g_quant.json', iou_thresh=0.7, conf=0.001, max_dets=300,mask_thresh=0.55)" ] }, {