Skip to content

Commit

Permalink
Update detection visualization tutorial (#768)
Browse files Browse the repository at this point in the history
* init vis folder

Signed-off-by: dongy <dongy@nvidia.com>

* update repo

Signed-off-by: Dong Yang <dongy@dongy-mlt.client.nvidia.com>

* update repo

Signed-off-by: Dong Yang <dongy@dongy-mlt.client.nvidia.com>

* update repo

Signed-off-by: Dong Yang <dongy@dongy-mlt.client.nvidia.com>

* update repo

Signed-off-by: Dong Yang <dongy@dongy-mlt.client.nvidia.com>

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* update repo

Signed-off-by: dongy <dongy@nvidia.com>

* update repo

Signed-off-by: Dong Yang <dongy@dongy-mlt.client.nvidia.com>

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* update repo

Signed-off-by: Dong Yang <dongy@dongy-mlt.client.nvidia.com>

* update repo

Signed-off-by: dongy <dongy@nvidia.com>

Co-authored-by: dongy <dongy@nvidia.com>
Co-authored-by: Dong Yang <dongy@dongy-mlt.client.nvidia.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
  • Loading branch information
4 people authored Jul 4, 2022
1 parent 2da31b6 commit 9b54616
Show file tree
Hide file tree
Showing 4 changed files with 139 additions and 16 deletions.
25 changes: 21 additions & 4 deletions detection/luna16_visualization/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ Visualizing box prediction/annotation in 3D medical image detection is not strai
## Prerequisite

- The Version of 3D Slicer should be **4.11.20210226** or later.
- Box information should be stored in a ".json" file. The "data\_sample.json" file is an example. The information of *N* boxes is stored under the key "box" as *N* lists. The six values are the *X*/*Y*/*Z* coordinates of the box center and the box length in the *X*/*Y*/*Z* axes. All the coordinate values are in the world coordinate system.
- Box information should be stored in a ".json" file. The information of *N* boxes is stored under the key "box" as *N* lists of six values each. The six values may follow any of the supported 3D box [modes](https://github.com/Project-MONAI/MONAI/blob/edf3b742a4ae85d1f30462ed0c7511c520fae888/monai/data/box_utils.py#L447-L456) (e.g., corner coordinates or center coordinates). All coordinate values are expressed in either the world coordinate system or the image coordinate system. The "data\_sample.json" file is an example using X/Y/Z-axis center coordinates plus box lengths (box mode **cccwhd**).

```
"box": [
Expand All @@ -32,13 +32,30 @@ Visualizing box prediction/annotation in 3D medical image detection is not strai

### 1. Create ".obj" file for predictions/annotation using the "save\_obj.sh" script.

```
#!/bin/bash
The user needs to specify the box mode and whether the coordinates are given in image space. If image coordinates are used, the image root directory must also be provided so that each image's affine can be read.

#### Example of boxes in world coordinates

```
INPUT_DATASET_JSON="./data_sample.json"
OUTPUT_DIR="./out"
python save_obj.py --input_dataset_json ${INPUT_DATASET_JSON} \
python save_obj.py --input_box_mode "cccwhd" \
--input_dataset_json ${INPUT_DATASET_JSON} \
--output_dir ${OUTPUT_DIR}
```

#### Example of boxes in image coordinates

```
IMAGE_DATA_ROOT="/data_root"
INPUT_DATASET_JSON="./data_sample_xyzxyz_image-coordinate.json"
OUTPUT_DIR="./out_image_coord"
python save_obj.py --image_coordinate \
--image_data_root ${IMAGE_DATA_ROOT} \
--input_box_mode "xyzxyz" \
--input_dataset_json ${INPUT_DATASET_JSON} \
--output_dir ${OUTPUT_DIR}
```

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
{
"validation": [
{
"image": "1.3.6.1.4.1.14519.5.2.1.6279.6001.108197895896446896160048741492.mhd",
"box": [
[
106,
342.25,
31.453125,
114.75,
351,
34.0625
]
],
"label": [
0
]
},
{
"image": "1.3.6.1.4.1.14519.5.2.1.6279.6001.109002525524522225658609808059.mhd",
"box": [
[
413.75,
273.25,
62.90625,
438.5,
298,
73.8125
],
[
404.5,
333.25,
54.8125,
412.25,
341.25,
58.3125
]
],
"label": [
0,
0
]
}
]
}
71 changes: 61 additions & 10 deletions detection/luna16_visualization/save_obj.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,13 @@
import argparse
import csv
import json
import monai
import nibabel as nib
import numpy as np
import os

from monai.data.box_utils import convert_box_mode


def save_obj(vertices, faces, filename):

Expand All @@ -29,20 +32,44 @@ def save_obj(vertices, faces, filename):
for t in faces:
f.write("f {} {} {} {}\n".format(*(np.array(t) + 1)))

return


def main():
parser = argparse.ArgumentParser()
parser = argparse.ArgumentParser(
description="Save .obj files of boxes for visualization using 3D Slicer.",
)
parser.add_argument(
"--image_coordinate",
action="store_true",
help="if box coordinates in image coordinate",
)
parser.add_argument(
"--image_data_root",
type=str,
default="",
help="image data root",
)
parser.add_argument(
"--input_box_mode",
action="store",
type=str,
required=True,
help="input box coordinate mode",
),
parser.add_argument(
"--input_dataset_json",
action="store",
type=str,
required=True,
help="the dataset .json with box information",
)
parser.add_argument(
"--output_dir",
action="store",
type=str,
required=True,
help="output directory",
)
args = parser.parse_args()

Expand All @@ -52,6 +79,9 @@ def main():
with open(os.path.join(args.input_dataset_json)) as f:
input_dataset = json.load(f)

if args.image_coordinate:
image_loader = monai.transforms.LoadImage(reader=None, image_only=False)

for key in input_dataset.keys():
section = input_dataset[key]

Expand All @@ -61,17 +91,36 @@ def main():
box_filename = box_filename.split(os.sep)[-1]
print("-- {0:d}th case name:".format(_k + 1), box_filename)

if args.image_coordinate:
image_name = os.path.join(args.image_data_root, section[_k]["image"])
image_data = image_loader(image_name)
affine = image_data[1]["original_affine"]

# convert to RAS coordinate system (required by 3D Slicer)
for _i in range(3):
if affine[_i, _i] < 0:
affine[_i, _i] *= -1.0
affine[_i, 3] *= -1.0

vertices = []
faces = []
_i = 0
for vec in box_data:
xmin = vec[0] - 0.5 * vec[3]
ymin = vec[1] - 0.5 * vec[4]
zmin = vec[2] - 0.5 * vec[5]

xmax = vec[0] + 0.5 * vec[3]
ymax = vec[1] + 0.5 * vec[4]
zmax = vec[2] + 0.5 * vec[5]
for _vec in box_data:
vec = convert_box_mode(
np.expand_dims(np.array(_vec), axis=0),
src_mode=args.input_box_mode,
dst_mode="xyzxyz",
)
vec = vec.squeeze()
xmin, ymin, zmin = vec[0], vec[1], vec[2]
xmax, ymax, zmax = vec[3], vec[4], vec[5]

if args.image_coordinate:
_out = affine @ np.transpose(np.array([xmin, ymin, zmin, 1]))
xmin, ymin, zmin = _out[0], _out[1], _out[2]

_out = affine @ np.transpose(np.array([xmax, ymax, zmax, 1]))
xmax, ymax, zmax = _out[0], _out[1], _out[2]

vertices += [
(xmax, ymax, zmin),
Expand All @@ -95,7 +144,9 @@ def main():

_i += 1

save_obj(vertices, faces, os.path.join(args.output_dir, box_filename + ".obj"))
save_obj(
vertices, faces, os.path.join(args.output_dir, box_filename + ".obj")
)

return

Expand Down
14 changes: 12 additions & 2 deletions detection/luna16_visualization/save_obj.sh
Original file line number Diff line number Diff line change
@@ -1,7 +1,17 @@
#!/bin/bash

# Example 1: boxes given in world coordinates, "cccwhd" box mode
# (center x/y/z plus width/height/depth).
INPUT_DATASET_JSON="./data_sample.json"
OUTPUT_DIR="./out_world_coord"

python save_obj.py --input_box_mode "cccwhd" \
    --input_dataset_json ${INPUT_DATASET_JSON} \
    --output_dir ${OUTPUT_DIR}

# Example 2: boxes given in image coordinates, "xyzxyz" box mode
# (corner coordinates); the image root is needed to read each image's affine.
IMAGE_DATA_ROOT="/data_root"
INPUT_DATASET_JSON="./data_sample_xyzxyz_image-coordinate.json"
OUTPUT_DIR="./out_image_coord"

# Fix: the original command ended with a dangling "\" continuation and was
# missing the required --output_dir argument.
python save_obj.py --image_coordinate \
    --image_data_root ${IMAGE_DATA_ROOT} \
    --input_box_mode "xyzxyz" \
    --input_dataset_json ${INPUT_DATASET_JSON} \
    --output_dir ${OUTPUT_DIR}

0 comments on commit 9b54616

Please sign in to comment.