Add new model vSHARP & CMR challenge along with 3D reconstruction code #254

Closed
wants to merge 50 commits
Changes from 19 commits

50 commits
5860c77
Add vsharp (2D & 3D), 3D transformers/engine, unet 3D, CMR challenge …
georgeyiasemis Sep 20, 2023
f1b8c83
package for transformers
georgeyiasemis Sep 20, 2023
b680111
Fix minor issue
georgeyiasemis Sep 20, 2023
6a4ea02
Remove transformers for now
georgeyiasemis Sep 21, 2023
8f60d39
Minor fixes
georgeyiasemis Sep 21, 2023
ca0f902
Minor fixes
georgeyiasemis Sep 21, 2023
fe1c172
Fix codacy complaints
georgeyiasemis Sep 21, 2023
7b49189
Fix codacy complaints
georgeyiasemis Sep 21, 2023
d6d6d58
Fix codacy complaints
georgeyiasemis Sep 21, 2023
781322e
Improve code
georgeyiasemis Sep 21, 2023
627d37b
Improve test cover
georgeyiasemis Sep 21, 2023
9275f38
Add cmr dataset test
georgeyiasemis Sep 21, 2023
3dc73fb
Codacy fixes
georgeyiasemis Sep 21, 2023
acfa616
Not needed option
georgeyiasemis Sep 22, 2023
d5de4e4
Exception fixes
georgeyiasemis Sep 22, 2023
bac19bc
Exception fixes
georgeyiasemis Sep 22, 2023
8a28b0a
Add vSHARP prostate experiments
georgeyiasemis Sep 25, 2023
65e737b
CMR configs
georgeyiasemis Sep 25, 2023
6c87021
Forgotten print statements
georgeyiasemis Sep 25, 2023
b1d6773
Updates for PR better quality
georgeyiasemis Nov 9, 2023
7443440
Codacy complains
georgeyiasemis Nov 9, 2023
40e10a8
Tools for cmr challenge and configs
georgeyiasemis Nov 9, 2023
a983a0b
Tools for cmr challenge and configs
georgeyiasemis Nov 9, 2023
a3424fc
Minor changes
georgeyiasemis Nov 10, 2023
c0e9cbb
Minor changes
georgeyiasemis Nov 10, 2023
11d5995
Minor changes
georgeyiasemis Nov 10, 2023
5af0b70
Minor changes
georgeyiasemis Nov 10, 2023
814060e
Add readme
georgeyiasemis Nov 10, 2023
72a7000
Pylint
georgeyiasemis Nov 10, 2023
a73bb78
Minor correction
georgeyiasemis Nov 10, 2023
9845fb2
Update README.rst
georgeyiasemis Nov 10, 2023
a2a2be3
Update README.rst
georgeyiasemis Nov 10, 2023
80bbe4d
Update README.rst
georgeyiasemis Nov 10, 2023
545df66
Update README.rst
georgeyiasemis Nov 10, 2023
86ca08f
Minor issues with config and enum
georgeyiasemis Nov 10, 2023
2f39d90
Small change
georgeyiasemis Nov 12, 2023
e5e16fe
Update readmes
georgeyiasemis Nov 12, 2023
f88c66a
Update readmes
georgeyiasemis Nov 12, 2023
2b7d18d
inference fields
georgeyiasemis Nov 12, 2023
30d025b
inference fields
georgeyiasemis Nov 12, 2023
59f34d9
Update readmes
georgeyiasemis Nov 12, 2023
b9255d0
Minor yaml fix
georgeyiasemis Nov 12, 2023
d7b0054
Update citation
georgeyiasemis Nov 12, 2023
f91dbb7
Add citations
georgeyiasemis Nov 12, 2023
1526ecb
Update citation
georgeyiasemis Nov 12, 2023
489cb97
Update readme
georgeyiasemis Nov 12, 2023
da51531
Add vsharp to model zoo
georgeyiasemis Nov 13, 2023
93d39ad
Add vsharp to model zoo
georgeyiasemis Nov 13, 2023
2bc8ba0
Add vsharp to model zoo
georgeyiasemis Nov 13, 2023
174ed7d
Add vsharp to model zoo
georgeyiasemis Nov 13, 2023
272 changes: 272 additions & 0 deletions direct/common/subsample.py
@@ -28,6 +28,9 @@

__all__ = (
"CalgaryCampinasMaskFunc",
"CartesianRandomMaskFunc",
"CartesianEquispacedMaskFunc",
"CartesianMagicMaskFunc",
"FastMRIRandomMaskFunc",
"FastMRIEquispacedMaskFunc",
"FastMRIMagicMaskFunc",
@@ -243,6 +246,83 @@ def mask_func(
return torch.from_numpy(self._reshape_and_broadcast_mask(shape, mask))


class CartesianRandomMaskFunc(FastMRIRandomMaskFunc):
r"""Cartesian random vertical line mask function.

Similar to :class:`FastMRIRandomMaskFunc`, but instead of the center fraction (`center_fractions`) representing
the fraction of center lines relative to the original size, here it represents the actual number of center lines.
"""

def __init__(
self,
accelerations: Union[List[Number], Tuple[Number, ...]],
center_fractions: Optional[Union[List[int], Tuple[int, ...]]] = None,
uniform_range: bool = False,
):
"""Inits :class:`CartesianRandomMaskFunc`.

Parameters
----------
accelerations: Union[List[Number], Tuple[Number, ...]]
Amount of under-sampling. An acceleration of 4 retains 25% of the k-space; the sampling method is given by
the mask type. Has to be the same length as center_fractions if uniform_range is not True.
center_fractions: list or tuple of ints, optional
Number of low-frequency (center) columns to be retained.
If multiple values are provided, then one of these numbers is chosen uniformly each time.
uniform_range: bool
If True, an acceleration will be uniformly sampled between the two values. Default: False.
"""
super().__init__(
accelerations=accelerations,
center_fractions=center_fractions,
uniform_range=uniform_range,
)

def mask_func(
self,
shape: Union[List[int], Tuple[int, ...]],
return_acs: bool = False,
seed: Optional[Union[int, Iterable[int]]] = None,
) -> torch.Tensor:
"""Creates a random vertical Cartesian mask.

Parameters
----------
shape: list or tuple of ints
The shape of the mask to be created. The shape should have at least 3 dimensions.
Samples are drawn along the second-to-last dimension.
return_acs: bool
Return the autocalibration signal region as a mask.
seed: int or iterable of ints or None (optional)
Seed for the random number generator. Setting the seed ensures the same mask is generated
each time for the same shape. Default: None.


Returns
-------
mask: torch.Tensor
The sampling mask.
"""
if len(shape) < 3:
raise ValueError("Shape should have 3 or more dimensions")

with temp_seed(self.rng, seed):
num_cols = shape[-2]

num_center_lines, acceleration = self.choose_acceleration()

mask = self.center_mask_func(num_cols, num_center_lines)

if return_acs:
return torch.from_numpy(self._reshape_and_broadcast_mask(shape, mask))

# Create the mask
prob = (num_cols / acceleration - num_center_lines) / (num_cols - num_center_lines)
mask = mask | (self.rng.uniform(size=num_cols) < prob)

return torch.from_numpy(self._reshape_and_broadcast_mask(shape, mask))
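
A minimal usage sketch for the new class (not part of the diff), assuming `CartesianRandomMaskFunc` is importable from `direct.common.subsample` as the `__all__` addition above suggests, and calling `mask_func` directly with the signature shown here; the shape and the acceleration/center-line values are illustrative only:

from direct.common.subsample import CartesianRandomMaskFunc

# center_fractions is an absolute number of center (ACS) lines here, not a fraction.
mask_fn = CartesianRandomMaskFunc(accelerations=[4], center_fractions=[24])

# Columns are drawn along the second-to-last dimension; a fixed seed makes the mask reproducible.
kspace_shape = (1, 320, 320, 2)  # hypothetical (coil, height, width, complex) shape
sampling_mask = mask_fn.mask_func(shape=kspace_shape, seed=42)
acs_mask = mask_fn.mask_func(shape=kspace_shape, seed=42, return_acs=True)

# On average about num_cols / 4 columns are retained; the 24 ACS columns are always included.
print(sampling_mask.shape, acs_mask.shape)
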


class FastMRIEquispacedMaskFunc(FastMRIMaskFunc):
r"""Equispaced vertical line mask function.

@@ -324,6 +404,90 @@ def mask_func(
return torch.from_numpy(self._reshape_and_broadcast_mask(shape, mask))


class CartesianEquispacedMaskFunc(FastMRIEquispacedMaskFunc):
r"""Cartesian equispaced vertical line mask function.

Similar to :class:`FastMRIEquispacedMaskFunc`, but instead of the center fraction (`center_fractions`) representing
the fraction of center lines relative to the original size, here it represents the actual number of center lines.
"""

def __init__(
self,
accelerations: Union[List[Number], Tuple[Number, ...]],
center_fractions: Optional[Union[List[int], Tuple[int, ...]]] = None,
uniform_range: bool = False,
):
"""Inits :class:`CartesianEquispacedMaskFunc`.

Parameters
----------
accelerations: Union[List[Number], Tuple[Number, ...]]
Amount of under-sampling. An acceleration of 4 retains 25% of the k-space; the sampling method is given by
the mask type. Has to be the same length as center_fractions if uniform_range is not True.
center_fractions: list or tuple of ints, optional
Number of low-frequency (center) columns to be retained.
If multiple values are provided, then one of these numbers is chosen uniformly each time.
uniform_range: bool
If True, an acceleration will be uniformly sampled between the two values. Default: False.
"""
super().__init__(
accelerations=accelerations,
center_fractions=center_fractions,
uniform_range=uniform_range,
)

def mask_func(
self,
shape: Union[List[int], Tuple[int, ...]],
return_acs: bool = False,
seed: Optional[Union[int, Iterable[int]]] = None,
) -> torch.Tensor:
"""Creates an equispaced vertical Cartesian mask.

Parameters
----------
shape: list or tuple of ints
The shape of the mask to be created. The shape should have at least 3 dimensions.
Samples are drawn along the second-to-last dimension.
return_acs: bool
Return the autocalibration signal region as a mask.
seed: int or iterable of ints or None (optional)
Seed for the random number generator. Setting the seed ensures the same mask is generated
each time for the same shape. Default: None.


Returns
-------
mask: torch.Tensor
The sampling mask.
"""
if len(shape) < 3:
raise ValueError("Shape should have 3 or more dimensions")

with temp_seed(self.rng, seed):
num_cols = shape[-2]

num_center_lines, acceleration = self.choose_acceleration()

num_center_lines = int(num_center_lines)
mask = self.center_mask_func(num_cols, num_center_lines)

if return_acs:
return torch.from_numpy(self._reshape_and_broadcast_mask(shape, mask))

# determine acceleration rate by adjusting for the number of low frequencies
adjusted_accel = (acceleration * (num_center_lines - num_cols)) / (
num_center_lines * acceleration - num_cols
)
offset = self.rng.randint(0, round(adjusted_accel))

accel_samples = np.arange(offset, num_cols - 1, adjusted_accel)
accel_samples = np.around(accel_samples).astype(np.uint)
mask[accel_samples] = True

return torch.from_numpy(self._reshape_and_broadcast_mask(shape, mask))
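
As a numeric cross-check (a standalone sketch, not part of the diff; N, R and k are illustrative and the centered ACS layout of the FastMRI-style `center_mask_func` is an assumption): widening the equispaced spacing to R(k - N)/(kR - N) keeps the total number of sampled columns near N / R once the ACS lines are counted in.

import numpy as np

num_cols, acceleration, num_center_lines = 320, 4, 24  # illustrative N, R, k

# Same adjustment as above: spacing wider than R to compensate for the ACS block.
adjusted_accel = (acceleration * (num_center_lines - num_cols)) / (
    num_center_lines * acceleration - num_cols
)

# Assumed centered ACS block, as in the FastMRI-style center_mask_func.
mask = np.zeros(num_cols, dtype=bool)
pad = (num_cols - num_center_lines + 1) // 2
mask[pad : pad + num_center_lines] = True

# Equispaced samples with offset 0 (the class draws the offset at random).
accel_samples = np.around(np.arange(0, num_cols - 1, adjusted_accel)).astype(int)
mask[accel_samples] = True

print(round(adjusted_accel, 2), int(mask.sum()), num_cols // acceleration)  # 5.29 80 80
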


class FastMRIMagicMaskFunc(FastMRIMaskFunc):
"""Vertical line mask function as implemented in [1]_.

@@ -422,6 +586,114 @@ def mask_func(
return torch.from_numpy(self._reshape_and_broadcast_mask(shape, mask))


class CartesianMagicMaskFunc(FastMRIMagicMaskFunc):
r"""Cartesian equispaced mask function as implemented in [1]_.

Similar to :class:`FastMRIMagicMaskFunc`, but instead of the center fraction (`center_fractions`) representing
the fraction of center lines relative to the original size, here it represents the actual number of center lines.

References
----------
.. [1] Defazio, Aaron. “Offset Sampling Improves Deep Learning Based Accelerated MRI Reconstructions by
Exploiting Symmetry.” ArXiv:1912.01101 [Cs, Eess], Feb. 2020. arXiv.org, http://arxiv.org/abs/1912.01101.
"""

def __init__(
self,
accelerations: Union[List[Number], Tuple[Number, ...]],
center_fractions: Optional[Union[List[int], Tuple[int, ...]]] = None,
uniform_range: bool = False,
):
"""Inits :class:`CartesianMagicMaskFunc`.

Parameters
----------
accelerations: Union[List[Number], Tuple[Number, ...]]
Amount of under-sampling. An acceleration of 4 retains 25% of the k-space; the sampling method is given by
the mask type. Has to be the same length as center_fractions if uniform_range is not True.
center_fractions: list or tuple of ints, optional
Number of low-frequency (center) columns to be retained.
If multiple values are provided, then one of these numbers is chosen uniformly each time.
uniform_range: bool
If True, an acceleration will be uniformly sampled between the two values. Default: False.
"""
super().__init__(
accelerations=accelerations,
center_fractions=center_fractions,
uniform_range=uniform_range,
)

def mask_func(
self,
shape: Union[List[int], Tuple[int, ...]],
return_acs: bool = False,
seed: Optional[Union[int, Iterable[int]]] = None,
) -> torch.Tensor:
r"""Creates an equispaced Cartesian mask that exploits conjugate symmetry.

Parameters
----------
shape: list or tuple of ints
The shape of the mask to be created. The shape should have at least 3 dimensions.
Samples are drawn along the second-to-last dimension.
return_acs: bool
Return the autocalibration signal region as a mask.
seed: int or iterable of ints or None (optional)
Seed for the random number generator. Setting the seed ensures the same mask is generated
each time for the same shape. Default: None.

Returns
-------
mask: torch.Tensor
The sampling mask.
"""
if len(shape) < 3:
raise ValueError("Shape should have 3 or more dimensions")

with temp_seed(self.rng, seed):
num_cols = shape[-2]

num_center_lines, acceleration = self.choose_acceleration()

# bound the number of low frequencies between 1 and target columns
target_cols_to_sample = int(round(num_cols / acceleration))
num_center_lines = max(min(num_center_lines, target_cols_to_sample), 1)

acs_mask = self.center_mask_func(num_cols, num_center_lines)

if return_acs:
return torch.from_numpy(self._reshape_and_broadcast_mask(shape, acs_mask))

# adjust acceleration rate based on target acceleration.
adjusted_target_cols_to_sample = target_cols_to_sample - num_center_lines
adjusted_acceleration = 0
if adjusted_target_cols_to_sample > 0:
adjusted_acceleration = int(round(num_cols / adjusted_target_cols_to_sample))

offset = self.rng.randint(0, high=adjusted_acceleration)

if offset % 2 == 0:
offset_pos = offset + 1
offset_neg = offset + 2
else:
offset_pos = offset - 1 + 3
offset_neg = offset - 1 + 0

poslen = (num_cols + 1) // 2
neglen = num_cols - (num_cols + 1) // 2
mask_positive = np.zeros(poslen, dtype=bool)
mask_negative = np.zeros(neglen, dtype=bool)

mask_positive[offset_pos::adjusted_acceleration] = True
mask_negative[offset_neg::adjusted_acceleration] = True
mask_negative = np.flip(mask_negative)

mask = np.fft.fftshift(np.concatenate((mask_positive, mask_negative)))
mask = np.logical_or(mask, acs_mask)

return torch.from_numpy(self._reshape_and_broadcast_mask(shape, mask))
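
A standalone numeric illustration of the offset scheme above (values are illustrative, not from the diff): the positive and negative halves of k-space get complementary offsets, so a sampled column and its conjugate-symmetric partner do not coincide, which is the symmetry argument of [1]_.

import numpy as np

num_cols, adjusted_acceleration, offset = 16, 4, 2  # illustrative values

# Same offset logic as in mask_func above.
if offset % 2 == 0:
    offset_pos, offset_neg = offset + 1, offset + 2
else:
    offset_pos, offset_neg = offset - 1 + 3, offset - 1

poslen = (num_cols + 1) // 2
neglen = num_cols - poslen

mask_positive = np.zeros(poslen, dtype=bool)
mask_negative = np.zeros(neglen, dtype=bool)
mask_positive[offset_pos::adjusted_acceleration] = True
mask_negative[offset_neg::adjusted_acceleration] = True
mask_negative = np.flip(mask_negative)

mask = np.fft.fftshift(np.concatenate((mask_positive, mask_negative)))
print(mask.astype(int))  # sampled +k and -k lines land at different positions
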


class CalgaryCampinasMaskFunc(BaseMaskFunc):
BASE_URL = "https://s3.aiforoncology.nl/direct-project/calgary_campinas_masks/"
MASK_MD5S = {