diff --git a/.travis.yml b/.travis.yml
index fcbd9ce3..e377f568 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -179,6 +179,10 @@ script:
       bash scripts/ci/run_style_checks.sh;
     fi
 
+after_script:
+  - if [[ "$TEST_TYPE" == "unittests" ]]; then
+      codecov;
+    fi
 before_deploy:
   - cd $TRAVIS_BUILD_DIR
 
diff --git a/delira/data_loading/__init__.py b/delira/data_loading/__init__.py
index 104b406a..9815f2aa 100644
--- a/delira/data_loading/__init__.py
+++ b/delira/data_loading/__init__.py
@@ -2,7 +2,7 @@ from delira.data_loading.data_loader import DataLoader
 from delira.data_loading.dataset import AbstractDataset, IterableDataset, \
     DictDataset, BaseCacheDataset, BaseExtendCacheDataset, BaseLazyDataset, \
-    ConcatDataset, Nii3DCacheDatset, Nii3DLazyDataset
+    ConcatDataset
 from delira.data_loading.augmenter import Augmenter
 from delira.data_loading.data_manager import DataManager
 from delira.data_loading.load_utils import LoadSample, LoadSampleLabel
@@ -10,11 +10,6 @@ from delira.data_loading.sampler import *
 
 from delira import get_backends as _get_backends
 
-# If torch backend is available: Import Torchvision dataset
-if "TORCH" in _get_backends():
-    from delira.data_loading.dataset import TorchvisionClassificationDataset
-
-
 # if numba is installed: Import Numba Transforms
 try:
     from delira.data_loading.numba_transform import NumbaTransform, \
diff --git a/delira/data_loading/dataset.py b/delira/data_loading/dataset.py
index 85099164..c734fa78 100644
--- a/delira/data_loading/dataset.py
+++ b/delira/data_loading/dataset.py
@@ -8,9 +8,7 @@ from collections import Iterable
 
 from tqdm import tqdm
 
-from delira import get_backends
 from delira.utils import subdirs
-from delira.utils.decorators import make_deprecated
 
 
 class AbstractDataset:
@@ -150,39 +148,6 @@ def get_subset(self, indices):
 
         return BlankDataset(subset_data, **kwargs)
 
-    @make_deprecated("Dataset.get_subset")
-    def train_test_split(self, *args, **kwargs):
-        """
-        split dataset into train and test data
-
-        .. deprecated-removed:: 0.3 0.4
-            method will be removed in next major release
-
-        Parameters
-        ----------
-        *args :
-            positional arguments of ``train_test_split``
-        **kwargs :
-            keyword arguments of ``train_test_split``
-
-        Returns
-        -------
-        :class:`BlankDataset`
-            train dataset
-        :class:`BlankDataset`
-            test dataset
-
-        See Also
-        --------
-        ``sklearn.model_selection.train_test_split``
-
-        """
-
-        train_idxs, test_idxs = train_test_split(
-            np.arange(len(self)), *args, **kwargs)
-
-        return self.get_subset(train_idxs), self.get_subset(test_idxs)
-
 
 class _DatasetIter(object):
     """
@@ -486,7 +451,7 @@ def _make_dataset(self, path: typing.Union[str, list]):
                 data.append(self._load_fn(p, **self._load_kwargs))
         else:
             # call _sample_fn for all elements inside directory
-            assert os.path.isdir(path), '%s is not a valid directory' % dir
+            assert os.path.isdir(path), '%s is not a valid directory' % path
             for p in tqdm(os.listdir(path), unit='samples',
                           desc="Loading samples"):
                 data.append(self._load_fn(os.path.join(path, p),
@@ -566,7 +531,7 @@ def _make_dataset(self, path: typing.Union[str, list]):
             data = list(path)
         else:
             # generate list from all items
-            assert os.path.isdir(path), '%s is not a valid directory' % dir
+            assert os.path.isdir(path), '%s is not a valid directory' % path
             data = [os.path.join(path, p) for p in os.listdir(path)]
 
         return data
@@ -729,313 +694,3 @@ def __getitem__(self, index):
 
     def __len__(self):
         return sum([len(dset) for dset in self.data])
-
-
-class Nii3DLazyDataset(BaseLazyDataset):
-    """
-    Dataset to load 3D medical images (e.g. 
from .nii files) during training - """ - - @make_deprecated('LoadSample') - def __init__( - self, - data_path, - load_fn, - img_files, - label_file, - **load_kwargs): - """ - Parameters - ---------- - data_path : str - root path to data samples where each samples has it's own folder - load_fn : function - function to load single data sample - img_extensions : list - valid extensions of image files - gt_extensions : list - valid extensions of label files - img_files : list - list of image filenames - label_file : string - label file name - **load_kwargs : - additional loading keyword arguments (image shape, - channel number, ...); passed to load_fn - """ - self.img_files = img_files - self.label_file = label_file - super().__init__(data_path, load_fn, **load_kwargs) - - def _make_dataset(self, path): - """ - Helper Function to make a dataset containing all samples in a certain - directory - Parameters - ---------- - path: str - path to data samples - Returns - ------- - list - list of sample paths - Raises - ------ - AssertionError - if `path` is not a valid directory - """ - assert os.path.isdir(path) - - data = [[{'img': [os.path.join(t, i) for i in self.img_files], - 'label': os.path.join(t, self.label_file)}] - for t in subdirs(path)] - return data - - -class Nii3DCacheDatset(BaseCacheDataset): - """ - Dataset to load 3D medical images (e.g. from .nii files) before training - """ - - @make_deprecated('LoadSample') - def __init__(self, data_path, load_fn, - img_files, label_file, **load_kwargs): - """ - Parameters - ---------- - data_path : str - root path to data samples where each samples has it's own folder - load_fn : function - function to load single data sample - img_extensions : list - valid extensions of image files - gt_extensions : list - valid extensions of label files - img_files : list - list of image filenames - label_file : str - label file name - **load_kwargs : - additional loading keyword arguments (image shape, - channel number, ...); passed to load_fn - """ - self.img_files = img_files - self.label_file = label_file - super().__init__(data_path, load_fn, **load_kwargs) - - def _make_dataset(self, path): - """ - Helper Function to make a dataset containing all samples in a certain - directory - Parameters - ---------- - path: str - path to data samples - Returns - ------- - list - list of samples - Raises - ------ - AssertionError - if `path` is not a valid directory - """ - assert os.path.isdir(path) - data = [] - for s in tqdm(subdirs(path), unit='samples', desc="Loading samples"): - files = {'img': [os.path.join(s, i) for i in self.img_files], - 'label': os.path.join(s, self.label_file)} - - data.append(self._load_fn(files, **self._load_kwargs)) - return data - - -if "TORCH" in get_backends(): - - from torchvision.datasets import CIFAR10, CIFAR100, EMNIST, MNIST, \ - FashionMNIST - import torch - - class TorchvisionClassificationDataset(AbstractDataset): - """ - Wrapper for torchvision classification datasets to provide consistent - API - - """ - - def __init__(self, dataset, root="/tmp/", train=True, download=True, - img_shape=(28, 28), one_hot=False, **kwargs): - """ - - Parameters - ---------- - dataset : str - Defines the dataset to use. 
- must be one of - ['mnist', 'emnist', 'fashion_mnist', 'cifar10', 'cifar100'] - root : str - path dataset (If download is True: dataset will be extracted - here; - else: path to extracted dataset) - train : bool - whether to use the train or the testset - download : bool - whether or not to download the dataset - (If already downloaded at specified path, - it won't be downloaded again) - img_shape : tuple - Height and width of output images (will be interpolated) - **kwargs : - Additional keyword arguments passed to the torchvision dataset - class for initialization - - """ - super().__init__("", None) - - self.download = download - self.train = train - self.root = root - self.img_shape = img_shape - self.num_classes = None - self.one_hot = one_hot - self.data = self._make_dataset(dataset, **kwargs) - - def _make_dataset(self, dataset, **kwargs): - """ - Create the actual dataset - - Parameters - ---------- - dataset: str - Defines the dataset to use. - must be one of - ['mnist', 'emnist', 'fashion_mnist', 'cifar10', 'cifar100'] - **kwargs : - Additional keyword arguments passed to the torchvision dataset - class for initialization - - Returns - ------- - torchvision.Dataset - actual Dataset - - Raises - ------ - KeyError - Dataset string does not specify a valid dataset - - """ - if dataset.lower() == "mnist": - _dataset_cls = MNIST - self.num_classes = 10 - elif dataset.lower() == "emnist": - _dataset_cls = EMNIST - self.num_classes = None - elif dataset.lower() == "fashion_mnist": - _dataset_cls = FashionMNIST - self.num_classes = 10 - elif dataset.lower() == "cifar10": - _dataset_cls = CIFAR10 - self.num_classes = 10 - elif dataset.lower() == "cifar100": - _dataset_cls = CIFAR100 - self.num_classes = 100 - else: - raise KeyError("Dataset %s not found!" % dataset.lower()) - - return _dataset_cls(root=self.root, train=self.train, - download=self.download, **kwargs) - - def __getitem__(self, index): - """ - return data sample specified by index - - Parameters - ---------- - index : int - index to specifiy which data sample to return - - Returns - ------- - dict - data sample - - """ - - data = self.data[index] - label = data[1] - - if isinstance(label, torch.Tensor): - label = label.numpy() - elif isinstance(label, int): - label = np.array(label) - data_dict = {"data": np.array(data[0]), - - "label": label.reshape(1).astype(np.float32)} - - if self.one_hot: - # TODO: Remove and refer to batchgenerators transform: - # https://github.com/MIC-DKFZ/batchgenerators/blob/master/ - # batchgenerators/transforms/utility_transforms.py#L97 - def make_onehot(num_classes, labels): - """ - Function that converts label-encoding to one-hot format. 
- - Parameters - ---------- - num_classes : int - number of classes present in the dataset - - labels : np.ndarray - labels in label-encoding format - - Returns - ------- - np.ndarray - labels in one-hot format - """ - if isinstance(labels, list) or isinstance(labels, int): - labels = np.asarray(labels) - assert isinstance(labels, np.ndarray) - if len(labels.shape) > 1: - one_hot = np.zeros( - shape=(list(labels.shape) + [num_classes]), - dtype=labels.dtype) - for i, c in enumerate(np.arange(num_classes)): - one_hot[..., i][labels == c] = 1 - else: - one_hot = np.zeros(shape=([num_classes]), - dtype=labels.dtype) - for i, c in enumerate(np.arange(num_classes)): - if labels == c: - one_hot[i] = 1 - return one_hot - - data_dict['label'] = make_onehot( - self.num_classes, data_dict['label']) - - img = data_dict["data"] - - img = resize(img, self.img_shape, - mode='reflect', anti_aliasing=True) - if len(img.shape) < 3: - img = img.reshape( - *img.shape, 1) - - img = img.transpose( - (len(img.shape) - 1, *range(len(img.shape) - 1))) - - data_dict["data"] = img.astype(np.float32) - return data_dict - - def __len__(self): - """ - Return Number of samples - - Returns - ------- - int - number of samples - - """ - return len(self.data) diff --git a/delira/data_loading/load_utils.py b/delira/data_loading/load_utils.py index 62d73396..0a79f2cc 100644 --- a/delira/data_loading/load_utils.py +++ b/delira/data_loading/load_utils.py @@ -5,8 +5,6 @@ from skimage.io import imread from skimage.transform import resize -from delira.utils.decorators import make_deprecated - def norm_range(mode): """ diff --git a/delira/data_loading/nii.py b/delira/data_loading/nii.py deleted file mode 100644 index 14734beb..00000000 --- a/delira/data_loading/nii.py +++ /dev/null @@ -1,71 +0,0 @@ -import json -import logging -import os -from abc import abstractmethod - -import SimpleITK as sitk - -from delira.utils.decorators import make_deprecated - -logger = logging.getLogger(__name__) - - -def load_nii(path): - """ - Loads a single nii file - Parameters - ---------- - path: str - path to nii file which should be loaded - Returns - ------- - np.ndarray - numpy array containing the loaded data - """ - return sitk.GetArrayFromImage(sitk.ReadImage(path)) - - -class BaseLabelGenerator(object): - """ - Base Class to load labels from json files - .. 
deprecated-removed: 0.3.3 0.3.5 - """ - - @make_deprecated('dict containing labels') - def __init__(self, fpath): - """ - Parameters - ---------- - fpath : str - filepath to json file - Raises - ------ - AssertionError - `fpath` does not end with 'json' - """ - assert fpath.endswith('json') - self.fpath = fpath - self.data = self._load() - - def _load(self): - """ - Private Helper function to load the file - Returns - ------- - Any - loaded values from file - """ - with open(os.path.join(self.fpath), 'r') as f: - label = json.load(f) - return label - - @abstractmethod - def get_labels(self): - """ - Abstractmethod to get labels from class - Raises - ------ - NotImplementedError - if not overwritten in subclass - """ - raise NotImplementedError() diff --git a/delira/utils/__init__.py b/delira/utils/__init__.py index 070da1de..4d2f96ee 100644 --- a/delira/utils/__init__.py +++ b/delira/utils/__init__.py @@ -1,6 +1,3 @@ from delira.utils.config import DeliraConfig, Config -from delira.utils.imageops import bounding_box, calculate_origin_offset, \ - max_energy_slice, sitk_new_blank_image, sitk_resample_to_image, \ - sitk_resample_to_shape, sitk_resample_to_spacing from delira.utils.path import subdirs from delira.utils.time import now diff --git a/delira/utils/context_managers.py b/delira/utils/context_managers.py index b70471df..910d701b 100644 --- a/delira/utils/context_managers.py +++ b/delira/utils/context_managers.py @@ -1,159 +1,72 @@ -import contextlib +from delira import get_current_debug_mode, set_debug_mode -from delira import get_backends -from delira.utils.decorators import make_deprecated -if "TORCH" in get_backends(): - import torch +class DebugMode(object): + """ + Context Manager to set a specific debug mode for the code inside the + defined context (and reverting to previous mode afterwards) - class DefaultOptimWrapperTorch(object): - """ - Class wrapping a ``torch`` optimizer to mirror the behavior of ``apex`` - without depending on it + """ + def __init__(self, mode): """ - @make_deprecated( - "'delira.models.model_utils.scale_loss' combined with " - "new apex.amp API (https://github.com/NVIDIA/apex)") - def __init__(self, optimizer: torch.optim.Optimizer, *args, **kwargs): - """ - - Parameters - ---------- - optimizer : torch.optim.Optimizer - the actual optimizer to wrap - *args : - additional positional arguments (unused) - **kwargs : - additional keyword arguments (unused) - - """ - - self._optimizer = optimizer - - @contextlib.contextmanager - def scale_loss(self, loss): - """ - Function which scales the loss in ``apex`` and yields the unscaled - loss here to mirror the API - - Parameters - ---------- - loss : torch.Tensor - the unscaled loss - - """ - - yield loss - return - - def step(self, closure=None): - """ - Wraps the step method of the optimizer and calls the original step - method - - Parameters - ---------- - closure : callable - A closure that reevaluates the model and returns the loss. - Optional for most optimizers. 
- - """ - - return self._optimizer.step(closure=closure) - - # Forward any attribute lookups - def __getattr__(self, attr): - return getattr(self._optimizer, attr) - - # Forward all torch.optim.Optimizer methods - def __getstate__(self): - return self._optimizer.__getstate__() - def __setstate__(self, *args, **kwargs): - return self._optimizer.__setstate__(*args, **kwargs) - - def __repr__(self): - return self._optimizer.__repr__() - - def state_dict(self): - return self._optimizer.state_dict() - - def load_state_dict(self, state_dict): - return self._optimizer.load_state_dict(state_dict) - - def zero_grad(self): - return self._optimizer.zero_grad() - - def add_param_group(self, param_group): - return self._optimizer.add_param_group(param_group) + Parameters + ---------- + mode : bool + the debug mode; if ``True`` disables all multiprocessing + """ + self._mode = mode - from delira import get_current_debug_mode, set_debug_mode + def _switch_to_new_mode(self): + """ + helper function to switch to the new debug mode + (and saving the previous one in ``self._mode``) - class DebugMode(object): """ - Context Manager to set a specific debug mode for the code inside the - defined context (and reverting to previous mode afterwards) + prev_mode = get_current_debug_mode() + set_debug_mode(self._mode) + self._mode = prev_mode + def __enter__(self): """ + Sets the specified debug mode on entering the context + """ + self._switch_to_new_mode() - def __init__(self, mode): - """ - - Parameters - ---------- - mode : bool - the debug mode; if ``True`` disables all multiprocessing - """ - self._mode = mode - - def _switch_to_new_mode(self): - """ - helper function to switch to the new debug mode - (and saving the previous one in ``self._mode``) - - """ - prev_mode = get_current_debug_mode() - set_debug_mode(self._mode) - self._mode = prev_mode - - def __enter__(self): - """ - Sets the specified debug mode on entering the context - """ - self._switch_to_new_mode() - - def __exit__(self, *args, **kwargs): - """ - Resets the previous debug mode on exiting the context - - Parameters - ---------- - *args : - arbitrary positional arguments - (ignored here, just needed for compatibility with other context - managers) - **kwargs : - arbitrary keyword arguments - (ignored here, just needed for compatibility with other context - managers) - - """ - self._switch_to_new_mode() - - class DebugEnabled(DebugMode): + def __exit__(self, *args, **kwargs): """ - Context Manager to enable the debug mode for the wrapped context + Resets the previous debug mode on exiting the context + + Parameters + ---------- + *args : + arbitrary positional arguments + (ignored here, just needed for compatibility with other context + managers) + **kwargs : + arbitrary keyword arguments + (ignored here, just needed for compatibility with other context + managers) """ + self._switch_to_new_mode() - def __init__(self): - super().__init__(True) - class DebugDisabled(DebugMode): - """ - Context Manager to disable the debug mode for the wrapped context - """ +class DebugEnabled(DebugMode): + """ + Context Manager to enable the debug mode for the wrapped context + + """ + + def __init__(self): + super().__init__(True) + + +class DebugDisabled(DebugMode): + """ + Context Manager to disable the debug mode for the wrapped context + """ - def __init__(self): - super().__init__(False) + def __init__(self): + super().__init__(False) diff --git a/delira/utils/imageops.py b/delira/utils/imageops.py deleted file mode 100644 index c1d801af..00000000 --- 
a/delira/utils/imageops.py +++ /dev/null @@ -1,245 +0,0 @@ -import SimpleITK as sitk -import numpy as np -from scipy.ndimage import zoom - -from delira.utils.decorators import dtype_func - -sitk_img_func = dtype_func(sitk.Image) - - -def calculate_origin_offset(new_spacing, old_spacing): - """ - Calculates the origin offset of two spacings - - Parameters - ---------- - new_spacing : list or np.ndarray or tuple - new spacing - old_spacing : list or np.ndarray or tuple - old spacing - - Returns - ------- - np.ndarray - origin offset - """ - return np.subtract(new_spacing, old_spacing) / 2 - - -@sitk_img_func -def sitk_resample_to_spacing(image, new_spacing=(1.0, 1.0, 1.0), - interpolator=sitk.sitkLinear, - default_value=0.): - """ - Resamples SITK Image to a given spacing - - Parameters - ---------- - image : SimpleITK.Image - image which should be resampled - new_spacing : list or np.ndarray or tuple - target spacing - interpolator : Any - implements the actual interpolation - default_value : float - default value - - Returns - ------- - SimpleITK.Image - resampled Image with target spacing - - """ - zoom_factor = np.divide(image.GetSpacing(), new_spacing) - new_size = np.asarray(np.ceil(np.round(np.multiply( - zoom_factor, image.GetSize()), decimals=5)), dtype=np.int16) - offset = calculate_origin_offset(new_spacing, image.GetSpacing()) - reference_image = sitk_new_blank_image(size=new_size, - spacing=new_spacing, - direction=image.GetDirection(), - origin=image.GetOrigin() + offset, - default_value=default_value) - return sitk_resample_to_image(image, reference_image, - interpolator=interpolator, - default_value=default_value) - - -@sitk_img_func -def sitk_resample_to_image(image, reference_image, default_value=0., - interpolator=sitk.sitkLinear, transform=None, - output_pixel_type=None): - """ - Resamples Image to reference image - - Parameters - ---------- - image : SimpleITK.Image - the image which should be resampled - reference_image : SimpleITK.Image - the resampling target - default_value : float - default value - interpolator : Any - implements the actual interpolation - transform : Any (default: None) - transformation - output_pixel_type : Any (default:None) - type of output pixels - - Returns - ------- - SimpleITK.Image - resampled image - - """ - if transform is None: - transform = sitk.Transform() - transform.SetIdentity() - if output_pixel_type is None: - output_pixel_type = image.GetPixelID() - resample_filter = sitk.ResampleImageFilter() - resample_filter.SetInterpolator(interpolator) - resample_filter.SetTransform(transform) - resample_filter.SetOutputPixelType(output_pixel_type) - resample_filter.SetDefaultPixelValue(default_value) - resample_filter.SetReferenceImage(reference_image) - return resample_filter.Execute(image) - - -def sitk_new_blank_image(size, spacing, direction, origin, default_value=0.): - """ - Create a new blank image with given properties - - Parameters - ---------- - size : list or np.ndarray or tuple - new image size - spacing : list or np.ndarray or tuple - spacing of new image - direction : - new image's direction - origin : - new image's origin - default_value : float - new image's default value - - Returns - ------- - SimpleITK.Image - Blank image with given properties - - """ - image = sitk.GetImageFromArray( - np.ones(size, dtype=np.float).T * default_value) - image.SetSpacing(spacing) - image.SetDirection(direction) - image.SetOrigin(origin) - return image - - -@sitk_img_func -def sitk_resample_to_shape(img, x, y, z, order=1): - """ - 
Resamples Image to given shape - - Parameters - ---------- - img : SimpleITK.Image - x : int - shape in x-direction - y : int - shape in y-direction - z : int - shape in z-direction - order : int - interpolation order - - Returns - ------- - SimpleITK.Image - Resampled Image - - """ - img_np = sitk.GetArrayFromImage(img) - img_np_fixed_size = zoom(img_np, - [z / img_np.shape[0], - y / img_np.shape[1], - x / img_np.shape[2]], - order=order) - return sitk.GetImageFromArray(img_np_fixed_size) - - -@sitk_img_func -def max_energy_slice(img): - """Determine the axial slice in which the image energy is max - - Parameters - ---------- - img : SimpleITK.Image - given image - - Returns - ------- - int - slice index - """ - assert img.GetDimension() == 3 - return int(np.argmax(np.sum(sitk.GetArrayFromImage(img), axis=(1, 2)))) - - -def sitk_copy_metadata(img_source, img_target): - """ Copy metadata (=DICOM Tags) from one image to another - - Parameters - ---------- - img_source : SimpleITK.Image - Source image - img_target : SimpleITK.Image - Source image - - Returns - ------- - SimpleITK.Image - Target image with copied metadata - """ - for k in img_source.GetMetaDataKeys(): - img_target.SetMetaData(k, img_source.GetMetaData(k)) - return img_target - - -@sitk_img_func -def bounding_box(mask, margin=None): - """Calculate bounding box coordinates of binary mask - - Parameters - ---------- - mask : SimpleITK.Image - Binary mask - margin : int, default: None - margin to be added to min/max on each dimension - - Returns - ------- - tuple - bounding box coordinates of the form (xmin, xmax, ymin, ymax, - zmin, zmax) - """ - # mask_arr is in z, y, x order - mask_arr = sitk.GetArrayFromImage(mask) - nz = np.where(mask_arr != 0) - lower = [np.min(nz[0]), np.min(nz[1]), np.min(nz[2])] - upper = [np.max(nz[0]), np.max(nz[1]), np.max(nz[2])] - if margin is not None: - for axis in range(3): - # make sure lower bound with margin is valid - if lower[axis] - margin >= 0: - lower[axis] -= margin - else: - lower[axis] = 0 - # make sure upper bound with margin is valid - if upper[axis] + margin <= mask_arr.shape[axis] - 1: - upper[axis] += margin - else: - upper[axis] = mask_arr.shape[axis] - 1 - bbox = lower[0], upper[0], lower[1], upper[1], lower[2], upper[2] - return bbox diff --git a/requirements/base.txt b/requirements/base.txt index c37eb196..5c1a9c53 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -1,16 +1,12 @@ numpy>=1.15.0 -scikit-image>=0.14.0 scikit-learn>=0.20.0 jupyter>=1.0.0 ipython joblib -pillow>=5.4.1 -SimpleITK>=1.1.0 pylint tqdm visdom>=0.1.8.5 pyyaml batchgenerators>=0.18.2,!=0.19.2,<0.19.4 tensorboardX -psutil nested_lookup
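
Migration note: with the deprecated ``Dataset.train_test_split`` removed,
splitting is now done explicitly through ``Dataset.get_subset``, which is
exactly what the old wrapper called internally. A minimal sketch, assuming
``dset`` is any concrete ``AbstractDataset`` instance and the split
parameters are placeholders:

    import numpy as np
    from sklearn.model_selection import train_test_split

    # split the index range, then materialize both subsets
    # (get_subset returns a BlankDataset for each index set)
    train_idxs, test_idxs = train_test_split(
        np.arange(len(dset)), test_size=0.25, random_state=42)
    train_set = dset.get_subset(train_idxs)
    test_set = dset.get_subset(test_idxs)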
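
The removed ``Nii3DLazyDataset``/``Nii3DCacheDatset`` were already deprecated
in favor of the ``LoadSample`` utilities; their behavior can also be recovered
with ``BaseCacheDataset`` and a plain load function. A sketch under the
assumption of one subdirectory per sample (the file names below are
placeholders, and note that SimpleITK is no longer a base requirement after
this change, so it must be installed separately):

    import os

    import numpy as np
    import SimpleITK as sitk

    from delira.data_loading import BaseCacheDataset

    def load_nii_sample(path, img_files=("img.nii.gz",),
                        label_file="label.nii.gz"):
        # read every image volume plus the label map of one sample folder
        imgs = [sitk.GetArrayFromImage(sitk.ReadImage(os.path.join(path, f)))
                for f in img_files]
        label = sitk.GetArrayFromImage(
            sitk.ReadImage(os.path.join(path, label_file)))
        return {"data": np.stack(imgs), "label": label}

    # BaseCacheDataset calls load_fn once per entry of data_path up front
    dset = BaseCacheDataset("/path/to/samples", load_fn=load_nii_sample)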
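
The one-hot helper that disappeared with ``TorchvisionClassificationDataset``
pointed to the batchgenerators utility transforms as its replacement. For
plain arrays, an equivalent of the removed helper (a sketch, not the
batchgenerators API) is a single identity-matrix indexing step:

    import numpy as np

    def make_onehot(labels, num_classes):
        # index an identity matrix: works for scalars and label arrays,
        # adding the class dimension as the last axis, like the old helper
        labels = np.asarray(labels, dtype=np.int64)
        return np.eye(num_classes, dtype=np.float32)[labels]

    make_onehot(3, 5)       # -> [0., 0., 0., 1., 0.]
    make_onehot([0, 2], 3)  # -> [[1., 0., 0.], [0., 0., 1.]]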
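
The rewritten ``delira/utils/context_managers.py`` makes the debug-mode
context managers backend-independent: they previously lived behind the
``"TORCH" in get_backends()`` guard and are now available unconditionally.
Usage is unchanged; for example:

    from delira import get_current_debug_mode
    from delira.utils.context_managers import DebugEnabled

    with DebugEnabled():
        # debug mode is on in here, so delira disables all multiprocessing
        assert get_current_debug_mode()

    # the previous debug mode is restored automatically on exit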