diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py
index 5bc38f69ea..75ea009598 100644
--- a/monai/data/image_reader.py
+++ b/monai/data/image_reader.py
@@ -989,6 +989,8 @@ def get_data(self, img) -> tuple[np.ndarray, dict]:
 
         for i, filename in zip(ensure_tuple(img), self.filenames):
             header = self._get_meta_dict(i)
+            if MetaKeys.PIXDIM in header:
+                header[MetaKeys.ORIGINAL_PIXDIM] = np.array(header[MetaKeys.PIXDIM], copy=True)
             header[MetaKeys.AFFINE] = self._get_affine(i)
             header[MetaKeys.ORIGINAL_AFFINE] = self._get_affine(i)
             header["as_closest_canonical"] = self.as_closest_canonical
diff --git a/monai/data/meta_tensor.py b/monai/data/meta_tensor.py
index c4c491e1b9..b2ca6860ae 100644
--- a/monai/data/meta_tensor.py
+++ b/monai/data/meta_tensor.py
@@ -477,6 +477,10 @@ def pixdim(self):
             return [affine_to_spacing(a) for a in self.affine]
         return affine_to_spacing(self.affine)
 
+    def set_pixdim(self) -> None:
+        """Update pixdim based on current affine."""
+        self.meta[MetaKeys.PIXDIM][1 : 1 + len(self.pixdim)] = affine_to_spacing(self.affine)
+
     def peek_pending_shape(self):
         """
         Get the currently expected spatial shape as if all the pending operations are executed.
diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py
index e4ed196eff..f54f76f3bf 100644
--- a/monai/transforms/spatial/array.py
+++ b/monai/transforms/spatial/array.py
@@ -535,6 +535,9 @@ def __call__(
             dtype=dtype,
             lazy=lazy_,
         )
+        if isinstance(data_array, MetaTensor) and "pixdim" in data_array.meta:
+            data_array = cast(MetaTensor, data_array.clone())
+            data_array.set_pixdim()
         if self.recompute_affine and isinstance(data_array, MetaTensor):
             if lazy_:
                 raise NotImplementedError("recompute_affine is not supported with lazy evaluation.")
diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py
index 2b80034a07..3cca368127 100644
--- a/monai/transforms/spatial/dictionary.py
+++ b/monai/transforms/spatial/dictionary.py
@@ -24,11 +24,13 @@
 import numpy as np
 import torch
 
+import monai.transforms as transforms
 from monai.config import DtypeLike, KeysCollection, SequenceStr
 from monai.config.type_definitions import NdarrayOrTensor
 from monai.data.box_utils import BoxMode, StandardMode
 from monai.data.meta_obj import get_track_meta
 from monai.data.meta_tensor import MetaTensor
+from monai.data.utils import is_supported_format
 from monai.networks.layers.simplelayers import GaussianFilter
 from monai.transforms.croppad.array import CenterSpatialCrop
 from monai.transforms.inverse import InvertibleTransform
@@ -520,6 +522,11 @@ def __call__(self, data: Mapping[Hashable, torch.Tensor], lazy: bool | None = No
                 output_spatial_shape=output_shape_k if should_match else None,
                 lazy=lazy_,
             )
+            if isinstance(d[key], MetaTensor) and f"{key}_meta_dict" in d:
+                if "filename_or_obj" in d[key].meta and is_supported_format(
+                    d[key].meta["filename_or_obj"], ["nii", "nii.gz"]
+                ):
+                    d = transforms.sync_meta_info(key, d)
             if output_shape_k is None:
                 output_shape_k = d[key].peek_pending_shape() if isinstance(d[key], MetaTensor) else d[key].shape[1:]
         return d
diff --git a/monai/utils/enums.py b/monai/utils/enums.py
index 1fbf3ffa05..24f723c36a 100644
--- a/monai/utils/enums.py
+++ b/monai/utils/enums.py
@@ -528,6 +528,8 @@ class MetaKeys(StrEnum):
     Typical keys for MetaObj.meta
     """
 
+    PIXDIM = "pixdim"  # MetaTensor.pixdim
+    ORIGINAL_PIXDIM = "original_pixdim"  # the pixdim after image loading before any data processing
     AFFINE = "affine"  # MetaTensor.affine
     ORIGINAL_AFFINE = "original_affine"  # the affine after image loading before any data processing
     SPATIAL_SHAPE = "spatial_shape"  # optional key for the length in each spatial dimension
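
For reference, a minimal usage sketch of the behaviour this patch targets (not part of the diff). It assumes the changes above are applied; "image.nii.gz" is a placeholder path to any local NIfTI file, and Compose/LoadImaged/Spacingd are existing MONAI transforms, while the "pixdim"/"original_pixdim" meta keys are the ones introduced here.

# Illustrative sketch only, assuming the patch above is applied.
# "image.nii.gz" is a placeholder path to a local NIfTI file.
from monai.transforms import Compose, LoadImaged, Spacingd

xform = Compose(
    [
        LoadImaged(keys=["image"]),  # NibabelReader records header "pixdim" and the new "original_pixdim"
        Spacingd(keys=["image"], pixdim=(1.5, 1.5, 2.0)),  # resampling now also refreshes meta["pixdim"]
    ]
)
img = xform({"image": "image.nii.gz"})["image"]

print(img.meta["original_pixdim"])  # spacing captured at load time, untouched by Spacingd
print(img.meta["pixdim"])           # NIfTI header pixdim, updated to match the resampled affine
print(img.pixdim)                   # spacing derived from the current affine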