From 484d70dba9b72dac5cd33ea3106263807eaaa286 Mon Sep 17 00:00:00 2001 From: Keyn34 Date: Mon, 2 Dec 2024 18:12:16 +0100 Subject: [PATCH 01/11] Migrated type hints to be compatible with Python 3.9 --- moosez/benchmarking/benchmark.py | 7 +++-- moosez/download.py | 3 +- moosez/file_utilities.py | 11 +++---- moosez/image_conversion.py | 8 ++--- moosez/image_processing.py | 39 +++++++++++++------------ moosez/input_validation.py | 7 +++-- moosez/models.py | 17 ++++++----- moosez/moosez.py | 9 +++--- moosez/nnUNet_custom_trainer/utility.py | 2 +- moosez/predict.py | 5 ++-- moosez/system.py | 9 +++--- setup.py | 3 +- 12 files changed, 65 insertions(+), 55 deletions(-) diff --git a/moosez/benchmarking/benchmark.py b/moosez/benchmarking/benchmark.py index c6737d8..0cd8907 100644 --- a/moosez/benchmarking/benchmark.py +++ b/moosez/benchmarking/benchmark.py @@ -4,10 +4,11 @@ import os import numpy import threading +from typing import Union, Tuple, List, Dict class PerformanceObserver: - def __init__(self, image: str | None = None, model: str | None = None, polling_rate: float = 0.1): + def __init__(self, image: Union[str, None] = None, model: Union[str, None] = None, polling_rate: float = 0.1): self.monitoring = False self.polling_rate = polling_rate self.monitoring_thread = None @@ -48,7 +49,7 @@ def __get_memory_usage_of_process_tree(self): continue return memory_usage / (1024 * 1024) # Convert to MB - def __monitor_memory_usage(self, interval): + def __monitor_memory_usage(self, interval: float): while self.monitoring: current_time = time.time() - self.monitoring_start_time current_memory_MB = self.__get_memory_usage_of_process_tree() @@ -114,7 +115,7 @@ def plot_performance(self, path: str): plt.savefig(os.path.join(path, f'performance_plot.png')) plt.close() - def get_peak_resources(self) -> list: + def get_peak_resources(self) -> List: image_name = os.path.basename(self.metadata_image) model_name = self.metadata_model if self.metadata_image_size is not None and isinstance(self.metadata_image_size, (list, tuple)): diff --git a/moosez/download.py b/moosez/download.py index 6bd2c88..f81f4eb 100644 --- a/moosez/download.py +++ b/moosez/download.py @@ -2,11 +2,12 @@ import os from pathlib import Path import requests +from typing import Union from moosez import constants from moosez import system -def download_enhance_data(download_directory: str | None, output_manager: system.OutputManager): +def download_enhance_data(download_directory: Union[str, None], output_manager: system.OutputManager): output_manager.log_update(f" - Downloading ENHANCE 1.6k data") if not download_directory: diff --git a/moosez/file_utilities.py b/moosez/file_utilities.py index 0bf2358..5bc8b2a 100644 --- a/moosez/file_utilities.py +++ b/moosez/file_utilities.py @@ -21,6 +21,7 @@ import shutil from datetime import datetime from multiprocessing import Pool +from typing import Union, Tuple, List from moosez import constants @@ -35,7 +36,7 @@ def create_directory(directory_path: str) -> None: os.makedirs(directory_path) -def get_files(directory: str, prefix: str | tuple[str, ...], suffix: str | tuple[str, ...]) -> list[str]: +def get_files(directory: str, prefix: Union[str, Tuple[str, ...]], suffix: Union[str, Tuple[str, ...]]) -> List[str]: """ Returns the list of files in the directory with the specified wildcard. 
@@ -65,7 +66,7 @@ def get_files(directory: str, prefix: str | tuple[str, ...], suffix: str | tuple return files -def moose_folder_structure(parent_directory: str) -> tuple[str, str, str]: +def moose_folder_structure(parent_directory: str) -> Tuple[str, str, str]: """ Creates the moose folder structure. @@ -98,7 +99,7 @@ def copy_file(file: str, destination: str) -> None: shutil.copy(file, destination) -def copy_files_to_destination(files: list[str], destination: str) -> None: +def copy_files_to_destination(files: List[str], destination: str) -> None: """ Copies the files inside the list to the destination directory in a parallel fashion. @@ -112,7 +113,7 @@ def copy_files_to_destination(files: list[str], destination: str) -> None: pool.starmap(copy_file, [(file, destination) for file in files]) -def select_files_by_modality(moose_compliant_subjects: list[str], modality_tag: str) -> list: +def select_files_by_modality(moose_compliant_subjects: List[str], modality_tag: str) -> List: """ Selects the files with the selected modality tag from the moose-compliant folders. @@ -134,7 +135,7 @@ def select_files_by_modality(moose_compliant_subjects: list[str], modality_tag: return selected_files -def find_pet_file(folder: str) -> str | None: +def find_pet_file(folder: str) -> Union[str, None]: """ Finds the PET file in the specified folder. diff --git a/moosez/image_conversion.py b/moosez/image_conversion.py index f339e5c..33b08cc 100644 --- a/moosez/image_conversion.py +++ b/moosez/image_conversion.py @@ -23,14 +23,14 @@ import re import shutil import unicodedata - import SimpleITK import dicom2nifti import pydicom +from typing import Union, Dict from moosez import system -def non_nifti_to_nifti(input_path: str, output_manager: system.OutputManager, output_directory: str = None) -> None: +def non_nifti_to_nifti(input_path: str, output_manager: system.OutputManager, output_directory: Union[str, None] = None) -> None: """ Converts any image format known to ITK to NIFTI @@ -170,7 +170,7 @@ def is_dicom_file(filename: str) -> bool: return False -def create_dicom_lookup(dicom_dir: str) -> dict: +def create_dicom_lookup(dicom_dir: str) -> Dict: """ Create a lookup dictionary from DICOM files. @@ -211,7 +211,7 @@ def create_dicom_lookup(dicom_dir: str) -> dict: return dicom_info -def rename_nifti_files(nifti_dir: str, dicom_info: dict) -> None: +def rename_nifti_files(nifti_dir: str, dicom_info: Dict) -> None: """ Rename NIfTI files based on a lookup dictionary. 
diff --git a/moosez/image_processing.py b/moosez/image_processing.py index 555d907..b832dd3 100644 --- a/moosez/image_processing.py +++ b/moosez/image_processing.py @@ -25,6 +25,7 @@ import scipy.ndimage as ndimage import nibabel import os +from typing import Union, Tuple, List, Dict from moosez.constants import CHUNK_THRESHOLD_RESAMPLING, CHUNK_THRESHOLD_INFERRING from moosez import models from moosez import system @@ -100,12 +101,12 @@ def get_shape_statistics(mask_image: SimpleITK.Image, model: models.Model, out_c stats_df.to_csv(out_csv) -def limit_fov(image_array: np.array, segmentation_array: np.array, fov_label: list[int] | int, largest_component_only: bool = False): +def limit_fov(image_array: np.array, segmentation_array: np.array, fov_label: Union[List[int], int], largest_component_only: bool = False): if largest_component_only: segmentation_array = largest_connected_component(segmentation_array, fov_label) - if type(fov_label) is list: + if isinstance(fov_label, list): z_indices = np.where((segmentation_array >= fov_label[0]) & (segmentation_array <= fov_label[1]))[0] else: z_indices = np.where(segmentation_array == fov_label)[0] @@ -117,7 +118,7 @@ def limit_fov(image_array: np.array, segmentation_array: np.array, fov_label: li return limited_fov_array, {"z_min": z_min, "z_max": z_max, "original_shape": image_array.shape} -def expand_segmentation_fov(limited_fov_segmentation_array: np.ndarray, original_fov_info: dict) -> np.ndarray: +def expand_segmentation_fov(limited_fov_segmentation_array: np.ndarray, original_fov_info: Dict) -> np.ndarray: z_min = original_fov_info["z_min"] z_max = original_fov_info["z_max"] original_shape = original_fov_info["original_shape"] @@ -179,13 +180,13 @@ def largest_connected_component(segmentation_array, intensities): class ImageChunker: @staticmethod - def __compute_interior_indices(axis_length: int, number_of_chunks: int) -> (list[int], list[int]): + def __compute_interior_indices(axis_length: int, number_of_chunks: int) -> Tuple[List[int], List[int]]: start = [int(round(k * axis_length / number_of_chunks)) for k in range(number_of_chunks)] end = [int(round((k + 1) * axis_length / number_of_chunks)) for k in range(number_of_chunks)] return start, end @staticmethod - def __chunk_array_with_overlap(array_shape: list[int] | tuple[int, ...], splits_per_dimension: list[int] | tuple[int, ...], overlap_per_dimension: list[int] | tuple[int, ...]) -> list[dict]: + def __chunk_array_with_overlap(array_shape: Union[List[int], Tuple[int, ...]], splits_per_dimension: Union[List[int], Tuple[int, ...]], overlap_per_dimension: Union[List[int], Tuple[int, ...]]) -> List[Dict]: dims = array_shape num_dims = len(array_shape) starts_list = [] @@ -232,7 +233,7 @@ def __chunk_array_with_overlap(array_shape: list[int] | tuple[int, ...], splits_ return chunk_info @staticmethod - def array_to_chunks(image_array: np.ndarray, splits_per_dimension: list[int] | tuple[int, ...], overlap_per_dimension: list[int] | tuple[int, ...]) -> (list[np.ndarray], list[dict]): + def array_to_chunks(image_array: np.ndarray, splits_per_dimension: Union[List[int], Tuple[int, ...]], overlap_per_dimension: Union[List[int], Tuple[int, ...]]) -> Tuple[List[np.ndarray], List[Dict]]: chunk_info = ImageChunker.__chunk_array_with_overlap(image_array.shape, splits_per_dimension, overlap_per_dimension) image_chunks = [] positions = [] @@ -248,7 +249,7 @@ def array_to_chunks(image_array: np.ndarray, splits_per_dimension: list[int] | t return image_chunks, positions @staticmethod - def 
chunks_to_array(image_chunks: list[np.ndarray], image_chunk_positions: dict, final_shape: list[int] | tuple[int, ...]) -> np.ndarray: + def chunks_to_array(image_chunks: List[np.ndarray], image_chunk_positions: Dict, final_shape: List[int] | Tuple[int, ...]) -> np.ndarray: final_arr = np.empty(final_shape, dtype=image_chunks[0].dtype) for image_chunk, image_chunk_position in zip(image_chunks, image_chunk_positions): interior_region = image_chunk[image_chunk_position['interior_slice']] @@ -257,7 +258,7 @@ def chunks_to_array(image_chunks: list[np.ndarray], image_chunk_positions: dict, return final_arr @staticmethod - def determine_splits(image_array: np.ndarray) -> tuple: + def determine_splits(image_array: np.ndarray) -> Tuple: image_shape = image_array.shape splits = [] for axis in image_shape: @@ -305,8 +306,8 @@ def chunk_along_axis(axis: int) -> int: return split @staticmethod - def resample_chunk_SimpleITK(image_chunk: da.array, input_spacing: tuple, interpolation_method: int, - output_spacing: tuple, output_size: tuple) -> da.array: + def resample_chunk_SimpleITK(image_chunk: da.array, input_spacing: Tuple, interpolation_method: int, + output_spacing: Tuple, output_size: Tuple) -> da.array: """ Resamples a dask array chunk. @@ -336,8 +337,8 @@ def resample_chunk_SimpleITK(image_chunk: da.array, input_spacing: tuple, interp @staticmethod def resample_image_SimpleITK_DASK(sitk_image: SimpleITK.Image, interpolation: str, - output_spacing: tuple = (1.5, 1.5, 1.5), - output_size: tuple = None) -> SimpleITK.Image: + output_spacing: Tuple[float, float, float] = (1.5, 1.5, 1.5), + output_size: Union[Tuple, None] = None) -> SimpleITK.Image: """ Resamples a sitk_image using Dask and SimpleITK. @@ -365,7 +366,7 @@ def resample_image_SimpleITK_DASK(sitk_image: SimpleITK.Image, interpolation: st @staticmethod def reslice_identity(reference_image: SimpleITK.Image, moving_image: SimpleITK.Image, - output_image_path: str = None, is_label_image: bool = False) -> SimpleITK.Image: + output_image_path: Union[str, None] = None, is_label_image: bool = False) -> SimpleITK.Image: """ Reslices an image to the same space as another image. 
@@ -396,8 +397,8 @@ def reslice_identity(reference_image: SimpleITK.Image, moving_image: SimpleITK.I @staticmethod def resample_image_SimpleITK_DASK_array(sitk_image: SimpleITK.Image, interpolation: str, - output_spacing: tuple = (1.5, 1.5, 1.5), - output_size: tuple = None) -> np.array: + output_spacing: Tuple[float, float, float] = (1.5, 1.5, 1.5), + output_size: Union[Tuple[float, float, float], None] = None) -> np.array: if interpolation == 'nearest': interpolation_method = SimpleITK.sitkNearestNeighbor elif interpolation == 'linear': @@ -436,13 +437,13 @@ def resample_segmentation(reference_image: SimpleITK.Image, segmentation_image: return resampled_sitk_image -def determine_orientation_code(image: nibabel.Nifti1Image) -> [tuple | list, str]: +def determine_orientation_code(image: nibabel.Nifti1Image) -> Tuple[Union[Tuple, List], str]: affine = image.affine orthonormal_orientation = nibabel.orientations.aff2axcodes(affine) return orthonormal_orientation, ''.join(orthonormal_orientation) -def confirm_orthonormality(image: nibabel.Nifti1Image) -> tuple[nibabel.Nifti1Image, bool]: +def confirm_orthonormality(image: nibabel.Nifti1Image) -> Tuple[nibabel.Nifti1Image, bool]: data = image.get_fdata() affine = image.affine header = image.header @@ -473,7 +474,7 @@ def confirm_orthonormality(image: nibabel.Nifti1Image) -> tuple[nibabel.Nifti1Im return image, orthonormalized -def confirm_orientation(image: nibabel.Nifti1Image) -> tuple[nibabel.Nifti1Image, bool]: +def confirm_orientation(image: nibabel.Nifti1Image) -> Tuple[nibabel.Nifti1Image, bool]: data = image.get_fdata() affine = image.affine header = image.header @@ -520,7 +521,7 @@ def convert_to_sitk(image: nibabel.Nifti1Image) -> SimpleITK.Image: return sitk_image -def standardize_image(image_path: str, output_manager: system.OutputManager, standardization_output_path: str | None) -> SimpleITK.Image: +def standardize_image(image_path: str, output_manager: system.OutputManager, standardization_output_path: Union[str, None]) -> SimpleITK.Image: image = nibabel.load(image_path) _, original_orientation = determine_orientation_code(image) output_manager.log_update(f" Image loaded. Orientation: {original_orientation}") diff --git a/moosez/input_validation.py b/moosez/input_validation.py index e831a16..09ec248 100644 --- a/moosez/input_validation.py +++ b/moosez/input_validation.py @@ -18,12 +18,13 @@ # ---------------------------------------------------------------------------------------------------------------------- import os +from typing import Tuple, List, Dict from moosez import constants from moosez import models from moosez import system -def determine_model_expectations(model_routine: dict[tuple, list[models.ModelWorkflow]], output_manager: system.OutputManager) -> list: +def determine_model_expectations(model_routine: Dict[Tuple, List[models.ModelWorkflow]], output_manager: system.OutputManager) -> List: """ Display expected modality for the model. @@ -31,7 +32,7 @@ def determine_model_expectations(model_routine: dict[tuple, list[models.ModelWor 'FDG-PET-CT' should be split into 'FDG-PET' and 'CT'. :param model_routine: The model routine - :type model_routine: dict[tuple, list[models.ModelWorkflow]] + :type model_routine: Dict[Tuple, List[models.ModelWorkflow]] :param output_manager: The output manager :type output_manager: system.OutputManager :return: A list of modalities. 
@@ -75,7 +76,7 @@ def determine_model_expectations(model_routine: dict[tuple, list[models.ModelWor return required_modalities -def select_moose_compliant_subjects(subject_paths: list[str], modality_tags: list[str], output_manager: system.OutputManager) -> list[str]: +def select_moose_compliant_subjects(subject_paths: List[str], modality_tags: List[str], output_manager: system.OutputManager) -> List[str]: """ Selects the subjects that have the files that have names that are compliant with the moosez. diff --git a/moosez/models.py b/moosez/models.py index 8a29ccf..c6bb73b 100644 --- a/moosez/models.py +++ b/moosez/models.py @@ -2,6 +2,7 @@ import json import zipfile import requests +from typing import Union, Tuple, List, Dict from moosez import system from moosez.constants import (KEY_FOLDER_NAME, KEY_URL, KEY_LIMIT_FOV, DEFAULT_SPACING, FILE_NAME_DATASET_JSON, FILE_NAME_PLANS_JSON, ANSI_GREEN, ANSI_RESET) @@ -158,7 +159,7 @@ def get_expectation(self): return expected_modalities, expected_prefixes - def __get_configuration_folders(self, output_manager: system.OutputManager) -> list[str]: + def __get_configuration_folders(self, output_manager: system.OutputManager) -> List[str]: items = os.listdir(self.directory) folders = [item for item in items if not item.startswith(".") and item.count("__") == 2 and os.path.isdir(os.path.join(self.directory, item))] @@ -170,12 +171,12 @@ def __get_configuration_folders(self, output_manager: system.OutputManager) -> l return folders - def __get_model_configuration(self) -> tuple[str, str, str]: + def __get_model_configuration(self) -> Tuple[str, str, str]: model_configuration_folder = os.path.basename(self.configuration_directory) trainer, planner, resolution_configuration = model_configuration_folder.split("__") return trainer, planner, resolution_configuration - def __get_model_identifier_segments(self) -> tuple[str, str, str]: + def __get_model_identifier_segments(self) -> Tuple[str, str, str]: segments = self.model_identifier.split('_') imaging_type = segments[0] @@ -188,7 +189,7 @@ def __get_model_identifier_segments(self) -> tuple[str, str, str]: return imaging_type, modality, region - def __get_model_data(self) -> tuple[dict, dict]: + def __get_model_data(self) -> Tuple[Dict, Dict]: dataset_json_path = os.path.join(self.configuration_directory, FILE_NAME_DATASET_JSON) plans_json_path = os.path.join(self.configuration_directory, FILE_NAME_PLANS_JSON) try: @@ -246,7 +247,7 @@ def __download(self, output_manager: system.OutputManager): output_manager.log_update(f" - {self.model_identifier} - setup complete.") output_manager.console_update(f"{ANSI_GREEN} {self.model_identifier} - setup complete. 
{ANSI_RESET}") - def __get_organ_indices(self) -> dict[int, str]: + def __get_organ_indices(self) -> Dict[int, str]: labels = self.dataset.get('labels', {}) return {int(value): key for key, value in labels.items() if value != "0"} @@ -306,7 +307,7 @@ def model_identifier_valid(model_identifier: str, output_manager: system.OutputM class ModelWorkflow: def __init__(self, model_identifier: str, output_manager: system.OutputManager): - self.workflow: list[Model] = [] + self.workflow: List[Model] = [] self.__construct_workflow(model_identifier, output_manager) if self.workflow: self.initial_desired_spacing = self.workflow[0].voxel_spacing @@ -331,11 +332,11 @@ def __str__(self) -> str: return " -> ".join([model.model_identifier for model in self.workflow]) -def construct_model_routine(model_identifiers: str | list[str], output_manager: system.OutputManager) -> dict[tuple, list[ModelWorkflow]]: +def construct_model_routine(model_identifiers: Union[str, List[str]], output_manager: system.OutputManager) -> Dict[tuple, List[ModelWorkflow]]: if isinstance(model_identifiers, str): model_identifiers = [model_identifiers] - model_routine: dict = {} + model_routine: Dict = {} output_manager.log_update(' SETTING UP MODEL WORKFLOWS:') for model_identifier in model_identifiers: output_manager.log_update(' - Model name: ' + model_identifier) diff --git a/moosez/moosez.py b/moosez/moosez.py index aa52a69..a236813 100644 --- a/moosez/moosez.py +++ b/moosez/moosez.py @@ -30,6 +30,7 @@ import pandas as pd import multiprocessing as mp import concurrent.futures +from typing import Union, Tuple, List, Dict from moosez import constants from moosez import download from moosez import file_utilities @@ -300,8 +301,8 @@ def main(): output_manager.log_update('----------------------------------------------------------------------------------------------------') -def moose(input_data: str | tuple[numpy.ndarray, tuple[float, float, float]] | SimpleITK.Image, - model_names: str | list[str], output_dir: str = None, accelerator: str = None) -> tuple[list[str] | list[SimpleITK.Image] | list[numpy.ndarray], list[models.Model]]: +def moose(input_data: Union[str, Tuple[numpy.ndarray, Tuple[float, float, float]], SimpleITK.Image], + model_names: Union[str, List[str]], output_dir: str = None, accelerator: str = None) -> Tuple[Union[List[str], List[SimpleITK.Image], List[numpy.ndarray]], List[models.Model]]: """ Execute the MOOSE 3.0 image segmentation process. 
@@ -403,8 +404,8 @@ def moose(input_data: str | tuple[numpy.ndarray, tuple[float, float, float]] | S return generated_segmentations, used_models -def moose_subject(subject: str, subject_index: int, number_of_subjects: int, model_routine: dict, accelerator: str, - output_manager: system.OutputManager | None, benchmark: bool = False): +def moose_subject(subject: str, subject_index: int, number_of_subjects: int, model_routine: Dict, accelerator: str, + output_manager: Union[system.OutputManager, None], benchmark: bool = False): # SETTING UP DIRECTORY STRUCTURE subject_name = os.path.basename(subject) diff --git a/moosez/nnUNet_custom_trainer/utility.py b/moosez/nnUNet_custom_trainer/utility.py index ec0c3e5..f246532 100644 --- a/moosez/nnUNet_custom_trainer/utility.py +++ b/moosez/nnUNet_custom_trainer/utility.py @@ -5,7 +5,7 @@ from moosez import file_utilities -def add_custom_trainers_to_local_nnunetv2(): +def add_custom_trainers_to_local_nnunetv2() -> str: # Locate the site-packages directory site_packages = site.getsitepackages()[0] diff --git a/moosez/predict.py b/moosez/predict.py index 6c606b8..4348504 100644 --- a/moosez/predict.py +++ b/moosez/predict.py @@ -20,6 +20,7 @@ import torch import numpy as np import SimpleITK +from typing import Tuple, List, Dict, Iterator from moosez import models from moosez import image_processing from moosez import system @@ -44,7 +45,7 @@ def initialize_predictor(model: models.Model, accelerator: str) -> nnUNetPredict @dask.delayed -def process_case(preprocessor, chunk: np.ndarray, chunk_properties: dict, predictor: nnUNetPredictor, location: tuple) -> dict: +def process_case(preprocessor, chunk: np.ndarray, chunk_properties: Dict, predictor: nnUNetPredictor, location: Tuple) -> Dict: data, seg = preprocessor.run_case_npy(chunk, None, chunk_properties, @@ -59,7 +60,7 @@ def process_case(preprocessor, chunk: np.ndarray, chunk_properties: dict, predic return {'data': data_tensor, 'data_properties': chunk_properties, 'ofile': None, 'location': location} -def preprocessing_iterator_from_array(image_array: np.ndarray, image_properties: dict, predictor: nnUNetPredictor) -> (iter, list): +def preprocessing_iterator_from_array(image_array: np.ndarray, image_properties: Dict, predictor: nnUNetPredictor) -> Tuple[Iterator, List]: overlap_per_dimension = (0, 20, 20, 20) splits = image_processing.ImageChunker.determine_splits(image_array) chunks, locations = image_processing.ImageChunker.array_to_chunks(image_array, splits, overlap_per_dimension) diff --git a/moosez/system.py b/moosez/system.py index 4512228..c86a41d 100644 --- a/moosez/system.py +++ b/moosez/system.py @@ -13,6 +13,7 @@ from rich.table import Table from rich.text import Text from rich.progress import Progress, TextColumn, BarColumn, FileSizeColumn, TransferSpeedColumn, TimeRemainingColumn +from typing import Union, Tuple, List from moosez.constants import VERSION, ANSI_VIOLET, ANSI_RESET @@ -70,7 +71,7 @@ def create_progress_bar(self): progress_bar = Progress(console=self.console) return progress_bar - def create_table(self, header: list[str], styles: list[str] | None = None) -> Table: + def create_table(self, header: List[str], styles: Union[List[str], None] = None) -> Table: table = Table() if styles is None: styles = [None] * len(header) @@ -78,7 +79,7 @@ def create_table(self, header: list[str], styles: list[str] | None = None) -> Ta table.add_column(header, style = style) return table - def configure_logging(self, log_file_directory: str | None): + def configure_logging(self, 
log_file_directory: Union[str, None]): if not self.verbose_log or self.logger: return @@ -108,7 +109,7 @@ def log_update(self, text: str): if self.verbose_log and self.logger: self.logger.info(text) - def console_update(self, text: str | RenderableType): + def console_update(self, text: Union[str, RenderableType]): if isinstance(text, str): text = Text.from_ansi(text) self.console.print(text) @@ -181,7 +182,7 @@ def display_doi(self): self.console_update(" Copyright 2022, Quantitative Imaging and Medical Physics Team, Medical University of Vienna") -def check_device(output_manager: OutputManager = OutputManager(False, False)) -> tuple[str, int | None]: +def check_device(output_manager: OutputManager = OutputManager(False, False)) -> Tuple[str, Union[int | None]]: """ This function checks the available device for running predictions, considering CUDA and MPS (for Apple Silicon). diff --git a/setup.py b/setup.py index eeb95b8..42ecd06 100644 --- a/setup.py +++ b/setup.py @@ -3,7 +3,7 @@ setup( name='moosez', - version="3.0.5", + version="3.0.6", author='Lalith Kumar Shiyam Sundar | Sebastian Gutschmayer | Manuel Pires', author_email='Lalith.shiyamsundar@meduniwien.ac.at', description='An AI-inference engine for 3D clinical and preclinical whole-body segmentation tasks', @@ -21,6 +21,7 @@ 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', 'Programming Language :: Python :: 3 :: Only', 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.9', 'Topic :: Scientific/Engineering :: Medical Science Apps.', ], From e86b9629f21f1b3de7b63f69d5b448ea4995ddc3 Mon Sep 17 00:00:00 2001 From: Keyn34 Date: Mon, 2 Dec 2024 18:32:08 +0100 Subject: [PATCH 02/11] Fixed Python requires in setup.py to >= 3.9 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 42ecd06..44c8312 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ author='Lalith Kumar Shiyam Sundar | Sebastian Gutschmayer | Manuel Pires', author_email='Lalith.shiyamsundar@meduniwien.ac.at', description='An AI-inference engine for 3D clinical and preclinical whole-body segmentation tasks', - python_requires='>=3.10', + python_requires='>=3.9', long_description='mooseZ is an AI-inference engine based on nnUNet, designed for 3D clinical and preclinical' ' whole-body segmentation tasks. It serves models tailored towards different modalities such' ' as PET, CT, and MR. mooseZ provides fast and accurate segmentation results, making it a ' From 496889b9273d096d84a21233cdf230b8c6f4895d Mon Sep 17 00:00:00 2001 From: Keyn34 Date: Mon, 2 Dec 2024 18:37:44 +0100 Subject: [PATCH 03/11] Fixed type hint for returned objects of check_device() in system.py --- moosez/system.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moosez/system.py b/moosez/system.py index c86a41d..c45f108 100644 --- a/moosez/system.py +++ b/moosez/system.py @@ -182,7 +182,7 @@ def display_doi(self): self.console_update(" Copyright 2022, Quantitative Imaging and Medical Physics Team, Medical University of Vienna") -def check_device(output_manager: OutputManager = OutputManager(False, False)) -> Tuple[str, Union[int | None]]: +def check_device(output_manager: OutputManager = OutputManager(False, False)) -> Tuple[str, Union[int, None]]: """ This function checks the available device for running predictions, considering CUDA and MPS (for Apple Silicon). 
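
Background for the Python 3.9 type-hint migration in PATCH 01 through PATCH 04: PEP 604 union syntax such as "str | None" is only valid from Python 3.10 onwards, because annotations are evaluated at import time, whereas the typing.Union spelling works on 3.9. Built-in generics like list[str] are already legal on 3.9 under PEP 585, so the switch to typing.List/Tuple/Dict is mainly for consistency. The sketch below is editorial and not part of the patch series; load_image is a made-up helper used only to show the failure mode and the 3.9-compatible spelling.

# Editorial sketch (not part of the patches): why the pre-PATCH-01 annotations
# break on Python 3.9. "load_image" is a hypothetical stand-in function.
from typing import List, Optional, Union

# On Python 3.9 the following definition fails at import time with:
#   TypeError: unsupported operand type(s) for |: 'type' and 'NoneType'
# def load_image(path: str | None = None) -> list[str]: ...
#
# (Adding "from __future__ import annotations" would also defer evaluation and
# allow the | syntax on 3.9, but this series opts for typing.Union instead.)

# Python 3.9-compatible spelling, matching the style adopted in these patches:
def load_image(path: Union[str, None] = None) -> List[str]:
    return [] if path is None else [path]

# Optional[str] is equivalent to Union[str, None]; either form works on 3.9.
default_path: Optional[str] = None
print(load_image(default_path))  # []
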
From 1533c1a65b53e3353dde0094575e683c56dea120 Mon Sep 17 00:00:00 2001 From: Keyn34 Date: Mon, 2 Dec 2024 18:44:20 +0100 Subject: [PATCH 04/11] Fixed type hint for final_shape in chunks_to_array() in image_processing.py --- moosez/image_processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moosez/image_processing.py b/moosez/image_processing.py index b832dd3..4513a05 100644 --- a/moosez/image_processing.py +++ b/moosez/image_processing.py @@ -249,7 +249,7 @@ def array_to_chunks(image_array: np.ndarray, splits_per_dimension: Union[List[in return image_chunks, positions @staticmethod - def chunks_to_array(image_chunks: List[np.ndarray], image_chunk_positions: Dict, final_shape: List[int] | Tuple[int, ...]) -> np.ndarray: + def chunks_to_array(image_chunks: List[np.ndarray], image_chunk_positions: Dict, final_shape: Union[List[int], Tuple[int, ...]]) -> np.ndarray: final_arr = np.empty(final_shape, dtype=image_chunks[0].dtype) for image_chunk, image_chunk_position in zip(image_chunks, image_chunk_positions): interior_region = image_chunk[image_chunk_position['interior_slice']] From 70da0fa43d2418f7bc8e3db92436840eb7af06bb Mon Sep 17 00:00:00 2001 From: Keyn34 Date: Tue, 3 Dec 2024 16:54:24 +0100 Subject: [PATCH 05/11] Attempts for Errorhandling system.py - Equipped OutputManager with __remove_emojis() to try to escape emojis if an error occurs. --- moosez/system.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/moosez/system.py b/moosez/system.py index c45f108..f142367 100644 --- a/moosez/system.py +++ b/moosez/system.py @@ -6,6 +6,7 @@ import sys import emoji import pyfiglet +import re from halo import Halo from datetime import datetime from contextlib import contextmanager, redirect_stdout, redirect_stderr @@ -109,10 +110,30 @@ def log_update(self, text: str): if self.verbose_log and self.logger: self.logger.info(text) + def __remove_emojis(self, text: Union[str, RenderableType]) -> str: + emoji_pattern = re.compile( + "[" + u"\U0001F600-\U0001F64F" # Emoticons + u"\U0001F300-\U0001F5FF" # Symbols & Pictographs + u"\U0001F680-\U0001F6FF" # Transport & Map Symbols + u"\U0001F700-\U0001F77F" # Alchemical Symbols + u"\U0001F780-\U0001F7FF" # Geometric Shapes Extended + u"\U0001F800-\U0001F8FF" # Supplemental Arrows-C + u"\U0001F900-\U0001F9FF" # Supplemental Symbols & Pictographs + u"\U0001FA00-\U0001FA6F" # Chess Symbols + u"\U0001FA70-\U0001FAFF" # Symbols & Pictographs Extended-A + u"\U00002700-\U000027BF" # Dingbats + u"\U0001F1E6-\U0001F1FF" # Flags + "]+", flags=re.UNICODE) + return emoji_pattern.sub(r'', text) + def console_update(self, text: Union[str, RenderableType]): if isinstance(text, str): text = Text.from_ansi(text) - self.console.print(text) + try: + self.console.print(text) + except UnicodeEncodeError as e: + self.console.print(self.__remove_emojis(text)) def spinner_update(self, text: str = None): if self.spinner.enabled: From e3837e96822b78d2179419f03b43f5ccc5bdc7f9 Mon Sep 17 00:00:00 2001 From: Keyn34 Date: Tue, 3 Dec 2024 17:17:06 +0100 Subject: [PATCH 06/11] Attempts for Errorhandling system.py - tried to restructure the print call --- moosez/system.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/moosez/system.py b/moosez/system.py index f142367..0ee4be0 100644 --- a/moosez/system.py +++ b/moosez/system.py @@ -110,7 +110,7 @@ def log_update(self, text: str): if self.verbose_log and self.logger: self.logger.info(text) - def __remove_emojis(self, text: Union[str, 
RenderableType]) -> str: + def __remove_emojis(self, text: str) -> str: emoji_pattern = re.compile( "[" u"\U0001F600-\U0001F64F" # Emoticons @@ -128,12 +128,15 @@ def __remove_emojis(self, text: Union[str, RenderableType]) -> str: return emoji_pattern.sub(r'', text) def console_update(self, text: Union[str, RenderableType]): - if isinstance(text, str): - text = Text.from_ansi(text) try: - self.console.print(text) - except UnicodeEncodeError as e: - self.console.print(self.__remove_emojis(text)) + if isinstance(text, str): + self.console.print(Text.from_ansi(text)) + else: + self.console.print(text) + except UnicodeEncodeError: + if isinstance(text, str): + clean_text = self.__remove_emojis(text) + self.console.print(Text.from_ansi(clean_text)) def spinner_update(self, text: str = None): if self.spinner.enabled: From ac7ec703306eceb3aa9294566fe9f246bdf53319 Mon Sep 17 00:00:00 2001 From: Keyn34 Date: Tue, 3 Dec 2024 18:45:44 +0100 Subject: [PATCH 07/11] Revert "Attempts for Errorhandling" This reverts commit e3837e96822b78d2179419f03b43f5ccc5bdc7f9. --- moosez/system.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/moosez/system.py b/moosez/system.py index 0ee4be0..f142367 100644 --- a/moosez/system.py +++ b/moosez/system.py @@ -110,7 +110,7 @@ def log_update(self, text: str): if self.verbose_log and self.logger: self.logger.info(text) - def __remove_emojis(self, text: str) -> str: + def __remove_emojis(self, text: Union[str, RenderableType]) -> str: emoji_pattern = re.compile( "[" u"\U0001F600-\U0001F64F" # Emoticons @@ -128,15 +128,12 @@ def __remove_emojis(self, text: str) -> str: return emoji_pattern.sub(r'', text) def console_update(self, text: Union[str, RenderableType]): + if isinstance(text, str): + text = Text.from_ansi(text) try: - if isinstance(text, str): - self.console.print(Text.from_ansi(text)) - else: - self.console.print(text) - except UnicodeEncodeError: - if isinstance(text, str): - clean_text = self.__remove_emojis(text) - self.console.print(Text.from_ansi(clean_text)) + self.console.print(text) + except UnicodeEncodeError as e: + self.console.print(self.__remove_emojis(text)) def spinner_update(self, text: str = None): if self.spinner.enabled: From 25a0adf6783bad215b0549a1d1d725753aea6cc4 Mon Sep 17 00:00:00 2001 From: Keyn34 Date: Tue, 3 Dec 2024 18:45:56 +0100 Subject: [PATCH 08/11] Revert "Attempts for Errorhandling" This reverts commit 70da0fa43d2418f7bc8e3db92436840eb7af06bb. 
--- moosez/system.py | 23 +---------------------- 1 file changed, 1 insertion(+), 22 deletions(-) diff --git a/moosez/system.py b/moosez/system.py index f142367..c45f108 100644 --- a/moosez/system.py +++ b/moosez/system.py @@ -6,7 +6,6 @@ import sys import emoji import pyfiglet -import re from halo import Halo from datetime import datetime from contextlib import contextmanager, redirect_stdout, redirect_stderr @@ -110,30 +109,10 @@ def log_update(self, text: str): if self.verbose_log and self.logger: self.logger.info(text) - def __remove_emojis(self, text: Union[str, RenderableType]) -> str: - emoji_pattern = re.compile( - "[" - u"\U0001F600-\U0001F64F" # Emoticons - u"\U0001F300-\U0001F5FF" # Symbols & Pictographs - u"\U0001F680-\U0001F6FF" # Transport & Map Symbols - u"\U0001F700-\U0001F77F" # Alchemical Symbols - u"\U0001F780-\U0001F7FF" # Geometric Shapes Extended - u"\U0001F800-\U0001F8FF" # Supplemental Arrows-C - u"\U0001F900-\U0001F9FF" # Supplemental Symbols & Pictographs - u"\U0001FA00-\U0001FA6F" # Chess Symbols - u"\U0001FA70-\U0001FAFF" # Symbols & Pictographs Extended-A - u"\U00002700-\U000027BF" # Dingbats - u"\U0001F1E6-\U0001F1FF" # Flags - "]+", flags=re.UNICODE) - return emoji_pattern.sub(r'', text) - def console_update(self, text: Union[str, RenderableType]): if isinstance(text, str): text = Text.from_ansi(text) - try: - self.console.print(text) - except UnicodeEncodeError as e: - self.console.print(self.__remove_emojis(text)) + self.console.print(text) def spinner_update(self, text: str = None): if self.spinner.enabled: From 5d2f47804b84a39377d2d2b42c6a2a0c54bee77c Mon Sep 17 00:00:00 2001 From: Keyn34 <87951050+Keyn34@users.noreply.github.com> Date: Mon, 9 Dec 2024 09:07:59 +0100 Subject: [PATCH 09/11] Improved chunking logging and FOV warnings system.py - added spinner_warn() to OutputManager and improved the type hints of the spinner related function as defaulting their text to None made no sense. moosez.py - moose_subject() now prints a warning with spinner_warn() if the organ to crop from is not in the field of view. image_processing.py - fixed a type hint of the parameter image_chunk_positions of chunks_to_array() from Dict to List[Dict] predict.py - adjusted chunking logging in predict_from_array_by_iterator() - preprocessing_iterator_from_array now also takes an OutputManager as argument to log chunking information, such as if the image is below chunking threshold, and the size(s) of the chunk(s) the image was split to. 
--- moosez/image_processing.py | 2 +- moosez/moosez.py | 2 +- moosez/predict.py | 15 +++++++++------ moosez/system.py | 10 +++++++--- 4 files changed, 18 insertions(+), 11 deletions(-) diff --git a/moosez/image_processing.py b/moosez/image_processing.py index 4513a05..0bc2bbd 100644 --- a/moosez/image_processing.py +++ b/moosez/image_processing.py @@ -249,7 +249,7 @@ def array_to_chunks(image_array: np.ndarray, splits_per_dimension: Union[List[in return image_chunks, positions @staticmethod - def chunks_to_array(image_chunks: List[np.ndarray], image_chunk_positions: Dict, final_shape: Union[List[int], Tuple[int, ...]]) -> np.ndarray: + def chunks_to_array(image_chunks: List[np.ndarray], image_chunk_positions: List[Dict], final_shape: Union[List[int], Tuple[int, ...]]) -> np.ndarray: final_arr = np.empty(final_shape, dtype=image_chunks[0].dtype) for image_chunk, image_chunk_position in zip(image_chunks, image_chunk_positions): interior_region = image_chunk[image_chunk_position['interior_slice']] diff --git a/moosez/moosez.py b/moosez/moosez.py index a236813..f88cdc3 100644 --- a/moosez/moosez.py +++ b/moosez/moosez.py @@ -470,7 +470,7 @@ def moose_subject(subject: str, subject_index: int, number_of_subjects: int, mod existing_intensities = numpy.unique(segmentation_array) if not all([intensity in existing_intensities for intensity in inference_fov_intensities]): - output_manager.spinner_update(f'[{subject_index + 1}/{number_of_subjects}] Organ to crop from not in initial FOV...') + output_manager.spinner_warn(f'[{subject_index + 1}/{number_of_subjects}] Organ to crop from not in initial FOV. No segmentation result for this subject.') output_manager.log_update(" - Organ to crop from not in initial FOV.") performance_observer.time_phase() continue diff --git a/moosez/predict.py b/moosez/predict.py index 4348504..b1b991b 100644 --- a/moosez/predict.py +++ b/moosez/predict.py @@ -60,11 +60,16 @@ def process_case(preprocessor, chunk: np.ndarray, chunk_properties: Dict, predic return {'data': data_tensor, 'data_properties': chunk_properties, 'ofile': None, 'location': location} -def preprocessing_iterator_from_array(image_array: np.ndarray, image_properties: Dict, predictor: nnUNetPredictor) -> Tuple[Iterator, List]: +def preprocessing_iterator_from_array(image_array: np.ndarray, image_properties: Dict, predictor: nnUNetPredictor, output_manager: system.OutputManager) -> Tuple[Iterator, List[Dict]]: overlap_per_dimension = (0, 20, 20, 20) splits = image_processing.ImageChunker.determine_splits(image_array) chunks, locations = image_processing.ImageChunker.array_to_chunks(image_array, splits, overlap_per_dimension) + if len(chunks) == 1: + output_manager.log_update(f" - Image below chunking threshold. 
Single chunk of size: {'x'.join(map(str, chunks[0].shape))}") + else: + output_manager.log_update(f" - Image split into {len(chunks)} chunks of size: {'x'.join(map(str, chunks[0].shape))}") + preprocessor = predictor.configuration_manager.preprocessor_class(verbose=predictor.verbose) delayed_tasks = [] @@ -86,15 +91,13 @@ def predict_from_array_by_iterator(image_array: np.ndarray, model: models.Model, image_properties = { 'spacing': model.voxel_spacing } - splits = image_processing.ImageChunker.determine_splits(image_array) - output_manager.log_update(f" - Image chunked into {'x'.join(map(str, splits))} chunks") - iterator, chunk_locations = preprocessing_iterator_from_array(image_array, image_properties, predictor) + iterator, chunk_locations = preprocessing_iterator_from_array(image_array, image_properties, predictor, output_manager) segmentations = predictor.predict_from_data_iterator(iterator) - output_manager.log_update(f" - Retrieved {len(segmentations)} segmentations") + output_manager.log_update(f" - Retrieved {len(segmentations)} chunks") segmentations = [segmentation[None, ...] for segmentation in segmentations] combined_segmentations = image_processing.ImageChunker.chunks_to_array(segmentations, chunk_locations, image_array.shape) - output_manager.log_update(f" - Combined them to {'x'.join(map(str, combined_segmentations.shape))} array") + output_manager.log_update(f" - Combined them to an {'x'.join(map(str, combined_segmentations.shape))} array") return np.squeeze(combined_segmentations) diff --git a/moosez/system.py b/moosez/system.py index c45f108..2ee1f2a 100644 --- a/moosez/system.py +++ b/moosez/system.py @@ -114,7 +114,7 @@ def console_update(self, text: Union[str, RenderableType]): text = Text.from_ansi(text) self.console.print(text) - def spinner_update(self, text: str = None): + def spinner_update(self, text: str): if self.spinner.enabled: self.spinner.text = text @@ -122,14 +122,18 @@ def spinner_stop(self): if self.spinner.enabled: self.spinner.stop() - def spinner_start(self, text: str = None): + def spinner_start(self, text: str): if self.spinner.enabled: self.spinner.start(text) - def spinner_succeed(self, text: str = None): + def spinner_succeed(self, text: str): if self.spinner.enabled: self.spinner.succeed(text) + def spinner_warn(self, text: str): + if self.spinner.enabled: + self.spinner.warn(text) + @contextmanager def manage_nnUNet_output(self): target_path = self.nnunet_log_filename if self.verbose_log else os.devnull From c59a3a5ad7c902389e6ebba715a0b9f18cb5d5f9 Mon Sep 17 00:00:00 2001 From: Keyn34 Date: Mon, 9 Dec 2024 09:59:10 +0100 Subject: [PATCH 10/11] Removed version restriction of SimpleITK in setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 44c8312..69c5e81 100644 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ 'acvl-utils==0.2', 'nnunetv2', 'halo~=0.0.31', - 'SimpleITK~=2.2.1', + 'SimpleITK', 'pydicom~=2.2.2', 'argparse~=1.4.0', 'numpy<2.0', From dfaf3df31f5d51061aaf6713e964aefc6d652081 Mon Sep 17 00:00:00 2001 From: Keyn34 Date: Mon, 9 Dec 2024 10:10:46 +0100 Subject: [PATCH 11/11] Minor improvements image_processing.py - improved logging in standardize_image() moosez.py - improved status print in spinner when organs are not in FOV --- moosez/image_processing.py | 10 +++++----- moosez/moosez.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/moosez/image_processing.py b/moosez/image_processing.py index 0bc2bbd..79f45ae 100644 --- a/moosez/image_processing.py 
+++ b/moosez/image_processing.py @@ -524,24 +524,24 @@ def convert_to_sitk(image: nibabel.Nifti1Image) -> SimpleITK.Image: def standardize_image(image_path: str, output_manager: system.OutputManager, standardization_output_path: Union[str, None]) -> SimpleITK.Image: image = nibabel.load(image_path) _, original_orientation = determine_orientation_code(image) - output_manager.log_update(f" Image loaded. Orientation: {original_orientation}") + output_manager.log_update(f" - Image loaded. Orientation: {original_orientation}") image, orthonormalized = confirm_orthonormality(image) if orthonormalized: _, orthonormal_orientation = determine_orientation_code(image) - output_manager.log_update(f" Image orthonormalized. Orientation: {orthonormal_orientation}") + output_manager.log_update(f" - Image orthonormalized. Orientation: {orthonormal_orientation}") image, reoriented = confirm_orientation(image) if reoriented: _, reoriented_orientation = determine_orientation_code(image) - output_manager.log_update(f" Image reoriented. Orientation: {reoriented_orientation}") + output_manager.log_update(f" - Image reoriented. Orientation: {reoriented_orientation}") sitk_image = convert_to_sitk(image) - output_manager.log_update(f" Image converted to SimpleITK.") + output_manager.log_update(f" - Image converted to SimpleITK.") processing_steps = [orthonormalized, reoriented] prefixes = ["orthonormal", "reoriented"] if standardization_output_path is not None and any(processing_steps): - output_manager.log_update(f" Writing standardized image.") + output_manager.log_update(f" - Writing standardized image.") prefix = "_".join([prefix for processing_step, prefix in zip(processing_steps, prefixes) if processing_step]) output_path = os.path.join(standardization_output_path, f"{prefix}_{os.path.basename(image_path)}") SimpleITK.WriteImage(sitk_image, output_path) diff --git a/moosez/moosez.py b/moosez/moosez.py index f88cdc3..58d0902 100644 --- a/moosez/moosez.py +++ b/moosez/moosez.py @@ -470,7 +470,7 @@ def moose_subject(subject: str, subject_index: int, number_of_subjects: int, mod existing_intensities = numpy.unique(segmentation_array) if not all([intensity in existing_intensities for intensity in inference_fov_intensities]): - output_manager.spinner_warn(f'[{subject_index + 1}/{number_of_subjects}] Organ to crop from not in initial FOV. No segmentation result for this subject.') + output_manager.spinner_warn(f'[{subject_index + 1}/{number_of_subjects}] {subject_name}: organ to crop from not in initial FOV. No segmentation result ({model_workflow.target_model}) for this subject.') output_manager.log_update(" - Organ to crop from not in initial FOV.") performance_observer.time_phase() continue
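
For context on the ImageChunker hints touched in PATCH 04 and PATCH 09, here is a rough usage sketch of the chunk-and-reassemble round trip, based only on the signatures visible in this series. The array is synthetic, the overlap mirrors the (0, 20, 20, 20) used in predict.py, and an installed moosez package at this revision is assumed.

import numpy as np
from moosez.image_processing import ImageChunker

# Synthetic 4D array (channel axis + three spatial axes), standing in for the
# preprocessed image that predict.py feeds through the chunker.
image_array = np.random.rand(1, 64, 64, 64).astype(np.float32)

# Split counts per axis; small arrays stay below the chunking threshold and
# come back as a single chunk.
splits = ImageChunker.determine_splits(image_array)

# No overlap on the channel axis, 20 voxels of overlap on each spatial axis,
# matching the value used in preprocessing_iterator_from_array().
chunks, locations = ImageChunker.array_to_chunks(image_array, splits, (0, 20, 20, 20))

# Writing the interior region of every chunk back into place should reproduce
# the original array.
restored = ImageChunker.chunks_to_array(chunks, locations, image_array.shape)
assert restored.shape == image_array.shape
assert np.array_equal(restored, image_array)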