diff --git a/README.md b/README.md
index ce0e6c74..1077759e 100644
--- a/README.md
+++ b/README.md
@@ -82,7 +82,8 @@ in the base directory. Then do `cd docs` and `make html` to build the static sit
 The following minimal example loads a CT volume from a NifTi `.nii.gz` file and simulates an X-ray projection:
 
 ```python
-from deepdrr import geo, Volume, MobileCArm, Projector
+from deepdrr import geo, Volume, MobileCArm
+from deepdrr.projector import Projector # separate import for CUDA init
 import matplotlib.pyplot as plt
 
 volume = Volume.from_nifti('/path/to/ct_image.nii.gz')
@@ -147,6 +148,41 @@ This capability has not been tested in version 1.0. For tool insertion, we recom
 2. The density of the tool needs to be provided via hard coding in the file 'load_dicom_tool.py' (line 127). The pose of the tool/implant with respect to the CT volume requires manual setup. We provide one example origin setting at line 23-24.
 3. The tool/implant will supersede the anatomy defined by the CT volume intensities. To this end, we sample the CT materials and densities at the location of the tool in the tool volume, and subtract them from the anatomy forward projections in detector domain (to enable different resolutions of CT and tool volume). Further information can be found in the IJCARS article.
 
+### Using DeepDRR Simultaneously with PyTorch
+
+Some issues may arise when using DeepDRR at the same time as PyTorch due to conflicts between pycuda's CUDA initialization and PyTorch's CUDA initialization. The best workaround we know of is to first initialize the PyCUDA context (by importing `deepdrr.projector`) and then run your model on a dummy batch before creating a `Projector` object. For mysterious reasons (likely involving overlapping GPU resources and the retrograde of Mercury), this seems to work.
+
+```Python
+import torch
+from torch import nn
+from torchvision import models
+
+import deepdrr
+from deepdrr.projector import Projector # initializes PyCUDA
+
+# Before creating a Projector, run backprop to initialize PyTorch
+criterion = nn.CrossEntropyLoss()
+model = models.resnet50() # Your model here
+model.cuda()
+optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
+optimizer.zero_grad()
+x = torch.ones((32, 3, 224, 224), dtype=torch.float32).cuda() # Your image size
+y = torch.ones(32, dtype=torch.int64).cuda()
+y_pred = model(x)
+loss = criterion(y_pred, y)
+loss.backward()
+optimizer.step()
+print("Ran dummy batch to initialize torch.")
+
+volume = ...
+carm = ...
+with Projector(volume, carm=carm) as projector:
+    image = projector()
+    image = image.unsqueeze(0) # add batch dim
+    y_pred = model(image)
+    ...
+```
+
 ## Reference
 
 We hope this proves useful for medical imaging research. If you use our work, we would kindly ask you to reference our work.
diff --git a/deepdrr/annotations/line_annotation.py b/deepdrr/annotations/line_annotation.py
index 10504b85..81e9d7ca 100644
--- a/deepdrr/annotations/line_annotation.py
+++ b/deepdrr/annotations/line_annotation.py
@@ -1,13 +1,13 @@
 from __future__ import annotations
 import logging
-from typing import Optional
+from typing import List, Optional
 from pathlib import Path
 
 import numpy as np
 import json
 import pyvista as pv
-from .. import geo
+from ..
import geo, utils from ..vol import Volume, AnyVolume logger = logging.getLogger(__name__) @@ -65,6 +65,110 @@ def from_markup(cls, path: str, volume: AnyVolume) -> LineAnnotation: return cls(*points, volume) + def save(self, path: str, color: List[float] = [0.5, 0.5, 0.5]): + """Save the Line annotation to a mrk.json file, which can be opened by 3D Slicer. + + Args: + path (str): Output path to the file. + color (List[int], optional): The color of the saved annotation. + """ + path = Path(path).expanduser() + + markup = { + "@schema": "https://raw.githubusercontent.com/slicer/slicer/master/Modules/Loadable/Markups/Resources/Schema/markups-schema-v1.0.0.json#", + "markups": [ + { + "type": "Line", + "coordinateSystem": self.volume.anatomical_coordinate_system, + "locked": True, + "labelFormat": "%N-%d", + "controlPoints": [ + { + "id": "1", + "label": "entry", + "description": "", + "associatedNodeID": "", + "position": utils.jsonable(self.startpoint), + "orientation": [ + -1.0, + -0.0, + -0.0, + -0.0, + -1.0, + -0.0, + 0.0, + 0.0, + 1.0, + ], + "selected": True, + "locked": False, + "visibility": True, + "positionStatus": "defined", + }, + { + "id": "2", + "label": "exit", + "description": "", + "associatedNodeID": "", + "position": utils.jsonable(self.endpoint), + "orientation": [ + -1.0, + -0.0, + -0.0, + -0.0, + -1.0, + -0.0, + 0.0, + 0.0, + 1.0, + ], + "selected": True, + "locked": False, + "visibility": True, + "positionStatus": "defined", + }, + ], + "measurements": [ + { + "name": "length", + "enabled": True, + "value": 124.90054351814699, + "printFormat": "%-#4.4gmm", + } + ], + "display": { + "visibility": True, + "opacity": 1.0, + "color": color, + "selectedColor": [1.0, 0.5000076295109484, 0.5000076295109484], + "activeColor": [0.4, 1.0, 0.0], + "propertiesLabelVisibility": True, + "pointLabelsVisibility": True, + "textScale": 3.0, + "glyphType": "Sphere3D", + "glyphScale": 5.800000000000001, + "glyphSize": 5.0, + "useGlyphScale": True, + "sliceProjection": False, + "sliceProjectionUseFiducialColor": True, + "sliceProjectionOutlinedBehindSlicePlane": False, + "sliceProjectionColor": [1.0, 1.0, 1.0], + "sliceProjectionOpacity": 0.6, + "lineThickness": 0.2, + "lineColorFadingStart": 1.0, + "lineColorFadingEnd": 10.0, + "lineColorFadingSaturation": 1.0, + "lineColorFadingHueOffset": 0.0, + "handlesInteractive": False, + "snapMode": "toVisibleSurface", + }, + } + ], + } + + with open(path, "w") as file: + json.dump(markup, file) + @property def startpoint_in_world(self) -> geo.Point: return self.volume.world_from_anatomical @ self.startpoint diff --git a/deepdrr/device.py b/deepdrr/device.py index 092ef85b..69124bde 100644 --- a/deepdrr/device.py +++ b/deepdrr/device.py @@ -1,4 +1,4 @@ -from typing import Optional, Tuple, Union, List +from typing import Any, Dict, Optional, Tuple, Union, List import logging import numpy as np @@ -96,6 +96,7 @@ def __init__( isocenter: geo.Point3D = [0, 0, 0], alpha: float = 0, beta: float = 0, + degrees: bool = True, horizontal_movement: float = 200, # width of window in X and Y planes. vertical_travel: float = 430, # width of window in Z plane. min_alpha: float = -40, @@ -103,7 +104,6 @@ def __init__( # note that this would collide with the patient. Suggested to limit to +/- 45 min_beta: float = -225, max_beta: float = 225, - degrees: bool = True, source_to_detector_distance: float = 1020, # vertical component of the source point offset from the isocenter of rotation, in -Z. 
Previously called `isocenter_distance` source_to_isocenter_vertical_distance: float = 530, @@ -117,7 +117,7 @@ def __init__( sensor_width: int = 1536, pixel_size: float = 0.194, rotate_camera_left: bool = True, # make it so that down in the image corresponds to -x, so that patient images appear as expected. - enforce_isocenter_bounds: bool = True, + enforce_isocenter_bounds: bool = False, # Allow the isocenter to travel arbitrarily far from the device origin ) -> None: """A simulated C-arm imaging device with orbital movement (alpha), angulation (beta) and 3D translation. @@ -201,6 +201,34 @@ def __str__(self): f"alpha={np.degrees(self.alpha)}, beta={np.degrees(self.beta)}, degrees=True)" ) + def to_config(self) -> Dict[str, Any]: + """Get a json-safe dictionary that can be used to initialize the C-arm in its current pose.""" + return utils.jsonable( + dict( + world_from_device=self.world_from_device, + isocenter=self.isocenter, + alpha=self.alpha, + beta=self.beta, + degrees=False, + horizontal_movement=self.horizontal_movement, + vertical_travel=self.vertical_travel, + min_alpha=self.min_alpha, + max_alpha=self.max_alpha, + min_beta=self.min_beta, + max_beta=self.max_beta, + source_to_detector_distance=self.source_to_detector_distance, + source_to_isocenter_vertical_distance=self.source_to_isocenter_vertical_distance, + source_to_isocenter_horizontal_offset=self.source_to_isocenter_horizontal_offset, + immersion_depth=self.immersion_depth, + free_space=self.free_space, + sensor_height=self.sensor_height, + sensor_width=self.sensor_width, + pixel_size=self.pixel_size, + rotate_camera_left=self.rotate_camera_left, + enforce_isocenter_bounds=self.enforce_isocenter_bounds, + ) + ) + @property def max_isocenter(self) -> np.ndarray: return ( @@ -257,10 +285,10 @@ def camera3d_from_device(self) -> geo.FrameTransform: @property def camera3d_from_world(self) -> geo.FrameTransform: """Rigid transformation of the C-arm camera pose.""" - return self.camera3d_from_device @ self.device_from_world + return self.get_camera3d_from_world() def get_camera3d_from_world(self) -> geo.FrameTransform: - return self.camera3d_from_world + return self.camera3d_from_device @ self.device_from_world def get_camera_projection(self) -> geo.CameraProjection: return geo.CameraProjection( diff --git a/deepdrr/geo/camera_projection.py b/deepdrr/geo/camera_projection.py index 03a77b83..3e2e582b 100644 --- a/deepdrr/geo/camera_projection.py +++ b/deepdrr/geo/camera_projection.py @@ -1,7 +1,7 @@ from typing import Union, Optional, Any, TYPE_CHECKING import numpy as np -from .core import Transform, FrameTransform, point, Point3D +from .core import Transform, FrameTransform, point, Point3D, get_data from .camera_intrinsic_transform import CameraIntrinsicTransform from ..vol import AnyVolume @@ -66,7 +66,7 @@ def extrinsic(self) -> FrameTransform: return self.camera3d_from_world @property - def index_from_world(self) -> FrameTransform: + def index_from_world(self) -> Transform: proj = np.concatenate([np.eye(3), np.zeros((3, 1))], axis=1) camera2d_from_camera3d = Transform(proj, _inv=proj.T) return ( @@ -74,9 +74,17 @@ def index_from_world(self) -> FrameTransform: ) @property - def world_from_index(self) -> FrameTransform: + def world_from_index(self) -> Transform: return self.index_from_world.inv + @property + def world_from_index_on_image_plane(self) -> FrameTransform: + """Get the transform to points in world on the image (detector) plane from image indices.""" + proj = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 0], [0, 0, 1]]) + 
proj = Transform(proj, _inv=proj.T)
+        index_from_world_3d = proj @ self.index_from_world
+        return FrameTransform(data=get_data(index_from_world_3d.inv))
+
     @property
     def sensor_width(self) -> int:
         return self.intrinsic.sensor_width
diff --git a/deepdrr/geo/core.py b/deepdrr/geo/core.py
index b66f4b1f..6f2b003a 100644
--- a/deepdrr/geo/core.py
+++ b/deepdrr/geo/core.py
@@ -92,6 +92,10 @@ def to_array(self, is_point):
         """
         pass
 
+    def tolist(self) -> List:
+        """Get a json-safe list with the data in this object."""
+        return self.data.tolist()
+
     def __array__(self, dtype=None):
         return self.to_array()
diff --git a/deepdrr/projector/projector.py b/deepdrr/projector/projector.py
index a3972266..d4586f4e 100644
--- a/deepdrr/projector/projector.py
+++ b/deepdrr/projector/projector.py
@@ -2,7 +2,9 @@
 import time
 from pathlib import Path
 from typing import Any, Dict, List, Optional, Tuple, Union
+import os
+import torch
 import numpy as np
 
 from .. import geo, utils, vol
@@ -20,28 +22,20 @@
 from .mcgpu_mfp_data import MFP_DATA
 from .mcgpu_rita_samplers import rita_samplers
 
-log = logging.getLogger(__name__)
-
-try:
-    import pycuda.autoinit
-    import pycuda.driver as cuda
-    from pycuda.autoinit import context
-    from pycuda.compiler import SourceModule
-    pycuda_available = True
-except ImportError:
-    pycuda_available = False
-    SourceModule = Any
-    log.warning("pycuda unavailable")
+import pycuda.autoinit
+import pycuda.driver as cuda
+from pycuda.autoinit import context
+from pycuda.compiler import SourceModule
+log = logging.getLogger(__name__)
 
 NUMBYTES_INT8 = 1
 NUMBYTES_INT32 = 4
 NUMBYTES_FLOAT32 = 4
 
-log.setLevel(logging.INFO)
-logging.basicConfig(level=logging.INFO)
+
 
 def _get_spectrum(spectrum: Union[np.ndarray, str]):
     """Get the data corresponding to the given spectrum name.
@@ -258,13 +252,15 @@ def __init__(
         self.num_scatter_blocks = min(32768, self.max_block_index)
         # TODO (mjudish): discuss with killeen max_block_index and what makes sense
         # for the scatter block structure
-
+
         total_threads = self.num_scatter_blocks * self.threads * self.threads
         log.info(f"total threads: {total_threads}")
         self.histories_per_thread = int(np.ceil(self.scatter_num / total_threads))
         self.scatter_num = self.histories_per_thread * total_threads
-        log.info(f"input scatter_num: {scatter_num}, rounded up to {self.scatter_num}\nhistories per thread: {self.histories_per_thread}")
+        log.info(
+            f"input scatter_num: {scatter_num}, rounded up to {self.scatter_num}\nhistories per thread: {self.histories_per_thread}"
+        )
 
         if len(self.volumes) > 1:
             self.resample_megavolume = self.mod.get_function("resample_megavolume")
@@ -401,7 +397,7 @@ def project(
         blocks_w = int(np.ceil(self.output_shape[0] / self.threads))
         blocks_h = int(np.ceil(self.output_shape[1] / self.threads))
         block = (self.threads, self.threads, 1)
-        log.debug(
+        log.info(
            f"Running: {blocks_w}x{blocks_h} blocks with {self.threads}x{self.threads} threads each"
        )
@@ -446,7 +442,7 @@ def project(
            project_tock = time.perf_counter()
            log.debug(
-                f"projection #{i}: time elpased after copy from kernel: {project_tock - project_tick}"
+                f"projection #{i}: time elapsed after copy from kernel: {project_tock - project_tick}"
            )
 
        if self.scatter_num > 0:
@@ -456,14 +452,16 @@ def project(
                f"Starting scatter simulation, scatter_num={self.scatter_num}. 
Time: {time.asctime()}" ) - #index_from_ijk = ( + # index_from_ijk = ( # self.megavol_ijk_from_world @ proj.world_from_index - #).inv - #index_from_ijk = np.array(index_from_ijk).astype(np.float32) # 2x4 matrix - #print(f"index_from_ijk on GPU:\n{index_from_ijk}") - #cuda.memcpy_htod(self.index_from_ijk_gpu, index_from_ijk) + # ).inv + # index_from_ijk = np.array(index_from_ijk).astype(np.float32) # 2x4 matrix + # print(f"index_from_ijk on GPU:\n{index_from_ijk}") + # cuda.memcpy_htod(self.index_from_ijk_gpu, index_from_ijk) print(f"index_from_world on GPU:\n{np.array(proj.index_from_world)}") - cuda.memcpy_htod(self.index_from_world_gpu, np.array(proj.index_from_world)) + cuda.memcpy_htod( + self.index_from_world_gpu, np.array(proj.index_from_world) + ) scatter_source_ijk = np.array( self.megavol_ijk_from_world @ proj.center_in_world @@ -472,14 +470,12 @@ def project( print( f"np.array(self.megavol_ijk_from_world) dims:{np.array(self.megavol_ijk_from_world).shape}\n{np.array(self.megavol_ijk_from_world)}" ) - print( - f"world_from_index:\n{world_from_index}" - ) + print(f"world_from_index:\n{world_from_index}") scatter_source_world = np.array(proj.center_in_world).astype(np.float32) detector_plane = scatter.get_detector_plane( - #np.array(self.megavol_ijk_from_world @ proj.world_from_index), + # np.array(self.megavol_ijk_from_world @ proj.world_from_index), np.array(proj.world_from_index), proj.index_from_camera2d, self.source_to_detector_distance, @@ -495,9 +491,11 @@ def project( np.array([0, 0, 1]), np.array([self.output_shape[0], 0, 1]), np.array([self.output_shape[0], self.output_shape[1], 1]), - np.array([0, self.output_shape[1], 1]) + np.array([0, self.output_shape[1], 1]), + ] + _tmp_corner_rays_world = [ + proj.world_from_index @ corner for corner in _tmp_corners_idx ] - _tmp_corner_rays_world = [proj.world_from_index @ corner for corner in _tmp_corners_idx] print(f"Detector corner rays in world: (0,0), (W,0), (W,H), (0, H):") for _corner_ray in _tmp_corner_rays_world: @@ -505,23 +503,33 @@ def project( # end print corners print(f"source in world:\n\t{proj.center_in_world}") - detector_ctr_in_world = detector_plane.surface_origin + (detector_plane.basis_1 * self.output_shape[0] * 0.5) + (detector_plane.basis_2 * self.output_shape[1] * 0.5) + detector_ctr_in_world = ( + detector_plane.surface_origin + + (detector_plane.basis_1 * self.output_shape[0] * 0.5) + + (detector_plane.basis_2 * self.output_shape[1] * 0.5) + ) print(f"detector center in world:\n\t{detector_ctr_in_world}") print(f"Detector corners in world, FROM RAYS:") for _corner_ray in _tmp_corner_rays_world: - print(f"\t{proj.center_in_world + self.source_to_detector_distance * _corner_ray}") + print( + f"\t{proj.center_in_world + self.source_to_detector_distance * _corner_ray}" + ) print(f"Detector corners in world, FROM PLANE_SURFACE:") for indices in _tmp_corners_idx: - corner = detector_plane.surface_origin + (detector_plane.basis_1 * indices[0]) + (detector_plane.basis_2 * indices[1]) + corner = ( + detector_plane.surface_origin + + (detector_plane.basis_1 * indices[0]) + + (detector_plane.basis_2 * indices[1]) + ) print(f"\t{corner}") world_from_ijk_arr = np.array(self.megavol_ijk_from_world.inv) cuda.memcpy_htod(self.world_from_ijk_gpu, world_from_ijk_arr) - #print(f"world_from_ijk_arr:\n{world_from_ijk_arr}") + # print(f"world_from_ijk_arr:\n{world_from_ijk_arr}") ijk_from_world_arr = np.array(self.megavol_ijk_from_world) cuda.memcpy_htod(self.ijk_from_world_gpu, ijk_from_world_arr) - 
#print(f"ijk_from_world_arr:\n{ijk_from_world_arr}") + # print(f"ijk_from_world_arr:\n{ijk_from_world_arr}") E_abs_keV = 5 # E_abs == 5000 eV @@ -576,8 +584,12 @@ def project( ) else: print("running scatter kernel patchwise") - for i in range(int(np.ceil(self.num_scatter_blocks / self.max_block_index))): - blocks_left_to_run = self.num_scatter_blocks - (i * self.max_block_index) + for i in range( + int(np.ceil(self.num_scatter_blocks / self.max_block_index)) + ): + blocks_left_to_run = self.num_scatter_blocks - ( + i * self.max_block_index + ) blocks_for_grid = min(blocks_left_to_run, self.max_block_index) self.simulate_scatter( *scatter_args, block=block, grid=(blocks_for_grid, 1) @@ -667,7 +679,7 @@ def project( intensities.append(deposited_energy) else: intensities.append(intensity) - + photon_probs.append(photon_prob) # end for-loop over the projections @@ -688,6 +700,9 @@ def project( log.debug("applying negative log transform") images = utils.neglog(images) + # Don't think this does anything. + # torch.cuda.synchronize() + if images.shape[0] == 1: return images[0] else: @@ -728,7 +743,7 @@ def initialize(self): if self.initialized: raise RuntimeError("Close projector before initializing again.") - # TODO: in this function, there are several instances of axis swaps. + # TODO: in this function, there are several instances of axis swaps. # We may want to investigate if the axis swaps are necessary. log.debug(f"beginning call to Projector.initialize") @@ -855,7 +870,6 @@ def initialize(self): ) # allocate ijk_from_index matrix array on GPU (3x3 array x 4 bytes per float32) - # TODO: represent the factor of "3 x 3" in a more abstracted way self.world_from_index_gpu = cuda.mem_alloc(3 * 3 * NUMBYTES_FLOAT32) # allocate ijk_from_world for each volume. @@ -1109,9 +1123,9 @@ def initialize(self): # Calculate block and grid sizes: each block is a 4x4x4 cube of voxels block = (1, 1, 1) - blocks_x = np.int(np.ceil(mega_x_len / block[0])) - blocks_y = np.int(np.ceil(mega_y_len / block[1])) - blocks_z = np.int(np.ceil(mega_z_len / block[2])) + blocks_x = int(np.ceil(mega_x_len / block[0])) + blocks_y = int(np.ceil(mega_y_len / block[1])) + blocks_z = int(np.ceil(mega_z_len / block[2])) log.info( f"Resampling: {blocks_x}x{blocks_y}x{blocks_z} blocks with {block[0]}x{block[1]}x{block[2]} threads each" ) @@ -1198,13 +1212,11 @@ def initialize(self): labeled_seg, i * self.volumes[0].materials[mat] ).astype(np.int8) null_seg = np.logical_and( - null_seg, - np.logical_not(self.volumes[0].materials[mat]) + null_seg, np.logical_not(self.volumes[0].materials[mat]) ).astype(np.int8) # a labeled_seg value of NUM_MATERIALS indicates a null segmentation labeled_seg = np.add( - labeled_seg, - len(self.all_materials) * null_seg + labeled_seg, len(self.all_materials) * null_seg ).astype(np.int8) # NOTE: axis swap not necessary because using raw array, not texture cuda.memcpy_htod(self.megavol_labeled_seg_gpu, labeled_seg) @@ -1212,7 +1224,7 @@ def initialize(self): # Copy volume density info to self.megavol_density_gpu # NOTE: axis swap not necessary because using raw array, not texture cuda.memcpy_htod(self.megavol_density_gpu, self.volumes[0].data) - + init_tock = time.perf_counter() log.debug( f"time elapsed after copying megavolume to GPU: {init_tock - init_tick}" diff --git a/deepdrr/utils/__init__.py b/deepdrr/utils/__init__.py index 77f61e86..e08cb1a8 100644 --- a/deepdrr/utils/__init__.py +++ b/deepdrr/utils/__init__.py @@ -9,9 +9,18 @@ from . 
import data_utils, image_utils, test_utils -__all__ = ["param_saver", "one_hot", "tuplify", "listify", - "radians", "generate_uniform_angles", "neglog", - "try_import_pyvista", "try_import_vtk"] +__all__ = [ + "param_saver", + "one_hot", + "tuplify", + "listify", + "radians", + "generate_uniform_angles", + "neglog", + "try_import_pyvista", + "try_import_vtk", + "jsonable", +] logger = logging.getLogger(__name__) @@ -38,8 +47,7 @@ def param_saver( Returns: [type]: [description] """ - i0 = np.sum(spectrum[:, 0] * (spectrum[:, 1] / - np.sum(spectrum[:, 1]))) / 1000 + i0 = np.sum(spectrum[:, 0] * (spectrum[:, 1] / np.sum(spectrum[:, 1]))) / 1000 data = { "date": datetime.now(), "thetas": thetas, @@ -58,7 +66,9 @@ def param_saver( def one_hot( - x: np.ndarray, num_classes: Optional[int] = None, axis: int = -1, + x: np.ndarray, + num_classes: Optional[int] = None, + axis: int = -1, ) -> np.ndarray: """One-hot encode the vector x along the axis. @@ -85,7 +95,7 @@ def one_hot( def tuplify(t: Union[Tuple[T, ...], T], n: int = 1) -> Tuple[T, ...]: - """ Create a tuple with `n` copies of `t`, if `t` is not already a tuple of length `n`.""" + """Create a tuple with `n` copies of `t`, if `t` is not already a tuple of length `n`.""" if isinstance(t, (tuple, list)): assert len(t) == n return tuple(t) @@ -118,7 +128,8 @@ def radians( def generate_uniform_angles( - phi_range: Tuple[float, float, float], theta_range: Tuple[float, float, float], + phi_range: Tuple[float, float, float], + theta_range: Tuple[float, float, float], ) -> Tuple[np.ndarray, np.ndarray]: """Generate a uniform sampling of angles over the given ranges. @@ -175,8 +186,7 @@ def neglog(image: np.ndarray, epsilon: float = 0.01) -> np.ndarray: # TODO(killeen): for multiple images, only fill the bad ones image[:] = 0 if image.shape[0] > 1: - logger.error( - "TODO: zeroed all images, even though only one might be bad.") + logger.error("TODO: zeroed all images, even though only one might be bad.") else: image = (image - image_min) / (image_max - image_min) @@ -213,3 +223,26 @@ def try_import_vtk(): vtk_available = False return vtk, nps, vtk_available + + +def jsonable(obj: Any): + """Convert obj to a JSON-ready container or object. + Args: + obj ([type]): + """ + if isinstance(obj, (str, float, int, complex)): + return obj + elif isinstance(obj, Path): + return str(obj.resolve()) + elif isinstance(obj, (list, tuple)): + return type(obj)(map(jsonable, obj)) + elif isinstance(obj, dict): + return dict(jsonable(list(obj.items()))) + elif isinstance(obj, np.ndarray): + return obj.tolist() + elif hasattr(obj, "tolist"): + return obj.tolist() + elif hasattr(obj, "__array__"): + return np.array(obj).tolist() + else: + raise ValueError(f"Unknown type for JSON: {type(obj)}") diff --git a/deepdrr/utils/data_utils.py b/deepdrr/utils/data_utils.py index a693a440..f5dd9814 100644 --- a/deepdrr/utils/data_utils.py +++ b/deepdrr/utils/data_utils.py @@ -2,27 +2,30 @@ import os import logging from pathlib import Path -from torchvision.datasets.utils import download_url +from torchvision.datasets.utils import download_url, extract_archive import urllib import subprocess logger = logging.getLogger(__name__) -def download(url: str, filename: Optional[str] = None, root: str = "~/datasets/DeepDRR_Data", md5: Optional[str] = None) -> Path: - """Download a data file and place it in the default root for DeepDRR. 
+def download(url: str, filename: Optional[str] = None, root: Optional[str] = None, md5: Optional[str] = None, extract_name: Optional[str] = None) -> Path: + """Download a data file and place it in root. Args: url (str): The download link. filename (str, optional): The name the save the file under. If None, uses the name from the URL. Defaults to None. root (str, optional): The directory to place downloaded data in. Can be overriden by setting the environment variable DEEPDRR_DATA_DIR. Defaults to "~/datasets/DeepDRR_Data". md5 (str, optional): MD5 checksum of the download. Defaults to None. + extract_name: If not None, extract the downloaded file to `root / extract_name`. Returns: - Path: The path of the downloaded file. + Path: The path of the downloaded file, or the extracted directory. """ - if os.environ.get("DEEPDRR_DATA_DIR") is not None: + if root is None and os.environ.get("DEEPDRR_DATA_DIR") is not None: root = os.environ["DEEPDRR_DATA_DIR"] + elif root is None: + root = "~/datasets/DeepDRR_Data" root = Path(root).expanduser() if not root.exists(): @@ -36,4 +39,10 @@ def download(url: str, filename: Optional[str] = None, root: str = "~/datasets/D except urllib.error.HTTPError: logger.warning(f"Pretty download failed. Attempting with wget...") subprocess.call(["wget", "-O", str(root / filename), url]) - return root / filename + + path = root / filename + if extract_name is not None: + extract_archive(path, root / extract_name, remove_finished=True) + path = root / extract_name + + return path diff --git a/deepdrr/vol/volume.py b/deepdrr/vol/volume.py index 782ed661..312aa7b1 100644 --- a/deepdrr/vol/volume.py +++ b/deepdrr/vol/volume.py @@ -180,8 +180,7 @@ def _convert_hounsfield_to_density(hu_values: np.ndarray): @staticmethod def _segment_materials( - hu_values: np.ndarray, - use_thresholding: bool = True, + hu_values: np.ndarray, use_thresholding: bool = True, ) -> Dict[str, np.ndarray]: """Segment the materials. 
@@ -282,7 +281,7 @@ def from_nifti( use_thresholding=use_thresholding, use_cached=use_cached, cache_dir=cache_dir, - prefix=path.stem, + prefix=path.name.split(".")[0], ) return cls( @@ -469,10 +468,7 @@ def from_nrrd( path = Path(path) hu_values, header = nrrd.read(path) ijk_from_anatomical = np.concatenate( - [ - header["space directions"], - header["space origin"].reshape(-1, 1), - ], + [header["space directions"], header["space origin"].reshape(-1, 1),], axis=1, ) anatomical_from_ijk = np.concatenate( @@ -565,10 +561,7 @@ def spacing(self) -> geo.Vector3D: """The spacing of the voxels.""" return geo.vector(np.abs(np.array(self.anatomical_from_ijk.R)).max(axis=0)) - def _format_materials( - self, - materials: Dict[str, np.ndarray], - ) -> np.ndarray: + def _format_materials(self, materials: Dict[str, np.ndarray],) -> np.ndarray: """Standardize the input material segmentation.""" for mat in materials: materials[mat] = np.array(materials[mat]).astype(np.float32) @@ -735,14 +728,10 @@ def _make_surface(self, material: str = "bone"): segmentation.shape[0], segmentation.shape[1], segmentation.shape[2] ) vol.SetOrigin( - -np.sign(R[0, 0]) * t[0], - -np.sign(R[1, 1]) * t[1], - np.sign(R[2, 2]) * t[2], + -np.sign(R[0, 0]) * t[0], -np.sign(R[1, 1]) * t[1], np.sign(R[2, 2]) * t[2], ) vol.SetSpacing( - -abs(R[0, 0]), - -abs(R[1, 1]), - abs(R[2, 2]), + -abs(R[0, 0]), -abs(R[1, 1]), abs(R[2, 2]), ) segmentation = segmentation.astype(np.uint8) @@ -880,7 +869,5 @@ def _segment_materials( raise NotImplementedError return dict( - air=(hu_values == 0), - bone=(hu_values > 0), - titanium=(hu_values > 0), + air=(hu_values == 0), bone=(hu_values > 0), titanium=(hu_values > 0), ) diff --git a/example_projector.py b/example_projector.py index 5500c448..acfd6506 100644 --- a/example_projector.py +++ b/example_projector.py @@ -12,6 +12,7 @@ import deepdrr from deepdrr import geo from deepdrr.utils import test_utils, image_utils +from deepdrr.projector import Projector # set up fancy logging log = logging.getLogger().handlers.clear() @@ -31,7 +32,7 @@ def main(): carm = deepdrr.MobileCArm(patient.center_in_world) # project in the AP view - with deepdrr.Projector(patient, carm=carm) as projector: + with Projector(patient, carm=carm) as projector: carm.move_to(alpha=0, beta=-15) image = projector() diff --git a/setup.py b/setup.py index d605d37e..5d876360 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ setuptools.setup( name="deepdrr", - version="1.1.0a3", + version="1.1.0a4", author="Benjamin D. Killeen", author_email="killeen@jhu.edu", description="A Catalyst for Machine Learning in Fluoroscopy-guided Procedures.",
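
Editor's note (not part of the patch): the sketch below illustrates how two of the additions in this diff, `MobileCArm.to_config()` and `LineAnnotation.save()`, might be used together. The file paths are placeholders, the `LineAnnotation` import path is taken directly from the module path shown above, and rebuilding a `MobileCArm` via `MobileCArm(**config)` is an assumption based on the `to_config()` docstring ("can be used to initialize the C-arm in its current pose"), not something demonstrated in the patch itself.

```python
import json

import deepdrr
from deepdrr.annotations.line_annotation import LineAnnotation  # module path as in this diff

# Load a CT and set up a C-arm, as in example_projector.py.
volume = deepdrr.Volume.from_nifti("/path/to/ct_image.nii.gz")
carm = deepdrr.MobileCArm(volume.center_in_world)

# to_config() returns a json-safe dict describing the current pose, so the pose
# can be written to disk and (assumption) passed back to the constructor later.
config = carm.to_config()
with open("carm_pose.json", "w") as f:
    json.dump(config, f)
carm_restored = deepdrr.MobileCArm(**config)  # assumed round trip

# save() re-exports an annotation as a 3D Slicer mrk.json file with a chosen color.
annotation = LineAnnotation.from_markup("/path/to/annotation.mrk.json", volume)
annotation.save("/path/to/annotation_copy.mrk.json", color=[1.0, 0.0, 0.0])
```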