diff --git a/README.md b/README.md
index 817121c1..3d1549c4 100644
--- a/README.md
+++ b/README.md
@@ -197,7 +197,7 @@ If the 2D/3D registration capabilities are helpful, please cite our followup, [`
       year={2024}
     }
 
-If you use the 3D CBCT reconstruction capabilities, please cite our followup, [`DiffVox`](https://arxiv.org/abs/2312.06358):
+If you use the 3D CBCT reconstruction capabilities, please cite our followup, [`DiffVox`](https://arxiv.org/abs/2411.19224):
 
     @article{momeni2024voxel,
       title={Voxel-based Differentiable X-ray Rendering Improves Self-Supervised 3D CBCT Reconstruction},
diff --git a/diffdrr/_modidx.py b/diffdrr/_modidx.py
index 73988e8a..241e8355 100644
--- a/diffdrr/_modidx.py
+++ b/diffdrr/_modidx.py
@@ -175,6 +175,7 @@
                                 'diffdrr.utils.resample': ('api/utils.html#resample', 'diffdrr/utils.py')},
             'diffdrr.visualization': { 'diffdrr.visualization._make_camera_frustum_mesh': ( 'api/visualization.html#_make_camera_frustum_mesh',
                                                                                              'diffdrr/visualization.py'),
+                                       'diffdrr.visualization.add_image': ('api/visualization.html#add_image', 'diffdrr/visualization.py'),
                                        'diffdrr.visualization.animate': ('api/visualization.html#animate', 'diffdrr/visualization.py'),
                                        'diffdrr.visualization.drr_to_mesh': ( 'api/visualization.html#drr_to_mesh',
                                                                               'diffdrr/visualization.py'),
@@ -183,5 +184,6 @@
                                        'diffdrr.visualization.labelmap_to_mesh': ( 'api/visualization.html#labelmap_to_mesh',
                                                                                    'diffdrr/visualization.py'),
                                        'diffdrr.visualization.plot_drr': ('api/visualization.html#plot_drr', 'diffdrr/visualization.py'),
-                                       'diffdrr.visualization.plot_mask': ( 'api/visualization.html#plot_mask',
-                                                                            'diffdrr/visualization.py')}}}
+                                       'diffdrr.visualization.plot_mask': ('api/visualization.html#plot_mask', 'diffdrr/visualization.py'),
+                                       'diffdrr.visualization.visualize_scene': ( 'api/visualization.html#visualize_scene',
+                                                                                  'diffdrr/visualization.py')}}}
diff --git a/diffdrr/data.py b/diffdrr/data.py
index 96b3906e..c7c5e481 100644
--- a/diffdrr/data.py
+++ b/diffdrr/data.py
@@ -161,6 +161,8 @@ def read(
             dim=0,
         )
 
+        subject.volume.data = subject.volume.data * mask
+        subject.mask.data = subject.mask.data * mask
         subject.density.data = subject.density.data * mask
 
     return subject
diff --git a/diffdrr/visualization.py b/diffdrr/visualization.py
index 7a9f78b2..3e74b181 100644
--- a/diffdrr/visualization.py
+++ b/diffdrr/visualization.py
@@ -12,7 +12,7 @@
 from tqdm import tqdm
 
 # %% auto 0
-__all__ = ['plot_drr', 'plot_mask', 'animate', 'drr_to_mesh', 'labelmap_to_mesh', 'img_to_mesh']
+__all__ = ['plot_drr', 'plot_mask', 'animate', 'drr_to_mesh', 'labelmap_to_mesh', 'img_to_mesh', 'visualize_scene', 'add_image']
 
 # %% ../notebooks/api/04_visualization.ipynb 5
 import torch
@@ -352,3 +352,39 @@ def _make_camera_frustum_mesh(source, target, size=0.125):
         ]
     )
     return pyvista.PolyData(vertices, faces)
+
+# %% ../notebooks/api/04_visualization.ipynb 15
+def visualize_scene(
+    drr: DRR,
+    pose: RigidTransform,
+    labelmap: bool = False,
+    grid: bool = True,
+    verbose: bool = False,
+    **kwargs
+):
+    """
+    Given a DRR and a RigidTransform, render the 3D scene in PyVista.
+    **kwargs are passed to drr_to_mesh.
+    """
+    # Extract a mesh from the subject
+    if labelmap:
+        mesh = labelmap_to_mesh(drr.subject, verbose=verbose)
+    else:
+        mesh = drr_to_mesh(drr.subject, "surface_nets", verbose=verbose, **kwargs)
+
+    # Plot on a grid
+    pl = pyvista.Plotter()
+    pl.add_mesh(mesh)
+    pl = add_image(drr, pose, pl)
+    if grid:
+        pl.show_grid()
+    return pl
+
+
+def add_image(drr: DRR, pose: RigidTransform, pl: pyvista.Plotter):
+    """Add a camera to an existing scene."""
+    camera, detector, texture, principal_ray = img_to_mesh(drr, pose)
+    pl.add_mesh(camera, show_edges=True)
+    pl.add_mesh(detector, texture=texture)
+    pl.add_mesh(principal_ray, color="lime", line_width=3)
+    return pl
diff --git a/notebooks/api/03_data.ipynb b/notebooks/api/03_data.ipynb
index 69db0397..c1b3b943 100644
--- a/notebooks/api/03_data.ipynb
+++ b/notebooks/api/03_data.ipynb
@@ -232,6 +232,8 @@
     "            dim=0,\n",
     "        )\n",
     "\n",
+    "        subject.volume.data = subject.volume.data * mask\n",
+    "        subject.mask.data = subject.mask.data * mask\n",
     "        subject.density.data = subject.density.data * mask\n",
     "\n",
     "    return subject"
diff --git a/notebooks/api/04_visualization.ipynb b/notebooks/api/04_visualization.ipynb
index 89c6594f..f3bd1091 100644
--- a/notebooks/api/04_visualization.ipynb
+++ b/notebooks/api/04_visualization.ipynb
@@ -483,6 +483,50 @@
     "    return pyvista.PolyData(vertices, faces)"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ebb5fe6b-886c-415a-8881-5387269f7950",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#| export\n",
+    "def visualize_scene(\n",
+    "    drr: DRR,\n",
+    "    pose: RigidTransform,\n",
+    "    labelmap: bool = False,\n",
+    "    grid: bool = True,\n",
+    "    verbose: bool = False,\n",
+    "    **kwargs\n",
+    "):\n",
+    "    \"\"\"\n",
+    "    Given a DRR and a RigidTransform, render the 3D scene in PyVista.\n",
+    "    **kwargs are passed to drr_to_mesh.\n",
+    "    \"\"\"\n",
+    "    # Extract a mesh from the subject\n",
+    "    if labelmap:\n",
+    "        mesh = labelmap_to_mesh(drr.subject, verbose=verbose)\n",
+    "    else:\n",
+    "        mesh = drr_to_mesh(drr.subject, \"surface_nets\", verbose=verbose, **kwargs)\n",
+    "\n",
+    "    # Plot on a grid\n",
+    "    pl = pyvista.Plotter()\n",
+    "    pl.add_mesh(mesh)\n",
+    "    pl = add_image(drr, pose, pl)\n",
+    "    if grid:\n",
+    "        pl.show_grid()\n",
+    "    return pl\n",
+    "\n",
+    "\n",
+    "def add_image(drr: DRR, pose: RigidTransform, pl: pyvista.Plotter):\n",
+    "    \"\"\"Add a camera to an existing scene.\"\"\"\n",
+    "    camera, detector, texture, principal_ray = img_to_mesh(drr, pose)\n",
+    "    pl.add_mesh(camera, show_edges=True)\n",
+    "    pl.add_mesh(detector, texture=texture)\n",
+    "    pl.add_mesh(principal_ray, color=\"lime\", line_width=3)\n",
+    "    return pl"
+   ]
+  },
  {
   "cell_type": "code",
   "execution_count": null,
diff --git a/notebooks/index.ipynb b/notebooks/index.ipynb
index 5d1c5a31..a35afee8 100644
--- a/notebooks/index.ipynb
+++ b/notebooks/index.ipynb
@@ -48,9 +48,17 @@
    "source": [
     "## Install\n",
     "\n",
+    "To install the latest stable release (**recommended**):\n",
+    "\n",
     "```zsh\n",
     "pip install diffdrr\n",
-    "```"
+    "```\n",
+    "\n",
+    "To install the development version:\n",
+    "\n",
+    "```zsh\n",
+    "git clone https://github.com/eigenvivek/DiffDRR.git --depth 1\n",
+    "pip install -e 'DiffDRR/[dev]'"
    ]
   },
   {
@@ -253,19 +261,36 @@
    "source": [
     "## Citing `DiffDRR`\n",
     "\n",
-    "If you find `DiffDRR` useful in your work, please cite our [paper](https://doi.org/10.1007/978-3-031-23179-7_1) (or the [freely accessible arXiv version](https://arxiv.org/abs/2208.12737)):\n",
-    "\n",
-    "```\n",
-    "@inproceedings{gopalakrishnanDiffDRR2022,\n",
-    "  author = {Gopalakrishnan, Vivek and Golland, Polina},\n",
-    "  title = {Fast Auto-Differentiable Digitally Reconstructed Radiographs for Solving Inverse Problems in Intraoperative Imaging},\n",
-    "  year = {2022},\n",
-    "  booktitle = {Clinical Image-based Procedures: 11th International Workshop, CLIP 2022, Held in Conjunction with MICCAI 2022, Singapore, Proceedings},\n",
-    "  series = {Lecture Notes in Computer Science},\n",
-    "  publisher = {Springer},\n",
-    "  doi = {https://doi.org/10.1007/978-3-031-23179-7_1},\n",
-    "}\n",
-    "```"
+    "If you find `DiffDRR` useful in your work, please cite our\n",
+    "[paper](https://arxiv.org/abs/2208.12737):\n",
+    "\n",
+    "    @inproceedings{gopalakrishnan2022fast,\n",
+    "      title={Fast auto-differentiable digitally reconstructed radiographs for solving inverse problems in intraoperative imaging},\n",
+    "      author={Gopalakrishnan, Vivek and Golland, Polina},\n",
+    "      booktitle={Workshop on Clinical Image-Based Procedures},\n",
+    "      pages={1--11},\n",
+    "      year={2022},\n",
+    "      organization={Springer}\n",
+    "    }\n",
+    "\n",
+    "If the 2D/3D registration capabilities are helpful, please cite our followup, [`DiffPose`](https://arxiv.org/abs/2312.06358):\n",
+    "\n",
+    "    @article{gopalakrishnan2023intraoperative,\n",
+    "      title={Intraoperative {2D/3D} image registration via differentiable {X}-ray rendering},\n",
+    "      author={Gopalakrishnan, Vivek and Dey, Neel and Golland, Polina},\n",
+    "      booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},\n",
+    "      pages={11662--11672},\n",
+    "      year={2024}\n",
+    "    }\n",
+    "\n",
+    "If you use the 3D CBCT reconstruction capabilities, please cite our followup, [`DiffVox`](https://arxiv.org/abs/2411.19224):\n",
+    "\n",
+    "    @article{momeni2024voxel,\n",
+    "      title={Voxel-based Differentiable X-ray Rendering Improves Self-Supervised 3D CBCT Reconstruction},\n",
+    "      author={Momeni, Mohammadhossein and Gopalakrishnan, Vivek and Dey, Neel and Golland, Polina and Frisken, Sarah},\n",
+    "      booktitle={Machine Learning and the Physical Sciences, NeurIPS 2024},\n",
+    "      year={2024}\n",
+    "    }"
    ]
   },
   {