diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml new file mode 100644 index 0000000..7ffd3e4 --- /dev/null +++ b/.github/workflows/build_docs.yml @@ -0,0 +1,47 @@ +name: Build documentation + +on: + pull_request: + branches: [main] + workflow_call: + workflow_dispatch: + +env: + DEB_PYTHON_INSTALL_LAYOUT: deb_system + DISPLAY: ":99.0" + CI: 1 + +jobs: + + build: + runs-on: ubuntu-22.04 + env: + PUBLISH_DIR: ./_build/html + DISPLAY: ":99.0" + PYVISTA_TRAME_SERVER_PROXY_PREFIX: "/proxy/" + PYVISTA_TRAME_SERVER_PROXY_ENABLED: "True" + PYVISTA_OFF_SCREEN: false + PYVISTA_JUPYTER_BACKEND: "trame" + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install dependencies for pyvista + run: sudo apt-get update && sudo apt-get install -y libgl1-mesa-dev xvfb + + - name: Setup python + uses: actions/setup-python@v5 + with: + python-version: 3.12 + + - name: Install dependencies + run: python3 -m pip install ".[docs]" + + - name: Build docs + run: jupyter book build -W --keep-going . 
+ + - name: Upload artifact + uses: actions/upload-pages-artifact@v3 + with: + path: ${{ env.PUBLISH_DIR }} diff --git a/.github/workflows/deploy_docs.yml b/.github/workflows/deploy_docs.yml new file mode 100644 index 0000000..03d37b2 --- /dev/null +++ b/.github/workflows/deploy_docs.yml @@ -0,0 +1,41 @@ +name: Publish documentation + +on: + push: + branches: [main] + workflow_dispatch: + + +permissions: + contents: read + pages: write + id-token: write + +concurrency: + group: "pages" + cancel-in-progress: true + +jobs: + + build: + uses: ./.github/workflows/build_docs.yml + + deploy: + needs: build + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Pages + uses: actions/configure-pages@v5 + + + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index ee93dc8..f3a1464 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -24,9 +24,11 @@ jobs: with: python-version: ${{ matrix.python-version }} allow-prereleases: true + - name: Install mri2mesh run: | python -m pip install -e ".[test]" + - name: Test with pytest run: | python -m pytest --cov=mri2mesh --cov-report html --cov-report xml --cov-report term-missing -v diff --git a/.gitignore b/.gitignore index b3cc416..696bbbb 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +_build *.DS_Store # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 910200b..e963a88 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -8,6 +8,7 @@ repos: - id: end-of-file-fixer - id: check-yaml - id: check-added-large-files + args: ['--maxkb=1000'] - id: check-docstring-first - id: debug-statements - id: check-toml @@ -28,3 +29,9 @@ repos: - id: mypy files: ^src/|^tests/ args: ["--config-file", 
"pyproject.toml"] + + + - repo: https://github.com/kynan/nbstripout + rev: 0.8.1 + hooks: + - id: nbstripout diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..9db47ee --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,71 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our community include: + +- Demonstrating empathy and kindness toward other people +- Being respectful of differing opinions, viewpoints, and experiences +- Giving and gracefully accepting constructive feedback +- Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience +- Focusing on what is best not just for us as individuals, but for the overall community + +Examples of unacceptable behavior include: + +- The use of sexualized language or imagery, and sexual attention or advances +- Trolling, insulting or derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others’ private information, such as a physical or email address, without their explicit permission +- Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. 
+ +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders. All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact:** Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. + +**Consequence:** A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact:** A violation through a single incident or series of actions. + +**Consequence:** A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. 
+ +### 3. Temporary Ban + +**Community Impact:** A serious violation of community standards, including sustained inappropriate behavior. + +**Consequence:** A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact:** Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. + +**Consequence:** A permanent ban from any sort of public interaction within the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant](https://contributor-covenant.org/), version [1.4](https://www.contributor-covenant.org/version/1/4/code-of-conduct/code_of_conduct.md) and [2.0](https://www.contributor-covenant.org/version/2/0/code_of_conduct/code_of_conduct.md, and was generated by [contributing-gen](https://github.com/bttger/contributing-gen). diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..d18ce43 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,85 @@ +# Contributing + +When contributing to this repository, please first [create an issue](https://github.com/scientificcomputing/mri2mesh/issues/new/choose) containing information about the missing feature or the bug that you would like to fix. Here you can discuss the change you want to make with the maintainers of the repository. + +Please note we have a code of conduct, please follow it in all your interactions with the project. + +## New contributor guide + +To get an overview of the project, read the [documentation](https://scientificcomputing.github.io/mri2mesh/). 
Here are some resources to help you get started with open source contributions: + +- [Finding ways to contribute to open source on GitHub](https://docs.github.com/en/get-started/exploring-projects-on-github/finding-ways-to-contribute-to-open-source-on-github) +- [Set up Git](https://docs.github.com/en/get-started/quickstart/set-up-git) +- [GitHub flow](https://docs.github.com/en/get-started/quickstart/github-flow) +- [Collaborating with pull requests](https://docs.github.com/en/github/collaborating-with-pull-requests) + +## Pull Request Process + + +### Pull Request + +- When you're finished with the changes, create a pull request, also known as a PR. It is also OK to create a [draft pull request](https://github.blog/2019-02-14-introducing-draft-pull-requests/) from the very beginning. Once you are done you can click on the ["Ready for review"] button. You can also [request a review](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/requesting-a-pull-request-review) from one of the maintainers. +- Don't forget to [link PR to the issue that you opened ](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue). +- Enable the checkbox to [allow maintainer edits](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/allowing-changes-to-a-pull-request-branch-created-from-a-fork) so the branch can be updated for a merge. +Once you submit your PR, a team member will review your proposal. We may ask questions or request for additional information. +- We may ask for changes to be made before a PR can be merged, either using [suggested changes](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/incorporating-feedback-in-your-pull-request) or pull request comments. You can apply suggested changes directly through the UI. You can make any other changes in your fork, then commit them to your branch. 
+- As you update your PR and apply changes, mark each conversation as [resolved](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/commenting-on-a-pull-request#resolving-conversations). +- If you run into any merge issues, checkout this [git tutorial](https://lab.github.com/githubtraining/managing-merge-conflicts) to help you resolve merge conflicts and other issues. +- Please make sure that all tests are passing, github pages renders nicely, and code coverage are are not lower than before your contribution. You see the different github action workflows by clicking the "Action" tab in the GitHub repository. + + +### Enforced style guide using pre-commit hooks + +We want to have a consistent style on all the contributions to the repository. The way we enforce this is through pre-commit hooks and contributors are encouraged to install the pre-commit hooks locally when developing. You can install the pre commit hooks by first install `pre-commit` +``` +python3 -m pip install pre-commit +``` +and then install the pre-commit hooks using the command +``` +pre-commit install +``` +at the root of the repository. This will install all the hooks listed in the file called `.pre-commit-config.yaml` in the root of the repository. + +Every time you make a commit to the repository a set of tests will run to make sure that the changes you made are following the style guide. Usually, the hooks will autoformat your code so that you only need to do a `git add` again and then redo the `git commit`. + +Note that when you make a push to the repo, the pre-commit hooks will be run on all the files in the repository. You can also run the pre-commit hooks on all the files using the command +``` +pre-commit run --all +``` +To learn more about pre-commit you can check out https://pre-commit.com + +## Test suite +For every new feature of bugfix you should also make sure to not lower the code coverage for the test suite. 
This means that if you for example add a new function then you should also make sure that the function is properly tested (at a minimum it should be covered by the test suite). + +To run the test suite, please install the package with the optional dependencies `test`, i.e +``` +python3 -m pip install -e ".[test]" +``` +in the root of the repository. To run the tests you can execute the command +``` +python3 -m pytest +``` +You can read more about using pytest in the [official documentation of pytest](https://docs.pytest.org/). + +## Documentation +The documentation is hosted at GitHub pages and created with [`JupyterBook`](https://jupyterbook.org/en/stable/intro.html). Contributions to the documentation is very welcomed. + +To build the documentation locally you can installed the `docs` optional dependencies, i.e +``` +python3 -m pip install -e ".[docs]" +``` +in the root of the repository. Now you can build the documentation by running the command +``` +jupyter-book build . +``` +from the root of the repository. The command should complete without errors or warnings. In particular, you might want to run the command +``` +jupyter-book build -W --keep-going . +``` +which will turn warnings into errors. + +For reference, please see the [github workflow](https://github.com/scientificcomputing/mri2mesh/blob/main/.github/workflows/build_docs.yml) that is used for building the pages. + + +## Need help? +If something is not working as expected, or you need help please file an [issue](https://github.com/finsberg/gotranx/issues/new/choose). diff --git a/_config.yml b/_config.yml new file mode 100644 index 0000000..9ef4b58 --- /dev/null +++ b/_config.yml @@ -0,0 +1,49 @@ +# Book settings +# Learn more at https://jupyterbook.org/customize/config.html + +title: mri2mesh +author: Henrik Finsberg +logo: "docs/logo.png" +copyright: "2024" +only_build_toc_files: true + +# Force re-execution of notebooks on each build. 
+# See https://jupyterbook.org/content/execute.html +execute: + execute_notebooks: cache + timeout: 3000 + +# Information about where the book exists on the web +repository: + url: https://github.com/scientificcomputing/mri2mesh # Online location of your book + branch: main + + +html: + use_issues_button: true + use_repository_button: true + +parse: + myst_enable_extensions: + - amsmath + - dollarmath + - linkify + +sphinx: + config: + html_last_updated_fmt: "%b %d, %Y" + # bibtex_bibfiles: ["docs/refs.bib"] + nb_execution_show_tb: True + nb_custom_formats: # https://jupyterbook.org/en/stable/file-types/jupytext.html#file-types-custom + .py: + - jupytext.reads + - fmt: py + + suppress_warnings: ["mystnb.unknown_mime_type"] #, "bibtex.duplicate_citation"] + extra_extensions: + - 'sphinx.ext.autodoc' + - 'sphinx.ext.napoleon' + - 'sphinx.ext.viewcode' + # - 'sphinxcontrib.bibtex' + +exclude_patterns: [".pytest_cache/*", ".github/*", ".tox/*", "third_party/*", "venv", "examples/wildmeshing-python/**"] diff --git a/_toc.yml b/_toc.yml new file mode 100644 index 0000000..bc7a9db --- /dev/null +++ b/_toc.yml @@ -0,0 +1,13 @@ +format: jb-book +root: README + +parts: + - caption: Examples + chapters: + - file: "examples/synthseg.ipynb" + - caption: Community + chapters: + - file: "CONTRIBUTING" + - caption: Python API + chapters: + - file: "docs/api" diff --git a/docs/api.rst b/docs/api.rst new file mode 100644 index 0000000..2aad78b --- /dev/null +++ b/docs/api.rst @@ -0,0 +1,5 @@ +API Reference +============= + +.. 
automodule:: mri2mesh + :members: diff --git a/docs/logo.png b/docs/logo.png new file mode 100644 index 0000000..dd93cac Binary files /dev/null and b/docs/logo.png differ diff --git a/examples/201_t13d_synthseg.nii.gz b/examples/201_t13d_synthseg.nii.gz new file mode 100644 index 0000000..c30a043 Binary files /dev/null and b/examples/201_t13d_synthseg.nii.gz differ diff --git a/examples/synthseg.ipynb b/examples/synthseg.ipynb new file mode 100644 index 0000000..0269d01 --- /dev/null +++ b/examples/synthseg.ipynb @@ -0,0 +1,435 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "# SynthSeg example\n", + "\n", + "In this example we will show how to use the `mri2mesh` package to visualize the voxels and to generate surfaces from a synthetic segmentation. The synthetic segmentation here is assumed to allready be generated using the `SynthSeg` package. The synthetic segmentation is a 3D volume where each voxel has a label. First we do the neccerasy imports and set the pyvista backend to `trame`. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [], + "source": [ + "from pathlib import Path\n", + "import mri2mesh\n", + "import numpy as np\n", + "import pyvista as pv\n", + "\n", + "pv.start_xvfb()\n", + "pv.set_jupyter_backend('trame')" + ] + }, + { + "cell_type": "markdown", + "id": "2", + "metadata": {}, + "source": [ + "Note that to get the visualization to show correctly you might need to set the following environment variables\n", + "\n", + "```\n", + "export DISPLAY=\":99.0\"\n", + "export PYVISTA_TRAME_SERVER_PROXY_PREFIX=\"/proxy/\"\n", + "export PYVISTA_TRAME_SERVER_PROXY_ENABLED=\"True\"\n", + "export PYVISTA_OFF_SCREEN=false\n", + "export PYVISTA_JUPYTER_BACKEND=\"html\"\n", + "```\n", + "\n", + "Next we will try to visualize a Nifty File with a volume clip. 
Here we will load a allready segmented brain from the dataset https://zenodo.org/records/4899120 (the `ernie` case), which has been segmented with [`SynthSeg`](https://github.com/BBillot/SynthSeg)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# Path to the Nifty file\n", + "path = Path(\"201_t13d_synthseg.nii.gz\")\n", + "mri2mesh.viz.volume_clip.main(path)" + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "We could also visualize it as a slice" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "mri2mesh.viz.volume_slice.main(path)" + ] + }, + { + "cell_type": "markdown", + "id": "6", + "metadata": {}, + "source": [ + "Here we visualize all three slices, but you can all specify which axis to show by passing the `axis` keyword (showing all axis correspond to `axis=3`)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "mri2mesh.viz.volume_slice.main(path, axis=2)" + ] + }, + { + "cell_type": "markdown", + "id": "8", + "metadata": {}, + "source": [ + "To see all the different visalization options you can do" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "mri2mesh.viz.list_viz_commands()" + ] + }, + { + "cell_type": "markdown", + "id": "10", + "metadata": {}, + "source": [ + "You can also visualize `numpy`arrays directly, for example by first loadind the Nifty file with `nibabel`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "import nibabel as nib\n", + "\n", + "img = nib.load(path)\n", + "vol = img.get_fdata()" + ] + }, + { + "cell_type": "markdown", + "id": "12", + "metadata": {}, + "source": [ + "However, you would need to first 
convert the volume to a vtk object" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "img = mri2mesh.vtk_utils.numpy_to_vtkImageData(vol)" + ] + }, + { + "cell_type": "markdown", + "id": "14", + "metadata": {}, + "source": [ + "Now, you can visualize the image directly" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "mri2mesh.viz.volume_clip.volume_clip(img)" + ] + }, + { + "cell_type": "markdown", + "id": "16", + "metadata": {}, + "source": [ + "The volume contains different labels for the different regions. For example we could plot different slices for the background, which has the label `0`. Let us plot a $5\\times 5$ grid for the slices in the $x$-direction" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "mask = vol == 0\n", + "mri2mesh.viz.mpl_slice.plot_slices(mask, cmap=\"gray\", add_colorbar=True, nx=5, ny=5, slice=\"y\")" + ] + }, + { + "cell_type": "markdown", + "id": "18", + "metadata": {}, + "source": [ + "Since this particular image is segmented with `SynthSeg` we know the labels for each region. These are also found in " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "labels = mri2mesh.segmentation_labels.SYNTHSEG_LABELS\n", + "for k, v in labels.items():\n", + " print(f\"{k}: {v}\")" + ] + }, + { + "cell_type": "markdown", + "id": "20", + "metadata": {}, + "source": [ + "## Extracting the surface of the paranchyma\n", + "\n", + "We can first try to extract the parenchyma surface. 
To do this we first extract all the labels that are not fluid, and then we first remove small objects and then fill inn the small holes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "import skimage.morphology as skim\n", + "\n", + "par_mask = np.logical_not(np.isin(vol, labels[\"FLUID\"]))\n", + "par_mask = skim.remove_small_objects(par_mask, mri2mesh.constants.HOLE_THRESHOLD)\n", + "par_mask = skim.remove_small_holes(par_mask, mri2mesh.constants.HOLE_THRESHOLD)" + ] + }, + { + "cell_type": "markdown", + "id": "22", + "metadata": {}, + "source": [ + "We can now plot the slices of the corresponding mask" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [ + "mri2mesh.viz.mpl_slice.plot_slices(par_mask, cmap=\"gray\", add_colorbar=True, nx=5, ny=5, slice=\"z\")" + ] + }, + { + "cell_type": "markdown", + "id": "24", + "metadata": {}, + "source": [ + "We can also extract the isosurface of the mask by first converting the mask to a vtk object and then plotting it with pyvista" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "par_mask_img = mri2mesh.vtk_utils.numpy_to_vtkImageData(par_mask.astype(int))\n", + "plotter = pv.Plotter()\n", + "par_mask_isosurface = par_mask_img.contour([1.0])\n", + "plotter.add_mesh_clip_plane(par_mask_isosurface, color=\"white\", show_edges=True)\n", + "plotter.show()" + ] + }, + { + "cell_type": "markdown", + "id": "26", + "metadata": {}, + "source": [ + "Now let us generate the surface using the marching cubes algorithm" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27", + "metadata": {}, + "outputs": [], + "source": [ + "par_surf = mri2mesh.surface.utils.extract_surface(par_mask)\n", + "\n", + "plotter = pv.Plotter()\n", + "plotter.add_mesh_clip_plane(par_surf)\n", + 
"plotter.show()" + ] + }, + { + "cell_type": "markdown", + "id": "28", + "metadata": {}, + "source": [ + "## Extracting surfaces of the lateral ventricles \n", + "\n", + "Another serfacce we could extract is the surface of the left and right lateral ventricles with label 4 and 43 respectively. You can first plot the slices" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29", + "metadata": {}, + "outputs": [], + "source": [ + "mask_lateral_ventricles = np.logical_or(vol == 4, vol == 43)\n", + "mri2mesh.viz.mpl_slice.plot_slices(mask_lateral_ventricles, cmap=\"gray\", add_colorbar=True, nx=5, ny=5, slice=\"z\")" + ] + }, + { + "cell_type": "markdown", + "id": "30", + "metadata": {}, + "source": [ + "We can also plot the isosurface with pyvista" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "31", + "metadata": {}, + "outputs": [], + "source": [ + "mask_lateral_ventricles_img = mri2mesh.vtk_utils.numpy_to_vtkImageData(mask_lateral_ventricles.astype(int))\n", + "plotter = pv.Plotter()\n", + "surface = mask_lateral_ventricles_img.contour([1.0])\n", + "plotter.add_mesh(surface)\n", + "plotter.show()" + ] + }, + { + "cell_type": "markdown", + "id": "32", + "metadata": {}, + "source": [ + "We can now generate a surface of this mask using `pyvista`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33", + "metadata": {}, + "outputs": [], + "source": [ + "surf_lateral_ventricles = mri2mesh.surface.utils.extract_surface(mask_lateral_ventricles)" + ] + }, + { + "cell_type": "markdown", + "id": "34", + "metadata": {}, + "source": [ + "Let us plot the surface with `pyvista`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "35", + "metadata": {}, + "outputs": [], + "source": [ + "surf_lateral_ventricles = mri2mesh.surface.utils.extract_surface(mask_lateral_ventricles)\n", + "plotter = pv.Plotter()\n", + "plotter.add_mesh(surf_lateral_ventricles)\n", + "plotter.show()" + ] + }, + { + 
"cell_type": "markdown", + "id": "36", + "metadata": {}, + "source": [ + "We see that the surface is not very smooth, but we can use the `smooth_taubin` method to smooth it\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "37", + "metadata": {}, + "outputs": [], + "source": [ + "surf_lateral_ventricles_smooth = surf_lateral_ventricles.smooth_taubin(n_iter=20, pass_band=0.05)\n", + "plotter = pv.Plotter()\n", + "plotter.add_mesh(surf_lateral_ventricles_smooth)\n", + "plotter.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "jupytext": { + "cell_metadata_filter": "-all", + "main_language": "python", + "notebook_metadata_filter": "-all" + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/pyproject.toml b/pyproject.toml index 4ab280f..44d2dd3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,6 +30,8 @@ mri2mesh = "mri2mesh.cli:main" [project.optional-dependencies] test = ["pytest", "pytest-cov"] +docs = ["pyvista[jupyter]", "jupyter-book"] + [tool.setuptools] @@ -41,8 +43,6 @@ zip-safe = false where = ["src"] namespaces = false -[tool.setuptools.package-data] -"*" = ["*.lark"] [tool.aliases] test = "pytest" diff --git a/src/mri2mesh/reader.py b/src/mri2mesh/reader.py index 4610a59..291d0ae 100644 --- a/src/mri2mesh/reader.py +++ b/src/mri2mesh/reader.py @@ -1,3 +1,4 @@ +from __future__ import annotations from pathlib import Path import logging import typing @@ -13,7 +14,7 @@ logger = logging.getLogger(__name__) -@dataclass(slots=True) +@dataclass # (slots=True) # - 
will be added when we drop python3.9 class Segmentation: img: np.ndarray labels: dict[str, list[int]] = field(default_factory=lambda: SYNTHSEG_LABELS) diff --git a/src/mri2mesh/surface/parenchyma.py b/src/mri2mesh/surface/parenchyma.py index c36a7de..af16d64 100644 --- a/src/mri2mesh/surface/parenchyma.py +++ b/src/mri2mesh/surface/parenchyma.py @@ -1,3 +1,4 @@ +from __future__ import annotations import argparse import typing from pathlib import Path diff --git a/src/mri2mesh/surface/utils.py b/src/mri2mesh/surface/utils.py index a8b08e9..ffb5dd6 100644 --- a/src/mri2mesh/surface/utils.py +++ b/src/mri2mesh/surface/utils.py @@ -4,7 +4,7 @@ def extract_surface(img, resolution=(1, 1, 1), origin=(0, 0, 0)): # img should be a binary 3D np.array grid = pv.ImageData(dimensions=img.shape, spacing=resolution, origin=origin) - mesh = grid.contour([0.5], img.flatten(order="F"), method="marching_cubes") + mesh = grid.contour(isosurfaces=[0.5], scalars=img.flatten(order="F"), method="marching_cubes") surf = mesh.extract_geometry() surf.clear_data() return surf diff --git a/src/mri2mesh/viz/__init__.py b/src/mri2mesh/viz/__init__.py index 0208ef1..492f542 100644 --- a/src/mri2mesh/viz/__init__.py +++ b/src/mri2mesh/viz/__init__.py @@ -7,6 +7,10 @@ __all__ = ["add_viz_parser", "dispatch", "numpy_to_vtkImageData", "plot_slices", "mpl_slice"] +def list_viz_commands() -> list[str]: + return ["volume-clip", "volume-slice", "volume-threshold", "mpl-slice"] + + def add_viz_parser(parser: argparse.ArgumentParser) -> None: subparsers = parser.add_subparsers(dest="viz-command") diff --git a/src/mri2mesh/viz/mpl_slice.py b/src/mri2mesh/viz/mpl_slice.py index 8a2e15c..9121605 100644 --- a/src/mri2mesh/viz/mpl_slice.py +++ b/src/mri2mesh/viz/mpl_slice.py @@ -1,4 +1,6 @@ +from __future__ import annotations import argparse +import typing from pathlib import Path import nibabel as nib import matplotlib.pyplot as plt @@ -69,48 +71,28 @@ def main( return 0 -def plot_slices( - img, - outdir, - 
tag="", - cmap="gray", - labels=None, - add_colorbar=False, - nx: int = 5, - ny: int = 5, +def plot_slice( + img: np.ndarray, + outdir: Path | None, + tag: str, + cmap: str, + labels, + add_colorbar: bool, + nx: int, + ny: int, + slice: str, ): fig, ax = plt.subplots(nx, ny, figsize=(20, 20)) minval = np.min(img) maxval = np.max(img) - for k, i in enumerate(np.linspace(0, img.shape[0], 26, dtype=int)[:-1]): - ax.flatten()[k].set_title(f"{i}") - im = ax.flatten()[k].imshow(img[i, :, :], cmap=cmap, vmin=minval, vmax=maxval) - if add_colorbar: - fig.subplots_adjust(right=0.8) - cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7]) - cax = fig.colorbar(im, cax=cbar_ax) - if labels: - cax.ax.set_yticks(np.array(list(labels.values()))) - cax.ax.set_yticklabels(np.array(list(labels.keys()))) - fig.savefig(outdir / f"{tag}x_slice.png") - fig, ax = plt.subplots(nx, ny, figsize=(20, 20)) - for k, i in enumerate(np.linspace(0, img.shape[1], 26, dtype=int)[:-1]): - ax.flatten()[k].set_title(f"{i}") - im = ax.flatten()[k].imshow(img[:, i, :], cmap=cmap, vmin=minval, vmax=maxval) - if add_colorbar: - fig.subplots_adjust(right=0.8) - cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7]) - cax = fig.colorbar(im, cax=cbar_ax) - if labels: - cax.ax.set_yticks(np.array(list(labels.values()))) - cax.ax.set_yticklabels(np.array(list(labels.keys()))) - fig.savefig(outdir / f"{tag}y_slice.png") - fig, ax = plt.subplots(nx, ny, figsize=(20, 20)) - for k, i in enumerate(np.linspace(0, img.shape[2], 26, dtype=int)[:-1]): + slice2axis = {"x": 0, "y": 1, "z": 2} + axis = slice2axis[slice] + + for k, i in enumerate(np.linspace(0, img.shape[axis], 26, dtype=int)[:-1]): ax.flatten()[k].set_title(f"{i}") - im = ax.flatten()[k].imshow(img[:, :, i], cmap=cmap, vmin=minval, vmax=maxval) + img_i = img.take(indices=i, axis=axis) + im = ax.flatten()[k].imshow(img_i, cmap=cmap, vmin=minval, vmax=maxval) if add_colorbar: fig.subplots_adjust(right=0.8) cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7]) @@ -118,5 
+100,27 @@ def plot_slices( if labels: cax.ax.set_yticks(np.array(list(labels.values()))) cax.ax.set_yticklabels(np.array(list(labels.keys()))) - fig.savefig(outdir / f"{tag}z_slice.png") + if outdir is not None: + fig.savefig(outdir / f"{tag}x_slice.png") + else: + plt.show() plt.close("all") + + +def plot_slices( + img: np.ndarray, + outdir: Path | None = None, + tag="", + cmap="gray", + labels=None, + add_colorbar=False, + nx: int = 5, + ny: int = 5, + slice: typing.Literal["x", "y", "z", "all"] = "all", +): + if slice == "all": + plot_slice(img, outdir, tag, cmap, labels, add_colorbar, nx, ny, "x") + plot_slice(img, outdir, tag, cmap, labels, add_colorbar, nx, ny, "y") + plot_slice(img, outdir, tag, cmap, labels, add_colorbar, nx, ny, "z") + else: + plot_slice(img, outdir, tag, cmap, labels, add_colorbar, nx, ny, slice) diff --git a/src/mri2mesh/viz/volume_slice.py b/src/mri2mesh/viz/volume_slice.py index 3160329..836b596 100644 --- a/src/mri2mesh/viz/volume_slice.py +++ b/src/mri2mesh/viz/volume_slice.py @@ -76,13 +76,12 @@ def update(value): name="slice", normal=normal, origin=origin, - title=f"{index2title[axis]}-coordinate", ) plotter.show() -def main(input: Path, axis: int = 0, slider: bool = False) -> int: +def main(input: Path, axis: int = 3, slider: bool = False) -> int: reader = pv.get_reader(input) mesh = reader.read() if axis == 3: diff --git a/tests/test_morphology.py b/tests/test_morphology.py new file mode 100644 index 0000000..47220fe --- /dev/null +++ b/tests/test_morphology.py @@ -0,0 +1,34 @@ +import numpy as np +import skimage.morphology as skim + +from mri2mesh import morphology as morph + + +def test_closest_point(): + a = np.zeros((10, 10, 10), dtype=bool) + b = np.zeros((10, 10, 10), dtype=bool) + + a[:2, :2, :2] = True + b[5:7, 5:7, 5:7] = True + + a_b_index = morph.get_closest_point(a, b) + assert np.allclose(a_b_index, (5, 5, 5)) + + b_a_index = morph.get_closest_point(b, a) + assert np.allclose(b_a_index, (1, 1, 1)) + + +def 
test_connect_by_line(): + m1 = np.zeros((3, 3, 3), dtype=bool) + m2 = np.zeros((3, 3, 3), dtype=bool) + + m1[0, 0, 0] = True + m2[2, 0, 0] = True + + conn = morph.connect_by_line(m1, m2, footprint=skim.ball(1)) + expected = np.zeros((3, 3, 3), dtype=bool) + expected[:, 0, 0] = True + expected[:, 1, 0] = True + expected[:, 0, 1] = True + + assert np.allclose(conn, expected)