diff --git a/.github/workflows/draft-pdf.yml b/.github/workflows/draft-pdf.yml new file mode 100644 index 0000000..e261678 --- /dev/null +++ b/.github/workflows/draft-pdf.yml @@ -0,0 +1,25 @@ +name: JOSS Paper Compile + +on: [push] + +jobs: + paper: + runs-on: ubuntu-latest + name: Paper Draft + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Build draft PDF + uses: openjournals/openjournals-draft-action@master + with: + journal: joss + # This should be the path to the paper within your repo. + paper-path: paper.md + - name: Upload + uses: actions/upload-artifact@v1 + with: + name: paper + # This is the output path where Pandoc will write the compiled + # PDF. Note, this should be the same directory as the input + # paper.md + path: paper.pdf diff --git a/.github/workflows/taufactor.yml b/.github/workflows/taufactor.yml new file mode 100644 index 0000000..3a89f79 --- /dev/null +++ b/.github/workflows/taufactor.yml @@ -0,0 +1,37 @@ +# This workflow will install Python dependencies, run tests and lint with a single version of Python +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python + +name: TauFactor Tests + +on: [push, pull_request] + +permissions: + contents: read + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.8 + uses: actions/setup-python@v3 + with: + python-version: "3.8" + - name: Install dependencies + run: | + $CONDA/bin/conda env update --file tests/environment.yml --name base + - name: Install taufactor + run: | + $CONDA/bin/pip install -e . + - name: Lint with flake8 + run: | + $CONDA/bin/conda install flake8 + # stop the build if there are Python syntax errors or undefined names + $CONDA/bin/flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide + $CONDA/bin/flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + - name: Test with pytest + run: | + $CONDA/bin/conda install pytest + $CONDA/bin/python -m pytest diff --git a/.readthedocs.yml b/.readthedocs.yml index 48d844a..b41cc55 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -7,10 +7,10 @@ version: 2 # Build documentation in the docs/ directory with Sphinx sphinx: - configuration: docs/conf.py + configuration: docs/conf.py # Optionally set the version of Python and requirements required to build your docs python: - version: 3.7 - install: - - requirements: docs/requirements.txt \ No newline at end of file + version: 3.8 + install: + - requirements: docs/requirements.txt diff --git a/AUTHORS.md b/AUTHORS.md new file mode 100644 index 0000000..c72739a --- /dev/null +++ b/AUTHORS.md @@ -0,0 +1,11 @@ +# Authors + +## Team + +- Sam Cooper +- Isaac Squires +- Steve Kench + +## Contributors + +None yet. Why not be the first? diff --git a/AUTHORS.rst b/AUTHORS.rst deleted file mode 100644 index dc7d6ee..0000000 --- a/AUTHORS.rst +++ /dev/null @@ -1,15 +0,0 @@ -======= -Credits -======= - -Team ----------------- - -* Sam Cooper -* Isaac Squires -* Steve Kench - -Contributors ------------- - -None yet. Why not be the first? diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.md similarity index 50% rename from CONTRIBUTING.rst rename to CONTRIBUTING.md index 4bed0bc..edeeae4 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.md @@ -1,125 +1,120 @@ -.. highlight:: shell - -============ -Contributing -============ +# Contributing Contributions are welcome, and they are greatly appreciated! Every little bit helps, and credit will always be given. You can contribute in many ways: -Types of Contributions ----------------------- +## Types of Contributions Report Bugs -~~~~~~~~~~~ Report bugs at https://github.com/tldr-group/taufactor/issues. If you are reporting a bug, please include: -* Your operating system name and version. 
-* Any details about your local setup that might be helpful in troubleshooting. -* Detailed steps to reproduce the bug. +- Your operating system name and version. +- Any details about your local setup that might be helpful in troubleshooting. +- Detailed steps to reproduce the bug. -Fix Bugs -~~~~~~~~ +## Fix Bugs Look through the GitHub issues for bugs. Anything tagged with "bug" and "help wanted" is open to whoever wants to implement it. -Implement Features -~~~~~~~~~~~~~~~~~~ +## Implement Features Look through the GitHub issues for features. Anything tagged with "enhancement" and "help wanted" is open to whoever wants to implement it. -Write Documentation -~~~~~~~~~~~~~~~~~~~ +## Write Documentation TauFactor could always use more documentation, whether as part of the official TauFactor docs, in docstrings, or even on the web in blog posts, articles, and such. -Submit Feedback -~~~~~~~~~~~~~~~ +## Submit Feedback The best way to send feedback is to file an issue at https://github.com/tldr-group/taufactor/issues. If you are proposing a feature: -* Explain in detail how it would work. -* Keep the scope as narrow as possible, to make it easier to implement. -* Remember that this is a volunteer-driven project, and that contributions - are welcome :) +- Explain in detail how it would work. +- Keep the scope as narrow as possible, to make it easier to implement. +- Remember that this is a volunteer-driven project, and that contributions + are welcome :) -Get Started! ------------- +## Get Started! Ready to contribute? Here's how to set up `taufactor` for local development. 1. Fork the `taufactor` repo on GitHub. -2. Clone your fork locally:: - - $ git clone git@github.com:your_name_here/taufactor.git +2. Clone your fork locally: -3. Install your local copy into a virtualenv. 
Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development:: +``` +git clone git@github.com:your_name_here/taufactor.git +``` - $ mkvirtualenv taufactor - $ cd taufactor/ - $ python setup.py develop +3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development: -4. Create a branch for local development:: +``` +mkvirtualenv taufactor +cd taufactor/ +python setup.py develop +``` - $ git checkout -b name-of-your-bugfix-or-feature +4. Create a branch for local development: - Now you can make your changes locally. +``` +git checkout -b name-of-your-bugfix-or-feature +``` 5. When you're done making changes, check that your changes pass flake8 and the - tests, including testing other Python versions with tox:: + tests, including testing other Python versions with tox: - $ flake8 taufactor tests - $ python setup.py test or pytest - $ tox +``` +flake8 taufactor tests +python setup.py test or pytest +tox +``` - To get flake8 and tox, just pip install them into your virtualenv. + To get flake8 and tox, just pip install them into your virtualenv. -6. Commit your changes and push your branch to GitHub:: +6. Commit your changes and push your branch to GitHub: - $ git add . - $ git commit -m "Your detailed description of your changes." - $ git push origin name-of-your-bugfix-or-feature +``` +git add . +git commit -m "Your detailed description of your changes." +git push origin name-of-your-bugfix-or-feature +``` 7. Submit a pull request through the GitHub website. -Pull Request Guidelines ------------------------ +## Pull Request Guidelines Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring, and add the - feature to the list in README.rst. 
- -Tips ----- + feature to the list in README.md. -To run a subset of tests:: +## Tips -$ pytest tests.test_taufactor +To run a subset of tests: +pytest tests.test_taufactor -Deploying ---------- +## Deploying A reminder for the maintainers on how to deploy. Make sure all your changes are committed (including an entry in HISTORY.rst). -Then run:: +Then run: -$ bump2version patch # possible: major / minor / patch -$ git push -$ git push --tags +``` +bump2version patch # possible: major / minor / patch +git push +git push --tags +``` Travis will then deploy to PyPI if tests pass. diff --git a/HISTORY.md b/HISTORY.md new file mode 100644 index 0000000..9915c28 --- /dev/null +++ b/HISTORY.md @@ -0,0 +1,44 @@ +# History + +## 1.0.0 (2023-03-23) + +--- + +- Migrated to PyTorch from CuPy +- New convergence criteria +- New documentation style +- CI testing +- Includes TauFactor paper + +## 0.1.4 (2022-07-11) + +--- + +- Add TauE solver +- Add triple phase boundary calculations +- Fix cuboids not converging +- Fix convergence messaging + +## 0.1.3 (2021-03-25) + +--- + +- Hotfix code in taufactor.py + +## 0.1.2 (2021-03-25) + +--- + +- Added multi-phase and periodic solvers and metrics calculations + +## 0.1.1 (2021-02-10) + +--- + +- Removed CuPy from requirements and added installation instructions to README + +## 0.1.0 (2021-02-08) + +--- + +- First release on PyPI. 
diff --git a/HISTORY.rst b/HISTORY.rst deleted file mode 100644 index 989ea59..0000000 --- a/HISTORY.rst +++ /dev/null @@ -1,35 +0,0 @@ -======= -History -======= - -0.1.4 (2022-07-11) ------------------- - -* Add TauE solver -* Add triple phase boundary calculations -* Fix cuboids not converging -* Fix convergence messaging - - -0.1.3 (2021-03-25) ------------------- - -* Hotfix code in taufactor.py - - -0.1.2 (2021-03-25) ------------------- - -* Added multi-phase and periodic solvers and metrics calculations - - -0.1.1 (2021-02-10) ------------------- - -* Removed CuPy from requirements and added installation instructions to README - - -0.1.0 (2021-02-08) ------------------- - -* First release on PyPI. diff --git a/LICENSE b/LICENSE index 24b4fe7..15d9b82 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2021, Isaac Squires +Copyright (c) 2023, Isaac Squires Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -19,4 +19,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/MANIFEST.in b/MANIFEST.in index 965b2dd..54e006c 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,8 +1,8 @@ -include AUTHORS.rst -include CONTRIBUTING.rst -include HISTORY.rst +include AUTHORS.md +include CONTRIBUTING.md +include HISTORY.md include LICENSE -include README.rst +include README.md recursive-include tests * recursive-exclude * __pycache__ diff --git a/README.md b/README.md index 331213b..bfa720a 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,12 @@ # TauFactor -TauFactor is an application for calculating tortuosity factors from tomographic data. 
TauFactor uses [CuPy](https://cupy.dev/) which is an implementation of NumPy-compatible multi-dimensional array on CUDA. +TauFactor is an application for calculating tortuosity factors from tomographic data. - Free software: MIT license -- Documentation: https://taufactor.readthedocs.io. +- Documentation: [https://taufactor.readthedocs.io](https://taufactor.readthedocs.io).

-TauFactor +TauFactor

@@ -18,40 +18,51 @@ TauFactor is an application for calculating tortuosity factors from tomographic MIT LICENSE +github actions +

-# Requirements +## Requirements + +Before installing taufactor, [download the most recent version of PyTorch](https://pytorch.org/get-started/locally/). Ensure you have `pytorch>=1.10` installed in your Python environment. + +For example, for a Linux machine with CUDA GPU + +``` +conda install pytorch pytorch-cuda=11.7 -c pytorch -c nvidia +``` + +## Quickstart -**You will need an NVIDIA GPU to use this distribution of taufactor.**
-Before installing taufactor, download the most recent version of CuPy: -https://docs.cupy.dev/en/stable/install.html +To install TauFactor via PyPI -# Quickstart +``` +pip install taufactor +``` -A basic example for taufactor: +To extract effective diffusivity and tortuosity factor from your data: ```python import taufactor as tau +import tifffile # load image img = tifffile.imread('path/filename') # ensure 1s for conductive phase and 0s otherwise. -# here we perform an example segmentation on a grayscale img -img[img > 0.7] = 1 -img[img != 1] = 0 -# create a solver object + +# create a solver object with loaded image s = tau.Solver(img) + # call solve function s.solve() -# view effective diffusivity -D_eff = s.D_eff -# plot steady state maps -s.flux_map() -s.conc_map() + +# view effective diffusivity and tau +print(s.D_eff, s.tau) ``` -# Tests +## Tests To run unit tests navigate to the root directory and run @@ -59,11 +70,10 @@ To run unit tests navigate to the root directory and run pytest ``` -# Credits +## Credits -This package was created by the tldr group at the Dyson School of Design Engineering, Imperial College London. -This package was created with Cookiecutter* and the `audreyr/cookiecutter-pypackage`* project template. +This package was created by the [tldr group](https://tldr-group.github.io/) at the Dyson School of Design Engineering, Imperial College London. -[Cookiecutter](https://github.com/audreyr/cookiecutter) +## TauFactor MATLAB -[`audreyr/cookiecutter-pypackage`](https://github.com/audreyr/cookiecutter-pypackage) +The package in this repository refers to a Python implementation of the TauFactor solver. There is a deprecated [MATLAB implementation](https://www.mathworks.com/matlabcentral/fileexchange/57956-taufactor), which is no longer maintained. diff --git a/README.rst b/README.rst deleted file mode 100644 index d087cb8..0000000 --- a/README.rst +++ /dev/null @@ -1,42 +0,0 @@ -========= -TauFactor -========= - - -.. 
image:: https://img.shields.io/pypi/v/taufactor.svg - :target: https://pypi.python.org/pypi/taufactor - -.. image:: https://readthedocs.org/projects/taufactor/badge/?version=latest - :target: https://taufactor.readthedocs.io/en/latest/?badge=latest - :alt: Documentation Status - - -.. image:: https://pyup.io/repos/github/tldr-group/taufactor/shield.svg - :target: https://pyup.io/repos/github/tldr-group/taufactor/ - :alt: Updates - - -.. image:: https://img.shields.io/badge/License-MIT-yellow.svg - :target: https://opensource.org/licenses/MIT - -.. image:: tau_example.png - :width: 400 - :alt: TauFactor - -TauFactor is an application for calculating tortuosity factors from tomographic data. TauFactor uses CuPy_ which is an implementation of NumPy-compatible multi-dimensional array on CUDA. **You will need an NVIDIA CUDA GPU to run TauFactor.** - -.. _CuPy: https://github.com/cupy/cupy - - -* Free software: MIT license -* Documentation: https://taufactor.readthedocs.io. - - - -Credits -------- - -This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template. - -.. _Cookiecutter: https://github.com/audreyr/cookiecutter -.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage diff --git a/TF-500x500x500-1x2x6-v3.tiff.zip b/TF-500x500x500-1x2x6-v3.tiff.zip new file mode 100644 index 0000000..4a3f564 Binary files /dev/null and b/TF-500x500x500-1x2x6-v3.tiff.zip differ diff --git a/docs/authors.rst b/docs/authors.rst index e122f91..72d71a8 100644 --- a/docs/authors.rst +++ b/docs/authors.rst @@ -1 +1,2 @@ -.. include:: ../AUTHORS.rst +.. include:: ../AUTHORS.md + :parser: myst_parser.sphinx_ \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index 0f5ae7d..684e554 100755 --- a/docs/conf.py +++ b/docs/conf.py @@ -31,7 +31,7 @@ # Add any Sphinx extension module names here, as strings. 
They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'm2r2'] +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'myst_parser'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -47,7 +47,7 @@ # General information about the project. project = 'TauFactor' -copyright = "2021, Isaac Squires" +copyright = "2023, Isaac Squires" author = "Isaac Squires" # The version info for the project you're documenting, acts as replacement @@ -83,7 +83,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'alabaster' +html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a # theme further. For a list of options available for each theme, see the @@ -154,7 +154,7 @@ 'TauFactor Documentation', author, 'taufactor', - 'One line description of project.', + 'Parallelised Laplace solver.', 'Miscellaneous'), ] diff --git a/docs/contributing.rst b/docs/contributing.rst index e582053..d1b6e78 100644 --- a/docs/contributing.rst +++ b/docs/contributing.rst @@ -1 +1,2 @@ -.. include:: ../CONTRIBUTING.rst +.. include:: ../CONTRIBUTING.md + :parser: myst_parser.sphinx_ \ No newline at end of file diff --git a/docs/history.rst b/docs/history.rst index 2506499..12a1388 100644 --- a/docs/history.rst +++ b/docs/history.rst @@ -1 +1,2 @@ -.. include:: ../HISTORY.rst +.. include:: ../HISTORY.md + :parser: myst_parser.sphinx_ \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index ece3499..0ebb58c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -2,7 +2,7 @@ Welcome to TauFactor's documentation! ====================================== .. 
toctree:: - :maxdepth: 4 + :maxdepth: 2 :caption: Contents: readme diff --git a/docs/installation.md b/docs/installation.md new file mode 100644 index 0000000..eb298fb --- /dev/null +++ b/docs/installation.md @@ -0,0 +1,49 @@ +# Installation + +## Requirements + +Before installing taufactor, [download the most recent version of PyTorch](https://pytorch.org/get-started/locally/). Your exact PyTorch configuration will depend on your operating system and GPU availability. Ensure your PyTorch version is `pytorch>=1.10`. + +For example, for a Linux machine with CUDA GPU + +``` +conda install pytorch pytorch-cuda=11.7 -c pytorch -c nvidia +``` + +## Stable release + +To install TauFactor via PyPI + +``` +pip install taufactor +``` + +This is the preferred method to install TauFactor, as it will always install the most recent stable release. + +If you don't have [pip](https://pip.pypa.io) installed, this [Python installation guide](http://docs.python-guide.org/en/latest/starting/installation/) can guide you through the process. + +## Development + +These instructions are for running TauFactor in a development environment (i.e. not using the PyPI package) + +To access the most recent development branch, run: + + git clone https://github.com/tldr-group/taufactor.git + cd taufactor + git checkout development + +If running locally, you must adjust `environment.yml` with appropriate CUDA version. Then follow these steps for setup. + +``` +conda env create -f environment.yml +conda activate taufactor +pip install -e . +``` + +Taufactor can be installed from + +[PyPI](https://pypi.org/project/taufactor/) + +[Github](https://github.com/tldr-group/taufactor) + +[tarball](https://github.com/tldr-group/taufactor/tarball/master) diff --git a/docs/installation.rst b/docs/installation.rst deleted file mode 100644 index 46f4b02..0000000 --- a/docs/installation.rst +++ /dev/null @@ -1,38 +0,0 @@ -.. 
highlight:: shell - -============ -Installation -============ - - -Stable release --------------- - -To install TauFactor, run this command in your terminal: - -.. code-block:: console - - $ pip install taufactor - -This is the preferred method to install TauFactor, as it will always install the most recent stable release. - -If you don't have `pip`_ installed, this `Python installation guide`_ can guide -you through the process. - -.. _pip: https://pip.pypa.io -.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ - - -Development -########### - -These instructions are for running TauFactor in a development environment (i.e. not using the PyPI package) - -To access the most recent development branch, run:: - - git clone --recursive https://github.com/tldr-group/taufactor.git - cd taufactor - pip install . - -.. _Github repo: https://github.com/tldr-group/taufactor -.. _tarball: https://github.com/tldr-group/taufactor/tarball/master diff --git a/docs/readme.rst b/docs/readme.rst index 3d0939f..c17c940 100644 --- a/docs/readme.rst +++ b/docs/readme.rst @@ -1,4 +1,2 @@ -Readme --------------- - -.. mdinclude:: ../README.md \ No newline at end of file +.. 
include:: ../README.md + :parser: myst_parser.sphinx_ \ No newline at end of file diff --git a/docs/requirements.txt b/docs/requirements.txt index 7a894b0..84652fb 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,6 @@ numpy -cupy +torch scipy matplotlib -m2r2 \ No newline at end of file +tifffile +myst_parser \ No newline at end of file diff --git a/docs/Usage.md b/docs/usage.md similarity index 54% rename from docs/Usage.md rename to docs/usage.md index a76fde4..42b8c5b 100644 --- a/docs/Usage.md +++ b/docs/usage.md @@ -1,4 +1,6 @@ -This documentation covers more advanced usage of TauFactor +# Using Taufactor + +This documentation covers the full usage of TauFactor ## Core solver @@ -6,44 +8,65 @@ This documentation covers more advanced usage of TauFactor The main output of the solve function is `tau` and `D_eff`. `tau` is the tortuosity factor, a measure of the reduction in diffusive transport caused by convolution in the geometry of the material. `D_eff` is the effective diffusivity resulting from the tortuous nature of the material. The relationship between these values is given by: -$D_{eff}=D\frac{\epsilon}{\tau}$ +{math}`D_{eff}=D\frac{\epsilon}{\tau}` For more see [Cooper _et al._](https://doi.org/10.1016/j.softx.2016.09.002) -``` +```python import taufactor as tau +import tifffile # load segmented image img = tifffile.imread('path/filename') s = tau.Solver(img) +s.solve() # tau s.tau # D_eff s.D_eff ``` -### Steady-state maps +The iteration limit, convergence criteria and verbosity of the solver can be adjusted. Setting `verbose='per_iter'` logs the output of the solver every 100 steps whilst solving. The `conv_crit` controls the value at which convergence is met. + +```python +s.solve(verbose='per_iter', conv_crit='1e-5') +``` + +### Flux direction -Steady-state maps can be visualised after solving. These maps show the steady state solution to the flux field and concentration field after convergence. 
+The direction of flow by default is the first index of the loaded image. If a different direction is required, the image must be permuted before solving. To visualise this and give guidance, the utility function `flux_direction` can be used +```python +from taufactor.utils import flux_direction +import tifffile + +img = tifffile.imread('path/filename') +flux_direction(img) ``` + +When the flux direction has been chosen, the image can be permuted using `torch.permute` as outlined in the output from `flux_direction`, for example + +```python import taufactor as tau +import tifffile # load segmented image img = tifffile.imread('path/filename') +flux_direction(img) +img = torch.permute(torch.tensor(img), (1,2,0)) s = tau.Solver(img) -# flux map -s.flux_map(lay=5, filename='example.png') -# concentration map -s.conc_map() +s.solve() ``` ## Other Solvers ### Periodic solver -``` +The periodic solver applies periodic boundary conditions instead of mirror boundaries. + +```python import taufactor as tau +import tifffile # load 2 phase periodic segmented image img = tifffile.imread('path/filename') @@ -57,8 +80,11 @@ s.solve() ### Multi-phase solver -``` +The multi-phase solver allows for more than 2 conductive phases per image. The conductivity of each phase is given as an input to the solver along with the phase label + +```python import taufactor as tau +import tifffile # load n phase segmented image img = tifffile.imread('path/filename') @@ -76,8 +102,9 @@ s.solve() This solver implements the electrode tortuosity method from [Nguyen _et al._](https://doi.org/10.1038/s41524-020-00386-4) -``` +```python import taufactor as tau +import tifffile # load n phase segmented image img = tifffile.imread('path/filename') @@ -114,15 +141,25 @@ vf = volume_fraction(img, phases={'pore':0, 'particle':1, 'binder':2}) ### Surface area -Volume fraction is calculated for each phase in a segmented image: +Surface area is calculated for each phase in a segmented image. 
The surface area returned is the fraction of interfacial faces with respect to the total number of faces in the image ```python from taufactor.metrics import surface_area -# calculate the volume fraction of a single phase in an img -vf = surface_area(img, phases=1) +# calculate the surface area of a single phase in an img +sa = surface_area(img, phases=1) # consider a three phase image with pore, particle and binder # where 0, 1, 2 correspond to pore, particle and binder respectively -# calculate the volume fraction between pore and binder with periodic boundaries in y and z axes -vf = volume_fraction(img, phases=[0,2], periodic=[0,1,1]) +# calculate the surface area between pore and binder with periodic boundaries in y and z axes +sa = surface_area(img, phases=[0,2], periodic=[0,1,1]) +``` + +### Triple phase boundary + +Triple phase boundary is calculated on a segmented image with exactly three phases. The value returned is the fraction of triple phase edges with respect to the total number of edges + +```python +from taufactor.metrics import triple_phase_boundary +# calculate the triple phase boundaries +tpb = triple_phase_boundary(img) ``` diff --git a/docs/usage.rst b/docs/usage.rst deleted file mode 100644 index 809b578..0000000 --- a/docs/usage.rst +++ /dev/null @@ -1,4 +0,0 @@ -Usage -=========== - -.. 
mdinclude:: Usage.md \ No newline at end of file diff --git a/environment.yml b/environment.yml new file mode 100644 index 0000000..5e2aa01 --- /dev/null +++ b/environment.yml @@ -0,0 +1,8 @@ +name: taufactor +channels: + - pytorch + - nvidia +dependencies: + - python=3.8 + - pytorch>=1.10 + - pytorch-cuda=11.6 diff --git a/example.pdf b/example.pdf new file mode 100644 index 0000000..ade876b Binary files /dev/null and b/example.pdf differ diff --git a/paper.bib b/paper.bib new file mode 100644 index 0000000..4006d22 --- /dev/null +++ b/paper.bib @@ -0,0 +1,201 @@ +@article{Pearson:2017, +url = {http://adsabs.harvard.edu/abs/2017arXiv170304627P}, +Archiveprefix = {arXiv}, +Author = {{Pearson}, S. and {Price-Whelan}, A.~M. and {Johnston}, K.~V.}, + Eprint = {1703.04627}, + Journal = {ArXiv e-prints}, + Keywords = {Astrophysics - Astrophysics of Galaxies}, + Month = mar, + Title = {{Gaps in Globular Cluster Streams: Pal 5 and the Galactic Bar}}, +Year = 2017 +} + +@book{Binney:2008, +url = {http://adsabs.harvard.edu/abs/2008gady.book.....B}, +Author = {{Binney}, J. 
and {Tremaine}, S.}, + Booktitle = {Galactic Dynamics: Second Edition, by James Binney and Scott Tremaine.~ISBN 978-0-691-13026-2 (HB).~Published by Princeton University Press, Princeton, NJ USA, 2008.}, + Publisher = {Princeton University Press}, + Title = {{Galactic Dynamics: Second Edition}}, +Year = 2008 +} + +@article{gaia, +author = {{Gaia Collaboration}}, +title = "{The Gaia mission}", +journal = {Astronomy and Astrophysics}, +archivePrefix = "arXiv", +eprint = {1609.04153}, +primaryClass = "astro-ph.IM", +keywords = {space vehicles: instruments, Galaxy: structure, astrometry, parallaxes, proper motions, telescopes}, +year = 2016, +month = nov, +volume = 595, +doi = {10.1051/0004-6361/201629272}, +url = {http://adsabs.harvard.edu/abs/2016A%26A...595A...1G}, +} + +@article{astropy, +author = {{Astropy Collaboration}}, +title = "{Astropy: A community Python package for astronomy}", +journal = {Astronomy and Astrophysics}, +archivePrefix = "arXiv", +eprint = {1307.6212}, +primaryClass = "astro-ph.IM", +keywords = {methods: data analysis, methods: miscellaneous, virtual observatory tools}, +year = 2013, +month = oct, +volume = 558, +doi = {10.1051/0004-6361/201322068}, +url = {http://adsabs.harvard.edu/abs/2013A%26A...558A..33A} +} + +@misc{fidgit, +author = {A. M. Smith and K. Thaney and M. 
Hahnel}, +title = {Fidgit: An ungodly union of GitHub and Figshare}, +year = {2020}, +publisher = {GitHub}, +journal = {GitHub repository}, +url = {https://github.com/arfon/fidgit} +} + +@article{gostick2019porespy, + title={PoreSpy: A python toolkit for quantitative analysis of porous media images}, + author={Gostick, Jeff T and Khan, Zohaib A and Tranter, Thomas G and Kok, Matthew DR and Agnaou, Mehrez and Sadeghi, Mohammadamin and Jervis, Rhodri}, + journal={Journal of Open Source Software}, + volume={4}, + number={37}, + pages={1296}, + year={2019} +} + +@article{ferguson2018puma, + title={PuMA: the porous microstructure analysis software}, + author={Ferguson, Joseph C and Panerai, Francesco and Borner, Arnaud and Mansour, Nagi N}, + journal={SoftwareX}, + volume={7}, + pages={81--87}, + year={2018}, + publisher={Elsevier} +} + +@article{le2021openimpala, + title={Openimpala: Open source image based parallisable linear algebra solver}, + author={Le Houx, James and Kramer, Denis}, + journal={SoftwareX}, + volume={15}, + pages={100729}, + year={2021}, + publisher={Elsevier} +} + +@article{cooper2016taufactor, + title={TauFactor: An open-source application for calculating tortuosity factors from tomographic data}, + author={Cooper, Samuel J and Bertei, Antonio and Shearing, Paul R and Kilner, JA and Brandon, Nigel P}, + journal={SoftwareX}, + volume={5}, + pages={203--210}, + year={2016}, + publisher={Elsevier} +} + +@article{hart1999measurement, + title={Measurement and classification of retinal vascular tortuosity}, + author={Hart, William E and Goldbaum, Michael and C{\^o}t{\'e}, Brad and Kube, Paul and Nelson, Mark R}, + journal={International journal of medical informatics}, + volume={53}, + number={2-3}, + pages={239--252}, + year={1999}, + publisher={Elsevier} +} + +@article{carey2016estimating, + title={Estimating tortuosity coefficients based on hydraulic conductivity}, + author={Carey, Grant R and McBean, Edward A and Feenstra, Stan}, + 
journal={Groundwater}, + volume={54}, + number={4}, + pages={476--487}, + year={2016}, + publisher={Wiley Online Library} +} + +@article{landesfeind2018tortuosity, + title={Tortuosity of battery electrodes: validation of impedance-derived values and critical comparison with 3D tomography}, + author={Landesfeind, Johannes and Ebner, Martin and Eldiven, Askin and Wood, Vanessa and Gasteiger, Hubert A}, + journal={Journal of The Electrochemical Society}, + volume={165}, + number={3}, + pages={A469--A476}, + year={2018}, + publisher={The Electrochemical Society} +} + +@article{dahari2023fusion, + title={Fusion of Complementary 2D and 3D Mesostructural Datasets Using Generative Adversarial Networks (Adv. Energy Mater. 2/2023)}, + author={Dahari, Amir and Kench, Steve and Squires, Isaac and Cooper, Samuel J}, + journal={Advanced Energy Materials}, + volume={13}, + number={2}, + pages={2370009}, + year={2023}, + publisher={Wiley Online Library} +} +@article{kench2022microlib, + title={MicroLib: A library of 3D microstructures generated from 2D micrographs using SliceGAN}, + author={Kench, Steve and Squires, Isaac and Dahari, Amir and Cooper, Samuel J}, + journal={Scientific Data}, + volume={9}, + number={1}, + pages={645}, + year={2022}, + publisher={Nature Publishing Group UK London} +} +@article{withers2021x, + title={X-ray computed tomography}, + author={Withers, Philip J and Bouman, Charles and Carmignato, Simone and Cnudde, Veerle and Grimaldi, David and Hagen, Charlotte K and Maire, Eric and Manley, Marena and Du Plessis, Anton and Stock, Stuart R}, + journal={Nature Reviews Methods Primers}, + volume={1}, + number={1}, + pages={18}, + year={2021}, + publisher={Nature Publishing Group UK London} +} +@article{nguyen2020electrode, + title={The electrode tortuosity factor: why the conventional tortuosity factor is not well suited for quantifying transport in porous Li-ion battery electrodes and what to use instead}, + author={Nguyen, Tuan-Tu and Demorti{\`e}re, Arnaud 
and Fleutot, Benoit and Delobel, Bruno and Delacourt, Charles and Cooper, Samuel J}, + journal={npj Computational Materials}, + volume={6}, + number={1}, + pages={123}, + year={2020}, + publisher={Nature Publishing Group UK London} +} + +@incollection{pytorch, + title = {PyTorch: An Imperative Style, High-Performance Deep Learning Library}, + author = {Paszke, Adam and Gross, Sam and Massa, Francisco and Lerer, Adam and Bradbury, James and Chanan, Gregory and Killeen, Trevor and Lin, Zeming and Gimelshein, Natalia and Antiga, Luca and Desmaison, Alban and Kopf, Andreas and Yang, Edward and DeVito, Zachary and Raison, Martin and Tejani, Alykhan and Chilamkurthy, Sasank and Steiner, Benoit and Fang, Lu and Bai, Junjie and Chintala, Soumith}, + booktitle = {Advances in Neural Information Processing Systems 32}, + pages = {8024--8035}, + year = {2019}, + publisher = {Curran Associates, Inc.}, + url = {http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf} +} +@article{cooper2017simulated, + title={Simulated impedance of diffusion in porous media}, + author={Cooper, Samuel J and Bertei, Antonio and Finegan, Donal P and Brandon, Nigel P}, + journal={Electrochimica Acta}, + volume={251}, + pages={681--689}, + year={2017}, + publisher={Elsevier} +} +@article{tjaden2016origin, + title={On the origin and application of the Bruggeman correlation for analysing transport phenomena in electrochemical systems}, + author={Tjaden, Bernhard and Cooper, Samuel J and Brett, Daniel JL and Kramer, Denis and Shearing, Paul R}, + journal={Current opinion in chemical engineering}, + volume={12}, + pages={44--51}, + year={2016}, + publisher={Elsevier} +} \ No newline at end of file diff --git a/paper.md b/paper.md new file mode 100644 index 0000000..7832487 --- /dev/null +++ b/paper.md @@ -0,0 +1,66 @@ +--- +title: "TauFactor 2: A GPU accelerated python tool for microstructural analysis" +tags: + - Python + - Material Science + - 
Tortuosity + - Modelling + - Effective diffusivity + - Bruggeman coefficient +authors: + - name: Steve Kench + orcid: 0000-0002-7263-6724 + equal-contrib: true + affiliation: 1 # (Multiple affiliations must be quoted) + - name: Isaac Squires + equal-contrib: true # (This is how you can denote equal contributions between multiple authors) + orcid: 0000-0003-1919-061X + affiliation: 1 + - name: Samuel Cooper + corresponding: true # (This is how to denote the corresponding author) + affiliation: 1 +affiliations: + - name: Imperial College London, UK + index: 1 +date: 9 March 2023 +bibliography: paper.bib +--- +# Summary + +TauFactor 2 is an open-source, GPU accelerated microstructural analysis tool for extracting metrics from voxel based data, including transport properties such as the tortuosity factor. Tortuosity factor, $\tau$, is a material parameter that defines the reduction in transport arising from the arrangement of the phases in a multiphase medium (see \autoref{example}). As shown in \autoref{eq:tort}, the effective transport coefficient of a material, $D_{\text{eff}}$, can be calculated from the phase's intrinsic transport coefficient, $D$, volume fraction, $\epsilon$, and $\tau$ [@cooper2016taufactor] (note, this value of $\tau$ should not be squared [@tjaden2016origin]). + +\begin{equation}\label{eq:tort} +D_{\text{eff}} = D\dfrac{\epsilon}{\tau} +\end{equation} + +Tortuosity factor has been a metric of interest in a broad range of fields for many decades. In geophysics, $\tau$ influences groundwater flow through porous rocks, which has significant environmental contamination impacts [@carey2016estimating]. Electrochemists use $\tau$ to solve a reduced-order system of equations describing the electrochemical behaviour of lithium-ion batteries, which influences a cell's power rating [@landesfeind2018tortuosity]. The imaging and subsequent modeling of materials to determine $\tau$ is thus commonplace. 
+ + + +![Microstructure and flux field of a sample from the \href{https://microlib.io/}{microlib.io} library [@kench2022microlib].\label{example}](example.pdf) + +# Statement of need + +Materials characterisation techniques are constantly improving, allowing the collection of larger field-of-view images with higher resolutions [@withers2021x]. Alongside these developments, machine learning algorithms have enabled the generation of arbitrarily large volumes, and can further enhance image quality through super-resolution techniques [@dahari2023fusion]. The resulting high fidelity microstructural datasets can be used to extract statistically representative metrics of a material's composition and performance. However, with increasing dataset size, the computational cost to perform such analysis can lead to prohibitively long run times. This is especially problematic for transport type metrics, such as the tortuosity factor, as they are inherently 3D and require the use of iterative solvers. + +TauFactor 1 [@cooper2016taufactor] provided an open-source MATLAB application for calculating various microstructural metrics, including the tortuosity factor. However, its implementation as a serial CPU based solver meant that large microstructural datasets could take hours to converge. This made TauFactor 1 unsuitable for use in high-throughput tasks such as materials optimisation. TauFactor 2 provides the necessary efficiency to ensure users can analyse large datasets in reasonable times. The software is built with PyTorch [@pytorch], a commonly used and highly optimised python package for machine learning. The GPU acceleration that has enabled the drastic speed up of neural network training proves equally effective for the task of iteratively solving transport equations, where matrix multiplication and addition are the main operations required. 
The use of Python and PyTorch ensures broad support and easy installation, as well as the option to run the software on CPU if GPU hardware is not available. The ability to run simulations with just a few lines of code ensures accessibility for researchers from the diverse fields where this software may be of use. + +The Python implementation is similar to the original TauFactor 1, taking advantage of efficiency gains such as the precalculation of prefactors and the use of over-relaxation. The same convergence criteria are also used, where the vertical flux between each layer is averaged across the planes parallel to the stimulated boundaries. If the percentage error between the minimum and maximum flux is below a given value (default 1\%), this indicates convergence has been reached. Once this is satisfied, an extra 100 iterations are performed to confirm the stability of the system. A notable difference in TauFactor 2 is that flux is calculated for all voxels. This replaces an indexing system in TauFactor 1, which solved only in active voxels. We find that the speed of GPU indexing compared to matrix multiplication makes this trade-off worthwhile. As well as the standard solver for a single transport phase, a multi-phase solver is available, where the tortuosity relates $D_{\text{eff}}$ to the intrinsic diffusion coefficients and volume fractions of the various phases, $p$, as follows: + +\begin{equation}\label{eq:mptort} +D_{\text{eff}} = \dfrac{\sum_p{D_p \epsilon_p}}{\tau} = \dfrac{D_{\text{mean}}}{\tau} +\end{equation} + +$D_{\text{mean}}$ is a weighted sum of the active phase transport coefficients according to their volume fractions, which gives a transport coefficient equivalent to prismatic blocks of each phase spanning a test volume, in the direction of transport (i.e. perfectly straight transport paths). Periodic boundary conditions can also be used, which replace no-flux boundary conditions at the outer edges of the control volume. 
Finally, TauFactor 2 also includes an electrode tortuosity factor solver (see [@nguyen2020electrode]). There are also GPU accelerated functions for calculating volume fractions, surface areas, triple phase boundaries and the two-point correlation function. + +To compare the performance of TauFactor 2 to other available software, a test volume (500x500x500 = 125,000,000 voxels) was created. One of the phases in this two-phase volume fully percolates in all three directions, while the other phase does not percolate at all. The percolating phase has a volume fraction of exactly 30%. The percolating network is anisotropic in the three directions, leading to different transport metrics. Lastly, the structure is periodic at its boundaries, allowing for the exploration of the impact of periodic transport boundaries. This microstructure is available in the GitHub repository, providing a standard against which new software can also be measured. The speed of five different solvers, namely TauFactor 1 [@cooper2016taufactor], TauFactor 1.9 (an updated version of TauFactor 1, available \href{https://uk.mathworks.com/matlabcentral/fileexchange/57956-taufactor}{here}, that has new solvers such as diffusion impedance and can be called inline as well as from the GUI [@cooper2017simulated]), TauFactor 2 (CPU), TauFactor 2 (GPU), PoreSpy [@gostick2019porespy] and PuMa [@ferguson2018puma], are shown in \autoref{speeds}. To check the accuracy of the calculated tortuosity factors, we overconverge TauFactor 2 to give a ‘true value’ in each direction. Using default convergence criteria, all five solvers are within 0.5% of the true values other than PuMa’s explicit jump solver (5% error), which is thus excluded (note it was still >2x slower than TF2). For this analysis we used an NVIDIA A6000 48GB GPU and AMD Ryzen Threadripper 3970X Gen3 32 Core TRX4 CPU. 
TauFactor 2 is over 10 times faster than the next best solver, TauFactor 1.9, and over 100 times faster than the original TauFactor 1 solver. + + +![Speed comparison for the four solvers when applied to the test volume. The mean time across all 3 directions is plotted. The values of the overconverged $\tau$ in each direction are: 1.1513, 1.3905, 4.2431. \label{speeds}](tauspeeds.pdf) + +# Acknowledgements + +This work was supported by funding from the EPSRC Faraday Institution Multi-Scale Modelling project +(https://faraday.ac.uk/; EP/S003053/1, grant number FIRG003 received by SK). We acknowledge contributions from Amir Dahari in the testing of the solver and python package installation. + +# References diff --git a/requirements_dev.txt b/requirements_dev.txt index 5edfbe5..39c031f 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -1,13 +1,16 @@ -pip==21.0.1 +pip==23.0.1 bump2version==1.0.1 wheel==0.33.6 watchdog==0.9.0 flake8==3.7.8 tox==3.21.4 coverage==5.4 -Sphinx==3.4.3 +Sphinx==6.1.3 twine==3.3.0 Click==7.0 pytest==4.6.5 -matplotlib==4.3.3 +matplotlib>3.6 pytest-runner==5.2 +numpy==1.24.2 +tifffile==2023.2.3 +myst-parser==0.18.1 \ No newline at end of file diff --git a/setup.cfg b/setup.cfg index d547948..349b7aa 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.1.4 +current_version = 1.0.0 commit = True tag = True @@ -19,4 +19,3 @@ exclude = docs [aliases] test = pytest - diff --git a/setup.py b/setup.py index 580443e..4ef683b 100644 --- a/setup.py +++ b/setup.py @@ -4,13 +4,13 @@ from setuptools import setup, find_packages -with open('README.rst') as readme_file: +with open('README.md') as readme_file: readme = readme_file.read() -with open('HISTORY.rst') as history_file: +with open('HISTORY.md') as history_file: history = history_file.read() -requirements = ['Click>=7.0', 'numpy>=1.0', 'matplotlib>=3.4'] +requirements = ['Click>=7.0', 'numpy>=1.0', 'matplotlib>=3.4', 'tifffile>=2023.2.3'] 
setup_requirements = ['pytest-runner', ] @@ -21,10 +21,16 @@ author_email='is21@ic.ac.uk', python_requires='>=3.5', classifiers=[ - 'Development Status :: 2 - Pre-Alpha', - 'Intended Audience :: Developers', + 'Development Status :: 4 - Beta', + 'Intended Audience :: Science/Research', + 'Intended Audience :: Manufacturing', + 'Topic :: Scientific/Engineering :: Physics', + 'Topic :: Scientific/Engineering :: Chemistry', + 'Topic :: Scientific/Engineering :: Image Processing', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', + 'Environment :: GPU', + 'Environment :: GPU :: NVIDIA CUDA', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', @@ -35,6 +41,7 @@ install_requires=requirements, license="MIT license", long_description=readme + '\n\n' + history, + long_description_content_type='text/markdown', include_package_data=True, keywords='taufactor', name='taufactor', @@ -43,6 +50,6 @@ test_suite='tests', tests_require=test_requirements, url='https://github.com/tldr-group/taufactor', - version='0.1.4', + version='1.0.0', zip_safe=False, ) diff --git a/tau_speeds.png b/tau_speeds.png new file mode 100644 index 0000000..6699ff3 Binary files /dev/null and b/tau_speeds.png differ diff --git a/taufactor/__init__.py b/taufactor/__init__.py index f5899b2..8021fac 100644 --- a/taufactor/__init__.py +++ b/taufactor/__init__.py @@ -2,6 +2,6 @@ __author__ = """Isaac Squires""" __email__ = 'is21@ic.ac.uk' -__version__ = '0.1.4' +__version__ = '1.0.0' from .taufactor import * \ No newline at end of file diff --git a/taufactor/metrics.py b/taufactor/metrics.py index 2b7c99d..ddc3102 100644 --- a/taufactor/metrics.py +++ b/taufactor/metrics.py @@ -1,5 +1,6 @@ import numpy as np -import cupy as cp +import torch +import torch.nn.functional as F def volume_fraction(img, phases={}): """ @@ -9,20 +10,20 @@ def volume_fraction(img, phases={}): :return: list of volume fractions if no 
labels, dictionary of label: volume fraction pairs if labelled """ - if type(img) is not type(cp.array(1)): - img = cp.asarray(img) + if type(img) is not type(torch.tensor(1)): + img = torch.tensor(img) if phases=={}: - phases = cp.unique(img) + phases = torch.unique(img) vf_out = [] for p in phases: - vf_out.append((img==p).mean().item()) + vf_out.append((img==p).to(torch.float).mean().item()) if len(vf_out)==1: vf_out=vf_out[0] else: vf_out={} for p in phases: - vf_out[p]=(img==phases[p]).mean().item() + vf_out[p]=(img==phases[p]).to(torch.float).mean().item() return vf_out @@ -31,29 +32,33 @@ def surface_area(img, phases, periodic=False): Calculate interfacial surface area between two phases or the total surface area of one phase :param img: :param phases: list of phases to calculate SA, if lenght 1 calculate total SA, if length 2 calculate inerfacial SA + :param periodic: list of bools indicating if the image is periodic in each dimension :return: the surface area in faces per unit volume """ shape = img.shape - int_not_in_img = np.unique(img).min() -1 + int_not_in_img = int(np.unique(img).max()+1) dim = len(shape) - img = cp.asarray(img) + img = torch.tensor(img) # finding an int that is not in the img for padding: - if periodic: - pad = [(int(not x),int(not x)) for x in periodic] - img = cp.pad(img, pad, 'constant', constant_values=int_not_in_img) + periodic.reverse() + pad = () + for x in periodic: + pad += tuple((int(not x),)*2) + img = F.pad(img, pad, 'constant', value=int_not_in_img) + periodic.reverse() else: - img = cp.pad(img, 1, 'constant', constant_values=int_not_in_img) + img = F.pad(img, (1,)*dim*2, 'constant', value=int_not_in_img) periodic=[0]*dim - SA_map = cp.zeros_like(img) + SA_map = torch.zeros_like(img) if not isinstance(phases, list): phases = [phases] for i in range(dim): for j in [1, -1]: - i_rolled = cp.roll(img, j, i) + i_rolled = torch.roll(img, j, i) if len(phases)==2: SA_map[(i_rolled == phases[0]) & (img == phases[1])] += 1 else: 
@@ -70,41 +75,41 @@ def surface_area(img, phases, periodic=False): z = shape[2] if not periodic[2]: SA_map = SA_map[:, :, 1:-1] - sf = cp.sum(cp.array([x,y,z])[periodic_mask]*cp.roll(cp.array([x,y,z])[periodic_mask],1)) + sf = torch.sum(torch.tensor([x,y,z])[periodic_mask]*torch.roll(torch.tensor([x,y,z])[periodic_mask],1)) total_faces = 3*(x*y*z)-sf elif dim == 2: - sf = cp.sum(cp.array([x,y])[periodic_mask]) + sf = torch.sum(torch.tensor([x,y])[periodic_mask]) total_faces = 2*(x+1)*(y+1)-(x+1)-(y+1)-2*sf else: total_faces=SA_map.size - sa = cp.sum(SA_map)/total_faces + sa = torch.sum(SA_map)/total_faces return sa def triple_phase_boundary(img): - phases = cp.unique(cp.asarray(img)) + phases = torch.unique(torch.tensor(img)) if len(phases)!=3: - return None + raise ValueError('Image must have exactly 3 phases') shape = img.shape dim = len(shape) ph_maps = [] - img = cp.pad(cp.asarray(img), 1, 'constant', constant_values=-1) + img = F.pad(torch.tensor(img), (1,)*dim*2, 'constant', value=-1) if dim==2: x, y = shape total_edges = (x-1)*(y-1) for ph in phases: - ph_map = cp.zeros_like(img) - ph_map_temp = cp.zeros_like(img) + ph_map = torch.zeros_like(img) + ph_map_temp = torch.zeros_like(img) ph_map_temp[img==ph] = 1 for i in [0, 1]: for j in [0, 1]: - ph_map += cp.roll(cp.roll(ph_map_temp, i, 0), j, 1) + ph_map += torch.roll(torch.roll(ph_map_temp, i, 0), j, 1) ph_maps.append(ph_map) - tpb_map = cp.ones_like(img) + tpb_map = torch.ones_like(img) for ph_map in ph_maps: tpb_map *= ph_map tpb_map[tpb_map>1] = 1 tpb_map = tpb_map[1:-1, 1:-1] - tpb = np.sum(tpb_map) + tpb = torch.sum(tpb_map) else: tpb = 0 x, y, z = shape @@ -113,20 +118,20 @@ def triple_phase_boundary(img): for d in range(dim): ph_maps = [] for ph in phases: - ph_map = cp.zeros_like(img) - ph_map_temp = cp.zeros_like(img) + ph_map = torch.zeros_like(img) + ph_map_temp = torch.zeros_like(img) ph_map_temp[img==ph] = 1 for i in [0, 1]: for j in [0, 1]: d1 =( d + 1) % 3 d2 = (d + 2) % 3 - ph_map += 
cp.roll(cp.roll(ph_map_temp, i, d1), j, d2) + ph_map += torch.roll(torch.roll(ph_map_temp, i, d1), j, d2) ph_maps.append(ph_map) - tpb_map = cp.ones_like(img) + tpb_map = torch.ones_like(img) for ph_map in ph_maps: tpb_map *= ph_map tpb_map[tpb_map>1] = 1 tpb_map = tpb_map[1:-1, 1:-1, 1:-1] - tpb += np.sum(tpb_map) + tpb += torch.sum(tpb_map) return tpb/total_edges diff --git a/taufactor/taufactor.py b/taufactor/taufactor.py index 02efed0..b1a2735 100644 --- a/taufactor/taufactor.py +++ b/taufactor/taufactor.py @@ -1,17 +1,21 @@ """Main module.""" -from math import tau import numpy as np -import cupy as cp from timeit import default_timer as timer import matplotlib.pyplot as plt -from taufactor.metrics import surface_area +try: + import torch +except ImportError: + raise ImportError("Pytorch is required to use this package. Please install pytorch and try again. More information about TauFactor's requirements can be found at https://taufactor.readthedocs.io/en/latest/") +import warnings + class Solver: """ Default solver for two phase images. Once solve method is called, tau, D_eff and D_rel are available as attributes. """ - def __init__(self, img, precision=cp.single, bc=(-0.5, 0.5), D_0=1): + + def __init__(self, img, bc=(-0.5, 0.5), D_0=1, device=torch.device('cuda')): """ Initialise parameters, conc map and other tools that can be re-used for multiple solves. @@ -20,6 +24,7 @@ def __init__(self, img, precision=cp.single, bc=(-0.5, 0.5), D_0=1): :param precision: cp.single or cp.double :param bc: Upper and lower boundary conditions. Leave as default. 
:param D_0: reference material diffusivity + :param device: pytorch device, can be cuda or cpu """ # add batch dim now for consistency self.D_0 = D_0 @@ -27,210 +32,202 @@ def __init__(self, img, precision=cp.single, bc=(-0.5, 0.5), D_0=1): if len(img.shape) == 3: img = np.expand_dims(img, 0) self.cpu_img = img - self.precision = precision - # VF calc - self.VF = np.mean(img) + self.precision = torch.float + self.device = torch.device(device) + # check device is available + if torch.device(device).type.startswith('cuda') and not torch.cuda.is_available(): + self.device = torch.device('cpu') + warnings.warn( + "CUDA not available, defaulting device to cpu. To avoid this warning, explicitly set the device when initialising the solver with device=torch.device('cpu')") # save original image in cuda - img = cp.array(img, dtype=self.precision) - self.ph_bot = cp.sum(img[:, -1]) * self.bot_bc - self.ph_top = cp.sum(img[:, 0]) * self.top_bc + img = torch.tensor(img, dtype=self.precision, device=self.device) + self.VF = torch.mean(img) + if len(torch.unique(img).shape) > 2 or torch.unique(img).max() not in [0, 1] or torch.unique(img).min() not in [0, 1]: + raise ValueError( + f'Input image must only contain 0s and 1s. Your image must be segmented to use this tool. If your image has been segmented, ensure your labels are 0 for non-conductive and 1 for conductive phase. Your image has the following labels: {torch.unique(img).numpy()}. 
If you have more than one conductive phase, use the multi-phase solver.') + + # calculate + self.ph_bot = torch.sum(img[:, -1]).to(self.device) * self.bot_bc + self.ph_top = torch.sum(img[:, 0]).to(self.device) * self.top_bc # init conc self.conc = self.init_conc(img) # create nn map self.nn = self.init_nn(img) - - #checkerboarding - self.w = 2 - cp.pi / (1.5 * img.shape[1]) + # overrelaxation factor + self.w = 2 - torch.pi / (1.5 * img.shape[1]) + # checkerboarding to ensure stable steps self.cb = self.init_cb(img) - - # solving params bs, x, y, z = self.cpu_img.shape self.L_A = x / (z * y) + # solving params self.converged = False self.semi_converged = False - self.iter=0 + self.old_fl = -1 + self.iter = 0 img = None - # Results - self.tau=None - self.D_eff=None - self.D_mean=None + self.tau = None + self.D_eff = None + self.D_mean = None def init_conc(self, img): + """Sets an initial linear field across the volume""" bs, x, y, z = img.shape sh = 1 / (x * 2) - vec = cp.linspace(self.top_bc + sh, self.bot_bc - sh, x) + vec = torch.linspace(self.top_bc + sh, self.bot_bc - + sh, x, dtype=self.precision, device=self.device) for i in range(2): - vec = cp.expand_dims(vec, -1) - vec = cp.expand_dims(vec, 0) - vec = vec.repeat(z, -1) - vec = vec.repeat(y, -2) - vec = vec.repeat(bs, 0) - vec = vec.astype(self.precision) - - return self.pad(img * vec, [self.top_bc * 2, self.bot_bc * 2]) + vec = torch.unsqueeze(vec, -1) + vec = torch.unsqueeze(vec, 0) + vec = vec.repeat(bs, 1, y, z, ) + return self.pad(img * vec, [self.top_bc * 2, self.bot_bc * 2]).to(self.device) def init_nn(self, img): + """Saves the number of conductive neighbours for flux calculation""" img2 = self.pad(self.pad(img, [2, 2])) - nn = cp.zeros_like(img2, dtype=self.precision) + nn = torch.zeros_like(img2, dtype=self.precision) # iterate through shifts in the spatial dimensions for dim in range(1, 4): for dr in [1, -1]: - nn += cp.roll(img2, dr, dim) + nn += torch.roll(img2, dr, dim) # remove the two 
paddings nn = self.crop(nn, 2) # avoid div 0 errors - nn[img == 0] = cp.inf - nn[nn == 0] = cp.inf - return nn + nn[img == 0] = torch.inf + nn[nn == 0] = torch.inf + return nn.to(self.device) def init_cb(self, img): + """Creates a chequerboard to ensure neighbouring pixels dont update, + which can cause instability""" bs, x, y, z = img.shape cb = np.zeros([x, y, z]) a, b, c = np.meshgrid(range(x), range(y), range(z), indexing='ij') cb[(a + b + c) % 2 == 0] = 1 cb *= self.w - return [cp.roll(cp.array(cb), sh, 0) for sh in [0, 1]] + return [torch.roll(torch.tensor(cb, dtype=self.precision).to(self.device), sh, 0) for sh in [0, 1]] def pad(self, img, vals=[0] * 6): + """Pads a volume with values""" while len(vals) < 6: vals.append(0) to_pad = [1]*8 - to_pad[:2] = (0, 0) - img = cp.pad(img, to_pad, 'constant') + to_pad[-2:] = (0, 0) + img = torch.nn.functional.pad(img, to_pad, 'constant') img[:, 0], img[:, -1] = vals[:2] img[:, :, 0], img[:, :, -1] = vals[2:4] img[:, :, :, 0], img[:, :, :, -1] = vals[4:] return img - def crop(self, img, c = 1): + def crop(self, img, c=1): + """removes a layer from the volume edges""" return img[:, c:-c, c:-c, c:-c] def solve(self, iter_limit=5000, verbose=True, conv_crit=2*10**-2): """ run a solve simulation - :param iter_limit: max iterations before aborting, will attempt double for the same no. iterations + :param iter_limit: max iterations before aborting, will attemtorch double for the same no. iterations if initialised as singles :param verbose: Whether to print tau. 
Can be set to 'per_iter' for more feedback :param conv_crit: convergence criteria, minimum percent difference between max and min flux through a given layer :return: tau """ - start = timer() - while not self.converged: - out = self.conc[:, 2:, 1:-1, 1:-1] + \ - self.conc[:, :-2, 1:-1, 1:-1] + \ - self.conc[:, 1:-1, 2:, 1:-1] + \ - self.conc[:, 1:-1, :-2, 1:-1] + \ - self.conc[:, 1:-1, 1:-1, 2:] + \ - self.conc[:, 1:-1, 1:-1, :-2] - out /= self.nn - if self.iter % 20 == 0: - lt = abs(cp.sum(out[:, 0]) - self.ph_top) - lb = abs(cp.sum(out[:, -1]) - self.ph_bot) - self.converged, D_rel = self.check_convergence(lt, lb, verbose, conv_crit, start, iter_limit) - out -= self.crop(self.conc, 1) - out *= self.cb[self.iter%2] - self.conc[:, 1:-1, 1:-1, 1:-1] += out - self.iter += 1 - self.D_mean = self.D_0 - self.tau=self.VF/D_rel if D_rel != 0 else cp.inf - self.D_eff=self.D_mean*D_rel - self.end_simulation(iter_limit, verbose, start) - return self.tau - def check_convergence(self, lt, lb, verbose, conv_crit, start, iter_limit): - loss = lt - lb + with torch.no_grad(): + start = timer() + while not self.converged: + # find sum of all nearest neighbours + out = self.conc[:, 2:, 1:-1, 1:-1] + \ + self.conc[:, :-2, 1:-1, 1:-1] + \ + self.conc[:, 1:-1, 2:, 1:-1] + \ + self.conc[:, 1:-1, :-2, 1:-1] + \ + self.conc[:, 1:-1, 1:-1, 2:] + \ + self.conc[:, 1:-1, 1:-1, :-2] + # divide by n conductive nearest neighbours to give flux + out /= self.nn + # check convergence using criteria + if self.iter % 100 == 0: + self.converged = self.check_convergence( + verbose, conv_crit, start, iter_limit) + # efficient way of adding flux to old conc with overrelaxation + out -= self.crop(self.conc, 1) + out *= self.cb[self.iter % 2] + self.conc[:, 1:-1, 1:-1, 1:-1] += out + self.iter += 1 + self.D_mean = self.D_0 + self.D_eff = self.D_mean*self.D_rel + self.end_simulation(iter_limit, verbose, start) + return self.tau + + def check_convergence(self, verbose, conv_crit, start, iter_limit): # print 
progress - if self.iter % 100 == 0: - if verbose == 'per_iter': - print(self.iter, abs(loss)) + self.semi_converged, self.new_fl, err = self.check_vertical_flux( + conv_crit) + self.D_rel = ((self.new_fl) * self.L_A / + abs(self.top_bc - self.bot_bc)).cpu() + self.tau = self.VF / \ + self.D_rel if self.D_rel != 0 else torch.tensor(torch.inf) + if self.semi_converged == 'zero_flux': + return True - # check for convergence if loss is good - if abs(loss) < conv_crit * 0.01: - if self.semi_converged: - cvf = self.check_vertical_flux(conv_crit) - if cvf: - if cvf == 'zero_flux': - return True, 0 - return True, ((lt + lb) * self.L_A / abs(self.top_bc - self.bot_bc)).get() - else: - self.semi_converged = True - else: - self.semi_converged = False + if verbose == 'per_iter': + print( + f'Iter: {self.iter}, conv error: {abs(err.item())}, tau: {self.tau.item()}') - # increase precision to double if currently single - if self.iter >= iter_limit: - if self.precision == cp.single: - print('increasing precision to double') - self.iter = 0 - self.conc = cp.array(self.conc, dtype=cp.double) - self.nn = cp.array(self.nn, dtype=cp.double) - self.precision = cp.double - else: - return True, ((lt + lb) * self.L_A / abs(self.top_bc - self.bot_bc)).get() + if self.semi_converged: + self.converged = self.check_rolling_mean(conv_crit=1e-3) - return False, False + if not self.converged: + self.old_fl = self.new_fl + return False + else: + return True + else: + self.old_fl = self.new_fl + return False def check_vertical_flux(self, conv_crit): - vert_flux = self.conc[:, 1:-1, 1:-1, 1:-1] - self.conc[:, :-2, 1:-1, 1:-1] + vert_flux = self.conc[:, 1:-1, 1:-1, 1:-1] - \ + self.conc[:, :-2, 1:-1, 1:-1] vert_flux[self.conc[:, :-2, 1:-1, 1:-1] == 0] = 0 vert_flux[self.conc[:, 1:-1, 1:-1, 1:-1] == 0] = 0 - fl = cp.sum(vert_flux, (0, 2, 3))[1:-1] + fl = torch.sum(vert_flux, (0, 2, 3))[1:-1] err = (fl.max() - fl.min())*2/(fl.max() + fl.min()) - if err < conv_crit or np.isnan(err).item(): - return 
True + if err < conv_crit or torch.isnan(err).item(): + return True, torch.mean(fl), err if fl.min() == 0: - return 'zero_flux' - return False + return 'zero_flux', torch.mean(fl), err + return False, torch.mean(fl), err - def conc_map(self, lay=0): - """ - Plots a concentration map perpendicular to the direction of flow - :param lay: depth to plot - :return: 3D conc map - """ - img = self.conc[0, 1:-1, 1:-1, 1:-1].get() - img[self.cpu_img[0, :, :, :] == 0] = -1 - plt.imshow(img[:, :, lay]) - plt.show() - return img - - def flux_map(self, lay=0): - """ - Plots a flux map perpendicular to the direction of flow - :param lay: depth to plot - :return: 3D flux map - """ - flux = cp.zeros_like(self.conc) - ph_map = self.pad(cp.array(self.cpu_img)) - for dim in range(1, 4): - for dr in [1, -1]: - flux += abs(cp.roll(self.conc, dr, dim) - self.conc) * cp.roll(ph_map, dr, dim) - flux = flux[0, 2:-2, 1:-1, 1:-1].get() - flux[self.cpu_img[0, 1:-1] == 0] = 0 - plt.imshow(flux[:, :, lay]) - return flux + def check_rolling_mean(self, conv_crit): + err = (self.new_fl - self.old_fl) / (self.new_fl + self.old_fl) + if err < conv_crit: + return True + else: + return False def end_simulation(self, iter_limit, verbose, start): converged = 'converged to' - if self.iter >=iter_limit -1: + if self.iter >= iter_limit - 1: print('Warning: not converged') converged = 'unconverged value of tau' - + if verbose: print(f'{converged}: {self.tau} \ after: {self.iter} iterations in: {np.around(timer() - start, 4)} \ seconds at a rate of {np.around((timer() - start)/self.iter, 4)} s/iter') + class PeriodicSolver(Solver): """ Periodic Solver (works for non-periodic structures, but has higher RAM requirements) Once solve method is called, tau, D_eff and D_rel are available as attributes. 
""" - def __init__(self, img, precision=cp.single, bc=(-0.5, 0.5), D_0=1): + + def __init__(self, img, bc=(-0.5, 0.5), D_0=1, device=torch.device('cuda:0')): """ Initialise parameters, conc map and other tools that can be re-used for multiple solves. @@ -241,27 +238,27 @@ def __init__(self, img, precision=cp.single, bc=(-0.5, 0.5), D_0=1): :param D_0: reference material diffusivity """ - super().__init__(img, precision, bc, D_0) - self.conc = self.pad(self.conc)[:, :, 2:-2, 2:-2] + super().__init__(img, bc, D_0, device) + self.conc = self.pad(self.conc)[:, :, 2:-2, 2:-2].to(self.device) def init_nn(self, img): img2 = self.pad(self.pad(img, [2, 2]))[:, :, 2:-2, 2:-2] - nn = cp.zeros_like(img2, dtype=self.precision) + nn = torch.zeros_like(img2) # iterate through shifts in the spatial dimensions for dim in range(1, 4): for dr in [1, -1]: - nn += cp.roll(img2, dr, dim) + nn += torch.roll(img2, dr, dim) # avoid div 0 errors nn = nn[:, 2:-2] - nn[img == 0] = cp.inf - nn[nn == 0] = cp.inf - return nn + nn[img == 0] = torch.inf + nn[nn == 0] = torch.inf + return nn.to(self.device) def solve(self, iter_limit=5000, verbose=True, conv_crit=2*10**-2, D_0=1): """ run a solve simulation - :param iter_limit: max iterations before aborting, will attempt double for the same no. iterations + :param iter_limit: max iterations before aborting, will attemtorch double for the same no. iterations if initialised as singles :param verbose: Whether to print tau. 
Can be set to 'per_iter' for more feedback :param conv_crit: convergence criteria, minimum percent difference between @@ -270,71 +267,45 @@ def solve(self, iter_limit=5000, verbose=True, conv_crit=2*10**-2, D_0=1): """ start = timer() while not self.converged: - out = cp.zeros_like(self.conc) + out = torch.zeros_like(self.conc) for dim in range(1, 4): for dr in [1, -1]: - out += cp.roll(self.conc, dr, dim) + out += torch.roll(self.conc, dr, dim) out = out[:, 2:-2] out /= self.nn if self.iter % 50 == 0: - lt = abs(cp.sum(out[:, 0]) - self.ph_top) - lb = abs(cp.sum(out[:, -1]) - self.ph_bot) - self.converged, D_rel = self.check_convergence(lt, lb, verbose, conv_crit, start, iter_limit) + self.converged = self.check_convergence( + verbose, conv_crit, start, iter_limit) out -= self.conc[:, 2:-2] out *= self.cb[self.iter % 2] self.conc[:, 2:-2] += out self.iter += 1 - self.D_mean=D_0 - self.tau = self.VF/D_rel if D_rel !=0 else cp.inf - self.D_eff=D_0*D_rel + self.D_mean = D_0 + self.D_eff = D_0*self.D_rel self.end_simulation(iter_limit, verbose, start) return self.tau def check_vertical_flux(self, conv_crit): - vert_flux = abs(self.conc - cp.roll(self.conc, 1, 1)) + vert_flux = abs(self.conc - torch.roll(self.conc, 1, 1)) vert_flux[self.conc == 0] = 0 - vert_flux[cp.roll(self.conc, 1, 1) == 0] = 0 - fl = cp.sum(vert_flux, (0, 2, 3))[3:-2] + vert_flux[torch.roll(self.conc, 1, 1) == 0] = 0 + fl = torch.sum(vert_flux, (0, 2, 3))[3:-2] err = (fl.max() - fl.min())*2/(fl.max() + fl.min()) - if err < conv_crit or np.isnan(err): - return True + if err < conv_crit or torch.isnan(err).item(): + return True, torch.mean(fl), err if fl.min() == 0: - return 'zero_flux' + return 'zero_flux', torch.mean(fl), err + return False, torch.mean(fl), err - def flux_map(self, lay=0): - """ - Plots a flux map perpendicular to the direction of flow - :param lay: depth to plot - :return: 3D flux map - """ - flux = cp.zeros_like(self.conc) - ph_map = self.pad(self.pad(cp.array(self.cpu_img)))[:, 
:, 2:-2, 2:-2] - for dim in range(1, 4): - for dr in [1, -1]: - flux += abs(cp.roll(self.conc, dr, dim) - self.conc) * cp.roll(ph_map, dr, dim) - flux = flux[0, 2:-2].get() - flux[self.cpu_img[0] == 0] = 0 - plt.imshow(flux[:, :, lay]) - return flux - - def conc_map(self, lay=0): - """ - Plots a concentration map perpendicular to the direction of flow - :param lay: depth to plot - :return: 3D conc map - """ - img = self.conc[0, 2:-2].get() - img[self.cpu_img[0] == 0] = -1 - plt.imshow(img[:, :, lay]) - plt.show() class MultiPhaseSolver(Solver): """ Multi=phase solver for two phase images. Once solve method is called, tau, D_eff and D_rel are available as attributes. """ - def __init__(self, img, cond={1:1}, precision=cp.single, bc=(-0.5, 0.5)): + + def __init__(self, img, cond={1: 1}, bc=(-0.5, 0.5), device=torch.device('cuda:0')): """ Initialise parameters, conc map and other tools that can be re-used for multiple solves. @@ -344,59 +315,104 @@ def __init__(self, img, cond={1:1}, precision=cp.single, bc=(-0.5, 0.5)): for a 2 phase material, {1:0.543, 2: 0.420}, with 1s and 2s in the input img :param precision: cp.single or cp.double :param bc: Upper and lower boundary conditions. Leave as default. 
- :param D_0: reference material diffusivity """ - if 0 in cond.values(): - raise ValueError('0 conductivity phase: non-conductive phase should be labelled 0 in the input image and ommitted from the cond argument') + if (0 in cond.values()): + raise ValueError( + '0 conductivity phase: non-conductive phase should be labelled 0 in the input image and ommitted from the cond argument') + if (0 in cond.keys()): + raise ValueError( + '0 cannot be used as a conductive phase label, please use a positive integer and leave 0 for non-conductive phase') + self.cond = {ph: 0.5 / c for ph, c in cond.items()} - super().__init__(img, precision, bc) + self.top_bc, self.bot_bc = bc + if len(img.shape) == 3: + img = np.expand_dims(img, 0) + self.cpu_img = img + self.precision = torch.float + + self.device = torch.device(device) + # check device is available + if torch.device(device).type.startswith('cuda') and not torch.cuda.is_available(): + self.device = torch.device('cpu') + warnings.warn( + "CUDA not available, defaulting device to cpu. 
To avoid this warning, explicitly set the device when initialising the solver with device=torch.device('cpu')") + # save original image in cuda + img = torch.tensor(img, dtype=self.precision, device=self.device) + + # calculate + self.ph_bot = torch.sum(img[:, -1]).to(self.device) * self.bot_bc + self.ph_top = torch.sum(img[:, 0]).to(self.device) * self.top_bc + # init conc + self.conc = self.init_conc(img) + # create nn map + self.nn = self.init_nn(img) + # overrelaxation factor + self.w = 2 - torch.pi / (1.5 * img.shape[1]) + # checkerboarding to ensure stable steps + self.cb = self.init_cb(img) + bs, x, y, z = self.cpu_img.shape + self.L_A = x / (z * y) + # solving params + self.converged = False + self.iter = 0 + # Results + self.tau = None + self.D_eff = None self.pre_factors = self.nn[1:] self.nn = self.nn[0] - self.VF = {p:np.mean(img==p) for p in np.unique(img)} + self.semi_converged = False + self.old_fl = -1 + self.VF = {p: np.mean(img.cpu().numpy() == p) + for p in np.unique(img.cpu().numpy())} + + if len(np.array([self.VF[z] for z in self.VF.keys() if z != 0])) > 0: + self.D_mean = np.sum( + np.array([self.VF[z]*(1/(2*self.cond[z])) for z in self.VF.keys() if z != 0])) + else: + self.D_mean = 0 def init_nn(self, img): - #conductivity map - img2 = cp.zeros_like(img) + # conductivity map + img2 = torch.zeros_like(img) for ph in self.cond: c = self.cond[ph] img2[img == ph] = c img2 = self.pad(self.pad(img2)) img2[:, 1] = img2[:, 2] img2[:, -2] = img2[:, -3] - nn = cp.zeros_like(img2, dtype=self.precision) + nn = torch.zeros_like(img2, dtype=self.precision) # iterate through shifts in the spatial dimensions nn_list = [] for dim in range(1, 4): for dr in [1, -1]: - shift = cp.roll(img2, dr, dim) + shift = torch.roll(img2, dr, dim) sum = img2 + shift - sum[shift==0] = 0 - sum[img2==0] = 0 + sum[shift == 0] = 0 + sum[img2 == 0] = 0 sum = 1/sum - sum[sum == cp.inf] = 0 + sum[sum == torch.inf] = 0 nn += sum - nn_list.append(self.crop(sum, 1)) + 
nn_list.append(self.crop(sum, 1).to(self.device)) # remove the two paddings nn = self.crop(nn, 2) # avoid div 0 errors - nn[img == 0] = cp.inf - nn[nn == 0] = cp.inf - nn_list.insert(0, nn) + nn[img == 0] = torch.inf + nn[nn == 0] = torch.inf + nn_list.insert(0, nn.to(self.device)) return nn_list def init_conc(self, img): bs, x, y, z = img.shape sh = 1 / (x + 1) - vec = cp.linspace(self.top_bc + sh, self.bot_bc - sh, x) + vec = torch.linspace(self.top_bc + sh, self.bot_bc - sh, x) for i in range(2): - vec = cp.expand_dims(vec, -1) - vec = cp.expand_dims(vec, 0) - vec = vec.repeat(z, -1) - vec = vec.repeat(y, -2) - vec = vec.repeat(bs, 0) - vec = vec.astype(self.precision) - img1 = cp.array(img) + vec = torch.unsqueeze(vec, -1) + vec = torch.unsqueeze(vec, 0) + vec = vec.repeat(bs, 1, y, z) + vec = vec.to(self.device) + # vec = vec.astype(self.precision) + img1 = img.clone().to(self.device) img1[img1 > 1] = 1 return self.pad(img1 * vec, [self.top_bc, self.bot_bc]) @@ -404,7 +420,7 @@ def solve(self, iter_limit=5000, verbose=True, conv_crit=2*10**-2): """ run a solve simulation - :param iter_limit: max iterations before aborting, will attempt double for the same no. iterations + :param iter_limit: max iterations before aborting, will attemtorch double for the same no. iterations if initialised as singles :param verbose: Whether to print tau. 
Can be set to 'per_iter' for more feedback :param conv_crit: convergence criteria, minimum percent difference between @@ -416,57 +432,76 @@ def solve(self, iter_limit=5000, verbose=True, conv_crit=2*10**-2): while not self.converged: self.iter += 1 out = self.conc[:, 2:, 1:-1, 1:-1] * self.pre_factors[0][:, 2:, 1:-1, 1:-1] + \ - self.conc[:, :-2, 1:-1, 1:-1] * self.pre_factors[1][:, :-2, 1:-1, 1:-1] + \ - self.conc[:, 1:-1, 2:, 1:-1] * self.pre_factors[2][:, 1:-1, 2:, 1:-1] + \ - self.conc[:, 1:-1, :-2, 1:-1] * self.pre_factors[3][:, 1:-1, :-2, 1:-1] + \ - self.conc[:, 1:-1, 1:-1, 2:] * self.pre_factors[4][:, 1:-1, 1:-1, 2:] + \ - self.conc[:, 1:-1, 1:-1, :-2] * self.pre_factors[5][:, 1:-1, 1:-1, :-2] + self.conc[:, :-2, 1:-1, 1:-1] * self.pre_factors[1][:, :-2, 1:-1, 1:-1] + \ + self.conc[:, 1:-1, 2:, 1:-1] * self.pre_factors[2][:, 1:-1, 2:, 1:-1] + \ + self.conc[:, 1:-1, :-2, 1:-1] * self.pre_factors[3][:, 1:-1, :-2, 1:-1] + \ + self.conc[:, 1:-1, 1:-1, 2:] * self.pre_factors[4][:, 1:-1, 1:-1, 2:] + \ + self.conc[:, 1:-1, 1:-1, :-2] * \ + self.pre_factors[5][:, 1:-1, 1:-1, :-2] out /= self.nn if self.iter % 20 == 0: - self.converged, self.D_eff = self.check_convergence(verbose, conv_crit, start, iter_limit) + self.converged = self.check_convergence( + verbose, conv_crit, start, iter_limit) out -= self.crop(self.conc, 1) - out *= self.cb[self.iter%2] + out *= self.cb[self.iter % 2] self.conc[:, 1:-1, 1:-1, 1:-1] += out - if len(np.array([self.VF[z] for z in self.VF.keys() if z!=0]))>0: - self.D_mean=np.sum(np.array([self.VF[z]*(1/(2*self.cond[z])) for z in self.VF.keys() if z!=0])) - else: - self.D_mean=0 - self.tau = self.D_mean/self.D_eff if self.D_eff != 0 else cp.inf self.end_simulation(iter_limit, verbose, start) return self.tau + def check_convergence(self, verbose, conv_crit, start, iter_limit): # print progress if self.iter % 100 == 0: - loss, flux = self.check_vertical_flux(conv_crit) - if verbose=='per_iter': - print(loss) - if abs(loss) < conv_crit or 
np.isnan(loss).item(): - self.converged = True - b, x, y, z = self.cpu_img.shape - flux *= (x+1)/(y*z) - return True, flux.get() + self.semi_converged, self.new_fl, err = self.check_vertical_flux( + conv_crit) + b, x, y, z = self.cpu_img.shape + self.D_eff = (self.new_fl*(x+1)/(y*z)).cpu() + self.tau = self.D_mean / \ + self.D_eff if self.D_eff != 0 else torch.tensor(torch.inf) + if self.semi_converged == 'zero_flux': + return True - # increase precision to double if currently single - if self.iter >= iter_limit: - if self.precision == cp.single: - print('increasing precision to double') - self.iter = 0 - self.conc = cp.array(self.conc, dtype=cp.double) - self.nn = cp.array(self.nn, dtype=cp.double) - self.precision = cp.double + if verbose == 'per_iter': + print( + f'Iter: {self.iter}, conv error: {abs(err.item())}, tau: {self.tau.item()}') + + if self.semi_converged: + self.converged = self.check_rolling_mean(conv_crit=1e-3) + + if not self.converged: + self.old_fl = self.new_fl + return False + else: + return True else: - return True, flux.get() - return False, False + self.old_fl = self.new_fl + return False + + # increase precision to double if currently single + # if self.iter >= iter_limit: + # # if self.precision == cp.single: + # # print('increasing precision to double') + # # self.iter = 0 + # # self.conc = cp.array(self.conc, dtype=cp.double) + # # self.nn = cp.array(self.nn, dtype=cp.double) + # # self.precision = cp.double + # else: + # return True + + return False def check_vertical_flux(self, conv_crit): - vert_flux = (self.conc[:, 1:-1, 1:-1, 1:-1] - self.conc[:, :-2, 1:-1, 1:-1]) * self.pre_factors[1][:, :-2, 1:-1, 1:-1] - vert_flux[self.nn == cp.inf] = 0 - fl = cp.sum(vert_flux, (0, 2, 3)) + vert_flux = (self.conc[:, 1:-1, 1:-1, 1:-1] - self.conc[:, + :-2, 1:-1, 1:-1]) * self.pre_factors[1][:, :-2, 1:-1, 1:-1] + vert_flux[self.nn == torch.inf] = 0 + fl = torch.sum(vert_flux, (0, 2, 3))[2:-2] + print(fl.argmin(), fl.argmax()) err = (fl.max() - 
fl.min())*2/(fl.max() + fl.min()) - if abs(fl).min()==0: - return 0, cp.array([0], dtype=self.precision) - return err, fl.mean() + if err < conv_crit or torch.isnan(err).item(): + return True, torch.mean(fl), err + if fl.min() == 0: + return 'zero_flux', torch.mean(fl), err + return False, torch.mean(fl), err class ElectrodeSolver(): @@ -474,42 +509,45 @@ class ElectrodeSolver(): Electrode Solver - solves the electrode tortuosity factor system (migration and capacitive current between current collector and solid/electrolyte interface) Once solve method is called, tau, D_eff and D_rel are available as attributes. """ - - def __init__(self, img, precision=cp.double, omega=1e-6): + + def __init__(self, img, omega=1e-6, device=torch.device('cuda')): img = np.expand_dims(img, 0) self.cpu_img = img - self.precision = precision - + self.precision = torch.double + # check device is available + self.device = torch.device(device) + if torch.device(device).type.startswith('cuda') and not torch.cuda.is_available(): + self.device = torch.device('cpu') + warnings.warn( + "CUDA not available, defaulting device to cpu. 
To avoid this warning, explicitly set the device when initialising the solver with device=torch.device('cpu')") # Define omega, res and c_DL self.omega = omega self.res = 1 self.c_DL = 1 - if len(img.shape)==4: + if len(img.shape) == 4: self.A_CC = img.shape[2]*img.shape[3] else: self.A_CC = img.shape[2] self.k_0 = 1 - + # VF calc self.VF = np.mean(img) # save original image in cuda - img = cp.array(img, dtype=self.precision) + img = torch.tensor(img, dtype=self.precision).to(self.device) self.img = img # init phi - + self.phi = self.init_phi(img) - - self.phase_map = self.pad(img,[1,0]) + + self.phase_map = self.pad(img, [1, 0]) # create prefactor map self.prefactor = self.init_prefactor(img) - - - #checkerboarding - self.w = 2 - cp.pi / (1.5 * img.shape[1]) + # checkerboarding + self.w = 2 - torch.pi / (1.5 * img.shape[1]) # self.w = 1.8 # self.w = 0.01 self.cb = self.init_cb(img) @@ -517,69 +555,71 @@ def __init__(self, img, precision=cp.double, omega=1e-6): # solving params self.converged = False self.semiconverged = False - self.iter=0 + self.old_fl = -1 + self.iter = 0 # Results - self.tau_e=0 - self.D_eff=None - self.D_mean=None - + self.tau_e = 0 + self.D_eff = None + self.D_mean = None + def pad(self, img, vals=[0] * 6): while len(vals) < 6: vals.append(0) - if len(img.shape)==4: + if len(img.shape) == 4: to_pad = [1]*8 - to_pad[:2] = (0, 0) - elif len(img.shape)==3: + to_pad[-2:] = (0, 0) + elif len(img.shape) == 3: to_pad = [1]*6 - to_pad[:2] = (0, 0) + to_pad[-2:] = (0, 0) - img = cp.pad(img, to_pad, 'constant') + img = torch.nn.functional.pad(img, to_pad, 'constant') img[:, 0], img[:, -1] = vals[:2] img[:, :, 0], img[:, :, -1] = vals[2:4] - if len(img.shape)==4: + if len(img.shape) == 4: img[:, :, :, 0], img[:, :, :, -1] = vals[4:] return img - - def crop(self, img, c = 1): - if len(img.shape)==4: + + def crop(self, img, c=1): + if len(img.shape) == 4: return img[:, c:-c, c:-c, c:-c] - elif len(img.shape)==3: + elif len(img.shape) == 3: return img[:, 
c:-c, c:-c] - + def init_phi(self, img): """ Initialise phi field as zeros :param img: input image, with 1s conductive and 0s non-conductive - :type img: cp.array + :type img: torch.array :return: phi - :rtype: cp.array + :rtype: torch.array """ - phi = cp.zeros_like(img, dtype=self.precision)+0j + phi = torch.zeros_like(img, dtype=self.precision, + device=self.device)+0j phi = self.pad(phi, [1, 0]) - return phi - + return phi.to(self.device) + def init_cb(self, img): - if len(img.shape)==4: + if len(img.shape) == 4: bs, x, y, z = img.shape cb = np.zeros([x, y, z]) a, b, c = np.meshgrid(range(x), range(y), range(z), indexing='ij') cb[(a + b + c) % 2 == 0] = 1*self.w - return [cp.roll(cp.array(cb), sh, 0) for sh in [0, 1]] + return [torch.roll(torch.tensor(cb), sh, 0).to(self.device) for sh in [0, 1]] - elif len(img.shape)==3: + elif len(img.shape) == 3: bs, x, y = img.shape cb = np.zeros([x, y]) a, b = np.meshgrid(range(x), range(y), indexing='ij') cb[(a + b) % 2 == 0] = 1*self.w - cb = [cp.roll(cp.array(cb), sh, 0) for sh in [0, 1]] + cb = [torch.roll(torch.tensor(cb).to(self.device), sh, 0) + for sh in [0, 1]] cb[1][0] = cb[1][2] return cb - def init_prefactor(self, img): """ Initialise prefactors -> (nn_cond+2j*omega*res*c(dims-nn_cond))**-1 @@ -591,12 +631,12 @@ def init_prefactor(self, img): """ dims = (len(img.shape)-1)*2 # find number of conducting nearest neighbours - img2 = self.pad(img, [1,0]) - nn_cond = cp.zeros_like(img2, dtype=self.precision) + img2 = self.pad(img, [1, 0]) + nn_cond = torch.zeros_like(img2, dtype=self.precision) # iterate through shifts in the spatial dimensions for dim in range(1, len(img.shape)): for dr in [1, -1]: - nn_cond += cp.roll(img2, dr, dim) + nn_cond += torch.roll(img2, dr, dim) # remove the paddings nn_cond = self.crop(nn_cond, 1) self.nn = nn_cond @@ -605,30 +645,30 @@ def init_prefactor(self, img): omegapf = (orc**2 + 1j*orc)/(orc**2+1) prefactor = (nn_cond + 2*nn_solid*omegapf)**-1 # prefactor = 
(nn_cond+2j*self.omega*self.res*self.c_DL*(dims-nn_cond))**-1 - prefactor[prefactor==cp.inf] = 0 - prefactor[img==0] = 0 - return prefactor - + prefactor[prefactor == torch.inf] = 0 + prefactor[img == 0] = 0 + return prefactor.to(self.device) + def sum_neighbours(self): - i=0 + i = 0 for dim in range(1, len(self.phi.shape)): for dr in [1, -1]: - if i==0: - out = cp.roll(self.phi, dr, dim) + if i == 0: + out = torch.roll(self.phi, dr, dim) else: - out += cp.roll(self.phi, dr, dim) - i+=1 + out += torch.roll(self.phi, dr, dim) + i += 1 out = self.crop(out, 1) return out - + def check_convergence(self): - - if len(self.tau_es) < 1000: + + if len(self.tau_es) < 1000: return False loss = np.std(np.array(self.tau_es[-100:])) # print(len(self.tau_es),self.tau_es[-1], loss) - if self.verbose=='per_iter': + if self.verbose == 'per_iter': print(f'(iter {self.iter} loss {loss}, taue {self.tau_es[-1]}') if loss < self.conv_crit: if self.semiconverged: @@ -641,36 +681,36 @@ def check_convergence(self): self.phi = self.init_phi(self.img) self.semiconverged = self.tau_es[-1] self.omega *= 0.1 - print(f'Semi-converged to {self.semiconverged}. Reducing omega to {self.omega} to check convergence') - + print( + f'Semi-converged to {self.semiconverged}. Reducing omega to {self.omega} to check convergence') + self.iter = 0 self.prefactor = self.init_prefactor(self.img) - self.solve(iter_limit=self.iter_limit, verbose=self.verbose, conv_crit=self.conv_crit) + self.solve(iter_limit=self.iter_limit, + verbose=self.verbose, conv_crit=self.conv_crit) return True if self.iter_limit == self.iter: - print('Iteration limit reached. Increase the iteration limit or try starting from a smaller omega') - + print( + 'Iteration limit reached. 
Increase the iteration limit or try starting from a smaller omega') + return True return False - def tau_e_from_phi(self): # calculate total current on bottom boundary - n = self.phase_map[0,1].sum() + n = self.phase_map[0, 1].sum() z = self.res / (n-self.phi[0, 1].sum()) self.z = z r_ion = z.real*3 tau_e = self.VF * r_ion * self.k_0 * self.A_CC / self.phi.shape[1] - return tau_e.get() - - + return tau_e.cpu() def solve(self, iter_limit=100000, verbose=True, conv_crit=1e-5, conv_crit_2=1e-3): """ run a solve simulation - :param iter_limit: max iterations before aborting, will attempt double for the same no. iterations + :param iter_limit: max iterations before aborting, will attemtorch double for the same no. iterations if initialised as singles :param verbose: Whether to print tau. Can be set to 'per_iter' for more feedback :param conv_crit: convergence criteria - running standard deviation of tau_e @@ -679,7 +719,7 @@ def solve(self, iter_limit=100000, verbose=True, conv_crit=1e-5, conv_crit_2=1e- """ self.conv_crit = conv_crit self.conv_crit_2 = conv_crit_2 - + self.iter_limit = iter_limit self.verbose = verbose dim = len(self.phi.shape) @@ -691,31 +731,26 @@ def solve(self, iter_limit=100000, verbose=True, conv_crit=1e-5, conv_crit_2=1e- while not self.converged: out = self.sum_neighbours() out *= self.prefactor*self.crop(self.phase_map) - out[self.prefactor==-1] = 0 + out[self.prefactor == -1] = 0 self.tau_es.append(self.tau_e_from_phi()) if self.iter % 100 == 0: self.converged = self.check_convergence() out -= self.crop(self.phi, 1) - out *= self.cb[self.iter%2] + out *= self.cb[self.iter % 2] - if dim==4: + if dim == 4: self.phi[:, 1:-1, 1:-1, 1:-1] += out - elif dim==3: + elif dim == 3: self.phi[:, 1:-1, 1:-1] += out self.iter += 1 # self.tau_e = self.tau_es[-1] # self.end_simulation(iter_limit, verbose, start) - + def end_simulation(self, ): - if self.iter==self.iter_limit -1: + if self.iter == self.iter_limit - 1: print('Warning: not converged') converged 
= 'unconverged value of tau' converged = 'converged to' if self.verbose: print(f'{converged}: {self.tau_e} after: {self.iter} iterations in: {np.around(timer() - self.start, 4)} seconds at a rate of {np.around((timer() - self.start)/self.iter, 4)} s/iter') - - - - - diff --git a/taufactor/utils.py b/taufactor/utils.py new file mode 100644 index 0000000..99f29a0 --- /dev/null +++ b/taufactor/utils.py @@ -0,0 +1,46 @@ +import torch +import matplotlib.pyplot as plt + + +def flux_direction(im, outpath=None): + """ + Plots the flux direction of the image and provides code for transposing the image to change the flux direction + :param im: segmented input image with n phases + :return: None + """ + dims = len(im.shape) + if dims == 3: + fig, ax = plt.subplots(1, 3, figsize=(15, 5), constrained_layout=True) + for i, dir in enumerate([(0, 1, 2), (1, 2, 0), (2, 0, 1)]): + im_temp = torch.permute(torch.tensor(im), dir).cpu()[0] + ax[i].imshow(im_temp, cmap='gray') + ax[i].axis('off') + if i == 0: + ax[i].set_title("img", family='monospace') + else: + ax[i].set_title( + f"torch.permute(img,{dir})", family='monospace') + plt.suptitle( + 'Direction of flux is into the page.\nThe transformations required to change direction of flux are shown.', fontsize=12) + else: + fig, ax = plt.subplots(1, 2, figsize=(10, 5), constrained_layout=True) + for i, dir in enumerate([(0, 1), (1, 0)]): + im_temp = torch.permute(torch.tensor(im), dir).cpu() + x, y = im_temp.shape + ax[i].imshow(im_temp, cmap='gray') + ax[i].axis('off') + t = ax[i].text(1.05*x, y//2, "Flux", + ha="center", va="center", size=15, rotation=-90, family='monospace', + bbox=dict(boxstyle="rarrow,pad=0.3", + fc="white", ec="black", lw=1)) + if i == 0: + ax[i].set_title("img", family='monospace') + else: + ax[i].set_title( + f"torch.permute(img,{dir})", family='monospace') + plt.suptitle( + 'Direction of flux is down.\nThe transformations required to change direction of flux are shown', fontsize=12) + if outpath: + 
plt.savefig(outpath) + else: + plt.show() diff --git a/tauspeeds.pdf b/tauspeeds.pdf new file mode 100644 index 0000000..5aa74db Binary files /dev/null and b/tauspeeds.pdf differ diff --git a/tests/environment.yml b/tests/environment.yml new file mode 100644 index 0000000..f38aa80 --- /dev/null +++ b/tests/environment.yml @@ -0,0 +1,10 @@ +name: taufactor +channels: + - pytorch + - nvidia +dependencies: + - pytorch + - pytorch-cuda=11.6 + - tifffile + - matplotlib + - pytest diff --git a/tests/test_taufactor.py b/tests/test_taufactor.py index 7e0519e..5d4992d 100644 --- a/tests/test_taufactor.py +++ b/tests/test_taufactor.py @@ -5,170 +5,193 @@ import pytest import taufactor as tau from taufactor.metrics import volume_fraction, surface_area, triple_phase_boundary +import torch as pt from tests.utils import * import numpy as np import matplotlib.pyplot as plt # Testing the main solver + def test_solver_on_uniform_block(): """Run solver on a block of ones.""" - l=20 - img = np.ones([l,l, l]).reshape(1, l, l, l) - S = tau.Solver(img) + l = 20 + img = np.ones([l, l, l]).reshape(1, l, l, l) + img[:, :, 0] = 0 + S = tau.Solver(img, device=pt.device('cpu')) S.solve() - assert S.tau==1.0 + assert np.around(S.tau, decimals=5) == 1.0 + def test_solver_on_uniform_rectangular_block_solver_dim(): """Run solver on a block of ones.""" - l=20 - img = np.ones([l*2,l, l]).reshape(1, l*2, l, l) - S = tau.Solver(img) + l = 20 + img = np.ones([l*2, l, l]).reshape(1, l*2, l, l) + img[:, :, 0] = 0 + S = tau.Solver(img, device=pt.device('cpu')) S.solve() - assert S.tau==1.0 + assert np.around(S.tau, decimals=5) == 1.0 + def test_solver_on_uniform_rectangular_block_non_solver_dim(): """Run solver on a block of ones.""" - l=20 - img = np.ones([l,l, l*2]).reshape(1, l, l, l*2) - S = tau.Solver(img) + l = 20 + img = np.ones([l, l, l*2]).reshape(1, l, l, l*2) + img[:, :, 0] = 0 + S = tau.Solver(img, device=pt.device('cpu')) S.solve() - assert S.tau==1.0 + assert np.around(S.tau, decimals=5) == 
1.0 + def test_solver_on_empty_block(): """Run solver on a block of zeros.""" - l=20 - img = np.zeros([l,l, l]).reshape(1, l, l, l) - S = tau.Solver(img) + l = 20 + img = np.zeros([l, l, l]).reshape(1, l, l, l) + img[:, 0] = 1 + S = tau.Solver(img, device=pt.device('cpu')) S.solve(verbose='per_iter', iter_limit=1000) - assert S.tau==cp.inf + assert S.tau == pt.inf def test_solver_on_strip_of_ones(): """Run solver on a strip of ones, 1/4 volume of total""" - l=20 - img = np.zeros([l,l, l]).reshape(1, l, l, l) - t=10 - img[:,:,0:t,0:t]=1 - S = tau.Solver(img) + l = 20 + img = np.zeros([l, l, l]).reshape(1, l, l, l) + t = 10 + img[:, :, 0:t, 0:t] = 1 + S = tau.Solver(img, device=pt.device('cpu')) S.solve() - assert S.tau==1 + assert np.around(S.tau, decimals=5) == 1 # Testing the periodic solver + def test_periodic_solver_on_uniform_block(): """Run solver on a block of ones.""" - l=20 - img = np.ones([l,l, l]).reshape(1, l, l, l) - S = tau.PeriodicSolver(img) + l = 20 + img = np.ones([l, l, l]).reshape(1, l, l, l) + img[:, :, 0] = 0 + S = tau.PeriodicSolver(img, device=pt.device('cpu')) S.solve() - assert S.tau==1.0 + assert np.around(S.tau, decimals=5) == 1.0 + def test_periodic_solver_on_empty_block(): """Run solver on a block of zeros.""" - l=20 - img = np.zeros([l,l, l]).reshape(1, l, l, l) - S = tau.PeriodicSolver(img) + l = 20 + img = np.zeros([l, l, l]).reshape(1, l, l, l) + img[:, 0] = 1 + S = tau.PeriodicSolver(img, device=pt.device('cpu')) S.solve(verbose='per_iter', iter_limit=1000) - assert S.tau == cp.inf + assert S.tau == pt.inf def test_periodic_solver_on_strip_of_ones(): """Run solver on a strip of ones, 1/4 volume of total""" - l=20 - img = np.zeros([l,l, l]).reshape(1, l, l, l) - t=10 - img[:,:,0:t,0:t]=1 - S = tau.PeriodicSolver(img) + l = 20 + img = np.zeros([l, l, l]).reshape(1, l, l, l) + t = 10 + img[:, :, 0:t, 0:t] = 1 + S = tau.PeriodicSolver(img, device=pt.device('cpu')) S.solve() - assert S.tau==1 + assert np.around(S.tau, decimals=5) == 1 # 
Testing the metrics # Volume fraction + def test_volume_fraction_on_uniform_block(): """Run volume fraction on uniform block""" - l=20 - img = np.ones([l,l, l]).reshape(1, l, l, l) + l = 20 + img = np.ones([l, l, l]).reshape(1, l, l, l) vf = volume_fraction(img) - assert vf==1.0 + assert np.around(vf, decimals=5) == 1.0 + def test_volume_fraction_on_empty_block(): """Run volume fraction on empty block""" - l=20 - img = np.zeros([l,l, l]).reshape(1, l, l, l) + l = 20 + img = np.zeros([l, l, l]).reshape(1, l, l, l) vf = volume_fraction(img) - assert vf==1.0 + assert np.around(vf, decimals=5) == 1.0 + def test_volume_fraction_on_checkerboard(): """Run volume fraction on checkerboard block""" - l=20 + l = 20 img = generate_checkerboard(l) vf = volume_fraction(img) - assert vf==[0.5, 0.5] + assert vf == [0.5, 0.5] + def test_volume_fraction_on_strip_of_ones(): """Run volume fraction on strip of ones""" - l=20 - img = np.zeros([l,l,l]) - t=10 - img[:,0:t,0:t]=1 - vf = volume_fraction(img, phases={'zeros':0, 'ones':1}) + l = 20 + img = np.zeros([l, l, l]) + t = 10 + img[:, 0:t, 0:t] = 1 + vf = volume_fraction(img, phases={'zeros': 0, 'ones': 1}) - assert (vf['zeros'],vf['ones'])==(0.75,0.25) + assert (vf['zeros'], vf['ones']) == (0.75, 0.25) # Surface area + def test_surface_area_on_uniform_block(): """Run surface area on uniform block""" - l=20 - img = np.ones([l,l,l]) + l = 20 + img = np.ones([l, l, l]) sa = surface_area(img, phases=1) - assert sa==0 + assert sa == 0 + def test_surface_area_on_empty_block(): """Run surface area on empty block""" - l=20 - img = np.zeros([l,l, l]) + l = 20 + img = np.zeros([l, l, l]) sa = surface_area(img, phases=0) - assert sa==0 + assert sa == 0 + def test_surface_area_on_checkerboard(): """Run surface area on checkerboard block""" - l=50 + l = 50 img = generate_checkerboard(l) - sa = surface_area(img, phases=[0,1]) + sa = surface_area(img, phases=[0, 1]) + + assert sa == 1 - assert sa==1 def test_surface_area_on_strip_of_ones(): """Run 
surface area on single one in small 2x2z2 cube""" - l=2 - img = np.zeros([l,l,l]) - t=1 - img[0,0,0]=1 - sa = surface_area(img, phases=[0,1]) + l = 2 + img = np.zeros([l, l, l]) + t = 1 + img[0, 0, 0] = 1 + sa = surface_area(img, phases=[0, 1]) + + assert sa == 0.25 - assert sa==0.25 def test_surface_area_on_non_periodic_2d(): """Run surface area on a pair of one in small 3x3 square""" - img = np.array([[0,0,0],[1,1,0],[0,0,0]]) - sa = surface_area(img, phases=[0,1]) + img = np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]]) + sa = surface_area(img, phases=[0, 1]) + + assert sa == 5/12 - assert sa==5/12 def test_surface_area_on_periodic_2d(): """Run surface area on a pair of one in small 3x3 square""" - img = np.array([[0,0,0],[1,1,0],[0,0,0]]) - sa = surface_area(img, phases=[0,1], periodic=[0,1]) + img = np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]]) + sa = surface_area(img, phases=[0, 1], periodic=[0, 1]) - assert sa==6/18 + assert sa == 6/18 def test_surface_area_interfactial_3ph(): @@ -177,33 +200,37 @@ def test_surface_area_interfactial_3ph(): img[1] = 1 img[2] = 2 sa = surface_area(img, phases=[1, 2]) - assert sa==1/6 + assert sa == 1/6 + def test_tpb_2d(): l = 3 img = np.zeros([l, l]) img[0] = 1 - img[:,0] = 2 + img[:, 0] = 2 tpb = triple_phase_boundary(img) - assert tpb==0.25 + assert tpb == 0.25 + def test_tpb_3d_corners(): l = 2 img = np.zeros([l, l, l]) - img[0,0,0] = 1 - img[1,1,1] = 1 - img[0,1,1] = 2 - img[1,0,0] = 2 + img[0, 0, 0] = 1 + img[1, 1, 1] = 1 + img[0, 1, 1] = 2 + img[1, 0, 0] = 2 tpb = triple_phase_boundary(img) - assert tpb==1 + assert tpb == 1 + def test_tpb_3d_corners(): l = 2 img = np.zeros([l, l, l]) img[0] = 1 - img[:,0] = 2 + img[:, 0] = 2 tpb = triple_phase_boundary(img) - assert tpb==1/3 + assert tpb == 1/3 + def test_multiphase_and_solver_agree(): x = 100 @@ -211,95 +238,102 @@ def test_multiphase_and_solver_agree(): img[50:] = 2 img[:, :20] = 0 img[:, 50:] = 1 - s = tau.MultiPhaseSolver(img, {1:1, 2:1*10**-4}) - mph = s.solve(verbose = 
'per_iter', conv_crit=0.02) - img[img==2] = 0 - s = tau.Solver(img) - s.solve(verbose = 'per_iter') + s = tau.MultiPhaseSolver(img, {1: 1, 2: 1*10**-4}, device=pt.device('cpu')) + mph = s.solve(verbose='per_iter', conv_crit=0.02) + img[img == 2] = 0 + s = tau.Solver(img, device=pt.device('cpu')) + s.solve(verbose='per_iter') err = (mph-s.tau) - assert err< 0.02 + assert err < 0.02 + def test_mphsolver_on_empty_block(): """Run solver on a block of zeros.""" - l=20 - img = np.zeros([l,l, l]).reshape(1, l, l, l) - S = tau.MultiPhaseSolver(img) + l = 20 + img = np.zeros([l, l, l]).reshape(1, l, l, l) + S = tau.MultiPhaseSolver(img, device=pt.device('cpu')) S.solve(iter_limit=1000) - assert S.tau==cp.inf + assert S.tau == pt.inf + def test_mphsolver_on_ones_block(): """Run solver on a block of ones.""" - l=20 - img = np.ones([l,l, l]).reshape(1, l, l, l) - S = tau.MultiPhaseSolver(img) + l = 20 + img = np.ones([l, l, l]).reshape(1, l, l, l) + S = tau.MultiPhaseSolver(img, device=pt.device('cpu')) S.solve(iter_limit=1000) - assert np.around(S.tau,4)==1.0 + assert np.around(S.tau, 4) == 1.0 + def test_mphsolver_on_halves(): """Run solver on a block of halves.""" - l=20 - img = np.ones([l,l, l]).reshape(1, l, l, l) + l = 20 + img = np.ones([l, l, l]).reshape(1, l, l, l) cond = 0.5 - S = tau.MultiPhaseSolver(img, {1:cond}) + S = tau.MultiPhaseSolver(img, {1: cond}, device=pt.device('cpu')) S.solve(iter_limit=1000) print(S.D_eff, S.D_mean) - assert np.around(S.tau,4)==1.0 + assert np.around(S.tau, 4) == 1.0 + def test_mphsolver_on_strip_of_ones(): """Run solver on a strip of ones, 1/4 volume of total""" - l=20 - img = np.zeros([l,l, l]).reshape(1, l, l, l) - x=10 - img[:,:,0:x,0:x]=1 - S = tau.MultiPhaseSolver(img) + l = 20 + img = np.zeros([l, l, l]).reshape(1, l, l, l) + x = 10 + img[:, :, 0:x, 0:x] = 1 + S = tau.MultiPhaseSolver(img, device=pt.device('cpu')) S.solve() - assert np.around(S.tau,4)==1.0 + assert np.around(S.tau, 4) == 1.0 + def 
test_mphsolver_on_strip_of_ones_and_twos(): """Run solver on a strip of ones, 1/4 volume of total""" - l=20 - img = np.zeros([l,l, l]).reshape(1, l, l, l) - x=10 - img[:,:,0:x,0:x]=1 - img[:,:,0:x,x:l]=2 - cond = {1:1, 2:0.5} - S = tau.MultiPhaseSolver(img, cond) + l = 20 + img = np.zeros([l, l, l]).reshape(1, l, l, l) + x = 10 + img[:, :, 0:x, 0:x] = 1 + img[:, :, 0:x, x:l] = 2 + cond = {1: 1, 2: 0.5} + S = tau.MultiPhaseSolver(img, cond, device=pt.device('cpu')) S.solve() - assert np.around(S.tau,4)==1 + assert np.around(S.tau, 4) == 1 def test_mphsolver_on_strip_of_ones_and_twos_and_threes(): """Run solver on a strip of ones, 1/4 volume of total""" - l=20 - img = np.ones([l,l, l]).reshape(1, l, l, l) - x=10 - img[:,:,0:x,0:x]=2 - img[:,:,0:x,x:l]=3 - cond = {1:1, 2:0.5, 3:2} - S = tau.MultiPhaseSolver(img, cond) + l = 20 + img = np.ones([l, l, l]).reshape(1, l, l, l) + x = 10 + img[:, :, 0:x, 0:x] = 2 + img[:, :, 0:x, x:l] = 3 + cond = {1: 1, 2: 0.5, 3: 2} + S = tau.MultiPhaseSolver(img, cond, device=pt.device('cpu')) S.solve() - assert np.around(S.tau,4)==1 + assert np.around(S.tau, 4) == 1 + def test_taue_deadend(): """Run solver on a strip of ones, 1/4 volume of total""" - + l = 100 - img = np.zeros((l,l)) - img[:75,45:55] = 1 - img[img!=1] = 0 - esolver = tau.ElectrodeSolver(img) + img = np.zeros((l, l)) + img[:75, 45:55] = 1 + img[img != 1] = 0 + esolver = tau.ElectrodeSolver(img, device=pt.device('cpu')) esolver.solve() - assert np.around(esolver.tau_e,3)==0.601 + assert np.around(esolver.tau_e, 3) == 0.601 + def test_taue_throughpore(): """Run solver on a strip of ones, 1/4 volume of total""" - + l = 100 - img = np.zeros((l,l)) - img[:,45:55] = 1 - img[img!=1] = 0 - esolver = tau.ElectrodeSolver(img) + img = np.zeros((l, l)) + img[:, 45:55] = 1 + img[img != 1] = 0 + esolver = tau.ElectrodeSolver(img, device=pt.device('cpu')) esolver.solve() - assert np.around(esolver.tau_e,3)==1.046 + assert np.around(esolver.tau_e, 3) == 1.046 diff --git a/tests/utils.py 
b/tests/utils.py index 175c134..21e2234 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -1,5 +1,4 @@ import numpy as np -import cupy as cp def generate_checkerboard(size, d=3): if d==2: