diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml new file mode 100644 index 0000000..fd416ad --- /dev/null +++ b/.github/workflows/documentation.yml @@ -0,0 +1,98 @@ +name: documentation +on: + # Triggers the workflow on push but only for the main branch + push: +# branches: [ main ] + + paths: + - specsanalyzer/**/* + - specsscan/**/* + - tutorial/** + - .github/workflows/documentation.yml + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + + + # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages +permissions: + contents: read + pages: write + id-token: write + +# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. +# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. +concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + build: + runs-on: ubuntu-latest + steps: + # Check out repo and set up Python + - name: Check out the repository + uses: actions/checkout@v4 + with: + lfs: true + + # see https://stackoverflow.com/questions/57612428/cloning-private-github-repository-within-organisation-in-actions and https://docs.github.com/en/authentication/connecting-to-github-with-ssh/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent#generating-a-new-ssh-key + - name: checkout test data + run: | + eval `ssh-agent -s` + ssh-add - <<< '${{ secrets.TEST_DATA_ACCESS_KEY }}' + git submodule sync --recursive + git submodule update --init --recursive + + # Use cached python and dependencies, install poetry + - name: "Setup Python, Poetry and Dependencies" + uses: packetcoders/action-setup-cache-python-poetry@main + with: + python-version: 3.8 + poetry-version: 1.2.2 + + - name: Install notebook dependencies + run: poetry install -E notebook --with docs + + - name: Install pandoc + run: | + sudo wget https://github.com/jgm/pandoc/releases/download/3.1.8/pandoc-3.1.8-1-amd64.deb + sudo dpkg -i pandoc-3.1.8-1-amd64.deb + + # rm because hextof_workflow notebook can not run outside maxwell + - name: copy tutorial files to docs + run: | + cp -r $GITHUB_WORKSPACE/tutorial $GITHUB_WORKSPACE/docs/ + cp -r $GITHUB_WORKSPACE/specsscan/config $GITHUB_WORKSPACE/docs/specsscan/ + mkdir $GITHUB_WORKSPACE/docs/tests + cp -r $GITHUB_WORKSPACE/tests/data $GITHUB_WORKSPACE/docs/tests/ + + # To be included later + # - name: Cache docs build + # id: cache-docs + # uses: actions/cache@v3 + # with: + # path: $GITHUB_WORKSPACE/_build + # key: ${{ runner.os }}-docs + + - name: build Sphinx docs + run: poetry run sphinx-build -b html $GITHUB_WORKSPACE/docs $GITHUB_WORKSPACE/_build + + - name: Setup Pages + uses: actions/configure-pages@v3 + + - name: Upload artifact + uses: actions/upload-pages-artifact@v2 + with: + path: '_build' + + # Deployment job + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + needs: build + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v2 diff --git a/.github/workflows/testing_coverage.yml b/.github/workflows/testing_coverage.yml index 768d655..e22efcb 100644 --- a/.github/workflows/testing_coverage.yml +++ b/.github/workflows/testing_coverage.yml @@ -40,8 +40,8 @@ jobs: poetry run pytest --cov --cov-report xml:cobertura.xml --full-trace --show-capture=no -sv -n auto tests/ # Take report and upload to coveralls -# - name: Coveralls -# uses: coverallsapp/github-action@v2 -# 
with:
-#          github-token: ${{ secrets.GITHUB_TOKEN }}
-#          file: ./cobertura.xml
+      - name: Coveralls
+        uses: coverallsapp/github-action@v2
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          file: ./cobertura.xml
diff --git a/.gitignore b/.gitignore
index a5b45d5..14ceb11 100755
--- a/.gitignore
+++ b/.gitignore
@@ -133,3 +133,8 @@ dmypy.json

 # SPECS config files
 *.calib2d
+
+# result files
+*.h5
+*.tiff
+*.nxs
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 35b642a..7f535da 100755
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,7 +13,7 @@ repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
     # Ruff version.
-    rev: v0.1.7
+    rev: v0.2.2
    hooks:
      # Run the formatter.
      - id: ruff-format
diff --git a/README.md b/README.md
index 89c9580..6b1e6bd 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,4 @@
+[![Documentation Status](https://github.com/OpenCOMPES/specsanalyzer/actions/workflows/documentation.yml/badge.svg)](https://opencompes.github.io/specsanalyzer/)
 [![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)
 ![](https://github.com/OpenCOMPES/specsanalyzer/actions/workflows/linting.yml/badge.svg)
 ![](https://github.com/OpenCOMPES/specsanalyzer/actions/workflows/testing_multiversion.yml/badge.svg?branch=main)
@@ -7,10 +8,14 @@
 [![Coverage Status](https://coveralls.io/repos/github/OpenCOMPES/specsanalyzer/badge.svg?branch=main&kill_cache=1)](https://coveralls.io/github/OpenCOMPES/specsanalyzer?branch=main)

 # specsanalyzer
+This is the `specsanalyzer` package for conversion and handling of SPECS Phoibos analyzer data.
+
 This package contains two modules:
 `specsanalyzer` is a package to import and convert MCP analyzer images from SPECS Phoibos analyzers into energy and emission angle/physical coordinates.
 `specsscan` is a Python package for loading Specs Phoibos scans accquired with the labview software developed at FHI/EPFL

+Tutorials for usage and the API documentation can be found in the [Documentation](https://opencompes.github.io/specsanalyzer/).
+
 ## Installation

 ### Pip (for users)
@@ -40,6 +45,12 @@ pip install specsanalyzer
 ```bash
 python -m ipykernel install --user --name=specs_kernel
 ```
+
+#### Configuration and calib2d file
+The conversion procedures require several configuration parameters to be set up in a config file. An example config file is provided as part of the package (see documentation). Configuration files can either be passed to the class constructors, or are read from system-wide or user-defined locations (see documentation).
+
+Most importantly, conversion of analyzer data to energy/angular coordinates requires detector calibration data provided by the manufacturer. The corresponding *.calib2d file (e.g. phoibos150.calib2d) is provided together with the spectrometer software, and needs to be set in the config file.
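+
+A minimal usage sketch (the config path here is a placeholder for your own file):
+
+```python
+from specsanalyzer import SpecsAnalyzer
+
+# hypothetical config file path; adjust to your setup
+spa = SpecsAnalyzer(config="path/to/config.yaml")
+```
+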
 ### For Contributors

 To contribute to the development of `specsanalyzer`, you can follow these steps:
diff --git a/docs/conf.py b/docs/conf.py
index c1d6bc1..8a102c8 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,11 +1,3 @@
-import os
-import sys
-
-import specsanalyzer
-
-
-sys.path.insert(0, os.path.abspath(".."))
-
 # Configuration file for the Sphinx documentation builder.
 #
 # This file only contains a selection of the most common options. For a full
@@ -16,30 +8,33 @@
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 #
+import os
+import sys
+
+import tomlkit
+
+
+sys.path.insert(0, os.path.abspath(".."))

 # -- Project information -----------------------------------------------------

-# The suffix of source filenames.
-source_suffix = ".rst"

-# The encoding of source files.
-# source_encoding = 'utf-8-sig'
+def _get_project_meta():
+    with open("../pyproject.toml") as pyproject:
+        file_contents = pyproject.read()

-# The master toctree document.
-master_doc = "index"
+    return tomlkit.parse(file_contents)["tool"]["poetry"]

-# General information about the project.
-project = "specsanalyzer"
-copyright = "2022, Laurenz Rettig, Michele Puppin, Abeer Arora"

-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = specsanalyzer.__version__
-# The full version, including alpha/beta/rc tags.
-release = specsanalyzer.__version__
+pkg_meta = _get_project_meta()
+project = str(pkg_meta["name"])
+copyright = "2024, OpenCOMPES team"
+author = "OpenCOMPES team"
+
+# The short X.Y version
+version = str(pkg_meta["version"])
+# The full version, including alpha/beta/rc tags
+release = version

 # -- General configuration ---------------------------------------------------

@@ -55,8 +50,13 @@
     "sphinx.ext.autosummary",
     "sphinx.ext.coverage",
     "sphinx_autodoc_typehints",
+    # "bokeh.sphinxext.bokeh_autodoc",
+    # "bokeh.sphinxext.bokeh_plot",
+    "nbsphinx",
+    "myst_parser",
 ]

+
 autoclass_content = "class"
 autodoc_member_order = "bysource"
diff --git a/docs/examples/example.rst b/docs/examples/example.rst
deleted file mode 100755
index 2734f41..0000000
--- a/docs/examples/example.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-Use notebook to fill
-===================================================
diff --git a/docs/getting_started.rst b/docs/getting_started.rst
new file mode 100644
index 0000000..fba0172
--- /dev/null
+++ b/docs/getting_started.rst
@@ -0,0 +1,2 @@
+.. include:: ../README.md
+   :parser: myst_parser.sphinx_
diff --git a/docs/index.rst b/docs/index.rst
index 7167a42..ca5bee5 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -8,7 +8,18 @@ Welcome to specsanalyzer's documentation!
 Concept description. TODO

 .. toctree::
-   :maxdepth: 3
+   :maxdepth: 1
+   :caption: Getting Started
+
+   getting_started
+   specsanalyzer/config
+   tutorial/1_specsanalyzer_conversion_examples
+   tutorial/2_specsscan_example
+   tutorial/3_specsscan_conversion_to_NeXus
+   tutorial/4_specsscan_load_sweep_scan
+
+.. toctree::
+   :maxdepth: 1
    :numbered:
    :caption: SpecsAnalyzer Core Modules

@@ -16,15 +27,14 @@
    specsanalyzer/convert
    specsanalyzer/img_tools
    specsanalyzer/io
-   specsanalyzer/metadata
-   specsanalyzer/settings

 .. toctree::
-   :maxdepth: 2
+   :maxdepth: 1
    :numbered:
-   :caption: Examples
+   :caption: SpecsScan Core Modules

-   examples/example
+   specsscan/core
+   specsscan/helpers

 .. toctree::
    :maxdepth: 2
diff --git a/docs/specsanalyzer/config.rst b/docs/specsanalyzer/config.rst
new file mode 100644
index 0000000..d74dd70
--- /dev/null
+++ b/docs/specsanalyzer/config.rst
@@ -0,0 +1,39 @@
+Config
+===================================================
+The config module contains a mechanism to collect configuration parameters from various sources and configuration files, and to combine them in a hierarchical manner into a single, consistent configuration dictionary.
+It will load an (optional) provided config file, or alternatively use a passed python dictionary as initial config dictionary, and subsequently look for the following additional config files to load:
+
+* ``folder_config``: A config file of name :file:`specs_config.yaml` in the current working directory. This is mostly intended to pass calibration parameters of the workflow between different notebook instances.
+* ``user_config``: A config file provided by the user, stored as :file:`.specsanalyzer/config.yaml` in the current user's home directory. This is intended to give a user the option for individual configuration modifications of system settings.
+* ``system_config``: A config file provided by the system administrator, stored as :file:`/etc/specsanalyzer/config.yaml` on Linux-based systems, and :file:`%ALLUSERSPROFILE%/specsanalyzer/config.yaml` on Windows. This should provide all necessary default parameters for using the specsanalyzer processor with a given setup. For an example of the configuration for the setup at the Fritz Haber Institute, see :ref:`example_config`.
+* ``default_config``: The default configuration shipped with the package. Typically, all parameters here should be overwritten by any of the other configuration files.
+
+The config mechanism returns the combined dictionary, and reports the loaded configuration files. In order to disable or overwrite any of the configuration files, they can also be given as optional parameters (path to a file, or python dictionary), as in the sketch below.
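+
+A minimal usage sketch (the override value and the disabled ``user_config`` are purely illustrative):
+
+.. code-block:: python
+
+    from specsanalyzer.config import parse_config
+
+    # values passed here take precedence over folder-, user- and system-level
+    # files, which in turn override the package defaults
+    config = parse_config(config={"binning": 2}, user_config={})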
+
+
+API
+***************************************************
+.. automodule:: specsanalyzer.config
+   :members:
+   :undoc-members:
+
+
+.. _example_config:
+
+Default specsanalyzer configuration settings
+***************************************************
+
+.. literalinclude:: ../../specsanalyzer/config/default.yaml
+   :language: yaml
+
+Default specsscan configuration settings
+***************************************************
+
+.. literalinclude:: ../../specsscan/config/default.yaml
+   :language: yaml
+
+Example configuration file for the trARPES setup at FHI-Berlin
+*********************************************************************************
+
+.. literalinclude:: ../../specsscan/config/example_config_FHI.yaml
+   :language: yaml
diff --git a/docs/specsanalyzer/metadata.rst b/docs/specsanalyzer/metadata.rst
deleted file mode 100755
index 053fe63..0000000
--- a/docs/specsanalyzer/metadata.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-Metadata
-===================================================
-.. automodule:: specsanalyzer.metadata
-   :members:
-   :undoc-members:
diff --git a/docs/specsanalyzer/settings.rst b/docs/specsanalyzer/settings.rst
deleted file mode 100644
index 2ef30ff..0000000
--- a/docs/specsanalyzer/settings.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-Settings
-===================================================
-.. automodule:: specsanalyzer.settings
-   :members:
-   :undoc-members:
diff --git a/docs/specsscan/settings.rst b/docs/specsscan/helpers.rst
old mode 100755
new mode 100644
similarity index 65%
rename from docs/specsscan/settings.rst
rename to docs/specsscan/helpers.rst
index c527dfe..aeca3c1
--- a/docs/specsscan/settings.rst
+++ b/docs/specsscan/helpers.rst
@@ -1,5 +1,5 @@
-Settings
+Helpers
 ===================================================
-.. automodule:: specsscan.settings
+..
automodule:: specsscan.helpers :members: :undoc-members: diff --git a/docs/specsscan/io.rst b/docs/specsscan/io.rst deleted file mode 100755 index 474d66d..0000000 --- a/docs/specsscan/io.rst +++ /dev/null @@ -1,7 +0,0 @@ -io functions ``(specsscan.io)`` -===================================== -io functions for the specsscan package - -.. automodule:: specsscan.io - :members: - :private-members: diff --git a/docs/specsscan/metadata.rst b/docs/specsscan/metadata.rst deleted file mode 100755 index 7e4eaf1..0000000 --- a/docs/specsscan/metadata.rst +++ /dev/null @@ -1,5 +0,0 @@ -Metadata -=================================================== -.. automodule:: specsscan.metadata - :members: - :undoc-members: diff --git a/poetry.lock b/poetry.lock index b9b850a..d6b186f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "alabaster" @@ -1818,13 +1818,13 @@ files = [ [[package]] name = "json5" -version = "0.9.17" +version = "0.9.18" description = "A Python implementation of the JSON5 data format." optional = true python-versions = ">=3.8" files = [ - {file = "json5-0.9.17-py2.py3-none-any.whl", hash = "sha256:f8ec1ecf985951d70f780f6f877c4baca6a47b6e61e02c4cd190138d10a7805a"}, - {file = "json5-0.9.17.tar.gz", hash = "sha256:717d99d657fa71b7094877b1d921b1cce40ab444389f6d770302563bb7dfd9ae"}, + {file = "json5-0.9.18-py2.py3-none-any.whl", hash = "sha256:3f20193ff8dfdec6ab114b344e7ac5d76fac453c8bab9bdfe1460d1d528ec393"}, + {file = "json5-0.9.18.tar.gz", hash = "sha256:ecb8ac357004e3522fb989da1bf08b146011edbd14fdffae6caad3bd68493467"}, ] [package.extras] @@ -2770,67 +2770,68 @@ tests = ["pytest (>=4.6)"] [[package]] name = "msgpack" -version = "1.0.7" +version = "1.0.8" description = "MessagePack serializer" optional = false python-versions = ">=3.8" files = [ - {file = "msgpack-1.0.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:04ad6069c86e531682f9e1e71b71c1c3937d6014a7c3e9edd2aa81ad58842862"}, - {file = "msgpack-1.0.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cca1b62fe70d761a282496b96a5e51c44c213e410a964bdffe0928e611368329"}, - {file = "msgpack-1.0.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e50ebce52f41370707f1e21a59514e3375e3edd6e1832f5e5235237db933c98b"}, - {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7b4f35de6a304b5533c238bee86b670b75b03d31b7797929caa7a624b5dda6"}, - {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28efb066cde83c479dfe5a48141a53bc7e5f13f785b92ddde336c716663039ee"}, - {file = "msgpack-1.0.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4cb14ce54d9b857be9591ac364cb08dc2d6a5c4318c1182cb1d02274029d590d"}, - {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b573a43ef7c368ba4ea06050a957c2a7550f729c31f11dd616d2ac4aba99888d"}, - {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ccf9a39706b604d884d2cb1e27fe973bc55f2890c52f38df742bc1d79ab9f5e1"}, - {file = "msgpack-1.0.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cb70766519500281815dfd7a87d3a178acf7ce95390544b8c90587d76b227681"}, - {file = "msgpack-1.0.7-cp310-cp310-win32.whl", hash = "sha256:b610ff0f24e9f11c9ae653c67ff8cc03c075131401b3e5ef4b82570d1728f8a9"}, - {file = 
"msgpack-1.0.7-cp310-cp310-win_amd64.whl", hash = "sha256:a40821a89dc373d6427e2b44b572efc36a2778d3f543299e2f24eb1a5de65415"}, - {file = "msgpack-1.0.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:576eb384292b139821c41995523654ad82d1916da6a60cff129c715a6223ea84"}, - {file = "msgpack-1.0.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:730076207cb816138cf1af7f7237b208340a2c5e749707457d70705715c93b93"}, - {file = "msgpack-1.0.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:85765fdf4b27eb5086f05ac0491090fc76f4f2b28e09d9350c31aac25a5aaff8"}, - {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3476fae43db72bd11f29a5147ae2f3cb22e2f1a91d575ef130d2bf49afd21c46"}, - {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d4c80667de2e36970ebf74f42d1088cc9ee7ef5f4e8c35eee1b40eafd33ca5b"}, - {file = "msgpack-1.0.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b0bf0effb196ed76b7ad883848143427a73c355ae8e569fa538365064188b8e"}, - {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f9a7c509542db4eceed3dcf21ee5267ab565a83555c9b88a8109dcecc4709002"}, - {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:84b0daf226913133f899ea9b30618722d45feffa67e4fe867b0b5ae83a34060c"}, - {file = "msgpack-1.0.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ec79ff6159dffcc30853b2ad612ed572af86c92b5168aa3fc01a67b0fa40665e"}, - {file = "msgpack-1.0.7-cp311-cp311-win32.whl", hash = "sha256:3e7bf4442b310ff154b7bb9d81eb2c016b7d597e364f97d72b1acc3817a0fdc1"}, - {file = "msgpack-1.0.7-cp311-cp311-win_amd64.whl", hash = "sha256:3f0c8c6dfa6605ab8ff0611995ee30d4f9fcff89966cf562733b4008a3d60d82"}, - {file = "msgpack-1.0.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f0936e08e0003f66bfd97e74ee530427707297b0d0361247e9b4f59ab78ddc8b"}, - {file = "msgpack-1.0.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:98bbd754a422a0b123c66a4c341de0474cad4a5c10c164ceed6ea090f3563db4"}, - {file = "msgpack-1.0.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b291f0ee7961a597cbbcc77709374087fa2a9afe7bdb6a40dbbd9b127e79afee"}, - {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebbbba226f0a108a7366bf4b59bf0f30a12fd5e75100c630267d94d7f0ad20e5"}, - {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e2d69948e4132813b8d1131f29f9101bc2c915f26089a6d632001a5c1349672"}, - {file = "msgpack-1.0.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bdf38ba2d393c7911ae989c3bbba510ebbcdf4ecbdbfec36272abe350c454075"}, - {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:993584fc821c58d5993521bfdcd31a4adf025c7d745bbd4d12ccfecf695af5ba"}, - {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:52700dc63a4676669b341ba33520f4d6e43d3ca58d422e22ba66d1736b0a6e4c"}, - {file = "msgpack-1.0.7-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e45ae4927759289c30ccba8d9fdce62bb414977ba158286b5ddaf8df2cddb5c5"}, - {file = "msgpack-1.0.7-cp312-cp312-win32.whl", hash = "sha256:27dcd6f46a21c18fa5e5deed92a43d4554e3df8d8ca5a47bf0615d6a5f39dbc9"}, - {file = "msgpack-1.0.7-cp312-cp312-win_amd64.whl", hash = "sha256:7687e22a31e976a0e7fc99c2f4d11ca45eff652a81eb8c8085e9609298916dcf"}, - {file = 
"msgpack-1.0.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5b6ccc0c85916998d788b295765ea0e9cb9aac7e4a8ed71d12e7d8ac31c23c95"}, - {file = "msgpack-1.0.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:235a31ec7db685f5c82233bddf9858748b89b8119bf4538d514536c485c15fe0"}, - {file = "msgpack-1.0.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cab3db8bab4b7e635c1c97270d7a4b2a90c070b33cbc00c99ef3f9be03d3e1f7"}, - {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bfdd914e55e0d2c9e1526de210f6fe8ffe9705f2b1dfcc4aecc92a4cb4b533d"}, - {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36e17c4592231a7dbd2ed09027823ab295d2791b3b1efb2aee874b10548b7524"}, - {file = "msgpack-1.0.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38949d30b11ae5f95c3c91917ee7a6b239f5ec276f271f28638dec9156f82cfc"}, - {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ff1d0899f104f3921d94579a5638847f783c9b04f2d5f229392ca77fba5b82fc"}, - {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dc43f1ec66eb8440567186ae2f8c447d91e0372d793dfe8c222aec857b81a8cf"}, - {file = "msgpack-1.0.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dd632777ff3beaaf629f1ab4396caf7ba0bdd075d948a69460d13d44357aca4c"}, - {file = "msgpack-1.0.7-cp38-cp38-win32.whl", hash = "sha256:4e71bc4416de195d6e9b4ee93ad3f2f6b2ce11d042b4d7a7ee00bbe0358bd0c2"}, - {file = "msgpack-1.0.7-cp38-cp38-win_amd64.whl", hash = "sha256:8f5b234f567cf76ee489502ceb7165c2a5cecec081db2b37e35332b537f8157c"}, - {file = "msgpack-1.0.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfef2bb6ef068827bbd021017a107194956918ab43ce4d6dc945ffa13efbc25f"}, - {file = "msgpack-1.0.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:484ae3240666ad34cfa31eea7b8c6cd2f1fdaae21d73ce2974211df099a95d81"}, - {file = "msgpack-1.0.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3967e4ad1aa9da62fd53e346ed17d7b2e922cba5ab93bdd46febcac39be636fc"}, - {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dd178c4c80706546702c59529ffc005681bd6dc2ea234c450661b205445a34d"}, - {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ffbc252eb0d229aeb2f9ad051200668fc3a9aaa8994e49f0cb2ffe2b7867e7"}, - {file = "msgpack-1.0.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:822ea70dc4018c7e6223f13affd1c5c30c0f5c12ac1f96cd8e9949acddb48a61"}, - {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:384d779f0d6f1b110eae74cb0659d9aa6ff35aaf547b3955abf2ab4c901c4819"}, - {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f64e376cd20d3f030190e8c32e1c64582eba56ac6dc7d5b0b49a9d44021b52fd"}, - {file = "msgpack-1.0.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5ed82f5a7af3697b1c4786053736f24a0efd0a1b8a130d4c7bfee4b9ded0f08f"}, - {file = "msgpack-1.0.7-cp39-cp39-win32.whl", hash = "sha256:f26a07a6e877c76a88e3cecac8531908d980d3d5067ff69213653649ec0f60ad"}, - {file = "msgpack-1.0.7-cp39-cp39-win_amd64.whl", hash = "sha256:1dc93e8e4653bdb5910aed79f11e165c85732067614f180f70534f056da97db3"}, - {file = "msgpack-1.0.7.tar.gz", hash = "sha256:572efc93db7a4d27e404501975ca6d2d9775705c2d922390d878fcf768d92c87"}, + {file = "msgpack-1.0.8-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:505fe3d03856ac7d215dbe005414bc28505d26f0c128906037e66d98c4e95868"}, + {file = "msgpack-1.0.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6b7842518a63a9f17107eb176320960ec095a8ee3b4420b5f688e24bf50c53c"}, + {file = "msgpack-1.0.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:376081f471a2ef24828b83a641a02c575d6103a3ad7fd7dade5486cad10ea659"}, + {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e390971d082dba073c05dbd56322427d3280b7cc8b53484c9377adfbae67dc2"}, + {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00e073efcba9ea99db5acef3959efa45b52bc67b61b00823d2a1a6944bf45982"}, + {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82d92c773fbc6942a7a8b520d22c11cfc8fd83bba86116bfcf962c2f5c2ecdaa"}, + {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9ee32dcb8e531adae1f1ca568822e9b3a738369b3b686d1477cbc643c4a9c128"}, + {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e3aa7e51d738e0ec0afbed661261513b38b3014754c9459508399baf14ae0c9d"}, + {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:69284049d07fce531c17404fcba2bb1df472bc2dcdac642ae71a2d079d950653"}, + {file = "msgpack-1.0.8-cp310-cp310-win32.whl", hash = "sha256:13577ec9e247f8741c84d06b9ece5f654920d8365a4b636ce0e44f15e07ec693"}, + {file = "msgpack-1.0.8-cp310-cp310-win_amd64.whl", hash = "sha256:e532dbd6ddfe13946de050d7474e3f5fb6ec774fbb1a188aaf469b08cf04189a"}, + {file = "msgpack-1.0.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9517004e21664f2b5a5fd6333b0731b9cf0817403a941b393d89a2f1dc2bd836"}, + {file = "msgpack-1.0.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d16a786905034e7e34098634b184a7d81f91d4c3d246edc6bd7aefb2fd8ea6ad"}, + {file = "msgpack-1.0.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2872993e209f7ed04d963e4b4fbae72d034844ec66bc4ca403329db2074377b"}, + {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c330eace3dd100bdb54b5653b966de7f51c26ec4a7d4e87132d9b4f738220ba"}, + {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83b5c044f3eff2a6534768ccfd50425939e7a8b5cf9a7261c385de1e20dcfc85"}, + {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1876b0b653a808fcd50123b953af170c535027bf1d053b59790eebb0aeb38950"}, + {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:dfe1f0f0ed5785c187144c46a292b8c34c1295c01da12e10ccddfc16def4448a"}, + {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3528807cbbb7f315bb81959d5961855e7ba52aa60a3097151cb21956fbc7502b"}, + {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e2f879ab92ce502a1e65fce390eab619774dda6a6ff719718069ac94084098ce"}, + {file = "msgpack-1.0.8-cp311-cp311-win32.whl", hash = "sha256:26ee97a8261e6e35885c2ecd2fd4a6d38252246f94a2aec23665a4e66d066305"}, + {file = "msgpack-1.0.8-cp311-cp311-win_amd64.whl", hash = "sha256:eadb9f826c138e6cf3c49d6f8de88225a3c0ab181a9b4ba792e006e5292d150e"}, + {file = "msgpack-1.0.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:114be227f5213ef8b215c22dde19532f5da9652e56e8ce969bf0a26d7c419fee"}, + {file = "msgpack-1.0.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:d661dc4785affa9d0edfdd1e59ec056a58b3dbb9f196fa43587f3ddac654ac7b"}, + {file = "msgpack-1.0.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d56fd9f1f1cdc8227d7b7918f55091349741904d9520c65f0139a9755952c9e8"}, + {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0726c282d188e204281ebd8de31724b7d749adebc086873a59efb8cf7ae27df3"}, + {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8db8e423192303ed77cff4dce3a4b88dbfaf43979d280181558af5e2c3c71afc"}, + {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99881222f4a8c2f641f25703963a5cefb076adffd959e0558dc9f803a52d6a58"}, + {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b5505774ea2a73a86ea176e8a9a4a7c8bf5d521050f0f6f8426afe798689243f"}, + {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:ef254a06bcea461e65ff0373d8a0dd1ed3aa004af48839f002a0c994a6f72d04"}, + {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e1dd7839443592d00e96db831eddb4111a2a81a46b028f0facd60a09ebbdd543"}, + {file = "msgpack-1.0.8-cp312-cp312-win32.whl", hash = "sha256:64d0fcd436c5683fdd7c907eeae5e2cbb5eb872fafbc03a43609d7941840995c"}, + {file = "msgpack-1.0.8-cp312-cp312-win_amd64.whl", hash = "sha256:74398a4cf19de42e1498368c36eed45d9528f5fd0155241e82c4082b7e16cffd"}, + {file = "msgpack-1.0.8-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0ceea77719d45c839fd73abcb190b8390412a890df2f83fb8cf49b2a4b5c2f40"}, + {file = "msgpack-1.0.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1ab0bbcd4d1f7b6991ee7c753655b481c50084294218de69365f8f1970d4c151"}, + {file = "msgpack-1.0.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1cce488457370ffd1f953846f82323cb6b2ad2190987cd4d70b2713e17268d24"}, + {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3923a1778f7e5ef31865893fdca12a8d7dc03a44b33e2a5f3295416314c09f5d"}, + {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a22e47578b30a3e199ab067a4d43d790249b3c0587d9a771921f86250c8435db"}, + {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd739c9251d01e0279ce729e37b39d49a08c0420d3fee7f2a4968c0576678f77"}, + {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d3420522057ebab1728b21ad473aa950026d07cb09da41103f8e597dfbfaeb13"}, + {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5845fdf5e5d5b78a49b826fcdc0eb2e2aa7191980e3d2cfd2a30303a74f212e2"}, + {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6a0e76621f6e1f908ae52860bdcb58e1ca85231a9b0545e64509c931dd34275a"}, + {file = "msgpack-1.0.8-cp38-cp38-win32.whl", hash = "sha256:374a8e88ddab84b9ada695d255679fb99c53513c0a51778796fcf0944d6c789c"}, + {file = "msgpack-1.0.8-cp38-cp38-win_amd64.whl", hash = "sha256:f3709997b228685fe53e8c433e2df9f0cdb5f4542bd5114ed17ac3c0129b0480"}, + {file = "msgpack-1.0.8-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f51bab98d52739c50c56658cc303f190785f9a2cd97b823357e7aeae54c8f68a"}, + {file = "msgpack-1.0.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:73ee792784d48aa338bba28063e19a27e8d989344f34aad14ea6e1b9bd83f596"}, + {file = "msgpack-1.0.8-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:f9904e24646570539a8950400602d66d2b2c492b9010ea7e965025cb71d0c86d"}, + {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e75753aeda0ddc4c28dce4c32ba2f6ec30b1b02f6c0b14e547841ba5b24f753f"}, + {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5dbf059fb4b7c240c873c1245ee112505be27497e90f7c6591261c7d3c3a8228"}, + {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4916727e31c28be8beaf11cf117d6f6f188dcc36daae4e851fee88646f5b6b18"}, + {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7938111ed1358f536daf311be244f34df7bf3cdedb3ed883787aca97778b28d8"}, + {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:493c5c5e44b06d6c9268ce21b302c9ca055c1fd3484c25ba41d34476c76ee746"}, + {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fbb160554e319f7b22ecf530a80a3ff496d38e8e07ae763b9e82fadfe96f273"}, + {file = "msgpack-1.0.8-cp39-cp39-win32.whl", hash = "sha256:f9af38a89b6a5c04b7d18c492c8ccf2aee7048aff1ce8437c4683bb5a1df893d"}, + {file = "msgpack-1.0.8-cp39-cp39-win_amd64.whl", hash = "sha256:ed59dd52075f8fc91da6053b12e8c89e37aa043f8986efd89e61fae69dc1b011"}, + {file = "msgpack-1.0.8-py3-none-any.whl", hash = "sha256:24f727df1e20b9876fa6e95f840a2a2651e34c0ad147676356f4bf5fbb0206ca"}, + {file = "msgpack-1.0.8.tar.gz", hash = "sha256:95c02b0e27e706e48d0e5426d1710ca78e0f0628d6e89d5b5a5b91a5f12274f3"}, ] [[package]] @@ -4116,6 +4117,26 @@ zipfile37 = "0.1.3" [package.extras] dev = ["mypy", "pip-tools", "pre-commit", "pytest", "pytest-cov", "pytest-timeout", "ruff", "structlog", "types-pytz", "types-pyyaml", "types-requests"] +[[package]] +name = "pynxtools-mpes" +version = "0.0.1" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pynxtools-mpes-0.0.1.tar.gz", hash = "sha256:6cd1bf35d1ce7e8e7c8623aff6baa1471ef56c9d78b07741da2e5fb67ac76da0"}, + {file = "pynxtools_mpes-0.0.1-py3-none-any.whl", hash = "sha256:574be453a476be6efd16e9417afd2d0ca83b065a728bcbb09b125cd8ca4cc3f8"}, +] + +[package.dependencies] +h5py = ">=3.6.0" +pynxtools = ">=0.0.10" +PyYAML = ">=6.0" +xarray = ">=0.20.2" + +[package.extras] +dev = ["mypy", "pip-tools", "pytest", "ruff", "types-pyyaml"] + [[package]] name = "pyparsing" version = "3.1.1" @@ -4207,13 +4228,13 @@ testing = ["filelock"] [[package]] name = "python-dateutil" -version = "2.8.2" +version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, ] [package.dependencies] @@ -5909,4 +5930,4 @@ notebook = ["ipykernel", "jupyter", "jupyterlab-h5web"] [metadata] lock-version = "2.0" python-versions = ">=3.8, <3.12" -content-hash = "747434a2c690484bb17da7874588df8a570069400a7d06ba96639555ef3eeced" +content-hash = 
"50db34a96449a2d52ca91d623c33f50149d4de8c794ea27c9bf6fa4e3f949cb1" diff --git a/pyproject.toml b/pyproject.toml index bf02e42..25f9575 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,7 +26,7 @@ ipywidgets = ">=8.1.1" matplotlib = ">=3.5.1" numpy = ">=1.21.6" opencv-python = ">=4.8.1.78" -pynxtools = ">=0.0.9" +pynxtools-mpes = ">=0.0.1" python-dateutil = ">=2.8.2" pyyaml = ">=6.0" xarray = ">=0.20.2" @@ -44,7 +44,7 @@ pytest = ">=7.0.1" pytest-cov = ">=3.0.0" pytest-xdist = ">=2.5.0" pytest-clarity = ">=1.0.1" -ruff = ">=0.1.7" +ruff = ">=0.1.7, <0.3.0" mypy = ">=1.6.0" types-pyyaml = ">=6.0.12.12" types-requests = ">=2.31.0.9" diff --git a/specsanalyzer/__init__.py b/specsanalyzer/__init__.py index 8598c72..e6b114e 100755 --- a/specsanalyzer/__init__.py +++ b/specsanalyzer/__init__.py @@ -1,6 +1,4 @@ -"""SpecsAnalyzer class easy access APIs - -""" +"""SpecsAnalyzer class easy access APIs""" # Easy access APIs from .core import SpecsAnalyzer diff --git a/specsanalyzer/config.py b/specsanalyzer/config.py index c27c62b..a88f243 100755 --- a/specsanalyzer/config.py +++ b/specsanalyzer/config.py @@ -1,11 +1,11 @@ -"""This module contains a config library for loading yaml/json files into dicts -""" +"""This module contains a config library for loading yaml/json files into dicts""" +from __future__ import annotations + import json import os import platform from importlib.util import find_spec from pathlib import Path -from typing import Union import yaml @@ -13,14 +13,11 @@ def parse_config( - config: Union[dict, str] = None, - folder_config: Union[dict, str] = None, - user_config: Union[dict, str] = None, - system_config: Union[dict, str] = None, - default_config: Union[ - dict, - str, - ] = f"{package_dir}/config/default.yaml", + config: dict | str = None, + folder_config: dict | str = None, + user_config: dict | str = None, + system_config: dict | str = None, + default_config: dict | str = f"{package_dir}/config/default.yaml", verbose: bool = True, ) -> dict: """Load the config dictionary from a file, or pass the provided config dictionary. @@ -30,22 +27,22 @@ def parse_config( can be also passed as optional arguments (file path strings or dictionaries). Args: - config (Union[dict, str], optional): config dictionary or file path. + config (dict | str, optional): config dictionary or file path. Files can be *json* or *yaml*. Defaults to None. - folder_config (Union[ dict, str, ], optional): working-folder-based config dictionary + folder_config (dict | str, optional): working-folder-based config dictionary or file path. The loaded dictionary is completed with the folder-based values, taking preference over user, system and default values. Defaults to the file "specs_config.yaml" in the current working directory. - user_config (Union[ dict, str, ], optional): user-based config dictionary + user_config (dict | str, optional): user-based config dictionary or file path. The loaded dictionary is completed with the user-based values, taking preference over system and default values. Defaults to the file ".specsanalyzer/config.yaml" in the current user's home directory. - system_config (Union[ dict, str, ], optional): system-wide config dictionary + system_config (dict | str, optional): system-wide config dictionary or file path. The loaded dictionary is completed with the system-wide values, taking preference over default values. Defaults to the file "/etc/specsanalyzer/config.yaml" on linux, - and "%ALLUSERPROFILE%/specsanalyzer/config.yaml" on windows. 
-        default_config (Union[ dict, str, ], optional): default config dictionary
+            and "%ALLUSERSPROFILE%/specsanalyzer/config.yaml" on windows.
+        default_config (dict | str, optional): default config dictionary
             or file path. The loaded dictionary is completed with the default values.
             Defaults to *package_dir*/config/default.yaml".
         verbose (bool, optional): Option to report loaded config files. Defaults to True.
@@ -101,7 +98,7 @@ def parse_config(
             )
         elif platform.system() == "Windows":
             system_config = str(
-                Path(os.environ["ALLUSERPROFILE"])
+                Path(os.environ["ALLUSERSPROFILE"])
                 .joinpath("specsanalyzer")
                 .joinpath("config.yaml"),
             )
@@ -212,18 +209,20 @@ def complete_dictionary(dictionary: dict, base_dictionary: dict) -> dict:
     Returns:
         dict: the completed (merged) dictionary
     """
-    for k, v in base_dictionary.items():
-        if isinstance(v, dict):
-            if k not in dictionary.keys():
-                dictionary[k] = v
+    if base_dictionary:
+        for k, v in base_dictionary.items():
+            if isinstance(v, dict):
+                if k not in dictionary.keys():
+                    dictionary[k] = v
+                else:
+                    if not isinstance(dictionary[k], dict):
+                        raise ValueError(
+                            "Cannot merge dictionaries. "
+                            f"Mismatch on Key {k}: {dictionary[k]}, {v}.",
+                        )
+                    dictionary[k] = complete_dictionary(dictionary[k], v)
             else:
-                if not isinstance(dictionary[k], dict):
-                    raise ValueError(
-                        f"Cannot merge dictionaries. Mismatch on Key {k}: {dictionary[k]}, {v}.",
-                    )
-                dictionary[k] = complete_dictionary(dictionary[k], v)
-        else:
-            if k not in dictionary.keys():
-                dictionary[k] = v
+                if k not in dictionary.keys():
+                    dictionary[k] = v

     return dictionary
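For illustration, a minimal sketch of the merge semantics implemented above (the dictionary keys are invented for the example): values already present in the first dictionary win, nested dictionaries are merged recursively, and missing keys are filled in from the base dictionary.

```python
from specsanalyzer.config import complete_dictionary

user = {"binning": 2, "fft": {"apply": True}}
defaults = {"binning": 4, "pixel_size": 0.00645, "fft": {"apply": False}}

merged = complete_dictionary(user, defaults)
# -> {"binning": 2, "fft": {"apply": True}, "pixel_size": 0.00645}
```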
diff --git a/specsanalyzer/config/20221212_config_LACUS.yaml b/specsanalyzer/config/20221212_config_LACUS.yaml
deleted file mode 100644
index e7b5c13..0000000
--- a/specsanalyzer/config/20221212_config_LACUS.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-calib2d_file: 'C:\Users\spec\Documents\GitHub\wetlab-software\lib_LACUS\python_ARPES\config_files\20221212_phoibos150.calib2d'
-nx_pixel: 1376
-ny_pixel: 1024
-pixel_size: 0.00645
-magnification: 4.54
-Ang_Offset_px: -2
-E_Offset_px: 0
-apply_fft_filter: True
-crop: False
-fft_filter_peaks:
-  - amplitude: .95
-    pos_x: 77.9131
-    pos_y: 0
-    sigma_x: 13.7216
-    sigma_y: 20.9152
-  - amplitude: .95
-    pos_x: 178.087
-    pos_y: 0
-    sigma_x: 13.7216
-    sigma_y: 20.9152
-  - amplitude: .95
-    pos_x: 0
-    pos_y: 109.183
-    sigma_x: 13.7216
-    sigma_y: 20.9152
-  - amplitude: .95
-    pos_x: 77.9131
-    pos_y: 109.183
-    sigma_x: 13.7216
-    sigma_y: 20.9152
-  - amplitude: .95
-    pos_x: 178.087
-    pos_y: 109.183
-    sigma_x: 13.7216
-    sigma_y: 20.9152
-  - amplitude: .95
-    pos_x: 256
-    pos_y: 109.183
-    sigma_x: 13.7216
-    sigma_y: 20.9152
diff --git a/specsanalyzer/config/default.yaml b/specsanalyzer/config/default.yaml
index 5dfed1e..5a81996 100644
--- a/specsanalyzer/config/default.yaml
+++ b/specsanalyzer/config/default.yaml
@@ -1,9 +1,18 @@
+# path to SPECS calib2d file (provided together with your analyzer)
 calib2d_file: "./config/phoibos150.calib2d"
+# number of pixels along the energy dispersing direction
 nx_pixel: 1376
+# number of pixels along the angle/spatially dispersing direction
 ny_pixel: 1024
+# pixel size in millimeters
 pixel_size: 0.00645
+# binning factor applied to the image
 binning: 4
+# magnification of the lens system used for imaging the detector
 magnification: 4.54
-Ang_Offset_px: 0
-E_Offset_px: 0
-apply_fft_filter: False
+# offset in pixels along the angular dispersing axis
+angle_offset_px: 0
+# offset in pixels along the energy dispersing axis
+energy_offset_px: 0
+# flag controlling the application of a Fourier filter to remove grid artefacts
+apply_fft_filter: false
diff --git a/specsanalyzer/convert.py b/specsanalyzer/convert.py
index df154de..d3b54f4 100755
--- a/specsanalyzer/convert.py
+++ b/specsanalyzer/convert.py
@@ -1,35 +1,32 @@
-"""Specsanalyzer image conversion module
-"""
-from typing import Tuple
+"""Specsanalyzer image conversion module"""
+from __future__ import annotations

 import numpy as np
 from scipy.ndimage import map_coordinates


-def get_damatrix_fromcalib2d(  # pylint: disable=too-many-locals
+def get_damatrix_fromcalib2d(
     lens_mode: str,
     kinetic_energy: float,
     pass_energy: float,
     work_function: float,
     config_dict: dict,
-) -> Tuple[float, np.ndarray]:
-    """This function estimates the best angular
-    conversion coefficients for the current analyser mode, starting from
-    a dictionary containing the specs .calib2d database.
-    A linear interpolation is performed from the tabulated coefficients based
-    on the retardatio ratio value.
+) -> tuple[float, np.ndarray]:
+    """This function estimates the best angular conversion coefficients for the current analyser
+    mode, starting from a dictionary containing the specs .calib2d database. A linear interpolation
+    is performed from the tabulated coefficients based on the retardation ratio value.

     Args:
         lens_mode (string): the lens mode string description
         kinetic_energy (float): kinetic energy of the photoelectron
         pass_energy (float): analyser pass energy
         work_function (float): work function settings
-        config_dict (dict): dictionary containing the configuration parameters
-        for angulat correction
+        config_dict (dict): dictionary containing the configuration parameters for angular
+            correction

     Returns:
-        Tuple[float,np.ndarray]: a_inner, damatrix
-        interpolated damatrix and a_inner, needed for the coordinate conversion
+        tuple[float,np.ndarray]: (a_inner, damatrix)
+        interpolated damatrix and a_inner, needed for the coordinate conversion
     """

     # retardation ratio
@@ -89,15 +86,12 @@ def get_damatrix_fromcalib2d(  # pylint: disable=too-many-locals
 def bisection(array: np.ndarray, value: float) -> int:
     """
-    Auxiliary function to find the closest rr index
-    from https://stackoverflow.com/questions/2566412/
+    Auxiliary function to find the closest rr index from https://stackoverflow.com/questions/2566412/
     find-nearest-value-in-numpy-array

-    Given an ``array`` , and given a ``value`` , returns an index
-    j such that ``value`` is between array[j]
-    and array[j+1]. ``array`` must be monotonic
-    increasing. j=-1 or j=len(array) is returned
-    to indicate that ``value`` is out of range below and above respectively.
+    Given an ``array`` , and given a ``value`` , returns an index j such that ``value`` is between
+    array[j] and array[j+1]. ``array`` must be monotonic increasing. j=-1 or j=len(array) is
+    returned to indicate that ``value`` is out of range below and above respectively.
     This should mimick the function BinarySearch in igor pro 6

     Args:
@@ -105,8 +99,7 @@ def bisection(array: np.ndarray, value: float) -> int:
         value (float): comparison value

     Returns:
-        int: index (non-integer) expressing the position of value between
-        array[j] and array[j+1]
+        int: index expressing the position of value between array[j] and array[j+1]
     """

     num_elems = len(array)
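A quick illustrative sketch of the expected behavior (values invented):

```python
import numpy as np

rr_vec = np.array([0.5, 1.0, 2.0, 4.0])
# bisection(rr_vec, 1.5) returns 1, since 1.5 lies between rr_vec[1] and
# rr_vec[2]; values below 0.5 yield -1, values above 4.0 yield len(rr_vec).
```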
@@ -133,8 +126,7 @@ def bisection(array: np.ndarray, value: float) -> int:


 def second_closest_rr(rrvec: np.ndarray, closest_rr_index: int) -> int:
-    """Return closest_rr_index+1 unless you are at the edge
-    of the rrvec.
+    """Return closest_rr_index+1 unless you are at the edge of the rrvec.

     Args:
         rrvec (np.ndarray): the retardation ratio vector
@@ -155,22 +147,21 @@
 def get_rr_da(  # pylint: disable=too-many-locals
     lens_mode: str,
     config_dict: dict,
-) -> Tuple[np.ndarray, np.ndarray]:
-    """Get the retardatio ratios and the da for a certain lens mode from the
-    confugaration dictionary
+) -> tuple[np.ndarray, np.ndarray]:
+    """Get the retardation ratios and the da values for a certain lens mode from the configuration
+    dictionary

     Args:
         lens_mode (string): string containing the lens mode
         config_dict (dict): config dictionary

     Raises:
-        ValueError
+        KeyError: Raised if the requested lens mode is not found
+        ValueError: Raised if no da values are found for the given mode

     Returns:
-        Tuple[np.ndarray,np.ndarray]: retardation ratio vector, matrix of
-        da coeffients, per row row0 : da1, row1: da3, .. up to da7
-        non angle resolved lens modes do not posses das.
-
+        tuple[np.ndarray,np.ndarray]: retardation ratio vector, matrix of da coefficients, per row
+        row0: da1, row1: da3, .. up to da7. Non angle resolved lens modes do not possess da values.
     """
     # check if this is spatial or an angular mode
     # check the angular mode type
@@ -229,23 +220,20 @@ def calculate_polynomial_coef_da(
     pass_energy: float,
     e_shift: np.ndarray,
 ) -> np.ndarray:
-    """Given the da coeffiecients contained in the
-    scanpareters, the program calculate the energy range based
-    on the eshift parameter and fits a second order polinomial
-    to the tabulated values. The polinomial coefficients
-    are packed in the dapolymatrix array (row0 da1, row1 da3, ..)
+    """Given the da coefficients contained in the scan parameters, the program calculates the
+    energy range based on the e_shift parameter and fits a second order polynomial to the
+    tabulated values. The polynomial coefficients are packed in the dapolymatrix array
+    (row0 da1, row1 da3, ..)
     The dapolymatrix is also saved in the scanparameters dictionary

     Args:
         da_matrix (np.ndarray): the matrix of interpolated da coefficients
         kinetic_energy (float): photoelectorn kinetic energy
         pass_energy (float): analyser pass energy
-        eshift (float): e shift parameter, defining the energy
-        range around the center for the polynomial fit of the da coefficients
+        e_shift (np.ndarray): e shift parameter, defining the energy
+            range around the center for the polynomial fit of the da coefficients

     Returns:
-        np.ndarray: dapolymatrix containg the fit results (row0 da1, row1
-        da3, ..)
+        np.ndarray: dapolymatrix containing the fit results (row0 da1, row1 da3, ..)
     """
     # get the Das from the damatrix
     # da1=currentdamatrix[0][:]
@@ -284,9 +272,8 @@ def zinner(
     angle: np.ndarray,
     da_poly_matrix: np.ndarray,
 ) -> np.ndarray:
-    """Auxiliary function for mcp_position_mm, uses kinetic energy and angle
-    starting from the dapolymatrix, to get
-    the zinner coefficient to calculate the electron arrival position on the
+    """Auxiliary function for mcp_position_mm, uses kinetic energy and angle starting from the
+    dapolymatrix, to get the zinner coefficient to calculate the electron arrival position on the
     mcp withing the a_inner boundaries

     Args:
@@ -295,8 +282,7 @@ def zinner(
         da_poly_matrix (np.ndarray): matrix with polynomial coefficients

     Returns:
-        float: returns the calcualted position on the mcp,
-        valid for low angles (< ainner)
+        float: returns the calculated position on the mcp, valid for low angles (< ainner)
     """

     out = np.zeros(angle.shape, float)
@@ -314,10 +300,9 @@ def zinner_diff(
     angle: np.ndarray,
     da_poly_matrix: np.ndarray,
 ) -> np.ndarray:
-    """Auxiliary function for mcp_position_mm, uses kinetic energy and angle
-    starting from the dapolymatrix, to get
-    the zinner_diff coefficient to coorect the electron arrival position on the
-    mcp outside the a_inner boundaries
+    """Auxiliary function for mcp_position_mm, uses kinetic energy and angle starting from the
+    dapolymatrix, to get the zinner_diff coefficient to correct the electron arrival position on
+    the mcp outside the a_inner boundaries

     Args:
         kinetic_energy (float): kinetic energy
@@ -325,9 +310,8 @@ def zinner_diff(
         da_poly_matrix (np.ndarray): polynomial matrix

     Returns:
-        float: zinner_diff the correction for the
-        zinner position on the MCP for high (>ainner)
-        angles,
+        float: zinner_diff, the correction for the zinner position on the MCP for high (>ainner)
+            angles.
     """

     out = np.zeros(angle.shape, float)
@@ -349,19 +333,18 @@ def mcp_position_mm(
     a_inner: float,
     da_poly_matrix: np.ndarray,
 ) -> np.ndarray:
-    """calculated the position of the photoelectron on the mcp, for
-    a certain kinetic energy and emission angle. This is determined for
-    the given lens mode (as defined by the a_inner and dapolymatrix)
+    """Calculates the position of the photoelectron on the mcp, for a certain kinetic energy and
+    emission angle. This is determined for the given lens mode (as defined by the a_inner and
+    dapolymatrix)

     Args:
         kinetic_energy (float): kinetic energy
         angle (float): photoemission angle
         a_inner (float): inner angle parameter of the lens mode
-        da_poly_matrix (np.ndarray): matrix with the polynomial correction
-        coefficients for calculating the arrival position on the MCP
+        da_poly_matrix (np.ndarray): matrix with the polynomial correction coefficients for
+            calculating the arrival position on the MCP

     Returns:
-        np.ndarray:
-        lateral position of photoelectron on the mcp (angular dispersing axis)
+        np.ndarray: lateral position of photoelectron on the mcp (angular dispersing axis)
     """

     # define two angular regions: within and outsied the a_inner boundaries
@@ -382,17 +365,16 @@
     return result


-def calculate_matrix_correction(  # pylint: disable=too-many-arguments, too-many-locals
+def calculate_matrix_correction(
     lens_mode: str,
     kinetic_energy: float,
     pass_energy: float,
     work_function: float,
     binning: int,
     config_dict: dict,
-) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
-    """Calculate the angular and
-    energy interpolation matrices for
-    the currection function
+    **kwds,
+) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
+    """Calculate the angular and energy interpolation matrices for the correction function

     Args:
         lens_mode (str): analyser lens mode
@@ -401,18 +383,18 @@ def calculate_matrix_correction(
         work_function (float): analyser set work function
         binning (int): image binning
         config_dict (dict): dictionary containing the calibration files
+        **kwds: Keyword parameters:
+
+            - angle_offset_px: Angular offset in pixels
+            - energy_offset_px: Energy offset in pixels

     Returns:
-        tuple[np.ndarray,
-        np.ndarray,
-        np.ndarray,
-        np.ndarray,
-        np.ndarray]: returns ek_axis, kinetic energy axis
-        angle_axis, angle of emissio axis
-        angular_correction_matrix, the matrix for angular interpolation
-        e_correction, the matrix for energy interpolation
-        jacobian_determinant, the transformation jacobian for area preserving
-        transformation
+        tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
+            - ek_axis: kinetic energy axis
+            - angle_axis: angle of emission axis
+            - angular_correction_matrix: the matrix for angular interpolation
+            - e_correction: the matrix for energy interpolation
+            - jacobian_determinant: the transformation jacobian for area preserving transformation
     """
     e_shift = np.array(config_dict["calib2d_dict"]["eShift"])
@@ -480,8 +462,8 @@
     )

     # read angular and energy offsets from configuration file
-    angle_offset_px = config_dict.get("Ang_Offset_px", 0)
-    energy_offset_px = config_dict.get("E_Offset_px", 0)
+    angle_offset_px = kwds.get("angle_offset_px", config_dict.get("angle_offset_px", 0))
+    energy_offset_px = kwds.get("energy_offset_px", config_dict.get("energy_offset_px", 0))

     angular_correction_matrix = (
         mcp_position_mm_matrix / magnification / (pixel_size * binning)
@@ -531,7 +513,7 @@
         angle_axis (np.ndarray): angle axis

     Returns:
-        np.ndarray: jacobian_determinanant matrix
+        np.ndarray: jacobian_determinant matrix
     """
     w_dyde = np.gradient(angular_correction_matrix, ek_axis, axis=1)
     w_dyda = np.gradient(angular_correction_matrix, angle_axis, axis=0)
@@ -547,15 +529,15 @@ def physical_unit_data(
     e_correction: float,
     jacobian_determinant: np.ndarray,
 ) -> np.ndarray:
"""interpolate the image on physical units, using the map_coordinates - function from scipy.ndimage + """interpolate the image on physical units, using the ``map_coordinates`` function from + ``scipy.ndimage`` Args: image (np.ndarray): raw image angular_correction_matrix (np.ndarray): angular correction matrix e_correction (float): energy correction jacobian_determinant (np.ndarray): jacobian determinant for preserving - the area normalization + the area normalization Returns: np.ndarray: interpolated image as a function of angle and energy diff --git a/specsanalyzer/core.py b/specsanalyzer/core.py index c432292..a2872d0 100755 --- a/specsanalyzer/core.py +++ b/specsanalyzer/core.py @@ -1,12 +1,9 @@ -"""This is the specsanalyzer core class +"""This is the specsanalyzer core class""" +from __future__ import annotations -""" import os from typing import Any -from typing import Dict from typing import Generator -from typing import Tuple -from typing import Union import imutils import ipywidgets as ipw @@ -22,34 +19,38 @@ from specsanalyzer.convert import physical_unit_data from specsanalyzer.img_tools import crop_xarray from specsanalyzer.img_tools import fourier_filter_2d -from specsanalyzer.metadata import MetaHandler package_dir = os.path.dirname(__file__) -class SpecsAnalyzer: # pylint: disable=dangerous-default-value +class SpecsAnalyzer: """SpecsAnalyzer: A class to convert photoemission data from a SPECS Phoibos analyzer from camera image coordinates into physical units (energy, angle, position). + + Args: + metadata (dict, optional): Metadata dictionary. Defaults to {}. + config (dict | str, optional): Metadata dictionary or file path. Defaults to {}. + **kwds: Keyword arguments passed to ``parse_config``. """ def __init__( self, - metadata: Dict[Any, Any] = {}, - config: Union[Dict[Any, Any], str] = {}, + metadata: dict[Any, Any] = {}, + config: dict[Any, Any] | str = {}, **kwds, ): """SpecsAnalyzer constructor. Args: metadata (dict, optional): Metadata dictionary. Defaults to {}. - config (Union[dict, str], optional): Metadata dictionary or file path. Defaults to {}. + config (dict | str, optional): Metadata dictionary or file path. Defaults to {}. **kwds: Keyword arguments passed to ``parse_config``. 
""" self._config = parse_config( config, **kwds, ) - self._attributes = MetaHandler(meta=metadata) + self.metadata = metadata self._data_array = None self.print_msg = True try: @@ -62,9 +63,8 @@ def __init__( os.path.join(package_dir, self._config["calib2d_file"]), ) - self._correction_matrix_dict: Dict[Any, Any] = {} + self._correction_matrix_dict: dict[Any, Any] = {} - # pylint: disable=duplicate-code def __repr__(self): if self._config is None: pretty_str = "No configuration available" @@ -81,7 +81,7 @@ def config(self): return self._config @config.setter - def config(self, config: Union[dict, str]): + def config(self, config: dict | str): """Set config""" self._config = parse_config(config) @@ -113,18 +113,12 @@ def convert_image( xr.DataArray: xarray containg the corrected data and kinetic and angle axis """ - apply_fft_filter = kwds.pop( - "apply_fft_filter", - self._config.get("apply_fft_filter", False), - ) + apply_fft_filter = kwds.pop("apply_fft_filter", self._config.get("apply_fft_filter", False)) binning = kwds.pop("binning", self._config.get("binning", 1)) if apply_fft_filter: try: - fft_filter_peaks = kwds.pop( - "fft_filter_peaks", - self._config["fft_filter_peaks"], - ) + fft_filter_peaks = kwds.pop("fft_filter_peaks", self._config["fft_filter_peaks"]) img = fourier_filter_2d(raw_img, fft_filter_peaks) except KeyError: img = raw_img @@ -152,6 +146,7 @@ def convert_image( f"convert_image: unsupported lens mode: '{lens_mode}'", ) + new_matrix = False try: old_db = self._correction_matrix_dict[lens_mode][kinetic_energy][pass_energy][ work_function @@ -164,8 +159,11 @@ def convert_image( jacobian_determinant = old_db["jacobian_determinant"] except KeyError: + new_matrix = True + + if new_matrix or "angle_offset_px" in kwds or "energy_offset_px" in kwds: old_matrix_check = False - ( # pylint: disable=duplicate-code + ( ek_axis, angle_axis, angular_correction_matrix, @@ -178,6 +176,7 @@ def convert_image( work_function, binning, self._config, + **kwds, ) # save the config parameters for later use @@ -325,10 +324,11 @@ def crop_tool( **kwds, ): """Crop tool for selecting cropping parameters + Args: raw_img (np.ndarray): Raw image data, numpy 2d matrix lens_mode (str): analzser lens mode, check calib2d for a list - of modes Camelcase naming convention e.g. "WideAngleMode" + of modes CamelCase naming convention e.g. "WideAngleMode" kinetic_energy (float): set analyser kinetic energy pass_energy (float): set analyser pass energy work_function (float): set analyser work function @@ -336,12 +336,11 @@ def crop_tool( Defaults to False. 
**kwds: Keyword parameters for the crop tool: - -ek_range_min - -ek_range_max - -ang_range_min - -ang_range_max + - ek_range_min + - ek_range_max + - ang_range_min + - ang_range_max """ - data_array = self.convert_image( raw_img=raw_img, lens_mode=lens_mode, @@ -366,64 +365,60 @@ def crop_tool( linev2 = ax.axvline(x=data_array.Ekin[-1]) try: - range_dict = self._correction_matrix_dict[lens_mode][kinetic_energy][pass_energy][ - work_function - ]["crop_params"] - - ek_min = range_dict["ek_min"] - ek_max = range_dict["ek_max"] - ang_min = range_dict["ang_min"] - ang_max = range_dict["ang_max"] - except KeyError: - try: - ang_range_min = ( - kwds["ang_range_min"] - if "ang_range_min" in kwds - else self._config["ang_range_min"] - ) - ang_range_max = ( - kwds["ang_range_max"] - if "ang_range_max" in kwds - else self._config["ang_range_max"] - ) - ek_range_min = ( - kwds["ek_range_min"] if "ek_range_min" in kwds else self._config["ek_range_min"] - ) - ek_range_max = ( - kwds["ek_range_max"] if "ek_range_max" in kwds else self._config["ek_range_max"] - ) - ang_min = ( - ang_range_min - * ( - data_array.coords[data_array.dims[0]][-1] - - data_array.coords[data_array.dims[0]][0] - ) - + data_array.coords[data_array.dims[0]][0] + ang_range_min = ( + kwds["ang_range_min"] if "ang_range_min" in kwds else self._config["ang_range_min"] + ) + ang_range_max = ( + kwds["ang_range_max"] if "ang_range_max" in kwds else self._config["ang_range_max"] + ) + ek_range_min = ( + kwds["ek_range_min"] if "ek_range_min" in kwds else self._config["ek_range_min"] + ) + ek_range_max = ( + kwds["ek_range_max"] if "ek_range_max" in kwds else self._config["ek_range_max"] + ) + ang_min = ( + ang_range_min + * ( + data_array.coords[data_array.dims[0]][-1] + - data_array.coords[data_array.dims[0]][0] ) - ang_max = ( - ang_range_max - * ( - data_array.coords[data_array.dims[0]][-1] - - data_array.coords[data_array.dims[0]][0] - ) - + data_array.coords[data_array.dims[0]][0] + + data_array.coords[data_array.dims[0]][0] + ) + ang_max = ( + ang_range_max + * ( + data_array.coords[data_array.dims[0]][-1] + - data_array.coords[data_array.dims[0]][0] ) - ek_min = ( - ek_range_min - * ( - data_array.coords[data_array.dims[1]][-1] - - data_array.coords[data_array.dims[1]][0] - ) - + data_array.coords[data_array.dims[1]][0] + + data_array.coords[data_array.dims[0]][0] + ) + ek_min = ( + ek_range_min + * ( + data_array.coords[data_array.dims[1]][-1] + - data_array.coords[data_array.dims[1]][0] ) - ek_max = ( - ek_range_max - * ( - data_array.coords[data_array.dims[1]][-1] - - data_array.coords[data_array.dims[1]][0] - ) - + data_array.coords[data_array.dims[1]][0] + + data_array.coords[data_array.dims[1]][0] + ) + ek_max = ( + ek_range_max + * ( + data_array.coords[data_array.dims[1]][-1] + - data_array.coords[data_array.dims[1]][0] ) + + data_array.coords[data_array.dims[1]][0] + ) + except KeyError: + try: + range_dict = self._correction_matrix_dict[lens_mode][kinetic_energy][pass_energy][ + work_function + ]["crop_params"] + + ek_min = range_dict["ek_min"] + ek_max = range_dict["ek_max"] + ang_min = range_dict["ang_min"] + ang_max = range_dict["ang_max"] except KeyError: ek_min = data_array.coords[data_array.dims[1]][0] ek_max = data_array.coords[data_array.dims[1]][-1] @@ -533,7 +528,7 @@ def cropit(val): # pylint: disable=unused-argument def mergedicts( dict1: dict, dict2: dict, -) -> Generator[Tuple[Any, Any], None, None]: +) -> Generator[tuple[Any, Any], None, None]: """Merge two dictionaries, overwriting only existing values and 
retaining previously present values diff --git a/specsanalyzer/img_tools.py b/specsanalyzer/img_tools.py index f86494b..be38091 100755 --- a/specsanalyzer/img_tools.py +++ b/specsanalyzer/img_tools.py @@ -1,8 +1,7 @@ -"""This module contains image manipulation tools for the specsanalyzer package +"""This module contains image manipulation tools for the specsanalyzer package""" +from __future__ import annotations -""" from typing import Sequence -from typing import Union import numpy as np import xarray as xr @@ -10,26 +9,26 @@ def gauss2d( # pylint: disable=invalid-name, too-many-arguments - x: Union[float, np.ndarray], - y: Union[float, np.ndarray], + x: float | np.ndarray, + y: float | np.ndarray, mx: float, my: float, sx: float, sy: float, -) -> Union[float, np.ndarray]: - """Function to calculate a 2-dimensional Gaussian peak function without - correlation, and amplitude 1. +) -> float | np.ndarray: + """Function to calculate a 2-dimensional Gaussian peak function without correlation, and + amplitude 1. Args: - x: independent x-variable - y: independent y-variable - mx: x-center of the 2D Gaussian - my: y-center of the 2D Gaussian - sx: Sigma in y direction - sy: Sigma in x direction + x (float | np.ndarray): independent x-variable + y (float | np.ndarray): independent y-variable + mx (float): x-center of the 2D Gaussian + my (float): y-center of the 2D Gaussian + sx (float): Sigma in x direction + sy (float): Sigma in y direction Returns: - peak intensity at the given (x, y) coordinates. + float | np.ndarray: peak intensity at the given (x, y) coordinates. """ return np.exp( @@ -39,24 +38,26 @@ def gauss2d( def fourier_filter_2d( image: np.ndarray, - peaks: Sequence, + peaks: Sequence[dict], ret: str = "filtered", ) -> np.ndarray: """Function to Fourier filter an image for removal of regular pattern artefacts, e.g. grid lines. Args: - image: the input image - peaks: list of dicts containing the following information about a "peak" in the - Fourier image: - 'pos_x', 'pos_y', sigma_x', sigma_y', 'amplitude'. Define one entry for - each feature you want to suppress in the Fourier image, where amplitude - 1 corresponds to full suppression. - ret: flag to indicate which data to return. Possible values are: - 'filtered', 'fft', 'mask', 'filtered_fft' + image (np.ndarray): the input image + peaks (Sequence[dict]): list of dicts containing the following information about a "peak" + in the Fourier image: + + 'pos_x', 'pos_y', 'sigma_x', 'sigma_y', 'amplitude'. + + Define one entry for each feature you want to suppress in the Fourier image, where + amplitude 1 corresponds to full suppression. + ret (str, optional): flag to indicate which data to return. Possible values are: + 'filtered', 'fft', 'mask', 'filtered_fft'. Defaults to "filtered" Returns: - The chosen image data. Default is the filtered real image. + np.ndarray: The chosen image data. Default is the filtered real image. """ # Do Fourier Transform of the (real-valued) image @@ -109,14 +110,14 @@ def crop_xarray( """Crops an xarray according to the provided coordinate boundaries. Args: - data_array: the input xarray DataArray - x_min: the minimum position along the first element in the x-array dims list. - x_max: the maximum position along the first element in the x-array dims list. - y_min: the minimum position along the second element in the x-array dims list. - y_max: the maximum position along the second element in the x-array dims list.
+ data_array (xr.DataArray): the input xarray DataArray + x_min (float): the minimum position along the first element in the x-array dims list. + x_max (float): the maximum position along the first element in the x-array dims list. + y_min (float): the minimum position along the second element in the x-array dims list. + y_max (float): the maximum position along the second element in the x-array dims list. Returns: - The cropped xarray DataArray. + xr.DataArray: The cropped xarray DataArray. """ x_axis = data_array.coords[data_array.dims[0]] diff --git a/specsanalyzer/io.py b/specsanalyzer/io.py index 3523e2d..a80830e 100755 --- a/specsanalyzer/io.py +++ b/specsanalyzer/io.py @@ -1,12 +1,9 @@ -"""This module contains file input/output functions for the specsanalyzer module +"""This module contains file input/output functions for the specsanalyzer module""" +from __future__ import annotations -""" from pathlib import Path from typing import Any -from typing import Dict -from typing import List from typing import Sequence -from typing import Union import h5py import numpy as np @@ -44,8 +41,8 @@ def recursive_write_metadata(h5group: h5py.Group, node: dict): """Recurses through a python dictionary and writes it into an hdf5 file. Args: - h5group: hdf5 group element where to store the current dict node to. - node: dictionary node to store + h5group (h5py.Group): hdf5 group element where to store the current dict node to. + node (dict): dictionary node to store Raises: Warning: warns if elements have been converted into strings for saving. @@ -84,15 +81,15 @@ def recursive_write_metadata(h5group: h5py.Group, node: dict): def recursive_parse_metadata( - node: Union[h5py.Group, h5py.Dataset], + node: h5py.Group | h5py.Dataset, ) -> dict: """Recurses through an hdf5 file, and parse it into a dictionary. Args: - node: hdf5 group or dataset to parse into dictionary. + node (h5py.Group | h5py.Dataset): hdf5 group or dataset to parse into dictionary. Returns: - dictionary: Dictionary of elements in the hdf5 path contained in node + dict: Dictionary of elements in the hdf5 path contained in node """ if isinstance(node, h5py.Group): dictionary = {} @@ -115,15 +112,13 @@ def to_h5(data: xr.DataArray, faddr: str, mode: str = "w"): """Save xarray formatted data to hdf5 Args: - data: input data + data (xr.DataArray): input data faddr (str): complete file name (including path) mode (str): hdf5 read/write mode Raises: Warning: subfunction warns if elements have been converted into strings for - saving. - - Returns: + saving. """ with h5py.File(faddr, mode) as h5_file: print(f"saving data to {faddr}") @@ -166,11 +161,11 @@ def load_h5(faddr: str, mode: str = "r") -> xr.DataArray: """Read xarray data from formatted hdf5 file Args: - faddr: complete file name (including path) - mode: hdf5 read/write mode + faddr (str): complete file name (including path) + mode (str, optional): hdf5 read/write mode. Defaults to "r" Returns: - xarray: output xarra data + xr.DataArray: output xarray data """ with h5py.File(faddr, mode) as h5_file: # Reading data array @@ -220,26 +215,25 @@ def load_h5(faddr: str, mode: str = "r") -> xr.DataArray: def to_tiff( - data: Union[xr.DataArray, np.ndarray], - faddr: Union[Path, str], + data: xr.DataArray | np.ndarray, + faddr: Path | str, alias_dict: dict = None, -) -> None: +): """Save an array as a .tiff stack compatible with ImageJ Args: - data: data to be saved. If a np.ndarray, the order is retained. If it - is an xarray.DataArray, the order is inferred from axis_dict instead.
- ImageJ likes tiff files with axis order as - TZCYXS. Therefore, best axis order in input should be: Time, Energy, - posY, posX. The channels 'C' and 'S' are automatically added and can - be ignored. - faddr: full path and name of file to save. - alias_dict: name pairs for correct axis ordering. Keys should be any of + data (xr.DataArray | np.ndarray): data to be saved. If a np.ndarray, the order is retained. + If it is an xarray.DataArray, the order is inferred from axis_dict instead. + ImageJ likes tiff files with axis order as TZCYXS. Therefore, best axis order in input + should be: Time, Energy, posY, posX. The channels 'C' and 'S' are automatically added + and can be ignored. + faddr (Path | str): full path and name of file to save. + alias_dict (dict, optional): name pairs for correct axis ordering. Keys should be any of T,Z,C,Y,X,S. The Corresponding value should be a dimension of the xarray or the dimension number if a numpy array. This is used to sort the data in the correct order for imagej standards. If None it tries to guess the order from the name of the axes or assumes T,Z,C,Y,X,S order for numpy arrays. - Defaults to None + Defaults to None. Raise: AttributeError: if more than one axis corresponds to a single dimension @@ -247,7 +241,7 @@ def to_tiff( TypeError: if data is not a np.ndarray or an xarray.DataArray """ - out: Union[np.ndarray, xr.DataArray] = None + out: np.ndarray | xr.DataArray = None if isinstance(data, np.ndarray): # TODO: add sorting by dictionary keys dim_expansions = {2: [0, 1, 2, 5], 3: [0, 2, 5], 4: [2, 5]} @@ -286,14 +280,20 @@ def _sort_dims_for_imagej(dims: list, alias_dict: dict = None) -> list: """Guess the order of the dimensions from the alias dictionary Args: - dims: the list of dimensions to sort + dims (list): the list of dimensions to sort + alias_dict (dict, optional): name pairs for correct axis ordering. Keys should be any of + T,Z,C,Y,X,S. The Corresponding value should be a dimension of the xarray or + the dimension number if a numpy array. This is used to sort the data in the + correct order for imagej standards. If None it tries to guess the order + from the name of the axes or assumes T,Z,C,Y,X,S order for numpy arrays. + Defaults to None. Raises: ValueError: for duplicate entries for a single imagej dimension NameError: when a dimension cannot be found in the alias dictionary Returns: - _description_ + list: List of sorted dimensions """ order = _fill_missing_dims(dims=dims, alias_dict=alias_dict) return [d for d in order if d in dims] @@ -303,14 +303,20 @@ def _fill_missing_dims(dims: list, alias_dict: dict = None) -> list: """Guess the order of the dimensions from the alias dictionary Args: - dims: the list of dimensions to sort + dims (list): the list of dimensions to sort + alias_dict (dict, optional): name pairs for correct axis ordering. Keys should be any of + T,Z,C,Y,X,S. The Corresponding value should be a dimension of the xarray or + the dimension number if a numpy array. This is used to sort the data in the + correct order for imagej standards. If None it tries to guess the order + from the name of the axes or assumes T,Z,C,Y,X,S order for numpy arrays. + Defaults to None. 
Raises: ValueError: for duplicate entries for a single imagej dimension NameError: when a dimension cannot be found in the alias dictionary Returns: - _description_ + list: List of extended dimensions """ order: list = [] # overwrite the default values with the provided dict @@ -345,8 +351,8 @@ def _fill_missing_dims(dims: list, alias_dict: dict = None) -> list: def load_tiff( - faddr: Union[str, Path], - coords: Dict = None, + faddr: str | Path, + coords: dict = None, dims: Sequence = None, attrs: dict = None, ) -> xr.DataArray: @@ -357,16 +363,15 @@ only as np.ndarray Args: - faddr: Path to file to load. - coords: The axes describing the data, following the tiff stack order: - dims: the order of the coordinates provided, considering the data is - ordered as TZCYXS. If None (default) it infers the order from the order - of the coords dictionary. - attrs: dictionary to add as attributes to the xarray.DataArray + faddr (str | Path): Path to file to load. + coords (dict, optional): The axes describing the data, following the tiff stack order: + dims (Sequence, optional): the order of the coordinates provided, considering the data is + ordered as TZCYXS. If None (default) it infers the order from the order + of the coords dictionary. + attrs (dict, optional): dictionary to add as attributes to the xarray.DataArray Returns: - data: an xarray representing the data loaded from the .tiff - file + xr.DataArray: an xarray representing the data loaded from the .tiff file """ data = tifffile.imread(faddr) @@ -397,7 +402,7 @@ def to_nexus( faddr: str, reader: str, definition: str, - input_files: Union[str, Sequence[str]], + input_files: str | Sequence[str], **kwds, ): """Saves the x-array provided to a NeXus file at faddr, using the provided reader, @@ -409,7 +414,7 @@ faddr (str): The file path to save to. reader (str): The name of the NeXus reader to use. definition (str): The NeXus definiton to use. - config_file (str): The file path to the configuration file to use. + input_files (str | Sequence[str]): The file path(s) to the input file(s) to use, e.g. config and ELN files. **kwds: Keyword arguments for ``nexusutils.dataconverter.convert``. """ @@ -428,12 +433,12 @@ ) -def get_pair_from_list(list_line: List[Any]) -> List[Any]: +def get_pair_from_list(list_line: list) -> list: """Returns key value pair for the read function where a line in the file contains '=' character. Args: - list_line: list of splitted line from the file. + list_line (list): list generated by splitting a line from the file. Returns: list: List of a tuple containing key value pair. @@ -458,12 +463,12 @@ return [(k, v)] -def read_calib2d(filepath: str) -> List[Any]: +def read_calib2d(filepath: str) -> list: """Reads the calib2d file into a convenient list for the parser function containing useful and cleaned data. Args: - filepath: Path to file to load. + filepath (str): Path to file to load. Returns: list: List containing dictionary, string and float objects. @@ -471,7 +476,7 @@ with open(filepath, encoding="utf-8") as file: lines = file.readlines() - listf: List[Any] = [] + listf: list[Any] = [] for line in lines: if "# !!!!! Place a valid calib2D file from your Specslab Installation here!"
in line: print( @@ -498,20 +503,19 @@ def read_calib2d(filepath: str) -> List[Any]: return listf -def parse_calib2d_to_dict(filepath: str) -> Dict[Any, Any]: +def parse_calib2d_to_dict(filepath: str) -> dict: """Parses the given calib2d file into a nested dictionary structure to provide parameters for image conversion. Args: - filepath: Path to file to load. + filepath (str): Path to file to load. Returns: - calib_dict: Populated nested dictionary parsed from the provided - calib2d file. + dict: Populated nested dictionary parsed from the provided calib2d file. """ listf = read_calib2d(filepath) - calib_dict: Dict[Any, Any] = {} + calib_dict: dict[Any, Any] = {} mode = None retardation_ratio = None for elem in listf: @@ -539,15 +543,15 @@ def parse_calib2d_to_dict(filepath: str) -> Dict[Any, Any]: return calib_dict -def get_modes_from_calib_dict( - calib_dict: dict, -): +def get_modes_from_calib_dict(calib_dict: dict) -> tuple[list, list]: """create a list of supported modes, divided in spatial and angular modes + Args: calib_dict (dict): the calibration dictionary, created with the io parse_calib2d_to_dict + Returns: - _type_: _description_ + tuple[list, list]: lists of supported angular and spatial lens modes """ key_list = list(calib_dict.keys()) supported_angle_modes = [] diff --git a/specsanalyzer/metadata.py b/specsanalyzer/metadata.py deleted file mode 100755 index d56e97e..0000000 --- a/specsanalyzer/metadata.py +++ /dev/null @@ -1,137 +0,0 @@ -"""This is a metadata handler class from the specsanalyzer package - -""" -from typing import Any -from typing import Dict - - -class MetaHandler: - """[summary]""" - - def __init__(self, meta: Dict = None) -> None: - self._m = meta if meta is not None else {} - - def __getitem__(self, val: Any) -> None: - return self._m[val] - - def __repr__(self) -> str: - # TODO: #35 add pretty print, possibly to HTML - return str(self._m) - - def add(self, v: Dict, duplicate: str = "raise") -> None: - """Add an entry to the metadata container - - Args: - v: dictionary containing the metadata to add. - Must contain a 'name' key. - overwrite: Control behaviour in case the 'name' key - is already present in the metadata dictionary. If raise, raises - a DuplicateEntryError. - If 'overwrite' it overwrites the previous data with the new - one. - If 'append' it adds a trailing number, keeping both entries. 
- - Raises: - DuplicateEntryError: [description] - """ - if v["name"] not in self._m.keys() or duplicate == "overwrite": - self._m[v["name"]] = v - elif duplicate == "raise": - raise DuplicateEntryError( - f"an entry {v['name']} already exists in metadata", - ) - elif duplicate == "append": - i = 0 - while True: - i += 1 - newname = f"name_{i}" - if newname not in self._m.keys(): - break - self._m[newname] = v - - else: - raise ValueError( - f"could not interpret duplication handling method {duplicate}" - f"Please choose between overwrite,append or raise.", - ) - - def add_processing(self, method: str, **kwds: Any) -> None: - """docstring - - Args: - - Returns: - - """ - # TODO: #36 Add processing metadata validation tests - self._m["processing"][method] = kwds - - def from_nexus(self, val: Any) -> None: - """docstring - - Args: - - Returns: - - """ - raise NotImplementedError() - - def to_nexus(self, val: Any) -> None: - """docstring - - Args: - - Returns: - - """ - raise NotImplementedError() - - def from_json(self, val: Any) -> None: - """docstring - - Args: - - Returns: - - """ - raise NotImplementedError() - - def to_json(self, val: Any) -> None: - """docstring - - Args: - - Returns: - - """ - raise NotImplementedError() - - def from_dict(self, val: Any) -> None: - """docstring - - Args: - - Returns: - - """ - raise NotImplementedError() - - def to_dict(self, val: Any) -> None: - """docstring - - Args: - - Returns: - - """ - raise NotImplementedError() - - -class DuplicateEntryError(Exception): - """[summary]""" - - -if __name__ == "__main__": - m = MetaHandler() - m.add({"name": "test", "start": 0, "stop": 1}) - print(m) diff --git a/specsscan/__init__.py b/specsscan/__init__.py index 11d68c9..4dc0f55 100755 --- a/specsscan/__init__.py +++ b/specsscan/__init__.py @@ -1,6 +1,4 @@ -"""SpecsScan class easy access APIs - -""" +"""SpecsScan class easy access APIs""" # Easy access APIs from .core import SpecsScan diff --git a/specsscan/config/default.yaml b/specsscan/config/default.yaml index f245a65..94d2250 100644 --- a/specsscan/config/default.yaml +++ b/specsscan/config/default.yaml @@ -1,8 +1,12 @@ -data_path: "//nap32/topfloor/trARPES/PESData/" +# path to the default data directory +data_path: "" +# option to enable nested progress bars enable_nested_progress_bar: false +# dictionary containing parameters passed to the SpecsAnalyzer. 
Will be completed by the SpecsAnalyzer default config parameters spa_params: apply_fft_filter: false +# dictionary containing units for the respective axes units: Angle: "degree" Ekin: "eV" diff --git a/specsscan/config/example_config_FHI.yaml b/specsscan/config/example_config_FHI.yaml index b71b42b..12405c6 100644 --- a/specsscan/config/example_config_FHI.yaml +++ b/specsscan/config/example_config_FHI.yaml @@ -1,6 +1,9 @@ -data_path: "//mnt/topfloor/trARPES/PESData/" +# path to the default data directory +data_path: "path/to/data" +# option to enable nested progress bars enable_nested_progress_bar: false +# dictionary containing renaming rules for axis names (to change the name in the xarrays) coordinate_mapping: Ekin: "energy" Angle: "angular1" @@ -11,6 +14,7 @@ coordinate_mapping: Y: "spatial1" Z: "spatial1" +# dictionary of corresponding NeXus paths for the different axes coordinate_depends: Ekin: "/entry/instrument/electronanalyser/energydispersion/kinetic_energy" Angle: "/entry/instrument/electronanalyser/transformations/analyzer_dispersion" @@ -21,6 +25,7 @@ coordinate_depends: Y: "/entry/sample/transformations/trans_y" Z: "/entry/sample/transformations/trans_z" +# dictionary containing units for the respective axes units: angular1: "degree" angular2: "degree" @@ -33,6 +38,9 @@ units: Z: "mm" voltage: "V" +# URL of the EPICS archiver request engine +archiver_url: "http://__epicsarchiver_host__:17668/retrieval/data/getData.json?pv=" +# dictionary containing axis names with EPICS channels to request from the EPICS archiver epics_channels: tempa: "trARPES:Carving:TEMP_RBV" x: "trARPES:Carving:TRX.RBV" @@ -44,19 +52,38 @@ epics_channels: drain_current: "trARPES:Sample:Measure" pressure: "trARPES:XGS600:PressureAC:P_RD" +# parameters for NeXus conversion nexus: + # the reader to use reader: "mpes" + # the NeXus definition to use definition: "NXmpes" + # additional input files, e.g. config and ELN files input_files: ["../specsscan/config/NXmpes_arpes_config.json"] +# parameters for the SpecsAnalyzer spa_params: + # path to SPECS calib2d file (provided together with your analyzer) calib2d_file: "../tests/data/phoibos150.calib2d" + # number of pixels along the energy dispersing direction nx_pixel: 1376 + # number of pixels along the angle/spatially dispersing direction ny_pixel: 1024 + # pixel size in millimeters pixel_size: 0.00645 + # binning factor applied to the image + binning: 4 + # magnification of the lens system used for imaging the detector magnification: 4.54 + # option for cropping the resulting xarrays crop: false + # option to apply Fourier filtering apply_fft_filter: true + # dictionary containing definitions for Gaussian Fourier peaks to subtract.
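+ # (these peaks are consumed by fourier_filter_2d in specsanalyzer.img_tools when apply_fft_filter is true)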
+ # each entry defines: + # amplitude: the normalized peak amplitude + # pos_x/pos_y: the peak position in Fourier plane pixels + # sigma_x/sigma_y: the peak width (standard deviation) along each direction fft_filter_peaks: - amplitude: 1 pos_x: 79 diff --git a/specsscan/core.py b/specsscan/core.py index 5198705..70db6a3 100755 --- a/specsscan/core.py +++ b/specsscan/core.py @@ -1,6 +1,6 @@ -"""This is the SpecsScan core class +"""This is the SpecsScan core class""" +from __future__ import annotations -""" import copy import os import pathlib @@ -8,10 +8,7 @@ from logging import warn from pathlib import Path from typing import Any -from typing import Dict -from typing import List from typing import Sequence -from typing import Union import matplotlib import numpy as np @@ -23,7 +20,6 @@ from specsanalyzer.io import to_h5 from specsanalyzer.io import to_nexus from specsanalyzer.io import to_tiff -from specsscan.helpers import find_scan from specsscan.helpers import get_coords from specsscan.helpers import get_scan_path from specsscan.helpers import handle_meta @@ -38,15 +34,21 @@ class SpecsScan: """SpecsAnalyzer class for loading scans and data from SPECS phoibos electron analyzers, generated with the ARPESControl software at Fritz Haber Institute, Berlin, and EPFL, Lausanne. + + Args: + metadata (dict, optional): Metadata dictionary. Defaults to {}. + config (dict | str, optional): Config dictionary or file path. Defaults to {}. + **kwds: Keyword arguments passed to ``parse_config``. """ - def __init__( # pylint: disable=dangerous-default-value + def __init__( self, metadata: dict = {}, - config: Union[dict, str] = {}, + config: dict | str = {}, **kwds, ): """SpecsScan constructor. + Args: metadata (dict, optional): Metadata dictionary. Defaults to {}. config (Union[dict, str], optional): Metadata dictionary or file path. Defaults to {}. **kwds: Keyword arguments passed to ``parse_config``. @@ -61,7 +63,7 @@ def __init__( # pylint: disable=dangerous-default-value # self.metadata = MetaHandler(meta=metadata) self.metadata = metadata - self._scan_info: Dict[Any, Any] = {} + self._scan_info: dict[Any, Any] = {} try: self.spa = SpecsAnalyzer( @@ -96,7 +98,7 @@ def config(self): return self._config @config.setter - def config(self, config: Union[dict, str]): + def config(self, config: dict | str): """Set config""" self._config = parse_config( config, @@ -115,35 +117,33 @@ def result(self): def load_scan( self, scan: int, - path: Union[str, Path] = "", - iterations: Union[np.ndarray, slice, Sequence[int], Sequence[slice]] = None, - metadata: dict = None, + path: str | Path = "", + iterations: np.ndarray | slice | Sequence[int] | Sequence[slice] = None, + metadata: dict = {}, + collect_metadata: bool = False, **kwds, ) -> xr.DataArray: - """Load scan with given scan number. When iterations is - given, average is performed over the iterations over - all delays. + """Load scan with given scan number. When iterations is given, average is performed over + the iterations over all delays. Args: - scan: The scan number of interest - path: Either a string of the path to the folder - containing the scan or a Path object - iterations: A 1-D array of the number of iterations over - which the images are to be averaged. The array - can be a list, numpy array or a Tuple consisting of - slice objects and integers. For ex., - np.s_[1:10, 15, -1] would be a valid input for - iterations. - metadata (dict, optional): Metadata dictionary with additional metadata for the scan - **kwds: Additional arguments for the SpecsAnalyzer converter.
For ex., passing - crop=True crops the data if cropping data is already present in the given instance. - Raises: - FileNotFoundError, IndexError + scan (int): The scan number of interest + path (str | Path, optional): Either a string of the path to the folder containing the + scan or a Path object. Defaults to "". + iterations (np.ndarray | slice | Sequence[int] | Sequence[slice], optional): + A 1-D array of the number of iterations over which the images are to be averaged. + The array can be a list, numpy array or a Tuple consisting of slice objects and + integers. For ex., ``np.s_[1:10, 15, -1]`` would be a valid input for iterations. + Defaults to None. + metadata (dict, optional): Metadata dictionary with additional metadata for the scan. + Defaults to empty dictionary. + collect_metadata (bool, optional): Option to collect further metadata e.g. from EPICS + archiver needed for NeXus conversion. Defaults to False. + **kwds: Additional arguments passed to ``SpecsAnalyzer.convert()``. Returns: - xres: xarray DataArray object with kinetic energy, angle/position - and optionally a third scanned axis (for ex., delay, temperature) - as coordinates. + xr.DataArray: xarray DataArray object with kinetic energy, angle/position and + optionally a third scanned axis (for ex., delay, temperature) as coordinates. """ scan_path = get_scan_path(path, scan, self._config["data_path"]) df_lut = parse_lut_to_df(scan_path) @@ -265,26 +265,31 @@ self._scan_info, self.config, dim, + metadata=copy.deepcopy(metadata), + collect_metadata=collect_metadata, ), **{"loader": loader_dict}, ) - if metadata is not None: - self.metadata.update(**metadata) res_xarray.attrs["metadata"] = self.metadata self._result = res_xarray return res_xarray - def crop_tool(self, scan: int = None, path: Union[Path, str] = "", **kwds): - """ - Croping tool interface to crop_tool method - of the SpecsAnalyzer class. + def crop_tool(self, scan: int = None, path: Path | str = "", **kwds): + """Cropping tool interface to the crop_tool method of the SpecsAnalyzer class. + + Args: + scan (int, optional): Scan number to load data from. Defaults to None. + path (Path | str, optional): Path in which to search for the scan. + Defaults to config['data_path']. + + Raises: + ValueError: Raised if no image is loaded and no scan number is provided """ matplotlib.use("module://ipympl.backend_nbagg") if scan is not None: scan_path = get_scan_path(path, scan, self._config["data_path"]) - df_lut = parse_lut_to_df(scan_path) data = load_images( scan_path=scan_path, @@ -310,31 +315,37 @@ def crop_tool(self, scan: int = None, path: Union[Path, str] = "", **kwds): def check_scan( self, scan: int, - delays: Union[Sequence[int], int], - path: Union[str, Path] = "", - metadata: dict = None, + delays: Sequence[int] | int, + path: str | Path = "", + metadata: dict = {}, + collect_metadata: bool = False, **kwds, ) -> xr.DataArray: - """Function to explore a given 3-D scan as a function - of iterations for a given range of delays + """Function to explore a given 3-D scan as a function of iterations for a given range of + delays. + Args: - scan: The scan number of interest - delay: A single delay index or a range of delay indices + scan (int): The scan number of interest + delays (Sequence[int] | int): A single delay index or a sequence of delay indices to be averaged over.
- path: Either a string of the path to the folder - containing the scan or a Path object - metadata (dict, optional): Metadata dictionary with additional metadata for the scan - **kwds: Additional arguments for the SpecsAnalyzer converter. For ex., passing - crop=True crops the data if cropping data is already present in the given instance. + path (str | Path, optional): Either a string of the path to the folder containing the + scan or a Path object. Defaults to config['data_path']. + metadata (dict, optional): Metadata dictionary with additional metadata for the scan. + Defaults to empty dictionary. + collect_metadata (bool, optional): Option to collect further metadata e.g. from EPICS + archiver needed for NeXus conversion. Defaults to False. + **kwds: Keyword arguments passed to ``SpecsAnalyzer.convert()``. + Raises: - FileNotFoundError + ValueError: Raised if a single image scan was selected + Returns: - A 3-D numpy array of dimensions (Ekin, K, Iterations) + xr.DataArray: 3-D xarray of dimensions (Ekin, Angle, Iterations) """ scan_path = get_scan_path(path, scan, self._config["data_path"]) df_lut = parse_lut_to_df(scan_path) - data, df_lut = load_images( + data = load_images( scan_path=scan_path, df_lut=df_lut, delays=delays, @@ -410,6 +421,8 @@ def check_scan( self._scan_info, self.config, dims[1], + metadata=metadata, + collect_metadata=collect_metadata, ), **{"loader": loader_dict}, ) @@ -507,7 +520,7 @@ def save( def process_sweep_scan( self, - raw_data: List[np.ndarray], + raw_data: list[np.ndarray], kinetic_energy: np.ndarray, pass_energy: float, lens_mode: str, @@ -518,7 +531,7 @@ step, and summing over all frames. Args: - raw_data (List[np.ndarray]): List of raw data images + raw_data (list[np.ndarray]): List of raw data images kinetic_energy (np.ndarray): Array of analyzer set kinetic energy values pass_energy (float): set analyser pass energy lens_mode (str): analzser lens mode, check calib2d for a list of modes CamelCase naming diff --git a/specsscan/helpers.py b/specsscan/helpers.py index e1e1e87..c1f342d 100644 --- a/specsscan/helpers.py +++ b/specsscan/helpers.py @@ -1,13 +1,11 @@ """This script contains helper functions used by the specscan class""" +from __future__ import annotations + import datetime as dt import json from pathlib import Path from typing import Any -from typing import Dict -from typing import List from typing import Sequence -from typing import Tuple -from typing import Union from urllib.error import HTTPError from urllib.error import URLError from urllib.request import urlopen @@ -16,16 +14,16 @@ import pandas as pd from tqdm.auto import tqdm -from specsanalyzer.config import complete_dictionary # name can be generalized +from specsanalyzer.config import complete_dictionary -def get_scan_path(path: Union[Path, str], scan: int, basepath: Union[Path, str]) -> Path: +def get_scan_path(path: Path | str, scan: int, basepath: Path | str) -> Path: """Returns the path to the given scan. Args: - path (Union[Path, str]): Path under which to search. If empty, the basepath will be queried + path (Path | str): Path under which to search. If empty, the basepath will be queried scan (int): Scan number - basepath (Union[Path, str]): Default base path to search for scans under + basepath (Path | str): Default base path to search for scans under Raises: FileNotFoundError: Raised if the path or scan cannot be found.
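For orientation, a minimal sketch of how the reworked loading API is driven (the config file and data path are illustrative; the scan number mirrors the test suite):

    import numpy as np
    from specsscan import SpecsScan

    sps = SpecsScan(config="config.yaml")  # illustrative config file
    # iterations accepts lists, arrays, or np.s_ slice tuples, e.g. np.s_[1:10, 15, -1]
    res_xarray = sps.load_scan(
        scan=4450,            # example scan number used in test_specsscan.py
        path="path/to/data",  # illustrative data folder
        iterations=np.s_[0:2],
        collect_metadata=False,
    )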
@@ -42,7 +40,6 @@ def get_scan_path(path: Union[Path, str], scan: int, basepath: Union[Path, str]) else: # search for the given scan using the default path path = Path(basepath) - # path_scan = sorted(path.glob(f"20[1,2][9,0-9]/*/*/Raw Data/{scan}")) path_scan_list = find_scan(path, scan) if not path_scan_list: raise FileNotFoundError( @@ -55,42 +52,28 @@ def get_scan_path(path: Union[Path, str], scan: int, basepath: Union[Path, str]) def load_images( scan_path: Path, - df_lut: Union[pd.DataFrame, None] = None, - iterations: Union[ - np.ndarray, - slice, - Sequence[int], - Sequence[slice], - ] = None, - delays: Union[ - np.ndarray, - slice, - int, - Sequence[int], - Sequence[slice], - ] = None, + df_lut: pd.DataFrame = None, + iterations: np.ndarray | slice | Sequence[int] | Sequence[slice] = None, + delays: np.ndarray | slice | int | Sequence[int] | Sequence[slice] = None, tqdm_enable_nested: bool = False, -) -> List[np.ndarray]: - """Loads a 2D/3D numpy array of images for the given - scan path with an optional averaging - over the given iterations/delays. The function provides - functionality to both load_scan and check_scan methods of - the SpecsScan class. When iterations/delays is provided, - average is performed over the iterations/delays for all - delays/iterations. +) -> list[np.ndarray]: + """Loads a 2D/3D numpy array of images for the given scan path with an optional averaging + over the given iterations/delays. The function provides functionality to both load_scan + and check_scan methods of the SpecsScan class. When iterations/delays is provided, + average is performed over the iterations/delays for all delays/iterations. Args: scan_path (Path): object of class Path pointing to the scan folder - df_lut (Union[pd.DataFrame, None], optional): Pandas dataframe containing the contents - of LUT.txt as obtained from parse_lut_to_df(). Defaults to None. - iterations (Union[ np.ndarray, slice, Sequence[int], Sequence[slice], ], optional): A 1-D + df_lut (pd.DataFrame, optional): Pandas dataframe containing the contents of LUT.txt as + obtained from parse_lut_to_df(). Defaults to None. + iterations (np.ndarray | slice | Sequence[int] | Sequence[slice], optional): A 1-D array of the indices of iterations over which the images are to be averaged. The array can be a list, numpy array or a Tuple consisting of slice objects and integers. For - ex., np.s_[1:10, 15, -1] would be a valid input. Defaults to None. - delays (Union[ np.ndarray, slice, int, Sequence[int], Sequence[slice], ], optional): A 1-D + ex., ``np.s_[1:10, 15, -1]`` would be a valid input. Defaults to None. + delays (np.ndarray | slice | int | Sequence[int] | Sequence[slice], optional): A 1-D array of the indices of delays over which the images are to be averaged. The array can be a list, numpy array or a Tuple consisting of slice objects and integers. For ex., - np.s_[1:10, 15, -1] would be a valid input. Defaults to None. + ``np.s_[1:10, 15, -1]`` would be a valid input. Defaults to None. tqdm_enable_nested (bool, optional): Option to enable a nested progress bar. Defaults to False. @@ -99,7 +82,7 @@ def load_images( IndexError: Raised if no valid dimension for averaging is found. 
Returns: - List[np.ndarray]: A list of 2-D numpy arrays of raw data + list[np.ndarray]: A list of 2-D numpy arrays of raw data """ scan_list = sorted( file.stem for file in scan_path.joinpath("AVG").iterdir() if file.suffix == ".tsv" @@ -175,18 +158,15 @@ return data -def get_raw2d( - scan_list: List[str], - raw_array: np.ndarray, -) -> np.ndarray: - """Converts a 1-D array of raw scan names - into 2-D based on the number of iterations +def get_raw2d(scan_list: list[str], raw_array: np.ndarray) -> np.ndarray: + """Converts a 1-D array of raw scan names into 2-D based on the number of iterations + Args: - scan_list: A list of AVG scan names. - raw_list: 1-D array of RAW scan names. + scan_list (list[str]): A list of AVG scan names. + raw_array (np.ndarray): 1-D array of RAW scan names. + Returns: - raw_2d: 2-D numpy array of size for ex., - (total_iterations, delays) for a delay scan. + np.ndarray: 2-D numpy array of shape, e.g., (total_iterations, delays) for a delay scan. """ total_iterations = len( @@ -219,12 +199,14 @@ return raw_2d -def parse_lut_to_df(scan_path: Path) -> Union[pd.DataFrame, None]: - """Loads the contents of LUT.txt file into a pandas - data frame to be used as metadata. +def parse_lut_to_df(scan_path: Path) -> pd.DataFrame: + """Loads the contents of LUT.txt file into a pandas data frame to be used as metadata. + Args: - scan_path: Path object for the scan path - Returns: A pandas DataFrame + scan_path (Path): Path object for the scan path + + Returns: + pd.DataFrame: A pandas DataFrame """ try: df_lut = pd.read_csv(scan_path.joinpath("RAW/LUT.txt"), sep="\t") @@ -249,44 +231,35 @@ def get_coords( scan_path: Path, scan_type: str, - scan_info: Dict[Any, Any], - df_lut: Union[pd.DataFrame, None] = None, -) -> Tuple[np.ndarray, str]: - """Reads the contents of scanvector.txt file - into a numpy array. + scan_info: dict[Any, Any], + df_lut: pd.DataFrame = None, +) -> tuple[np.ndarray, str]: + """Reads the contents of scanvector.txt file into a numpy array. + Args: - scan_path: Path object for the scan path - scan_type: Type of scan (delay, mirror etc.) - scan_info: scan_info class dict - df_lut: df_lut: Pandas dataframe containing - the contents of LUT.txt as obtained - from parse_lut_to_df() + scan_path (Path): Path object for the scan path + scan_type (str): Type of scan (delay, mirror etc.) + scan_info (dict[Any, Any]): scan_info class dict + df_lut (pd.DataFrame, optional): Pandas dataframe containing the contents of LUT.txt as + obtained from parse_lut_to_df(). Defaults to None. + Raises: - FileNotFoundError + FileNotFoundError: Raised if neither scanvector.txt nor LUT.txt is found. + Returns: - coords: 1-D numpy array containing coordinates - of the scanned axis. - dim: string containing the name of the coordinate + tuple[np.ndarray, str]: + - coords: 1-D numpy array containing coordinates of the scanned axis.
+ - dim: string containing the name of the coordinate """ try: - with open( - scan_path.joinpath("scanvector.txt"), - encoding="utf-8", - ) as file: + with open(scan_path.joinpath("scanvector.txt"), encoding="utf-8") as file: data = np.loadtxt(file, ndmin=2) coords, index = compare_coords(data) if scan_type == "mirror": dim = ["mirrorX", "mirrorY"][index] elif scan_type == "manipulator": - dim = [ - "X", - "Y", - "Z", - "polar", - "tilt", - "azimuth", - ][index] + dim = ["X", "Y", "Z", "polar", "tilt", "azimuth"][index] else: dim = scan_type @@ -295,9 +268,7 @@ return (np.array([]), "") if df_lut is not None: - print( - "scanvector.txt not found. Obtaining coordinates from LUT", - ) + print("scanvector.txt not found. Obtaining coordinates from LUT") df_new: pd.DataFrame = df_lut.loc[:, df_lut.columns[2:]] @@ -305,28 +276,26 @@ dim = df_new.columns[index] else: - raise FileNotFoundError( - "scanvector.txt file not found!", - ) from exc + raise FileNotFoundError("scanvector.txt file not found!") from exc if scan_type == "delay": t_0 = scan_info["TimeZero"] coords -= t_0 - coords *= 2 / (3 * 10**11) * 10**15 + coords *= 2 / 3e11 * 1e15 return coords, dim -def compare_coords( - axis_data: np.ndarray, -) -> Tuple[np.ndarray, int]: - """To check the most changing column in a given - 2-D numpy array. +def compare_coords(axis_data: np.ndarray) -> tuple[np.ndarray, int]: + """Identifies the most changing column in a given 2-D numpy array. + Args: - axis_data: 2-D numpy array containing LUT data + axis_data (np.ndarray): 2-D numpy array containing LUT data + Returns: - coords: Maximum changing column as a coordinate - index: Index of the coords in the axis_data array + tuple[np.ndarray, int]: + - coords: Maximum changing column as a coordinate + - index: Index of the coords in the axis_data array """ diff_list = [abs(col[-1] - col[0]) for col in axis_data.T] @@ -340,15 +309,16 @@ return coords, index -def parse_info_to_dict(path: Path) -> Dict: - """Parses the contents of info.txt file - into a dictionary +def parse_info_to_dict(path: Path) -> dict: + """Parses the contents of info.txt file into a dictionary + Args: - path: Path object pointing to the scan folder + path (Path): Path object pointing to the scan folder + Returns: - info_dict: Parsed dictionary + dict: Parsed info_dict dictionary """ - info_dict: Dict[Any, Any] = {} + info_dict: dict[Any, Any] = {} try: with open(path.joinpath("info.txt"), encoding="utf-8") as info_file: for line in info_file.readlines(): @@ -374,26 +344,36 @@ return info_dict -def handle_meta( # pylint:disable=too-many-branches +def handle_meta( df_lut: pd.DataFrame, scan_info: dict, config: dict, dim: str, + metadata: dict = None, + collect_metadata: bool = False, ) -> dict: """Helper function for the handling metadata from different files + Args: - df_lut: Pandas dataframe containing - the contents of LUT.txt as obtained - from parse_lut_to_df() - scan_info: scan_info class dict containing - containing the contents of info.txt file - config: config dictionary containing the contents - of config.yaml file + df_lut (pd.DataFrame): Pandas dataframe containing the contents of LUT.txt as obtained + from ``parse_lut_to_df()`` + scan_info (dict): scan_info class dict containing the contents of info.txt file + config (dict): config dictionary containing the contents of config.yaml file + dim (str): The slow-axis dimension of the scan + metadata (dict, optional):
Metadata dictionary with additional metadata for the scan. + Defaults to None. + collect_metadata (bool, optional): Option to collect further metadata e.g. from EPICS + archiver needed for NeXus conversion. Defaults to False. + Returns: - metadata_dict: metadata dictionary containing additional metadata - from the EPICS archive. + dict: metadata dictionary containing additional metadata from the EPICS + archive. """ + if metadata is None: + metadata = {} + + print("Gathering metadata from different locations") # get metadata from LUT dataframe lut_meta = {} energy_scan_mode = "fixed" @@ -409,136 +389,143 @@ if len(set(kinetic_energy)) > 1 and scan_info["ScanType"] == "voltage": energy_scan_mode = "sweep" - scan_meta = complete_dictionary(lut_meta, scan_info) # merging two dictionaries + metadata["scan_info"] = complete_dictionary( + metadata.get("scan_info", {}), + complete_dictionary(lut_meta, scan_info), + ) # merging dictionaries - # Get metadata from Epics archive, if not present already - print("Collecting data from the EPICS archive...") - metadata_dict = get_archive_meta( - scan_meta, - config, - ) + print("Collecting time stamps...") + if "time" in metadata["scan_info"]: + time_list = [metadata["scan_info"]["time"][0], metadata["scan_info"]["time"][-1]] + elif "StartTime" in metadata["scan_info"]: + time_list = [metadata["scan_info"]["StartTime"]] + else: + raise ValueError("Could not find timestamps in scan info.") - metadata_dict["scan_info"]["energy_scan_mode"] = energy_scan_mode + dt_list_iso = [time.replace(".", "-").replace(" ", "T") for time in time_list] + datetime_list = [dt.datetime.fromisoformat(dt_iso) for dt_iso in dt_list_iso] + ts_from = dt.datetime.timestamp(datetime_list[0]) # POSIX timestamp + ts_to = dt.datetime.timestamp(datetime_list[-1]) # POSIX timestamp + metadata["timing"] = { + "acquisition_start": dt.datetime.utcfromtimestamp(ts_from) + .replace(tzinfo=dt.timezone.utc) + .isoformat(), + "acquisition_stop": dt.datetime.utcfromtimestamp(ts_to) + .replace(tzinfo=dt.timezone.utc) + .isoformat(), + "acquisition_duration": int(ts_to - ts_from), + "collection_time": float(ts_to - ts_from), + } + + if collect_metadata: + # Get metadata from Epics archive if not present already + start = dt.datetime.utcfromtimestamp(ts_from).isoformat() + + # replace metadata names by epics channels + try: + replace_dict = config["epics_channels"] + for key in list(metadata["scan_info"]): + if key.lower() in replace_dict: + metadata["scan_info"][replace_dict[key.lower()]] = metadata["scan_info"][key] + metadata["scan_info"].pop(key) + epics_channels = replace_dict.values() + except KeyError: + epics_channels = [] + + channels_missing = set(epics_channels) - set(metadata["scan_info"].keys()) + if channels_missing: + print("Collecting data from the EPICS archive...") + for channel in channels_missing: + try: + _, vals = get_archiver_data( + archiver_url=config.get("archiver_url"), + archiver_channel=channel, + ts_from=ts_from, + ts_to=ts_to, + ) + metadata["scan_info"][f"{channel}"] = np.mean(vals) + + except IndexError: + metadata["scan_info"][f"{channel}"] = np.nan + print( + f"Data for channel {channel} doesn't exist for time {start}", + ) + except HTTPError as exc: + print( + f"Incorrect URL for the archive channel {channel}. 
" + "Make sure that the channel name and file start and end times are " + "correct.", + ) + print("Error code: ", exc) + except URLError as exc: + print( + f"Cannot access the archive URL for channel {channel}. " + f"Make sure that you are within the FHI network." + f"Skipping over channels {channels_missing}.", + ) + print("Error code: ", exc) + break + + metadata["scan_info"]["energy_scan_mode"] = energy_scan_mode lens_modes_all = { "real": config["spa_params"]["calib2d_dict"]["supported_space_modes"], "reciprocal": config["spa_params"]["calib2d_dict"]["supported_angle_modes"], } - lens_mode = scan_meta["LensMode"] + lens_mode = metadata["scan_info"]["LensMode"] for projection, mode_list in lens_modes_all.items(): if lens_mode in mode_list: - metadata_dict["scan_info"]["projection"] = projection + metadata["scan_info"]["projection"] = projection fast = "Angle" if projection == "reciprocal" else "Position" - metadata_dict["scan_info"]["scheme"] = ( + metadata["scan_info"]["scheme"] = ( "angular dispersive" if projection == "reciprocal" else "spatial dispersive" ) - metadata_dict["scan_info"]["slow_axes"] = dim - metadata_dict["scan_info"]["fast_axes"] = [ - "Ekin", - fast, - ] + metadata["scan_info"]["slow_axes"] = dim + metadata["scan_info"]["fast_axes"] = ["Ekin", fast] print("Done!") - return metadata_dict + return metadata -def get_archive_meta( - scan_meta: dict, - config: dict, -): - """ - Function to collect the EPICS archive metadata - for the handle_meta function. - """ +def get_archiver_data( + archiver_url: str, + archiver_channel: str, + ts_from: float, + ts_to: float, +) -> tuple[np.ndarray, np.ndarray]: + """Extract time stamps and corresponding data from and EPICS archiver instance - metadata_dict = {} - if "time" in scan_meta: - time_list = [scan_meta["time"][0], scan_meta["time"][-1]] - elif "StartTime" in scan_meta: - time_list = [scan_meta["StartTime"]] - else: - raise ValueError("Could not find timestamps in scan info.") + Args: + archiver_url (str): URL of the archiver data extraction interface + archiver_channel (str): EPICS channel to extract data for + ts_from (float): starting time stamp of the range of interest + ts_to (float): ending time stamp of the range of interest - dt_list_iso = [time.replace(".", "-").replace(" ", "T") for time in time_list] - datetime_list = [dt.datetime.fromisoformat(dt_iso) for dt_iso in dt_list_iso] - ts_from = dt.datetime.timestamp(datetime_list[0]) # POSIX timestamp - ts_to = dt.datetime.timestamp(datetime_list[-1]) # POSIX timestamp - metadata_dict["timing"] = { - "acquisition_start": dt.datetime.utcfromtimestamp(ts_from) - .replace( - tzinfo=dt.timezone.utc, - ) - .isoformat(), - "acquisition_stop": dt.datetime.utcfromtimestamp(ts_to) - .replace( - tzinfo=dt.timezone.utc, - ) - .isoformat(), - "acquisition_duration": int(ts_to - ts_from), - "collection_time": float(ts_to - ts_from), - } - filestart = dt.datetime.utcfromtimestamp(ts_from).isoformat() # Epics time in UTC? 
- fileend = dt.datetime.utcfromtimestamp(ts_to).isoformat() + Returns: + tuple[np.ndarray, np.ndarray]: The extracted time stamps and corresponding data + """ + iso_from = dt.datetime.utcfromtimestamp(ts_from).isoformat() + iso_to = dt.datetime.utcfromtimestamp(ts_to).isoformat() + req_str = archiver_url + archiver_channel + "&from=" + iso_from + "Z&to=" + iso_to + "Z" + with urlopen(req_str) as req: + data = json.load(req) + secs = [x["secs"] + x["nanos"] * 1e-9 for x in data[0]["data"]] + vals = [x["val"] for x in data[0]["data"]] - try: - replace_dict = config["epics_channels"] - for key in list(scan_meta): - if key.lower() in replace_dict: - scan_meta[replace_dict[key.lower()]] = scan_meta[key] - scan_meta.pop(key) - epics_channels = replace_dict.values() - except KeyError: - epics_channels = [] - print("No EPICS archive channels provided in the config") - metadata_dict["scan_info"] = scan_meta - - channels_missing = set(epics_channels) - set(scan_meta.keys()) - for channel in channels_missing: - try: - req_str = ( - "http://aa0.fhi-berlin.mpg.de:17668/retrieval/" - + "data/getData.json?pv=" - + channel - + "&from=" - + filestart - + "Z&to=" - + fileend - + "Z" - ) - with urlopen(req_str) as req: - data = json.load(req) - vals = [x["val"] for x in data[0]["data"]] - metadata_dict["scan_info"][f"{channel}"] = sum(vals) / len(vals) - except (IndexError, ZeroDivisionError): - metadata_dict["scan_info"][f"{channel}"] = np.nan - print(f"Data for channel {channel} doesn't exist for time {filestart}") - except HTTPError as error: - print( - f"Incorrect URL for the archive channel {channel}. " - "Make sure that the channel name, file start " - "and end times are correct.", - ) - print("Error code: ", error) - except URLError as error: - print( - f"Cannot access the archive URL for channel {channel}. " - f"Make sure that you are within the FHI network."
- f"Skipping over channels {channels_missing}.", - ) - print("Error code: ", error) - break - return metadata_dict + return (np.asarray(secs), np.asarray(vals)) -def find_scan(path: Path, scan: int) -> List[Path]: +def find_scan(path: Path, scan: int) -> list[Path]: """Search function to locate the scan folder + Args: - path: Path object for data from the default config file - scan: Scan number of the scan of interest + path (Path): Path object for data from the default config file + scan (int): Scan number of the scan of interest + Returns: - scan_path: Path object pointing to the scan folder + List[Path]: scan_path: Path object pointing to the scan folder """ print("Scan path not provided, searching directories...") for file in path.iterdir(): @@ -561,20 +548,18 @@ def find_scan(path: Path, scan: int) -> List[Path]: return scan_path -def find_scan_type( # pylint:disable=too-many-nested-blocks +def find_scan_type( path: Path, scan_type: str, -) -> None: +): """Rudimentary function to print scan paths given the scan_type + Args: - path: Path object pointing to the year, for ex., + path (Path): Path object pointing to the year, for ex., Path("//nap32/topfloor/trARPES/PESData/2020") - scan_type: string containing the scan_type from the list + scan_type (str): string containing the scan_type from the list ["delay","temperature","manipulator","mirror","single"] - Returns: - None """ - for month in path.iterdir(): if month.is_dir(): for day in month.iterdir(): diff --git a/tests/data b/tests/data index 594c376..9a11106 160000 --- a/tests/data +++ b/tests/data @@ -1 +1 @@ -Subproject commit 594c37660eb089d1b5fb205db63bd4a238dc7845 +Subproject commit 9a11106b902dbf1089de50813eae649ccf6e3a83 diff --git a/tests/helpers.py b/tests/helpers.py index e817703..ae33e27 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -1,5 +1,4 @@ -"""Helper functions for tests -""" +"""Helper functions for tests""" import numpy as np import xarray as xr diff --git a/tests/test_config.py b/tests/test_config.py index ea7ae3a..0517a33 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,6 +1,4 @@ -"""This is a code that performs several tests for the settings loader. 
- -""" +"""This is a code that performs several tests for the settings loader.""" import os import tempfile from importlib.util import find_spec diff --git a/tests/test_convert.py b/tests/test_convert.py index bb7e61e..0c0959b 100755 --- a/tests/test_convert.py +++ b/tests/test_convert.py @@ -1,16 +1,16 @@ -# pylint: disable=duplicate-code -"""This is a code that performs several tests for the convert functions -""" +"""This is a code that performs several tests for the convert functions""" import os import numpy as np +import specsanalyzer from specsanalyzer import SpecsAnalyzer from specsanalyzer.convert import calculate_matrix_correction from specsanalyzer.convert import calculate_polynomial_coef_da from specsanalyzer.convert import get_damatrix_fromcalib2d -test_dir = os.path.dirname(__file__) +package_dir = os.path.dirname(specsanalyzer.__file__) +test_dir = package_dir + "/../tests/data/" # from specsanalyzer.convert import get_rr_da # from specsanalyzer.convert import mcp_position_mm @@ -23,7 +23,7 @@ def test_da_matrix(): # pylint: disable=too-many-locals ######################################## # Load the IGOR txt Di_coeff values for comparison - igor_data_path = os.fspath(f"{test_dir}/data/dataEPFL/R9132") + igor_data_path = os.fspath(f"{test_dir}/dataEPFL/R9132") # get the Da coefficients di_file_list = [f"{igor_data_path}/Da{i}_value.tsv" for i in np.arange(1, 8, 2)] @@ -41,7 +41,7 @@ def test_da_matrix(): # pylint: disable=too-many-locals igor_d_coef_list.append(np.loadtxt(f_handle, delimiter="\t")) igor_d_coef_matrix = np.flip(np.vstack(igor_d_coef_list), axis=1) - config_path = os.fspath(f"{test_dir}/data/dataEPFL/config/config.yaml") + config_path = os.fspath(f"{test_dir}/dataEPFL/config/config.yaml") spa = SpecsAnalyzer(config=config_path) config_dict = spa.config lens_mode = "WideAngleMode" @@ -74,8 +74,8 @@ def test_conversion_matrix(): # pylint:disable=too-many-locals """Check the consistency of the conversion matrix with the Igor calculations. 
""" - igor_data_path = os.fspath(f"{test_dir}/data/dataEPFL/R9132") - config_path = os.fspath(f"{test_dir}/data/dataEPFL/config/config.yaml") + igor_data_path = os.fspath(f"{test_dir}/dataEPFL/R9132") + config_path = os.fspath(f"{test_dir}/dataEPFL/config/config.yaml") spa = SpecsAnalyzer(config=config_path) config_dict = spa.config lens_mode = "WideAngleMode" @@ -161,19 +161,19 @@ def test_conversion(): # get the raw data raw_image_name = os.fspath( - f"{test_dir}/data/dataEPFL/R9132/Data9132_RAWDATA.tsv", + f"{test_dir}/dataEPFL/R9132/Data9132_RAWDATA.tsv", ) with open(raw_image_name, encoding="utf-8") as file: tsv_data = np.loadtxt(file, delimiter="\t") # get the reference data reference_image_name = os.fspath( - f"{test_dir}/data/dataEPFL/R9132/Data9132_IGOR_corrected.tsv", + f"{test_dir}/dataEPFL/R9132/Data9132_IGOR_corrected.tsv", ) with open(reference_image_name, encoding="utf-8") as file: reference = np.loadtxt(file, delimiter="\t") - config_path = os.fspath(f"{test_dir}/data/dataEPFL/config/config.yaml") + config_path = os.fspath(f"{test_dir}/dataEPFL/config/config.yaml") spa = SpecsAnalyzer(config=config_path) lens_mode = "WideAngleMode" kinetic_energy = 35.000000 @@ -204,12 +204,12 @@ def test_recycling(): """ # get the raw data raw_image_name = os.fspath( - f"{test_dir}/data/dataEPFL/R9132/Data9132_RAWDATA.tsv", + f"{test_dir}/dataEPFL/R9132/Data9132_RAWDATA.tsv", ) with open(raw_image_name, encoding="utf-8") as file: tsv_data = np.loadtxt(file, delimiter="\t") - config_path = os.fspath(f"{test_dir}/data/dataEPFL/config/config.yaml") + config_path = os.fspath(f"{test_dir}/dataEPFL/config/config.yaml") spa = SpecsAnalyzer(config=config_path) lens_mode = "WideAngleMode" kinetic_energy = 35.000000 @@ -243,12 +243,12 @@ def test_cropping(): """Test function for checking that cropping parameters are correctly appield""" # get the raw data raw_image_name = os.fspath( - f"{test_dir}/data/dataEPFL/R9132/Data9132_RAWDATA.tsv", + f"{test_dir}/dataEPFL/R9132/Data9132_RAWDATA.tsv", ) with open(raw_image_name, encoding="utf-8") as file: tsv_data = np.loadtxt(file, delimiter="\t") - config_path = os.fspath(f"{test_dir}/data/dataEPFL/config/config.yaml") + config_path = os.fspath(f"{test_dir}/dataEPFL/config/config.yaml") spa = SpecsAnalyzer(config=config_path) lens_mode = "WideAngleMode" kinetic_energy = 35.000000 diff --git a/tests/test_helpers.py b/tests/test_helpers.py index cf9e2ff..20f13e8 100644 --- a/tests/test_helpers.py +++ b/tests/test_helpers.py @@ -1,5 +1,5 @@ """This script performs tests for the helper functions - in the core.py script to support the load_scan method. +in the core.py script to support the load_scan method. 
""" import os from pathlib import Path diff --git a/tests/test_img_tools.py b/tests/test_img_tools.py index 9bbc8c5..542faaa 100755 --- a/tests/test_img_tools.py +++ b/tests/test_img_tools.py @@ -1,5 +1,4 @@ -"""This is a code that performs several tests for the image tool module -""" +"""This is a code that performs several tests for the image tool module""" import os import numpy as np diff --git a/tests/test_io.py b/tests/test_io.py index e097992..6164c43 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -1,5 +1,4 @@ -"""This is a code that performs several tests for the input/output functions -""" +"""This is a code that performs several tests for the input/output functions""" import os import random from pathlib import Path diff --git a/tests/test_lens_modes.py b/tests/test_lens_modes.py index bd9c00d..0ca9de1 100644 --- a/tests/test_lens_modes.py +++ b/tests/test_lens_modes.py @@ -1,5 +1,4 @@ -"""This is a code that performs several tests for the convert functions -""" +"""This is a code that performs several tests for the convert functions""" import os import numpy as np diff --git a/tests/test_specsanalyzer.py b/tests/test_specsanalyzer.py index cdc96b1..9d44b37 100755 --- a/tests/test_specsanalyzer.py +++ b/tests/test_specsanalyzer.py @@ -1,6 +1,4 @@ -"""This is a code that performs several tests for the SpecsAnalyzer -core class functions -""" +"""This is a code that performs several tests for the SpecsAnalyzer core class functions""" import os from importlib.util import find_spec diff --git a/tests/test_specsscan.py b/tests/test_specsscan.py index b34a63e..0811ca0 100755 --- a/tests/test_specsscan.py +++ b/tests/test_specsscan.py @@ -1,13 +1,16 @@ -"""This is a code that performs several tests for the SpecsScan -core class functions +"""This is a code that performs several tests for the SpecsScan core class functions """ import os +import numpy as np +import pytest + import specsscan from specsscan import __version__ from specsscan import SpecsScan package_dir = os.path.dirname(specsscan.__file__) +test_dir = package_dir + "/../tests/data/" def test_version(): @@ -23,6 +26,101 @@ def test_default_config(): assert sps.config["spa_params"]["apply_fft_filter"] is False +def test_conversion_2d(): + """Test the conversion of a single-image scan""" + sps = SpecsScan( + config=test_dir + "config.yaml", + user_config={}, + system_config={}, + ) + res_xarray = sps.load_scan( + scan=3610, + path=test_dir, + ) + assert res_xarray.dims == ("Angle", "Ekin") + + with pytest.raises(IndexError): + res_xarray = sps.load_scan( + scan=3610, + path=test_dir, + iterations=[0], + ) + + +def test_conversion_3d(): + """Test the conversion of a 3D scan""" + sps = SpecsScan( + config=test_dir + "config.yaml", + user_config={}, + system_config={}, + ) + res_xarray = sps.load_scan( + scan=4450, + path=test_dir, + ) + assert res_xarray.dims == ("Angle", "Ekin", "mirrorX") + + res_xarray2 = sps.load_scan( + scan=4450, + path=test_dir, + iterations=[0], + ) + assert res_xarray.sum(axis=(0, 1, 2)) != res_xarray2.sum(axis=(0, 1, 2)) + + res_xarray2 = sps.load_scan( + scan=4450, + path=test_dir, + iterations=np.s_[0:2], + ) + assert res_xarray.sum(axis=(0, 1, 2)) == res_xarray2.sum(axis=(0, 1, 2)) + + with pytest.raises(IndexError): + sps.check_scan( + scan=4450, + delays=range(1, 20), + path=test_dir, + ) + + +def test_checkscan(): + """Test the check_scan function""" + sps = SpecsScan( + config=test_dir + "config.yaml", + user_config={}, + system_config={}, + ) + + res_xarray = sps.check_scan( + 
scan=4450, + delays=[0], + path=test_dir, + ) + assert res_xarray.dims == ("Angle", "Ekin", "Iteration") + + with pytest.raises(IndexError): + sps.check_scan( + scan=4450, + delays=range(1, 20), + path=test_dir, + ) + + +def test_checkscan_2d_raises(): + """Test that the check_scan function raises if a single image is loaded""" + sps = SpecsScan( + config=test_dir + "config.yaml", + user_config={}, + system_config={}, + ) + + with pytest.raises(ValueError): + sps.check_scan( + scan=3610, + delays=[0], + path=test_dir, + ) + + def test_process_sweep_scan(): """Test the conversion of a sweep scan""" config = { @@ -43,10 +141,61 @@ def test_process_sweep_scan(): ) res_xarray = sps.load_scan( scan=6455, - path=package_dir + "/../tests/data/", + path=test_dir, ) assert res_xarray.energy[0].values.item() == 20.953256232558136 assert res_xarray.energy[-1].values.item() == 21.02424460465116 assert ( (res_xarray.sum(axis=0) - res_xarray.sum(axis=0).mean()) < 0.1 * res_xarray.sum(axis=0) ).all() + + +def test_crop_tool(): + """Test the crop tool""" + sps = SpecsScan( + config=test_dir + "config.yaml", + user_config={}, + system_config={}, + ) + + res_xarray = sps.load_scan( + scan=3610, + path=test_dir, + crop=True, + ) + + assert res_xarray.Angle[0] == -18 + assert res_xarray.Angle[-1] == 17.859375 + assert res_xarray.Ekin[0] == 18.69 + assert res_xarray.Ekin[-1] == 23.29656976744186 + + res_xarray = sps.load_scan( + scan=3610, + path=test_dir, + ek_range_min=0.1, + ek_range_max=0.9, + ang_range_min=0.1, + ang_range_max=0.9, + crop=True, + ) + assert res_xarray.Angle[0] == -14.34375 + assert res_xarray.Angle[-1] == 14.203125 + assert res_xarray.Ekin[0] == 19.160058139534886 + assert res_xarray.Ekin[-1] == 22.826511627906974 + + sps.crop_tool( + ek_range_min=0.1, + ek_range_max=0.9, + ang_range_min=0.1, + ang_range_max=0.9, + apply=True, + ) + + res_xarray = sps.load_scan( + scan=3610, + path=test_dir, + ) + assert res_xarray.Angle[0] == -14.34375 + assert res_xarray.Angle[-1] == 14.203125 + assert res_xarray.Ekin[0] == 19.160058139534886 + assert res_xarray.Ekin[-1] == 22.826511627906974 diff --git a/tutorial/1_specsanalyzer_conversion_examples.ipynb b/tutorial/1_specsanalyzer_conversion_examples.ipynb new file mode 100644 index 0000000..6886ccc --- /dev/null +++ b/tutorial/1_specsanalyzer_conversion_examples.ipynb @@ -0,0 +1,207 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example 1: SpecsAnalyzer conversion\n", + "This is an example showcasing the conversion of Phoibos analyzer data with SpecsAnalyzer\n", + "\n", + "The image is loaded from a text file, and the conversion into xarrays with calibrated dimensions is demonstrated for different modes of operation " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "from specsanalyzer import SpecsAnalyzer\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "%matplotlib widget" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Image conversion\n", + "create specsanalyzer instance from config file" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "spa = SpecsAnalyzer(config=\"../tests/data/dataEPFL/config/config.yaml\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "convert single image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"metadata": {}, + "outputs": [], + "source": [ + "lens_mode = \"WideAngleMode\"\n", + "kinetic_energy = 35.000000\n", + "pass_energy = 35.000000\n", + "work_function = 4.3\n", + "binning = 4\n", + "\n", + "raw_image_name = \"../tests/data/dataEPFL/R9132/Data9132_RAWDATA.tsv\"\n", + "with open(raw_image_name) as file:\n", + " tsv_data = np.loadtxt(file, delimiter=\"\\t\")\n", + "\n", + "res_xarray = spa.convert_image(\n", + " tsv_data,\n", + " lens_mode,\n", + " kinetic_energy,\n", + " pass_energy,\n", + " work_function,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res_xarray.dims" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure()\n", + "res_xarray.plot()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Adjusting offsets and angle\n", + "image rotation angle and center offsets can be adjusted by keyworkd arguments, or from the config." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res_xarray = spa.convert_image(\n", + " tsv_data,\n", + " lens_mode,\n", + " kinetic_energy,\n", + " pass_energy,\n", + " work_function,\n", + " rotation_angle=2,\n", + " angle_offset_px=-3,\n", + ")\n", + "res_xarray.plot()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Removal of mesh artefact\n", + "The mesh in front of the MCP introduces some visial artefacts. These can be mitigated by applying a Fourier filter approach, with Peaks in the Fourier plane to remove defined in the config file." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "spa = SpecsAnalyzer(config=\"../tests/data/dataEPFL/config/config_filterON.yaml\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res_xarray = spa.convert_image(\n", + " tsv_data,\n", + " lens_mode,\n", + " kinetic_energy,\n", + " pass_energy,\n", + " work_function,\n", + ")\n", + "plt.figure()\n", + "res_xarray.plot()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Conversion into space modes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "lens_mode = \"HighMagnification2\"\n", + "res_xarray = spa.convert_image(\n", + " tsv_data,\n", + " lens_mode,\n", + " kinetic_energy,\n", + " pass_energy,\n", + " work_function,\n", + ")\n", + "plt.figure()\n", + "res_xarray.plot()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.10.4 ('specanalyserenv')", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.12" + }, + "vscode": { + "interpreter": { + "hash": "01f3a50f1cec8b32686da9a100309d20236977f5c6d2fb4bd4818f1295405c21" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tutorial/example.ipynb b/tutorial/2_specsscan_example.ipynb similarity index 59% rename from tutorial/example.ipynb rename to tutorial/2_specsscan_example.ipynb index 54e6f9a..cab131c 100644 --- a/tutorial/example.ipynb +++ b/tutorial/2_specsscan_example.ipynb @@ -5,8 +5,10 @@ "cell_type": "markdown", "metadata": {}, 
"source": [ - "## This is an example showcasing the loading of trARPES data as collected using the Phoibos detector at FHI Berlin.\n", - "The band dispersion is loaded as a xarray dataframe following a conversion to the [NeXus format](https://manual.nexusformat.org/classes/contributed_definitions/NXmpes.html#nxmpes) using the [Nomad Parser Nexus](https://github.com/nomad-coe/nomad-parser-nexus)." + "## Example 2: SpecsScan loading\n", + "This is an example showcasing the loading of trARPES data as collected using the Phoibos detector at FHI Berlin.\n", + "\n", + "The band dispersion is loaded as a xarray dataframe demonstrating different modes of operation " ] }, { @@ -27,7 +29,7 @@ "%autoreload 2\n", "from specsscan import SpecsScan\n", "import matplotlib.pyplot as plt\n", - "\n", + "import numpy as np\n", "%matplotlib widget" ] }, @@ -36,25 +38,25 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Here, a SpecsScan class instance is created as per the configuration provided in [config.yaml](../tests/data/config.yaml). The user may set the entries in config.yaml file, for example, the data path and conversion parameters as per the requirements before creating this instance" + "Here, a SpecsScan class instance is created as per the configuration provided in [config.yaml](../tests/data/config.yaml). The user may set the entries in config.yaml file, for example, the data path and conversion parameters as per the requirements before creating this instance.\n", + "\n", + "In addition to the provided config files, config files from different locations are optionally included as well (see documentation)." ] }, { - "attachments": {}, - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "### The path may changed to point to the scan folder of the data of interest (for example, on a server drive)" + "sps = SpecsScan(config=\"../tests/data/config.yaml\")" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "sps = SpecsScan(config=\"../tests/data/config.yaml\")\n", - "path = \"../tests/data/\" # Path to the test data set" + "### Loading data" ] }, { @@ -62,7 +64,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The load_scan method loads the scan as an xarray along with the metadata needed for nexus conversion. The progress bars can be activated by changing the config parameter, enable_nested_progress_bar, to true in config.yaml. Additionally, the data can be cropped by passing a boolean \"crop\" to the loader, provided the crop parameters already exist in the given instance. " + "The load_scan method loads the scan as an xarray of the data converted into angular/energy coordinates along with the metadata of the scan." ] }, { @@ -71,28 +73,19 @@ "metadata": {}, "outputs": [], "source": [ + "path = \"../tests/data/\" # Path to the test data set\n", + "# The path may be changed to point to the scan folder of the data of interest (for example, on a server drive)\n", "res_xarray = sps.load_scan(\n", " scan=4450, # Scan number for an example mirror scan\n", " path = path,\n", - " crop=True\n", ")" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plt.figure()\n", - "res_xarray[:,:,0].plot()" - ] - }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The loader has given a warning saying that the cropping parameters do not exist yet. 
{ "cell_type": "markdown", "metadata": {}, "source": [ - "The loader has given a warning saying that the cropping parameters do not exist yet. Therefore, a cropping tool can be used to crop the data while also saving the crop ranges into a class attribute for later scans." + "The data are from a mirror scan, showing the mirror position as the third dimension:" ] }, { @@ -101,15 +94,14 @@ "metadata": {}, "outputs": [], "source": [ - "sps.crop_tool()" + "res_xarray.dims" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Press crop applies the cropping to the test image, and stores the cropping information in the class.\n", - "Load the scan again to apply it to all images:" + "We can plot, e.g., selected steps of the scan:" ] }, { @@ -118,28 +110,22 @@ "metadata": {}, "outputs": [], "source": [ - "res_xarray = sps.load_scan(\n", - " scan=4450, # Scan number for an example mirror scan\n", - " path = path,\n", - " crop=True\n", - ")" + "plt.figure()\n", + "res_xarray[:,:,0].plot()" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "The data may be visualised as a 2D plot by slicing the xarray in 2 dimensions keeping the third fixed. In the plot below, the Angle vs Ekin data is plotted for a fixed mirrorX value." + "### Cropping data" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "sps.result.coords" + "The image contains data beyond the boundaries given by the illuminated area of the MCP, which should be removed. For this, the ``crop`` option of the converter can be used:" ] }, { @@ -148,17 +134,20 @@ "metadata": {}, "outputs": [], "source": [ - "plt.figure()\n", - "sps.result[:,:,0].plot()\n", - "plt.show()" + "res_xarray = sps.load_scan(\n", + " scan=4450, # Scan number for an example mirror scan\n", + " path = path,\n", + " crop=True,\n", + ")" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "The metadata associated with the scan is added as an attribute to the xarray" + "The loader has given a warning saying that the cropping parameters do not exist yet. Therefore, an interactive cropping tool can be used to crop the data while also saving the crop ranges into a class attribute for later scans. Pressing ``crop`` applies the cropping to the test image, and stores the cropping information in the class.\n", + "\n", + "One can provide relative cropping ranges either as keyword parameters, or in the config file, and optionally directly apply the settings to make the tool non-interactive." ] }, { @@ -167,15 +156,20 @@ "metadata": {}, "outputs": [], "source": [ - "sps.result.attrs[\"metadata\"].keys()" + "sps.crop_tool(\n", + " ek_range_min=0.08,\n", + " ek_range_max=0.88,\n", + " ang_range_min=0.15,\n", + " ang_range_max=0.85,\n", + " apply=True,\n", + ")" ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "3D scans, where the images are recorded as a function of a third parameter (generally delay or in this case, mirrorX), can also be loaded with an option to average only the given iterations passed as a list or slice object. " + "Load the scan again to apply it to all images:" ] }, { @@ -184,13 +178,21 @@ "metadata": {}, "outputs": [], "source": [ - "sps.load_scan(\n", - " scan=4450,\n", - " path=path,\n", - " iterations=[0]\n", + "res_xarray = sps.load_scan(\n", + " scan=4450, # Scan number for an example mirror scan\n", + " path = path,\n", + ")" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can e.g. 
also get a plot along the third dimension, by integrating along the first.\n", + "\n", + "One can also access the conversion result from a class accessor:" + ] + }, { "cell_type": "code", "execution_count": null, @@ -198,8 +200,7 @@ "outputs": [], "source": [ "plt.figure()\n", - "sps.result[:,:,0].plot()\n", - "plt.show()" + "sps.result.loc[{\"Angle\": slice(-5, 5)}].sum(axis=0).plot()" ] }, { @@ -207,7 +208,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Another useful functionality is to load a 3D scan as a function of iterations averaged over the scan parameter (in this case, mirrorX). This is done using the check_scan method" + "The metadata associated with the scan is added as an attribute to the xarray" ] }, { @@ -216,20 +217,14 @@ "metadata": {}, "outputs": [], "source": [ - "res_xarray_check = sps.check_scan(\n", - " scan=4450,\n", - " delays=1, # for a fixed delay of index, 1\n", - " path=path\n", - ")" + "sps.result.attrs[\"metadata\"].keys()" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "res_xarray_check" + "### Loading with selected iterations" ] }, { @@ -237,17 +232,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Conversion to NeXus\n", - "This required the [nexusutils](https://github.com/nomad-coe/nomad-parser-nexus) package to be installed in the active kernel. Once installed, the convert function can be imported and called using the mpes reader which makes use of the NXmpes contributed application definition for NeXus for the Multidimensional Photoemission Spectroscopy community." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pynxtools.dataconverter.convert import convert" ] }, { @@ -256,26 +241,20 @@ "metadata": {}, "outputs": [], "source": [ - "convert(input_file=[\"../tests/data/phoibos_config.json\", # config file for translating local metadata paths to NeXus paths\n", - " \"../tests/data/phoibos_eln_data.yaml\" # ELN file that adds/overwrites additional metadata\n", - " ],\n", - " objects=sps.result, # xarray object obtained from the specsscan loader\n", - " reader='mpes',\n", - " nxdl='NXmpes',\n", - " output='spectest.mpes.nxs')" + "plt.figure()\n", + "sps.load_scan(\n", + " scan=4450,\n", + " path=path,\n", + " iterations=np.s_[0, 1:2],\n", + ").sum(axis=2).plot()" ] },
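The iterations argument used above accepts a list of indices or a numpy index expression; np.s_ is simply a convenience constructor for slice objects and index tuples. A self-contained illustration, independent of the loader:

    import numpy as np

    # np.s_ builds the same objects one could write by hand:
    assert np.s_[0:2] == slice(0, 2)
    # mixing an integer index with a slice yields a tuple:
    assert np.s_[0, 1:2] == (0, slice(1, 2))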
{ "attachments": {}, "cell_type": "markdown", - "metadata": { - "tags": [] - }, + "metadata": {}, "source": [ - "## View the data with H5Web\n", - "H5Web is a tool for visualizing any data in the h5 data format. Since the NeXus format builds opon h5 it can be used to view this data as well. We just import the package and call H5Web with the output filename from the convert command above. \n", - "\n", - "You can also view this data with the H5Viewer or other tools from your local filesystem." + "Another useful functionality is to load a 3D scan as a function of iterations averaged over the scan parameter (in this case, mirrorX). This is done using the check_scan method." ] }, { @@ -284,7 +263,21 @@ "metadata": {}, "outputs": [], "source": [ - "from jupyterlab_h5web import H5Web" + "res_xarray_check = sps.check_scan(\n", + " scan=4450,\n", + " delays=0, # for a fixed delay of index 0\n", + " path=path,\n", + ")\n", + "plt.figure()\n", + "res_xarray_check.loc[{\"Angle\": slice(-5, 5)}].sum(axis=(0)).plot()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Saving\n", + "Data can be saved, e.g., as HDF5 files including metadata for convenient processing in other notebooks or software." ] }, { @@ -293,7 +286,11 @@ "metadata": {}, "outputs": [], "source": [ - "H5Web('spectest.mpes.nxs')" + "sps.load_scan(\n", + " scan=4450, # Scan number for an example mirror scan\n", + " path = path,\n", + ")\n", + "sps.save(\"example_data.h5\")" ] }, { diff --git a/tutorial/specscan_conversion_to_NXmpes.ipynb b/tutorial/3_specsscan_conversion_to_NeXus.ipynb similarity index 69% rename from tutorial/specscan_conversion_to_NXmpes.ipynb rename to tutorial/3_specsscan_conversion_to_NeXus.ipynb index 6ac50f6..59f03d9 100755 --- a/tutorial/specscan_conversion_to_NXmpes.ipynb +++ b/tutorial/3_specsscan_conversion_to_NeXus.ipynb @@ -4,8 +4,10 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## This is an example showcasing the loading of trARPES data as collected using the Phoibos detector at FHI Berlin.\n", - "The band dispersion is loaded as a xarray dataframe following a conversion to the [NeXus format](https://manual.nexusformat.org/classes/contributed_definitions/NXmpes.html#nxmpes) using the [Nomad Parser Nexus](https://github.com/nomad-coe/nomad-parser-nexus)." + "## Example 3: Export to NeXus\n", + "This is an example showcasing the loading of a Fermi surface mapping acquired as a tilt scan.\n", + "\n", + "The band dispersion is loaded as an xarray DataArray, followed by a conversion to the [NXmpes_arpes NeXus format](https://manual.nexusformat.org/classes/contributed_definitions/NXmpes.html#nxmpes) using the [FAIRmat pynxtools](https://github.com/FAIRMAT-nfdi/pynxtools)." ] }, { @@ -26,22 +28,16 @@ "%load_ext autoreload\n", "%autoreload 2\n", "from specsscan import SpecsScan\n", - "from pathlib import Path\n", - "import matplotlib.pyplot as plt" + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "%matplotlib widget" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Here, a SpecsScan class instance is created as per the configuration provided in [config.yaml](../tests/data/config.yaml). The user may set the entries in config.yaml file, for example, the data path and conversion parameters as per the requirements before creating this instance" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### The path may changed to point to the scan folder of the data of interest (for example, on a server drive)" + "Definition of manual scan metadata. These should ideally come from an Electronic Lab Notebook." ] },
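According to the tests added earlier in this diff, check_scan returns an xarray with dims ("Angle", "Ekin", "Iteration"): the scan parameter is averaged while the individual iterations are kept separate. A self-contained toy sketch of how such a result can be used to judge scan stability (the DataArray here is a stand-in, not real data):

    import numpy as np
    import xarray as xr

    # Toy stand-in with the dimensions reported by check_scan:
    toy = xr.DataArray(np.random.rand(4, 5, 3), dims=("Angle", "Ekin", "Iteration"))
    # Summing out the image axes leaves the total counts per iteration,
    # a quick consistency check across iterations.
    counts_per_iteration = toy.sum(dim=["Angle", "Ekin"])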
{ @@ -53,7 +49,6 @@ "outputs": [], "source": [ "metadata = {}\n", - "# manual Meta data. These should ideally come from an Electronic Lab Notebook.\n", "#General\n", "metadata['experiment_summary'] = 'TbTe3 tilt map around EF with 800nm pump at 20fs after pump-probe overlap'\n", "metadata['entry_title'] = 'TbTe3 XUV Fermi surface map at 20 fs'\n", @@ -61,7 +56,6 @@ "\n", "#User\n", "# Fill general parameters of NXuser\n", - "# TODO: discuss how to deal with multiple users?\n", "metadata['user0'] = {}\n", "metadata['user0']['name'] = 'Laurenz Rettig'\n", "metadata['user0']['role'] = 'Principal Investigator'\n", @@ -69,13 +63,6 @@ "metadata['user0']['address'] = 'Faradayweg 4-6, 14195 Berlin'\n", "metadata['user0']['email'] = 'rettig@fhi-berlin.mpg.de'\n", "\n", - "metadata['user1'] = {}\n", - "metadata['user1']['name'] = 'William Windsor'\n", - "metadata['user1']['role'] = 'Principal Investigator'\n", - "metadata['user1']['affiliation'] = 'Fritz Haber Institute of the Max Planck Society'\n", - "metadata['user1']['address'] = 'Faradayweg 4-6, 14195 Berlin'\n", - "metadata['user1']['email'] = 'windsor@fhi-berlin.mpg.de'\n", - "\n", "metadata['instrument'] = {}\n", "# energy resolution\n", "metadata['instrument']['energy_resolution'] = 150.\n", @@ -116,18 +103,17 @@ "metadata['sample']['description'] = 'cleaved single crystal of TbTe3'\n", "metadata['sample']['name'] = 'TbTe3 Single Crystal'\n", "\n", - "#metadata[\"scan_info\"] = {}\n", - "#metadata[\"scan_info\"][\"trARPES:XGS600:PressureAC:P_RD\"] = 2.5E-11\n", - "#metadata[\"scan_info\"][\"trARPES:Carving:TEMP_RBV\"] = 70\n" + "metadata[\"scan_info\"] = {}\n", + "metadata[\"scan_info\"][\"trARPES:XGS600:PressureAC:P_RD\"] = 2.5E-11\n", + "metadata[\"scan_info\"][\"trARPES:Carving:TEMP_RBV\"] = 70\n", + "metadata[\"scan_info\"][\"trARPES:Sample:Measure\"] = 0\n" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "config = {\"nexus\":{\"definition\": \"NXmpes_arpes\"}, \"spa_params\":{\"crop\":True, \"ek_range_min\":0.07597844332538181, \"ek_range_max\":0.9117413199045858, \"ang_range_min\":0.16453159041394336, \"ang_range_max\":0.8840087145969499,}}" + "The SpecsScan instance is created from a config that contains a rewrite entry to change the names of axes" ] }, { @@ -138,6 +124,7 @@ }, "outputs": [], "source": [ + "config = {\"nexus\":{\"definition\": \"NXmpes\"}, \"spa_params\":{\"crop\":True, \"ek_range_min\":0.07597844332538181, \"ek_range_max\":0.9117413199045858, \"ang_range_min\":0.16453159041394336, \"ang_range_max\":0.8840087145969499,}}\n", "sps = SpecsScan(config=config, user_config=\"../specsscan/config/example_config_FHI.yaml\")\n", "path = \"../tests/data/\" # Path to the test data set" ] }, { @@ -158,46 +145,34 @@ "outputs": [], "source": [ "res_xarray = sps.load_scan(\n", - " scan=6833, # Tilt scan\n", + " scan=1496, # Tilt scan\n", + " path=path,\n", " metadata=metadata,\n", + " collect_metadata=True,\n", ")" ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [] - }, - "outputs": [], + "cell_type": "markdown", + "metadata": {}, "source": [ - "%matplotlib inline\n", - "res_xarray[:,29,:].plot()\n", - "plt.show()" + "The resulting 3-dimensional data cube" ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ - "%matplotlib inline\n", - "res_xarray.loc[{\"energy\":slice(21.6, 21.8)}].sum(axis=2).plot()\n", - "plt.show()" + "res_xarray.dims" ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [] - }, - 
"outputs": [], + "cell_type": "markdown", + "metadata": {}, "source": [ - "sps.save(\"FS_map_CsV3Sb5.h5\")" + "A Gamma-point cut" ] }, { @@ -208,21 +183,15 @@ }, "outputs": [], "source": [ - "sps.save(\"FS_map_CsV3Sb5.nxs\", undocumented=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The metadata associated with the scan is added as an attribute to the xarray" + "plt.figure()\n", + "res_xarray[:,30,:].T.plot()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "3D scans, where the images are recorded as a function of a third parameter (generally delay or in this case, mirrorX), can also be loaded with an option to average only the given iterations passed as a list or slice object. " + "The Fermi surface" ] }, { @@ -233,7 +202,15 @@ }, "outputs": [], "source": [ - "from jupyterlab_h5web import H5Web" + "plt.figure()\n", + "res_xarray.loc[{\"energy\":slice(21.6, 21.8)}].sum(axis=2).plot()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Save as nexus file" ] }, { @@ -244,7 +221,7 @@ }, "outputs": [], "source": [ - "H5Web('spectest.mpes.nxs')" + "sps.save(\"FSmapping.nxs\")" ] }, { diff --git a/tutorial/4_specsscan_load_sweep_scan.ipynb b/tutorial/4_specsscan_load_sweep_scan.ipynb new file mode 100644 index 0000000..2c2e0f6 --- /dev/null +++ b/tutorial/4_specsscan_load_sweep_scan.ipynb @@ -0,0 +1,119 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example 4: Sweep Scan loading\n", + "This is an example showcasing the loading of a kinetic energy sweep scan\n", + "\n", + "The individual images are loaded, and summed onto the grid of data that overlap on all images" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "from specsscan import SpecsScan\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "%matplotlib widget" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here, a SpecsScan class instance is created as per the configuration provided in [config.yaml](../tests/data/config.yaml). Crop parameters are set by an additional config dictionary." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\"spa_params\": {\n", + " 'ek_range_min': 0.07597844332538357,\n", + " 'ek_range_max': 0.8965456312395133,\n", + " 'ang_range_min': 0.16732026143790849,\n", + " 'ang_range_max': 0.8449673202614381,\n", + " \"angle_offset_px\":13,\n", + " \"rotation_angle\": 2,\n", + " \"crop\":True,\n", + "}}\n", + "sps = SpecsScan(config=config, user_config=\"../specsscan/config/example_config_FHI.yaml\")\n", + "path = \"../tests/data/\" # Path to the test data set" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The load_scan method performs the merging of the images and returns the scan as an xarray" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res_xarray = sps.load_scan(\n", + " scan=6455, # Scan number for an example sweep scan\n", + " path=path,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure()\n", + "res_xarray.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "specenv38", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.12" + }, + "vscode": { + "interpreter": { + "hash": "a164666994e9db75450cd7016dd7e51d42ea6e7c1e5e8017af1f8068ca906367" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tutorial/conversion_benchmarks.ipynb b/tutorial/conversion_benchmarks.ipynb deleted file mode 100644 index b9ae59c..0000000 --- a/tutorial/conversion_benchmarks.ipynb +++ /dev/null @@ -1,289 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# from importlib import reload\n", - "import curses #### pip install windows-curses\n", - "import matplotlib.pyplot as plt\n", - "import xarray as xr\n", - "import numpy as np\n", - "import specsanalyzer\n", - "from specsanalyzer import SpecsAnalyzer" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# reload(specsanalyzer)\n", - "spa = SpecsAnalyzer()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "with open(\"../tests/data/dataEPFL/R9132/Data9132_RAWDATA.tsv\") as file:\n", - " tsv_data = np.loadtxt(file, delimiter=\"\\t\")\n", - "plt.figure()\n", - "plt.imshow(tsv_data)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Testing convert and crop on the imported data\n", - "spa = SpecsAnalyzer(config=\"../tests/data/dataEPFL/config/config.yaml\")\n", - "\n", - "lens_mode = \"WideAngleMode\"\n", - "kinetic_energy = 35.0\n", - "pass_energy = 35.0\n", - "work_function = 4.2\n", - "\n", - "res_xarray = spa.convert_image(\n", - " tsv_data, lens_mode, kinetic_energy, pass_energy, work_function\n", - ")\n", - "plt.figure()\n", - 
"res_xarray.plot(cmap=\"viridis\", vmin=0)\n", - "res_xarray.to_numpy().shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "with open(\"../tests/data/dataEPFL/R9132/Data9132_IGOR_corrected.tsv\") as file:\n", - " ref_data = np.loadtxt(file, delimiter=\"\\t\")\n", - " ref_data = ref_data\n", - "\n", - "import specsanalyzer.convert\n", - "from specsanalyzer.convert import get_damatrix_fromcalib2d\n", - "from specsanalyzer.convert import get_rr_da\n", - "from specsanalyzer.convert import calculate_polynomial_coef_da\n", - "from specsanalyzer.convert import mcp_position_mm\n", - "from specsanalyzer.convert import calculate_matrix_correction\n", - "\n", - "spa = SpecsAnalyzer(config=\"../tests/data/dataEPFL/config/config.yaml\")\n", - "config_dict = spa.config\n", - "lens_mode = \"WideAngleMode\"\n", - "kinetic_energy = 35\n", - "pass_energy = 35\n", - "work_function = 4.3\n", - "binning = 4\n", - "\n", - "(\n", - " ek_axis,\n", - " angle_axis,\n", - " angular_correction_matrix,\n", - " e_correction,\n", - " jacobian_determinant,\n", - ") = calculate_matrix_correction(\n", - " lens_mode,\n", - " kinetic_energy,\n", - " pass_energy,\n", - " work_function,\n", - " binning,\n", - " config_dict,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# this test would not work in the case of upsampling in python..\n", - "\n", - "\n", - "ref_xarray = xr.DataArray(\n", - " data=ref_data,\n", - " coords={\"Angle\": angle_axis, \"Ekin\": ek_axis},\n", - " dims=[\"Angle\", \"Ekin\"],\n", - ")\n", - "plt.figure()\n", - "ref_xarray.plot()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# calcualte the difference xarray, normalized to the ref pixelwise intensity\n", - "ff_xarray = (res_xarray - ref_xarray) / ref_xarray" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plt.figure()\n", - "ff_xarray.plot(cmap=\"RdBu_r\", vmax=1e-3)\n", - "plt.title(\"Difference/Mean pixel intensity\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, figsize=(4, 8))\n", - "subplot1 = ref_xarray.plot(ax=ax1, cmap=\"viridis\", vmin=0)\n", - "subplot2 = res_xarray.plot(ax=ax2, cmap=\"viridis\", vmin=0)\n", - "subplot3 = ff_xarray.plot(cmap=\"RdBu_r\", ax=ax3, vmax=100)\n", - "plt.tight_layout()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%timeit res_xarray = spa.convert_image(tsv_data, lens_mode, kinetic_energy, pass_energy, work_function)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%timeit\n", - "convert1 = specsanalyzer.convert.physical_unit_data(\n", - " tsv_data,\n", - " angular_correction_matrix,\n", - " e_correction,\n", - " jacobian_determinant,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "config_dict = spa.config\n", - "# config_dict['calib2d_dict']['old_scans_params']\n", - "config_dict.keys()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "matrix_dict = spa.correction_matrix_dict\n", - "matrix_dict.keys()" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "spa = SpecsAnalyzer(config=\"../tests/data/dataEPFL/config/config.yaml\")\n", - "config_dict = spa.config\n", - "lens_mode = \"WideAngleMode\"\n", - "kinetic_energy = 35\n", - "pass_energy = 35\n", - "work_function = 4.3\n", - "binning = 4\n", - "res_xarray = spa.convert_image(\n", - " tsv_data, lens_mode, kinetic_energy, pass_energy, work_function\n", - ")\n", - "kinetic_energy = 30\n", - "pass_energy = 35\n", - "work_function = 4.3\n", - "binning = 4\n", - "res_xarray = spa.convert_image(\n", - " tsv_data, lens_mode, kinetic_energy, pass_energy, work_function\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "matrix_dict = spa.correction_matrix_dict\n", - "matrix_dict.keys()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "matrix_dict[\"WideAngleMode\"].keys()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3.10.4 ('specanalyserenv')", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.4" - }, - "orig_nbformat": 4, - "vscode": { - "interpreter": { - "hash": "01f3a50f1cec8b32686da9a100309d20236977f5c6d2fb4bd4818f1295405c21" - } - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/tutorial/convert_file_tests.ipynb b/tutorial/convert_file_tests.ipynb deleted file mode 100644 index a4e5073..0000000 --- a/tutorial/convert_file_tests.ipynb +++ /dev/null @@ -1,779 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "864500e2", - "metadata": {}, - "source": [ - "TESTING CONVERT FUNCTIONS FOR PHOIBOS 150 DATA, COMPARISON WITH THE IGOR DATA\n", - "\n", - "\n", - "USES THE FOLDER dataEPFL/R1932\n", - "\n", - "the folder contains all the igor output arrays for comparison\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2f2ab5b4", - "metadata": {}, - "outputs": [], - "source": [ - "# This uses the autoreload function form ipynb, useful to reload modificaiton of the source code\n", - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "45fafb84", - "metadata": {}, - "outputs": [], - "source": [ - "from specsanalyzer import SpecsAnalyzer\n", - "import matplotlib.pyplot as plt\n", - "import xarray as xr\n", - "import numpy as np\n", - "\n", - "spa = SpecsAnalyzer()\n", - "\n", - "import os\n", - "\n", - "testdatapath = \"../tests/data/dataEPFL/R9132/\"\n", - "filelist = os.listdir(testdatapath)\n", - "print(\"Data path= \", testdatapath)\n", - "# this folders contains the AVG folder with the data, an info.txt vector and a scan vector" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6c91b572", - "metadata": {}, - "outputs": [], - "source": [ - "# get the raw image name from the list\n", - "rawimagelist = [i for i in filelist if \"RAWDATA\" in i]\n", - "raw_image_name = os.path.join(testdatapath, rawimagelist[0])\n", - "print(raw_image_name)\n", - "\n", - "with open(raw_image_name) as file:\n", - " tsv_data = np.loadtxt(file, delimiter=\"\\t\")\n", - "plt.figure()\n", - "h = 
plt.imshow(tsv_data)\n", - "plt.colorbar()\n", - "print(\"The shape of the raw data is: \", tsv_data.shape)\n", - "# plt.xlim(120,150)\n", - "# plt.ylim(150,200)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "149af2f2", - "metadata": {}, - "outputs": [], - "source": [ - "from specsanalyzer import SpecsAnalyzer\n", - "import matplotlib.pyplot as plt\n", - "import xarray as xr\n", - "import numpy as np\n", - "\n", - "spa = SpecsAnalyzer()\n", - "scanparameters = spa.config" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "61888c46", - "metadata": {}, - "outputs": [], - "source": [ - "# load the module and import functions for reading the parameter table from the calib2d file\n", - "import specsanalyzer.convert\n", - "\n", - "# let's get all the functions to be tested\n", - "from specsanalyzer.convert import get_damatrix_fromcalib2d\n", - "from specsanalyzer.convert import get_rr_da\n", - "from specsanalyzer.convert import calculate_polynomial_coef_da\n", - "from specsanalyzer.convert import mcp_position_mm" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aa27895f", - "metadata": {}, - "outputs": [], - "source": [ - "from specsanalyzer import SpecsAnalyzer\n", - "import matplotlib.pyplot as plt\n", - "import xarray as xr\n", - "import numpy as np\n", - "\n", - "# call an instance of the class\n", - "spa = SpecsAnalyzer()\n", - "# io function used to get the calib 2d file in the class\n", - "calib2d_dictionary = specsanalyzer.io.parse_calib2d_to_dict(\n", - " \"../specsanalyzer/config/phoibos150.calib2d\"\n", - ")\n", - "# calib2d_dictionary=specsanalyzer.io.parse_calib2d_to_dict('./config/phoibos150.calib2d')\n", - "\n", - "# settings function used to get configuration files\n", - "config_file = specsanalyzer.config.parse_config(calib2d_dictionary)\n", - "\n", - "\n", - "# get the das and the rr vector\n", - "# rr_array, da_matrix= specsanalyzer.convert.get_rr_da(\"WideAngleMode\", config_file)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "59ee269d", - "metadata": {}, - "outputs": [], - "source": [ - "# Load the IGOR txt Di_coeff values for comparison\n", - "igordatapath = \"../tests/data/dataEPFL/R9132\"\n", - "igordatapath_content = os.listdir(igordatapath)\n", - "\n", - "# get the fitted coefficients\n", - "Di_coef_list = [i for i in igordatapath_content if \"_coef\" in i]\n", - "\n", - "igor_D_coef_list = []\n", - "for i, name in enumerate(sorted(Di_coef_list)):\n", - " tmp_name = os.path.join(igordatapath, name)\n", - " with open(tmp_name) as file:\n", - " # check the name in row 1\n", - " # first_line=file.readline()\n", - " # print(first_line)\n", - "\n", - " # igor_D_coef_list.append(np.loadtxt(file, delimiter='\\t', skiprows=1) )\n", - " igor_D_coef_list.append(np.loadtxt(file, delimiter=\"\\t\"))\n", - "# igor_D_coef_matrix=np.vstack(igor_D_coef_list)\n", - "igor_D_coef_matrix = np.flip(np.vstack(igor_D_coef_list), axis=1)\n", - "print(igor_D_coef_matrix) # row 0 D1 , row 1 D3, ...\n", - "print(np.vstack(igor_D_coef_list))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cd5a2937", - "metadata": {}, - "outputs": [], - "source": [ - "# Load the IGOR txt Da_values for comparison\n", - "igordatapath = \"../tests/data/dataEPFL/R9132\"\n", - "igordatapath_content = os.listdir(igordatapath)\n", - "\n", - "# get the fitted coefficients\n", - "Di_value_list = [i for i in igordatapath_content if \"_value.tsv\" in i]\n", - "\n", - "igor_D_value_list = []\n", - "for i, 
name in enumerate(sorted(Di_value_list)):\n", - " tmp_name = os.path.join(igordatapath, name)\n", - " with open(tmp_name) as file:\n", - " # check the name in row 1\n", - " # first_line=file.readline()\n", - " # print(first_line)\n", - "\n", - " # igor_D_value_list.append(np.loadtxt(file, delimiter='\\t', skiprows=1) )\n", - " igor_D_value_list.append(np.loadtxt(file, delimiter=\"\\t\"))\n", - "\n", - "igor_D_value_matrix = np.vstack(igor_D_value_list)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "056c501c", - "metadata": {}, - "outputs": [], - "source": [ - "Di_value_list\n", - "type(Di_value_list)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2469ffa1", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "64c0d5a8", - "metadata": {}, - "outputs": [], - "source": [ - "# let's calculate the corresponding quantities in Pytohn\n", - "\n", - "# load the module and import functions for reading the parameter table from the calib2d file\n", - "import specsanalyzer.convert\n", - "\n", - "# let's get all the functions to be tested\n", - "from specsanalyzer.convert import get_damatrix_fromcalib2d\n", - "from specsanalyzer.convert import get_rr_da\n", - "from specsanalyzer.convert import calculate_polynomial_coef_da\n", - "from specsanalyzer.convert import mcp_position_mm\n", - "from specsanalyzer.convert import calculate_matrix_correction\n", - "\n", - "\n", - "spa = SpecsAnalyzer(config=\"../tests/data/dataEPFL/config/config.yaml\")\n", - "config_dict = spa.config\n", - "lens_mode = \"WideAngleMode\"\n", - "kinetic_energy = 35.000000\n", - "pass_energy = 35.000000\n", - "work_function = 4.2\n", - "binning = 4\n", - "\n", - "eshift = np.array(config_dict[\"calib2d_dict\"][\"eShift\"])\n", - "\n", - "aInner, damatrix = get_damatrix_fromcalib2d(\n", - " lens_mode,\n", - " kinetic_energy,\n", - " pass_energy,\n", - " work_function,\n", - " config_dict,\n", - ")\n", - "\n", - "dapolymatrix = calculate_polynomial_coef_da(\n", - " damatrix, kinetic_energy, pass_energy, eshift\n", - ")\n", - "\n", - "\n", - "# note that for scan 9132 (and 9131) we have retardatio ratio= 0.88 -> this is wrong imo, checke the effect of the work function\n", - "# rr_inf = 10 rr_factor = 0.75 in igor" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1e8f8ed1", - "metadata": {}, - "outputs": [], - "source": [ - "print(\"das\")\n", - "print(damatrix)\n", - "print(igor_D_value_matrix)\n", - "print(\"Difference in the interpolated D coeffiecients\")\n", - "print(damatrix - igor_D_value_matrix)\n", - "# at the moment they differ by some %..\n", - "\n", - "print(\"coefs\")\n", - "print(dapolymatrix)\n", - "print(igor_D_coef_matrix)\n", - "print(\"Difference in the fitted polynomial coeffiecients\")\n", - "print(dapolymatrix - igor_D_coef_matrix)\n", - "# at the moment they differ by some %.." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b8b9d33c", - "metadata": {}, - "outputs": [], - "source": [ - "print(\n", - " np.testing.assert_allclose(dapolymatrix, igor_D_coef_matrix, rtol=1e-05)\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25884670", - "metadata": {}, - "outputs": [], - "source": [ - "# get the rr and the damatrix\n", - "rr, damatrix = get_rr_da(\"WideAngleMode\", config_dict)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b2c4bbf1", - "metadata": {}, - "outputs": [], - "source": [ - "# let's get the jacobian matrices generate by the igor code\n", - "# and compare it to the one obtained by calculate_matrix_correction\n", - "\n", - "# Jacobian\n", - "jname = [i for i in igordatapath_content if \"Jacobian\" in i][0]\n", - "with open(os.path.join(igordatapath, jname)) as file:\n", - " jacobian_reference = np.loadtxt(file, delimiter=\"\\t\").T\n", - "\n", - "print(\n", - " \"The shape of the jacobian_reference is: \",\n", - " jacobian_reference.shape,\n", - ")\n", - "# plt.xlim(120,150)\n", - "# plt.ylim(150,200)\n", - "\n", - "\n", - "(\n", - " ek_axis,\n", - " angle_axis,\n", - " angular_correction_matrix,\n", - " e_correction,\n", - " jacobian_determinant,\n", - ") = calculate_matrix_correction(\n", - " lens_mode,\n", - " kinetic_energy,\n", - " pass_energy,\n", - " work_function,\n", - " binning,\n", - " config_dict,\n", - ")\n", - "\n", - "\n", - "print(\n", - " \"The shape of the jacobian_determinant is: \",\n", - " jacobian_determinant.shape,\n", - ")\n", - "\n", - "delta_jacobian = jacobian_determinant - jacobian_reference\n", - "\n", - "fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(10, 4))\n", - "subplot1 = ax1.contourf(jacobian_determinant)\n", - "subplot2 = ax2.contourf(jacobian_reference)\n", - "subplot3 = ax3.contourf(delta_jacobian)\n", - "fig.colorbar(subplot1, ax=ax1)\n", - "fig.colorbar(subplot2, ax=ax2)\n", - "fig.colorbar(subplot3, ax=ax3)\n", - "\n", - "plt.tight_layout()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cb11932e", - "metadata": {}, - "outputs": [], - "source": [ - "assert (\n", - " np.testing.assert_allclose(\n", - " jacobian_determinant, jacobian_reference, rtol=1e-04\n", - " )\n", - " is None\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4b60a676", - "metadata": {}, - "outputs": [], - "source": [ - "# plt.plot(ek_axis,e_correction)\n", - "d_ecorrection = np.gradient(e_correction, ek_axis)\n", - "plt.plot(ek_axis, d_ecorrection)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "43fde75f", - "metadata": {}, - "outputs": [], - "source": [ - "# e_correction\n", - "jname = [i for i in igordatapath_content if \"E_Correction\" in i][0]\n", - "jname\n", - "with open(os.path.join(igordatapath, jname)) as file:\n", - " e_correction_reference = np.loadtxt(file, delimiter=\"\\t\")\n", - "\n", - "plt.plot(e_correction)\n", - "plt.plot(e_correction_reference)\n", - "\n", - "# ecorrection is different!!" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a315bead", - "metadata": {}, - "outputs": [], - "source": [ - "# e_correction\n", - "jname = [i for i in igordatapath_content if \"Angular_Correction\" in i][0]\n", - "jname\n", - "with open(os.path.join(igordatapath, jname)) as file:\n", - " angle_correction_reference = np.loadtxt(file, delimiter=\"\\t\").T\n", - "\n", - "delta_angle = angle_correction_reference - angular_correction_matrix\n", - "\n", - "fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n", - "subplot1 = ax1.contourf(angular_correction_matrix)\n", - "subplot2 = ax2.contourf(angle_correction_reference)\n", - "subplot3 = ax3.contourf(delta_angle)\n", - "fig.colorbar(subplot1, ax=ax1)\n", - "fig.colorbar(subplot2, ax=ax2)\n", - "fig.colorbar(subplot3, ax=ax3)\n", - "plt.tight_layout()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "845828ac", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "\n", - "print(os.path.curdir)\n", - "# os.path.join(os.path.curdir, 'file.name')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3dd70214", - "metadata": {}, - "outputs": [], - "source": [ - "os.fspath(\"./tests/data/dataEPFL/R9132/Data9132_RAWDATA.tsv\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ded1d300", - "metadata": {}, - "outputs": [], - "source": [ - "def test_conversion():\n", - " \"Test if the conversion pipeline gives the same result as the Igor procedures\"\n", - "\n", - " # execution path: if you run in jupyter is is \".\", in the case of the test should already by under tests\n", - " cwd = os.getcwd()\n", - "\n", - " if os.path.basename(cwd) == \"specsanalyser\":\n", - " \"ok we are HERE\"\n", - " basepath = cwd\n", - " else:\n", - " basepath = os.path.dirname(cwd)\n", - " # get the raw data\n", - " print(basepath)\n", - " data_path = os.path.join(basepath, \"tests\", \"data\", \"dataEPFL\", \"R9132\")\n", - " raw_image_name = os.path.join(data_path, \"Data9132_RAWDATA.tsv\")\n", - "\n", - " # raw_image_name=os.fsencode('tests\\data\\dataEPFL\\R9132\\Data9132_RAWDATA.tsv')\n", - " print(\n", - " raw_image_name,\n", - " \"C:\\\\Users\\\\Michele\\\\Documents\\\\GitHub\\\\vscode\\\\specsanalyzer\\\\tests\\\\data\\\\dataEPFL\\\\R9132\\\\Data9132_RAWDATA.tsv\",\n", - " )\n", - "\n", - " with open(raw_image_name) as file:\n", - " tsv_data = np.loadtxt(file, delimiter=\"\\t\")\n", - "\n", - " # get the reference data\n", - " reference_image_name = raw_image_name = os.path.join(\n", - " data_path, \"Data9132_IGOR_corrected.tsv\"\n", - " )\n", - "\n", - " with open(reference_image_name) as file:\n", - " reference = np.loadtxt(file, delimiter=\"\\t\")\n", - "\n", - " configpath = os.path.join(\n", - " basepath, \"tests\", \"data\", \"dataEPFL\", \"config\", \"config.yaml\"\n", - " )\n", - " spa = SpecsAnalyzer(config=configpath)\n", - " lens_mode = \"WideAngleMode\"\n", - " kinetic_energy = 35.000000\n", - " pass_energy = 35.000000\n", - " work_function = 4.2\n", - "\n", - " converted = spa.convert_image(\n", - " raw_img=tsv_data,\n", - " lens_mode=lens_mode,\n", - " kinetic_energy=kinetic_energy,\n", - " pass_energy=pass_energy,\n", - " work_function=work_function,\n", - " apply_fft_filter=False,\n", - " )\n", - "\n", - " # TODO Does not work yet... 
Not sure how you produced the reference?\n", - "\n", - " # tolerance=reference.mean()*1e-3\n", - " # np.testing.assert_allclose(converted.data, reference, atol=tolerance)\n", - " return (converted.data, reference)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f3bcaf24", - "metadata": {}, - "outputs": [], - "source": [ - "(python_data, igor_data) = test_conversion()\n", - "\n", - "# remove nans\n", - "python_data[np.isnan(python_data)] = 0\n", - "igor_data[np.isnan(igor_data)] = 0\n", - "# normalize to unit amplitude\n", - "#python_data /= igor_data.max()\n", - "#igor_data /= igor_data.max()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "58515556", - "metadata": {}, - "outputs": [], - "source": [ - "fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(10, 4))\n", - "subplot1 = ax1.imshow(python_data)\n", - "subplot2 = ax2.imshow(igor_data)\n", - "subplot3 = ax3.imshow((python_data - igor_data))\n", - "fig.colorbar(subplot1, ax=ax1)\n", - "fig.colorbar(subplot2, ax=ax2)\n", - "fig.colorbar(subplot3, ax=ax3)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d3177aac", - "metadata": {}, - "outputs": [], - "source": [ - "tolerance = 5e-5\n", - "np.testing.assert_allclose(python_data, igor_data, atol=tolerance)\n", - "print(tolerance)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "84727e7c", - "metadata": {}, - "outputs": [], - "source": [ - "print(\n", - " os.fspath(\n", - " \"./tests/data/dataEPFL/R9132/Data9132_RAWDATA.tsv\",\n", - " )\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6950793e", - "metadata": {}, - "outputs": [], - "source": [ - "os.fsdecode(\"tests\\data\\dataEPFL\\R9132\\Data9132_RAWDATA.tsv\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8a0cec89", - "metadata": {}, - "outputs": [], - "source": [ - "os.fsencode(\"tests\\data\\dataEPFL\\R9132\\Data9132_RAWDATA.tsv\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a784db5b", - "metadata": {}, - "outputs": [], - "source": [ - "os.path.join(os.path.curdir, \"tests\", \"data\", \"dataEPFL\", \"R9132\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0f21dc4e", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "\n", - "cwd = Path.cwd()\n", - "print(cwd.parent.absolute())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "42db34dd", - "metadata": {}, - "outputs": [], - "source": [ - "os.path.basename(cwd.parent.absolute())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c8b84c75", - "metadata": {}, - "outputs": [], - "source": [ - "with open(\"../../testdata_converted.txt\") as file:\n", - " txt_data = np.loadtxt(file, delimiter=\"\\r\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f40dc591", - "metadata": {}, - "outputs": [], - "source": [ - "txt_data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1c5c78d8", - "metadata": {}, - "outputs": [], - "source": [ - "txt_data_array = np.reshape(txt_data, (256, 344))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8270886a", - "metadata": {}, - "outputs": [], - "source": [ - "txt_data_array" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "33c934bd", - "metadata": {}, - "outputs": [], - "source": [ - "plt.imshow(txt_data_array)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": 
"6d8c1753", - "metadata": {}, - "outputs": [], - "source": [ - "plt.imshow(python_data)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c2b5974c", - "metadata": {}, - "outputs": [], - "source": [ - "plt.imshow(igor_data-530*txt_data_array)\n", - "plt.colorbar()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17050ed2", - "metadata": {}, - "outputs": [], - "source": [ - "igor_data.sum().sum()/tsv_data.sum().sum()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6d20dde6", - "metadata": {}, - "outputs": [], - "source": [ - "igor_data.sum().sum()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4b9d3754", - "metadata": {}, - "outputs": [], - "source": [ - "igor_data2 = igor_data/igor_data.sum().sum()*tsv_data.sum().sum()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "07cce131", - "metadata": {}, - "outputs": [], - "source": [ - "plt.imshow(igor_data2-txt_data_array)\n", - "plt.colorbar()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "384e981a", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3.10.4 ('specanalyserenv')", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.12" - }, - "vscode": { - "interpreter": { - "hash": "01f3a50f1cec8b32686da9a100309d20236977f5c6d2fb4bd4818f1295405c21" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/tutorial/lensmodes_tests.ipynb b/tutorial/lensmodes_tests.ipynb deleted file mode 100644 index b728a28..0000000 --- a/tutorial/lensmodes_tests.ipynb +++ /dev/null @@ -1,274 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Test the behavious of the package when different modes are selects, and\n", - "for different retardatio ratios" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# testing WAM for various retardatio ratios\n", - "\n", - "# from importlib import reload\n", - "import curses #### pip install windows-curses\n", - "import matplotlib.pyplot as plt\n", - "import xarray as xr\n", - "import numpy as np\n", - "import os\n", - "import specsanalyzer\n", - "\n", - "# load the module and import functions for reading the parameter table from the calib2d file\n", - "import specsanalyzer.convert\n", - "\n", - "# let's get all the functions to be tested\n", - "from specsanalyzer import SpecsAnalyzer\n", - "from specsanalyzer.convert import get_damatrix_fromcalib2d\n", - "from specsanalyzer.convert import get_rr_da\n", - "from specsanalyzer.convert import calculate_polynomial_coef_da\n", - "from specsanalyzer.convert import mcp_position_mm\n", - "from specsanalyzer.convert import calculate_matrix_correction\n", - "\n", - "\n", - "spa = SpecsAnalyzer(config=\"../tests/data/dataEPFL/config/config.yaml\")\n", - "config_dict = spa.config\n", - "lens_mode = \"WideAngleMode\"\n", - "kinetic_energy = 35.000000\n", - "pass_energy = 35.000000\n", - "work_function = 4.3\n", - "binning = 4\n", - "\n", - "eshift = 
np.array(config_dict[\"calib2d_dict\"][\"eShift\"])\n", - "\n", - "aInner, damatrix = get_damatrix_fromcalib2d(\n", - " lens_mode,\n", - " kinetic_energy,\n", - " pass_energy,\n", - " work_function,\n", - " config_dict,\n", - ")\n", - "\n", - "dapolymatrix = calculate_polynomial_coef_da(\n", - " damatrix, kinetic_energy, pass_energy, eshift\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# testing ..\n", - "# check the behaviour for various lens modes\n", - "lensmodes_angle = [\n", - " \"WideAngleMode\",\n", - " \"LowAngularDispersion\",\n", - " \"MediumAngularDispersion\",\n", - " \"HighAngularDispersion\",\n", - " \"WideAngleMode\",\n", - " \"SuperWideAngleMode\",\n", - "]\n", - "lensmodes_space = [\n", - " \"LargeArea\",\n", - " \"MediumArea\",\n", - " \"SmallArea\",\n", - " \"SmallArea2\",\n", - " \"HighMagnification2\",\n", - " \"HighMagnification\",\n", - " \"MediumMagnification\",\n", - " \"LowMagnification\",\n", - "]\n", - "\n", - "# #print(lensmodes_angle)\n", - "# #print(lensmodes_space)\n", - "# #print()\n", - "# print(config_dict.keys())\n", - "# config_dict['calib2d_dict'].keys()\n", - "# correctiondic=spa.correction_matrix_dict\n", - "# correctiondic['supported_lens_modes']['lens_modes_angle']\n", - "\n", - "\n", - "# from specsanalyzer.core import get_modes_from_calib_dict\n", - "\n", - "# lista,listb=get_modes_from_calib_dict(config_dict['calib2d_dict'])\n", - "# lista\n", - "\"WideAngleModel\" in lensmodes_angle" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "test_dir = r\"C:\\Users\\Michele\\Documents\\GitHub\\vscode\\specsanalyzer\\tests\"\n", - "\n", - "\n", - "def test_lens_raise():\n", - " error_lens_mode = \"WideAngleModel\"\n", - " expected_out = \"convert_image: unsupported lens mode: WideAngleModel\"\n", - "\n", - " \"\"\"Test if program raises suitable errors\"\"\"\n", - " raw_image_name = os.fspath(\n", - " f\"{test_dir}/data/dataEPFL/R9132/Data9132_RAWDATA.tsv\",\n", - " )\n", - " with open(raw_image_name) as file: # pylint: disable=W1514\n", - " tsv_data = np.loadtxt(file, delimiter=\"\\t\")\n", - "\n", - " configpath = os.fspath(f\"{test_dir}/data/dataEPFL/config/config.yaml\")\n", - " spa = SpecsAnalyzer(config=configpath)\n", - " kinetic_energy = 35.000000\n", - " pass_energy = 35.000000\n", - " work_function = 4.2\n", - "\n", - " try:\n", - " converted = spa.convert_image( # noqa: F841 # pylint: disable=W0612\n", - " raw_img=tsv_data,\n", - " lens_mode=error_lens_mode,\n", - " kinetic_energy=kinetic_energy,\n", - " pass_energy=pass_energy,\n", - " work_function=work_function,\n", - " apply_fft_filter=False,\n", - " )\n", - " test_result = True\n", - " except ValueError as error:\n", - " print(\"Found value error: \")\n", - " print(str(error))\n", - " test_result = str(error)\n", - " assert test_result == expected_out" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "test_lens_raise()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "test_dir = r\"C:\\Users\\Michele\\Documents\\GitHub\\vscode\\specsanalyzer\\tests\"\n", - "\n", - "\n", - "def test_lens_traceback():\n", - " error_lens_mode = \"WideAngleMode\"\n", - " expected_out = (\n", - " \"The supported modes were not found in the calib2d dictionary\"\n", - " )\n", - "\n", - " \"\"\"Test if program raises suitable errors\"\"\"\n", 
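The membership check above (note the deliberately misspelled "WideAngleModel") seeds the error test that follows. A sketch of the validation such a test exercises, using the message string the notebook expects; the package's real check lives in specsanalyzer.convert and may differ:

    def validate_lens_mode(lens_mode, angle_modes, space_modes):
        """Reject unsupported modes with the message the test above expects."""
        if lens_mode not in angle_modes and lens_mode not in space_modes:
            raise ValueError(f"convert_image: unsupported lens mode: {lens_mode}")
        # A sketch of the distinction the two mode lists encode:
        return "angular" if lens_mode in angle_modes else "spatial"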
- " raw_image_name = os.fspath(\n", - " f\"{test_dir}/data/dataEPFL/R9132/Data9132_RAWDATA.tsv\",\n", - " )\n", - " with open(raw_image_name) as file: # pylint: disable=W1514\n", - " tsv_data = np.loadtxt(file, delimiter=\"\\t\")\n", - "\n", - " configpath = os.fspath(f\"{test_dir}/data/dataEPFL/config/config.yaml\")\n", - " spa = SpecsAnalyzer(config=configpath)\n", - " kinetic_energy = 35.000000\n", - " pass_energy = 35.000000\n", - " work_function = 4.2\n", - " # let's delibertaly remove the keys\n", - " spa.config[\"calib2d_dict\"].pop(\"supported_angle_modes\")\n", - " spa.config[\"calib2d_dict\"].pop(\"supported_space_modes\")\n", - "\n", - " try:\n", - "\n", - " converted = spa.convert_image( # noqa: F841 # pylint: disable=W0612\n", - " raw_img=tsv_data,\n", - " lens_mode=error_lens_mode,\n", - " kinetic_energy=kinetic_energy,\n", - " pass_energy=pass_energy,\n", - " work_function=work_function,\n", - " apply_fft_filter=False,\n", - " )\n", - "\n", - " test_result = True\n", - " except KeyError as error:\n", - " print(\"Found key error: \")\n", - " print(str(error))\n", - " test_result = str(error)[1:-1] # this removes the '\n", - " print(test_result, expected_out)\n", - " assert test_result == expected_out" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "test_lens_traceback()\n", - "# spa.config[\"calib2d_dict\"].keys()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "spa.config[\"calib2d_dict\"].pop(\"supported_angle_modes\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "spa.config[\"calib2d_dict\"].keys()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3.10.4 ('specanalyserenv')", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.4" - }, - "orig_nbformat": 4, - "vscode": { - "interpreter": { - "hash": "01f3a50f1cec8b32686da9a100309d20236977f5c6d2fb4bd4818f1295405c21" - } - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/tutorial/load_sweep_scan.ipynb b/tutorial/load_sweep_scan.ipynb deleted file mode 100644 index b3a415d..0000000 --- a/tutorial/load_sweep_scan.ipynb +++ /dev/null @@ -1,292 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## This is an example showcasing the loading of trARPES data as collected using the Phoibos detector at FHI Berlin.\n", - "The band dispersion is loaded as a xarray dataframe following a conversion to the [NeXus format](https://manual.nexusformat.org/classes/contributed_definitions/NXmpes.html#nxmpes) using the [Nomad Parser Nexus](https://github.com/nomad-coe/nomad-parser-nexus)." - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "First, the SpecsScan class is imported which has the scan loader as its class method." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2\n", - "from specsscan import SpecsScan\n", - "from pathlib import Path\n", - "from matplotlib import pyplot as plt" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Here, a SpecsScan class instance is created as per the configuration provided in [config.yaml](../tests/data/config.yaml). The user may set the entries in config.yaml file, for example, the data path and conversion parameters as per the requirements before creating this instance" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### The path may changed to point to the scan folder of the data of interest (for example, on a server drive)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "config = {\"spa_params\": {\n", - " 'ek_range_min': 0.07597844332538357,\n", - " 'ek_range_max': 0.8965456312395133,\n", - " 'ang_range_min': 0.16732026143790849,\n", - " 'ang_range_max': 0.8449673202614381,\n", - " \"Ang_Offset_px\":13,\n", - " \"rotation_angle\": 2,\n", - " \"crop\":True,\n", - "}}\n", - "sps = SpecsScan(config=config, user_config=\"../specsscan/config/example_config_FHI.yaml\")\n", - "path = \"../tests/data/\" # Path to the test data set" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "metadata = {}\n", - "# manual Meta data. These should ideally come from an Electronic Lab Notebook.\n", - "#General\n", - "metadata['experiment_summary'] = 'TbTe3 tilt map around EF with 800nm pump at 20fs after pump-probe overlap'\n", - "metadata['entry_title'] = 'TbTe3 XUV Fermi surface map at 20 fs'\n", - "metadata['experiment_title'] = 'TbTe3 XUV Fermi surface map at 20 fs'\n", - "\n", - "#User\n", - "# Fill general parameters of NXuser\n", - "# TODO: discuss how to deal with multiple users?\n", - "metadata['user0'] = {}\n", - "metadata['user0']['name'] = 'Laurenz Rettig'\n", - "metadata['user0']['role'] = 'Principal Investigator'\n", - "metadata['user0']['affiliation'] = 'Fritz Haber Institute of the Max Planck Society'\n", - "metadata['user0']['address'] = 'Faradayweg 4-6, 14195 Berlin'\n", - "metadata['user0']['email'] = 'rettig@fhi-berlin.mpg.de'\n", - "\n", - "metadata['user1'] = {}\n", - "metadata['user1']['name'] = 'William Windsor'\n", - "metadata['user1']['role'] = 'Principal Investigator'\n", - "metadata['user1']['affiliation'] = 'Fritz Haber Institute of the Max Planck Society'\n", - "metadata['user1']['address'] = 'Faradayweg 4-6, 14195 Berlin'\n", - "metadata['user1']['email'] = 'windsor@fhi-berlin.mpg.de'\n", - "\n", - "metadata['instrument'] = {}\n", - "# energy resolution\n", - "metadata['instrument']['energy_resolution'] = 150.\n", - "metadata['instrument']['electronanalyser'] = {}\n", - "metadata['instrument']['electronanalyser']['energy_resolution'] = 120\n", - "metadata['instrument']['electronanalyser']['angular_resolution'] = 0.2\n", - "metadata['instrument']['electronanalyser']['spatial_resolution'] = 0.5\n", - "\n", - "#probe beam\n", - "metadata['instrument']['beam']={}\n", - "metadata['instrument']['beam']['probe']={}\n", - "metadata['instrument']['beam']['probe']['incident_energy'] = 21.7\n", - "metadata['instrument']['beam']['probe']['incident_energy_spread'] = 0.11\n", - "metadata['instrument']['beam']['probe']['pulse_duration'] = 20.\n", 
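The SpecsScan instantiation above layers an inline dict (carrying the crop parameters) over the FHI example config file. A minimal sketch of that pattern; the assumption that inline entries override the file's values is mine, so check the specsscan docs for the actual merge order:

    from specsscan import SpecsScan

    # Inline overrides plus a beamline default file, as in the deleted cell.
    sps = SpecsScan(
        config={"spa_params": {"crop": True, "rotation_angle": 2}},
        user_config="../specsscan/config/example_config_FHI.yaml",
    )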
- "metadata['instrument']['beam']['probe']['frequency'] = 500.\n", - "metadata['instrument']['beam']['probe']['incident_polarization'] = [1, 1, 0, 0] # p pol Stokes vector\n", - "metadata['instrument']['beam']['probe']['extent'] = [80., 80.]\n", - "#pump beam\n", - "metadata['instrument']['beam']['pump']={}\n", - "metadata['instrument']['beam']['pump']['incident_energy'] = 1.55\n", - "metadata['instrument']['beam']['pump']['incident_energy_spread'] = 0.08\n", - "metadata['instrument']['beam']['pump']['pulse_duration'] = 35.\n", - "metadata['instrument']['beam']['pump']['frequency'] = 500.\n", - "metadata['instrument']['beam']['pump']['incident_polarization'] = [1, -1, 0, 0] # s pol Stokes vector\n", - "metadata['instrument']['beam']['pump']['incident_wavelength'] = 800.\n", - "metadata['instrument']['beam']['pump']['average_power'] = 224.\n", - "metadata['instrument']['beam']['pump']['pulse_energy'] = metadata['instrument']['beam']['pump']['average_power']/metadata['instrument']['beam']['pump']['frequency']#µJ\n", - "metadata['instrument']['beam']['pump']['extent'] = [300/4*2.34, 270/4*2.35] #Gaussian 4sigma -> FWHM\n", - "metadata['instrument']['beam']['pump']['fluence'] = 1.00\n", - "metadata['instrument']['beam']['pump']['delay'] = 0.02\n", - "\n", - "#sample\n", - "metadata['sample']={}\n", - "metadata['sample']['preparation_date'] = '2017-03-19T10:00:00+00:00'\n", - "metadata['sample']['preparation_description'] = 'Cleaved'\n", - "metadata['sample']['sample_history'] = 'Cleaved in UHV'\n", - "metadata['sample']['chemical_formula'] = 'TbTe3'\n", - "metadata['sample']['description'] = 'cleaved single crystal of TbTe3'\n", - "metadata['sample']['name'] = 'TbTe3 Single Crystal'\n", - "\n", - "#metadata[\"scan_info\"] = {}\n", - "#metadata[\"scan_info\"][\"trARPES:XGS600:PressureAC:P_RD\"] = 2.5E-11\n", - "#metadata[\"scan_info\"][\"trARPES:Carving:TEMP_RBV\"] = 70\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#sps.crop_tool(scan=6455)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The load_scan method loads the scan as an xarray along with the metadata needed for nexus conversion. 
The progress bars can be activated by changing the config parameter, enable_nested_progress_bar, to true in config.yaml " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "res_xarray = sps.load_scan(\n", - " scan=6455, # Scan number for an example mirror scan\n", - " metadata=metadata,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plt.figure()\n", - "res_xarray.plot()\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sps.save(\"Scan6455.nxs\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The metadata associated with the scan is added as an attribute to the xarray" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "res_xarray.attrs[\"metadata\"].keys()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "raw = res_xarray.attrs[\"metadata\"][\"loader\"][\"raw_data\"]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "energies = res_xarray.attrs[\"metadata\"][\"scan_info\"][\"KineticEnergy\"]" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "tags": [] - }, - "source": [ - "## View the data with H5Web\n", - "H5Web is a tool for visualizing any data in the h5 data format. Since the NeXus format builds opon h5 it can be used to view this data as well. We just import the package and call H5Web with the output filename from the convert command above. \n", - "\n", - "You can also view this data with the H5Viewer or other tools from your local filesystem." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from jupyterlab_h5web import H5Web" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "H5Web(Scan6455.nxs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "specenv38", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.12" - }, - "vscode": { - "interpreter": { - "hash": "a164666994e9db75450cd7016dd7e51d42ea6e7c1e5e8017af1f8068ca906367" - } - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/tutorial/phoibos_eln_data.yaml b/tutorial/phoibos_eln_data.yaml deleted file mode 100644 index 935aa68..0000000 --- a/tutorial/phoibos_eln_data.yaml +++ /dev/null @@ -1,108 +0,0 @@ -Instrument: - energy_resolution: - unit: meV - value: 140.0 - temporal_resolution: - unit: fs - value: 35.0 - Analyzer: - energy_resolution: - unit: meV - value: 110.00000000000001 - angular_resolution: - unit: degrees - value: 0.08 - spatial_resolution: - unit: µm - value: 100 - Beam: - Probe: - extent: - unit: µm - value: - - 80.0 - - 80.0 - incident_energy: - unit: eV - value: 21.7 - incident_energy_spread: - unit: eV - value: 0.11 - incident_polarization: - - 1.0 - - 1.0 - - 0.0 - - 0.0 - pulse_duration: - unit: fs - value: 20.0 - Pump: - average_power: - unit: mW - value: 300.0 - extent: - unit: µm - value: - - 229.99999999999997 - - 265.0 - fluence: - unit: mJ / cm ** 2 - value: 0.15000000000000002 - incident_energy: - unit: eV - value: 1.5499999999999998 - incident_energy_spread: - unit: eV - value: 0.08 - incident_polarization: - - 1.0 - - -1.0 - - 0.0 - - 0.0 - incident_wavelength: - unit: nm - value: 799.9999999999999 - pulse_duration: - unit: fs - value: 35.0 - pulse_energy: - unit: µJ - value: 0.6 - Manipulator: - sample_temperature: - unit: K - value: 300.0 - Source: - Probe: - frequency: - unit: kHz - value: 500.0 - photon_energy: - unit: eV - value: 21.700000000000003 - Pump: - frequency: - unit: kHz - value: 500.0 - photon_energy: - unit: eV - value: 1.5500000000000003 -Sample: - chemical_formula: WSe2 - description: Sample - gas_pressure: - unit: mbar - value: 5.000000000000001e-11 - name: WSe2 Single Crystal - preparation_date: '2019-01-13T09:00:00+00:00' - sample_history: Cleaved - temperature: - unit: K - value: 300.0 -User: - address: Faradayweg 4-6, 14915 Berlin - affiliation: Fritz Haber Institute of the Max Planck Society - email: user@fhi-berlin.mpg.de - name: user - role: Principal Investigator -title: Valence Band Dynamics - 800 nm linear s-polarized pump, 0.6 mJ/cm2 absorbed fluence diff --git a/tutorial/remove_grid_by_fourier_filtering.ipynb b/tutorial/remove_grid_by_fourier_filtering.ipynb deleted file mode 100644 index 80d7163..0000000 --- a/tutorial/remove_grid_by_fourier_filtering.ipynb +++ /dev/null @@ -1,186 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "from importlib import reload\n", - "import specsanalyzer.img_tools\n", - "from specsanalyzer.img_tools import fourier_filter_2d\n", - "\n", - 
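One caveat in the H5Web viewer cell of the load_sweep_scan notebook above: H5Web(Scan6455.nxs) passes a bare name, which raises a NameError; the filename must be a string. The corrected call:

    from jupyterlab_h5web import H5Web

    # The filename must be quoted; a bare Scan6455.nxs is a NameError.
    H5Web("Scan6455.nxs")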
"reload(specsanalyzer.img_tools)\n", - "%matplotlib widget" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import pandas as pd\n", - "\n", - "print(os.fspath(\"./tests/data/dataFHI/Scan1232.tsv\"))\n", - "\n", - "with open(\"../tests/data/dataFHI/Scan1232.tsv\") as file:\n", - " tsv_data = np.loadtxt(file, delimiter=\"\\t\")\n", - "\n", - "plt.figure()\n", - "plt.imshow(tsv_data)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "peaks = []\n", - "peaks.append(\n", - " {\n", - " \"pos_x\": 79,\n", - " \"pos_y\": 0,\n", - " \"sigma_x\": 8,\n", - " \"sigma_y\": 8,\n", - " \"amplitude\": 1,\n", - " }\n", - ")\n", - "peaks.append(\n", - " {\n", - " \"pos_x\": 176,\n", - " \"pos_y\": 0,\n", - " \"sigma_x\": 8,\n", - " \"sigma_y\": 8,\n", - " \"amplitude\": 1,\n", - " }\n", - ")\n", - "peaks.append(\n", - " {\n", - " \"pos_x\": 0,\n", - " \"pos_y\": 109,\n", - " \"sigma_x\": 5,\n", - " \"sigma_y\": 8,\n", - " \"amplitude\": 1,\n", - " }\n", - ")\n", - "peaks.append(\n", - " {\n", - " \"pos_x\": 78,\n", - " \"pos_y\": 109,\n", - " \"sigma_x\": 5,\n", - " \"sigma_y\": 5,\n", - " \"amplitude\": 1,\n", - " }\n", - ")\n", - "peaks.append(\n", - " {\n", - " \"pos_x\": 175,\n", - " \"pos_y\": 108,\n", - " \"sigma_x\": 5,\n", - " \"sigma_y\": 5,\n", - " \"amplitude\": 1,\n", - " }\n", - ")\n", - "peaks.append(\n", - " {\n", - " \"pos_x\": 254,\n", - " \"pos_y\": 109,\n", - " \"sigma_x\": 5,\n", - " \"sigma_y\": 8,\n", - " \"amplitude\": 1,\n", - " }\n", - ")\n", - "peaks" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fft = fourier_filter_2d(tsv_data, peaks, ret=\"fft\")\n", - "plt.figure()\n", - "plt.imshow(np.abs(fft[0:350, 0:180]), vmax=3000000)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mask = fourier_filter_2d(tsv_data, peaks, ret=\"mask\")\n", - "plt.figure()\n", - "plt.imshow(np.abs(mask[0:350, 0:180]), vmax=1)\n", - "plt.colorbar()\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "filtered_fft = fourier_filter_2d(tsv_data, peaks, ret=\"filtered_fft\")\n", - "plt.figure()\n", - "plt.imshow(np.abs(filtered_fft[0:350, 0:180]), vmax=1000000)\n", - "plt.colorbar()\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "filtered = fourier_filter_2d(tsv_data, peaks)\n", - "plt.figure()\n", - "plt.imshow(filtered)\n", - "plt.colorbar()\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3.10.4 ('specanalyserenv')", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.4" - }, - "orig_nbformat": 4, - "vscode": { - "interpreter": { - "hash": "01f3a50f1cec8b32686da9a100309d20236977f5c6d2fb4bd4818f1295405c21" - } - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/tutorial/test.ipynb b/tutorial/test.ipynb deleted file mode 100644 index b7345f0..0000000 --- 
a/tutorial/test.ipynb +++ /dev/null @@ -1,107 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# from importlib import reload\n", - "import curses #### pip install windows-curses\n", - "\n", - "import matplotlib.pyplot as plt\n", - "import xarray as xr\n", - "import numpy as np\n", - "\n", - "import specsanalyzer\n", - "from specsanalyzer import SpecsAnalyzer" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# reload(specsanalyzer)\n", - "spa = SpecsAnalyzer()\n", - "spa.__init__\n", - "spa.config[\"calib2d_dict\"].keys()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "with open(\"../tests/data/dataEPFL/R9132/Data9132_RAWDATA.tsv\") as file:\n", - " # with open('../tests/data/dataFHI/Scan1232.tsv') as file:\n", - " tsv_data = np.loadtxt(file, delimiter=\"\\t\")\n", - "plt.figure()\n", - "plt.imshow(tsv_data)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Testing convert and crop on the imported data\n", - "spa = SpecsAnalyzer(config=\"../tests/data/dataEPFL/config/config.yaml\")\n", - "\n", - "lens_mode = \"WideAngleMode\"\n", - "kinetic_energy = 35.0\n", - "pass_energy = 35.0\n", - "work_function = 4.2\n", - "res_xarray = spa.convert_image(\n", - " tsv_data,\n", - " lens_mode,\n", - " kinetic_energy,\n", - " pass_energy,\n", - " work_function,\n", - ")\n", - "plt.figure()\n", - "res_xarray.plot(cmap=\"viridis\", vmin=0)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3.10.4 ('specanalyserenv')", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.3" - }, - "orig_nbformat": 4, - "vscode": { - "interpreter": { - "hash": "01f3a50f1cec8b32686da9a100309d20236977f5c6d2fb4bd4818f1295405c21" - } - } - }, - "nbformat": 4, - "nbformat_minor": 2 -}
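Since this patch removes the exploratory notebooks, the final convert-and-plot check above translates naturally into a pytest smoke test; a sketch reusing the same data and parameters as the deleted cell:

    import numpy as np
    from specsanalyzer import SpecsAnalyzer

    def test_convert_image_smoke():
        """Converting the EPFL reference image should yield a finite,
        non-empty 2D result, as the deleted notebook verified by eye."""
        with open("../tests/data/dataEPFL/R9132/Data9132_RAWDATA.tsv") as file:
            tsv_data = np.loadtxt(file, delimiter="\t")
        spa = SpecsAnalyzer(config="../tests/data/dataEPFL/config/config.yaml")
        res_xarray = spa.convert_image(
            tsv_data, "WideAngleMode", 35.0, 35.0, 4.2,
        )
        assert res_xarray.ndim == 2
        assert np.isfinite(res_xarray.values).any()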