diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 232c70dea..000000000 --- a/.coveragerc +++ /dev/null @@ -1,10 +0,0 @@ -[run] -source = gstools -omit = *docs*, *examples*, *tests*, */gstools/covmodel/plot.py, */gstools/field/plot.py - -[report] -exclude_lines = - pragma: no cover - if __name__ == '__main__': - def __repr__ - def __str__ diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 000000000..956855965 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,141 @@ +name: Continuous Integration + +on: + push: + branches: + - "master" + - "develop" + tags: + - "*" + pull_request: + branches: + - "develop" + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + source_check: + name: source check + runs-on: ubuntu-latest + strategy: + fail-fast: false + + steps: + - uses: actions/checkout@v2 + + - name: Set up Python 3.8 + uses: actions/setup-python@v2 + with: + python-version: 3.8 + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install black pylint + pip install --editable . + + - name: black check + run: | + python -m black --check . 
+ + - name: pylint check + run: | + python -m pylint gstools/ + + build_wheels: + name: wheels on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: '0' + + - name: Build wheels + uses: joerick/cibuildwheel@v1.10.0 + env: + CIBW_BUILD: cp36-* cp37-* cp38-* cp39-* + CIBW_TEST_EXTRAS: test + CIBW_TEST_COMMAND: pytest -v {project}/tests + with: + output-dir: dist + + - uses: actions/upload-artifact@v2 + with: + path: ./dist/*.whl + + build_sdist: + name: sdist on ${{ matrix.os }} with py ${{ matrix.python-version }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: [3.6, 3.7, 3.8, 3.9] + + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: '0' + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + env: + GSTOOLS_BUILD_PARALLEL: 1 + run: | + python -m pip install --upgrade pip + pip install build coveralls>=3.0.0 + pip install --editable .[test] + + - name: Run tests + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + python -m pytest --cov gstools --cov-report term-missing -v tests/ + python -m coveralls --service=github + + - name: Build sdist + run: | + # PEP 517 package builder from pypa + python -m build --sdist --outdir dist . 
+ + - uses: actions/upload-artifact@v2 + if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.9' + with: + path: dist/*.tar.gz + + upload_to_pypi: + needs: [build_wheels, build_sdist] + runs-on: ubuntu-latest + + steps: + - uses: actions/download-artifact@v2 + with: + name: artifact + path: dist + + - name: Publish to Test PyPI + # only if working on master or develop + if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/develop' + uses: pypa/gh-action-pypi-publish@release/v1 + with: + user: __token__ + password: ${{ secrets.test_pypi_password }} + repository_url: https://test.pypi.org/legacy/ + skip_existing: true + + - name: Publish to PyPI + # only if tagged + if: startsWith(github.ref, 'refs/tags') + uses: pypa/gh-action-pypi-publish@release/v1 + with: + user: __token__ + password: ${{ secrets.pypi_password }} diff --git a/.gitignore b/.gitignore index b6706876e..ac070f571 100644 --- a/.gitignore +++ b/.gitignore @@ -111,6 +111,8 @@ info/ *.c *.cpp +# generated version file +gstools/_version.py # generated docs docs/source/examples/ @@ -123,3 +125,4 @@ docs/source/generated/ *.vtu *.vtr +*.vtk diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 000000000..f88c11ac0 --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,14 @@ +version: 2 + +sphinx: + configuration: docs/source/conf.py + +formats: all + +python: + version: 3.7 + install: + - method: pip + path: . 
+ extra_requirements: + - doc diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 125eed4bb..000000000 --- a/.travis.yml +++ /dev/null @@ -1,100 +0,0 @@ -language: python -python: 3.8 - -# setuptools-scm needs all tags in order to obtain a proper version -git: - depth: false - -env: - global: - - TWINE_USERNAME=geostatframework - - CIBW_BEFORE_BUILD="pip install numpy==1.17.3 cython==0.29.14 setuptools" - - CIBW_TEST_REQUIRES=pytest - - CIBW_TEST_COMMAND="pytest -v {project}/tests" - -before_install: - - | - if [[ "$TRAVIS_OS_NAME" = windows ]]; then - choco install python --version 3.8.0 - export PATH="/c/Python38:/c/Python38/Scripts:$PATH" - # make sure it's on PATH as 'python3' - ln -s /c/Python38/python.exe /c/Python38/python3.exe - fi - -script: - - python3 -m pip install cibuildwheel==1.3.0 - - python3 -m cibuildwheel --output-dir dist - -after_success: - - | - if [[ $TRAVIS_PULL_REQUEST == 'false' ]]; then - python3 -m pip install twine - python3 -m twine upload --verbose --skip-existing --repository-url https://test.pypi.org/legacy/ dist/* - if [[ $TRAVIS_TAG ]]; then python3 -m twine upload --verbose --skip-existing dist/*; fi - fi - -notifications: - email: - recipients: - - info@geostat-framework.org - -jobs: - include: - - name: "sdist and coverage" - services: docker - env: OMP_NUM_THREADS=4 - script: - - python3 -m pip install -U setuptools pytest-cov coveralls - - python3 -m pip install -U numpy==1.17.3 cython==0.29.14 - - python3 -m pip install -r requirements.txt - - python3 setup.py sdist -d dist - - python3 setup.py --openmp build_ext --inplace - - python3 -m pytest --cov gstools --cov-report term-missing -v tests/ - - python3 -m coveralls - - - name: "Linux py35" - services: docker - env: CIBW_BUILD="cp35-*" - - name: "Linux py36" - services: docker - env: CIBW_BUILD="cp36-*" - - name: "Linux py37" - services: docker - env: CIBW_BUILD="cp37-*" - - name: "Linux py38" - services: docker - env: CIBW_BUILD="cp38-*" - - - name: 
"MacOS py35" - os: osx - language: shell - env: CIBW_BUILD="cp35-*" - - name: "MacOS py36" - os: osx - language: shell - env: CIBW_BUILD="cp36-*" - - name: "MacOS py37" - os: osx - language: shell - env: CIBW_BUILD="cp37-*" - - name: "MacOS py38" - os: osx - language: shell - env: CIBW_BUILD="cp38-*" - - - name: "Win py35" - os: windows - language: shell - env: CIBW_BUILD="cp35-*" - - name: "Win py36" - os: windows - language: shell - env: CIBW_BUILD="cp36-*" - - name: "Win py37" - os: windows - language: shell - env: CIBW_BUILD="cp37-*" - - name: "Win py38" - os: windows - language: shell - env: CIBW_BUILD="cp38-*" diff --git a/AUTHORS.md b/AUTHORS.md index 23990822f..63601d87c 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -6,11 +6,12 @@ and was created by following people. ## Main Authors -- [Lennart Schüler](https://github.com/LSchueler), Email: -- [Sebastian Müller](https://github.com/MuellerSeb), Email: +- Sebastian Müller, GitHub: [@MuellerSeb](https://github.com/MuellerSeb), Email: +- Lennart Schüler, GitHub: [@LSchueler](https://github.com/LSchueler), Email: ## Contributors (in order of contributions) -- Falk Heße, Email: +- Falk Heße, GitHub: [@fhesze](https://github.com/fhesze), Email: - Bane Sullivan, GitHub: [@banesullivan](https://github.com/banesullivan) +- Tobias Glaubach, GitHub: [@TobiasGlaubach](https://github.com/TobiasGlaubach) \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index e66210a63..51fa960f7 100755 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,147 @@ All notable changes to **GSTools** will be documented in this file. +## [1.3.0] - Pure Pink - 2021-04 + +### Topics + +#### Geographical Coordinates Support ([#113](https://github.com/GeoStat-Framework/GSTools/issues/113)) +- added boolean init parameter `latlon` to indicate a geographic model. 
When given, spatial dimension is fixed to `dim=3`, `anis` and `angles` will be ignored, since anisotropy is not well-defined on a sphere. +- add property `field_dim` to indicate the dimension of the resulting field. Will be 2 if `latlon=True` +- added yadrenko variogram, covariance and correlation method, since the geographic models are derived from standard models in 3D by plugging in the chordal distance of two points on a sphere derived from their great-circle distance `zeta`: + - `vario_yadrenko`: given by `variogram(2 * np.sin(zeta / 2))` + - `cov_yadrenko`: given by `covariance(2 * np.sin(zeta / 2))` + - `cor_yadrenko`: given by `correlation(2 * np.sin(zeta / 2))` +- added plotting routines for yadrenko methods described above +- the `isometrize` and `anisometrize` methods will convert `latlon` tuples (given in degree) to points on the unit-sphere in 3D and vice versa +- representation of geographical models doesn't display the `dim`, `anis` and `angles` parameters, but `latlon=True` +- `fit_variogram` will expect an estimated variogram with great-circle distances given in radians +- **Variogram estimation** + - `latlon` switch implemented in `estimate_vario` routine + - will return a variogram estimated by the great-circle distance (haversine formula) given in radians +- **Field** + - added plotting routines for latlon fields + - no vector fields possible on latlon fields + - correctly handle pos tuple for latlon fields + +#### Krige Unification ([#97](https://github.com/GeoStat-Framework/GSTools/issues/97)) +- Swiss Army Knife for kriging: The `Krige` class now provides everything in one place +- "Kriging the mean" is now possible with the switch `only_mean` in the call routine +- `Simple`/`Ordinary`/`Universal`/`ExtDrift`/`Detrended` are only shortcuts to `Krige` with limited input parameter list +- We now use the `covariance` function to build up the kriging matrix (instead of variogram) +- An `unbiased` switch was added to enable simple kriging 
(where the unbiased condition is not given) +- An `exact` switch was added to allow smoother results, if a `nugget` is present in the model +- A `cond_err` parameter was added, where measurement error variances can be given for each conditional point +- pseudo-inverse matrix is now used to solve the kriging system (can be disabled by the new switch `pseudo_inv`), this is equal to solving the system with least-squares and prevents numerical errors +- added options `fit_normalizer` and `fit_variogram` to automatically fit normalizer and variogram to given data + +#### Directional Variograms and Auto-binning ([#87](https://github.com/GeoStat-Framework/GSTools/issues/87), [#106](https://github.com/GeoStat-Framework/GSTools/issues/106), [#131](https://github.com/GeoStat-Framework/GSTools/issues/131)) +- new routine name `vario_estimate` instead of `vario_estimate_unstructured` (old kept for legacy code) for simplicity +- new routine name `vario_estimate_axis` instead of `vario_estimate_structured` (old kept for legacy code) for simplicity +- **vario_estimate** + - added simple automatic binning routine to determine bins from given data (one third of box diameter as max bin distance, sturges rule for number of bins) + - allow to pass multiple fields for joint variogram estimation (e.g. for daily precipitation) on same mesh + - `no_data` option added to allow missing values + - **masked fields** + - user can now pass a masked array (or a list of masked arrays) to deselect data points. 
+ - in addition, a `mask` keyword was added to provide an external mask + - **directional variograms** + - directional variograms can now be estimated + - either provide a list of direction vectors or angles for directions (spherical coordinates) + - can be controlled by given angle tolerance and (optional) bandwidth + - prepared for nD + - structured fields (pos tuple describes axes) can now be passed to estimate an isotropic or directional variogram + - distance calculation in cython routines is now independent of dimension +- **vario_estimate_axis** + - estimation along array axis now possible in arbitrary dimensions + - `no_data` option added to allow missing values (solves [#83](https://github.com/GeoStat-Framework/GSTools/issues/83)) + - axis can be given by name (`"x"`, `"y"`, `"z"`) or axis number (`0`, `1`, `2`, `3`, ...) + +#### Better Variogram fitting ([#78](https://github.com/GeoStat-Framework/GSTools/issues/78), [#145](https://github.com/GeoStat-Framework/GSTools/pull/145)) +- fixing sill possible now +- `loss` is now selectable for smoother handling of outliers +- r2 score can now be returned to get an impression of the goodness of fitting +- weights can be passed +- instead of deselecting parameters, one can also give fix values for each parameter +- default init guess for `len_scale` is now mean of given bin-centers +- default init guess for `var` and `nugget` is now mean of given variogram values + +#### CovModel update ([#109](https://github.com/GeoStat-Framework/GSTools/issues/109), [#122](https://github.com/GeoStat-Framework/GSTools/issues/122), [#157](https://github.com/GeoStat-Framework/GSTools/pull/157)) +- add new `rescale` argument and attribute to the `CovModel` class to be able to rescale the `len_scale` (useful for unit conversion or rescaling `len_scale` to coincide with the `integral_scale` like it's the case with the Gaussian model) + See: 
[#90](https://github.com/GeoStat-Framework/GSTools/issues/90), [GeoStat-Framework/PyKrige#119](https://github.com/GeoStat-Framework/PyKrige/issues/119) +- added new `len_rescaled` attribute to the `CovModel` class, which is the rescaled `len_scale`: `len_rescaled = len_scale / rescale` +- new method `default_rescale` to provide default rescale factor (can be overridden) +- remove `doctest` calls +- docstring updates in CovModel and derived models +- updated all models to use the `cor` routine and make use of the `rescale` argument (See: [#90](https://github.com/GeoStat-Framework/GSTools/issues/90)) +- TPL models got a separate base class to not repeat code +- added **new models** (See: [#88](https://github.com/GeoStat-Framework/GSTools/issues/88)): + - `HyperSpherical`: (Replaces the old `Intersection` model) Derived from the intersection of hyper-spheres in arbitrary dimensions. Coincides with the linear model in 1D, the circular model in 2D and the classical spherical model in 3D + - `SuperSpherical`: like the HyperSpherical, but the shape parameter derived from dimension can be set by the user. Coincides with the HyperSpherical model by default + - `JBessel`: a hole model valid in all dimensions. The shape parameter controls the dimension it was derived from. For `nu=0.5` this model coincides with the well known `wave` hole model. + - `TPLSimple`: a simple truncated power law controlled by a shape parameter `nu`. Coincides with the truncated linear model for `nu=1` + - `Cubic`: to be compatible with scikit-gstat in the future +- all arguments are now stored as float internally ([#157](https://github.com/GeoStat-Framework/GSTools/pull/157)) +- string representation of the `CovModel` class is now using a float precision (`CovModel._prec=3`) to truncate longish output +- string representation of the `CovModel` class now only shows `anis` and `angles` if model is anisotropic resp. 
rotated +- dimension validity check: raise a warning, if given model is not valid in the desired dimension (See: [#86](https://github.com/GeoStat-Framework/GSTools/issues/86)) + +#### Normalizer, Trend and Mean ([#124](https://github.com/GeoStat-Framework/GSTools/issues/124)) + +- new `normalize` submodule containing power-transforms for data to gain normality +- Base-Class: `Normalizer` providing basic functionality including maximum likelihood fitting +- added: `LogNormal`, `BoxCox`, `BoxCoxShift`, `YeoJohnson`, `Modulus` and `Manly` +- normalizer, trend and mean can be passed to SRF, Krige and variogram estimation routines + - A trend can be a callable function, that represents a trend in input data. For example a linear decrease of temperature with height. + - The normalizer will be applied after the data was detrended, i.e. the trend was subtracted from the data, in order to gain normality. + - The mean is now interpreted as the mean of the normalized data. The user could also provide a callable mean, but it is mostly meant to be constant. + +#### Arbitrary dimensions ([#112](https://github.com/GeoStat-Framework/GSTools/issues/112)) +- allow arbitrary dimensions in all routines (CovModel, Krige, SRF, variogram) +- anisotropy and rotation following a generalization of tait-bryan angles +- CovModel provides `isometrize` and `anisometrize` routines to convert points + +#### New Class for Conditioned Random Fields ([#130](https://github.com/GeoStat-Framework/GSTools/issues/130)) +- **THIS BREAKS BACKWARD COMPATIBILITY** +- `CondSRF` replaces the conditioning feature of the SRF class, which was cumbersome and limited to Ordinary and Simple kriging +- `CondSRF` behaves similar to the `SRF` class, but instead of a covariance model, it takes a kriging class as input. With this kriging class, all conditioning related settings are defined. 
+ +### Enhancements +- Python 3.9 Support [#107](https://github.com/GeoStat-Framework/GSTools/issues/107) +- add routines to format struct. pos tuple by given `dim` or `shape` +- add routine to format struct. pos tuple by given `shape` (variogram helper) +- remove `field.tools` subpackage +- support `meshio>=4.0` and add as dependency +- PyVista mesh support [#59](https://github.com/GeoStat-Framework/GSTools/issues/59) +- added `EARTH_RADIUS` as constant providing earth's radius in km (can be used to rescale models) +- add routines `latlon2pos` and `pos2latlon` to convert lat-lon coordinates to points on unit-sphere and vice versa +- a lot of new examples and tutorials +- `RandMeth` class got a switch to select the sampling strategy +- plotter for n-D fields added [#141](https://github.com/GeoStat-Framework/GSTools/issues/141) +- antialias for contour plots of 2D fields [#141](https://github.com/GeoStat-Framework/GSTools/issues/141) +- building from source is now configured with `pyproject.toml` to care about build dependencies, see [#154](https://github.com/GeoStat-Framework/GSTools/issues/154) + +### Changes +- drop support for Python 3.5 [#146](https://github.com/GeoStat-Framework/GSTools/pull/146) +- added a finite limit for shape-parameters in some CovModels [#147](https://github.com/GeoStat-Framework/GSTools/pull/147) +- drop usage of `pos2xyz` and `xyz2pos` +- remove structured option from generators (structured pos need to be converted first) +- explicitly assert dim=2,3 when generating vector fields +- simplify `pre_pos` routine to save pos tuple and reformat it as an unstructured tuple +- simplify field shaping +- simplify plotting routines +- only the `"unstructured"` keyword is recognized everywhere, everything else is interpreted as `"structured"` (e.g. 
`"rectilinear"`) +- use GitHub-Actions instead of TravisCI +- parallel build now controlled by env-var `GSTOOLS_BUILD_PARALLEL=1`, see [#154](https://github.com/GeoStat-Framework/GSTools/issues/154) +- install extra target for `[dev]` dropped, can be reproduced by `pip install gstools[test, doc]`, see [#154](https://github.com/GeoStat-Framework/GSTools/issues/154) + +### Bugfixes +- typo in keyword argument for vario_estimate_structured [#80](https://github.com/GeoStat-Framework/GSTools/issues/80) +- isotropic rotation of SRF was not possible [#100](https://github.com/GeoStat-Framework/GSTools/issues/100) +- `CovModel.opt_arg` now sorted [#103](https://github.com/GeoStat-Framework/GSTools/issues/103) +- CovModel.fit: check if weights are given as a string (numpy comparison error) [#111](https://github.com/GeoStat-Framework/GSTools/issues/111) +- several pylint fixes ([#159](https://github.com/GeoStat-Framework/GSTools/pull/159)) + ## [1.2.1] - Volatile Violet - 2020-04-14 ### Bugfixes @@ -40,13 +181,13 @@ All notable changes to **GSTools** will be documented in this file. ## [1.1.1] - Reverberating Red - 2019-11-08 ### Enhancements -- added a changelog. See: https://github.com/GeoStat-Framework/GSTools/commit/fbea88300d0862393e52f4b7c3d2b15c2039498b +- added a changelog. See: [commit fbea883](https://github.com/GeoStat-Framework/GSTools/commit/fbea88300d0862393e52f4b7c3d2b15c2039498b) ### Changes - deprecation warnings are now printed if Python versions 2.7 or 3.4 are used #40 #41 ### Bugfixes -- define spectral_density instead of spectrum in covariance models since Cov-base derives spectrum. See: https://github.com/GeoStat-Framework/GSTools/commit/00f2747fd0503ff8806f2eebfba36acff813416b +- define spectral_density instead of spectrum in covariance models since Cov-base derives spectrum. 
See: [commit 00f2747](https://github.com/GeoStat-Framework/GSTools/commit/00f2747fd0503ff8806f2eebfba36acff813416b) - better boundaries for CovModel parameters. See: https://github.com/GeoStat-Framework/GSTools/issues/37 @@ -61,8 +202,8 @@ All notable changes to **GSTools** will be documented in this file. - incompressible flow fields can now be generated #14 - new submodule providing several field transformations like: Zinn&Harvey, log-normal, bimodal, ... #13 - Python 3.4 and 3.7 wheel support #19 -- field can now be generated directly on meshes from [``meshio``](https://github.com/nschloe/meshio) and [``ogs5py``](https://github.com/GeoStat-Framework/ogs5py) f4a3439400b8 -- the srf and kriging classes now store the last ``pos``, ``mesh_type`` and ``field`` values to keep them accessible 29f7f1b02 +- field can now be generated directly on meshes from [meshio](https://github.com/nschloe/meshio) and [ogs5py](https://github.com/GeoStat-Framework/ogs5py), see: [commit f4a3439](https://github.com/GeoStat-Framework/GSTools/commit/f4a3439400b81d8d9db81a5f7fbf6435f603cf05) +- the srf and kriging classes now store the last ``pos``, ``mesh_type`` and ``field`` values to keep them accessible, see: [commit 29f7f1b](https://github.com/GeoStat-Framework/GSTools/commit/29f7f1b029866379ce881f44765f72534d757fae) - tutorials on all important features of GSTools have been written for you guys #20 - a new interface to pyvista is provided to export fields to python vtk representation, which can be used for plotting, exploring and exporting fields #29 @@ -71,8 +212,8 @@ All notable changes to **GSTools** will be documented in this file. 
- the rotation angles are now interpreted in positive direction (counter clock wise) - the ``force_moments`` keyword was removed from the SRF call method, it is now in provided as a field transformation #13 - drop support of python implementations of the variogram estimators #18 -- the ``variogram_normed`` method was removed from the ``CovModel`` class due to redundance 25b164722ac6744ebc7e03f3c0bf1c30be1eba89 -- the position vector of 1D fields does not have to be provided in a list-like object with length 1 a6f5be8bf +- the ``variogram_normed`` method was removed from the ``CovModel`` class due to redundance [commit 25b1647](https://github.com/GeoStat-Framework/GSTools/commit/25b164722ac6744ebc7e03f3c0bf1c30be1eba89) +- the position vector of 1D fields does not have to be provided in a list-like object with length 1 [commit a6f5be8](https://github.com/GeoStat-Framework/GSTools/commit/a6f5be8bfd2db1f002e7889ecb8e9a037ea08886) ### Bugfixes - several minor bugfixes @@ -122,7 +263,8 @@ All notable changes to **GSTools** will be documented in this file. First release of GSTools. -[Unreleased]: https://github.com/GeoStat-Framework/gstools/compare/v1.2.1...HEAD +[Unreleased]: https://github.com/GeoStat-Framework/gstools/compare/v1.3.0...HEAD +[1.3.0]: https://github.com/GeoStat-Framework/gstools/compare/v1.2.1...v1.3.0 [1.2.1]: https://github.com/GeoStat-Framework/gstools/compare/v1.2.0...v1.2.1 [1.2.0]: https://github.com/GeoStat-Framework/gstools/compare/v1.1.1...v1.2.0 [1.1.1]: https://github.com/GeoStat-Framework/gstools/compare/v1.1.0...v1.1.1 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index efa199036..c6335c37f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -29,8 +29,66 @@ with your idea or suggestion and we'd love to discuss about it. 
- Fork the repo on [GitHub](https://github.com/GeoStat-Framework/GSTools) from the [develop branch](https://github.com/GeoStat-Framework/GSTools/tree/develop). - Add yourself to AUTHORS.md (if you want to). -- We use the black code format, please use the script `black --line-length 79 gstools/` after you have written your code. +- We use the black code format, please use the script `black .` after you have written your code. - Add some tests if possible. - Add an example showing your new feature in one of the examples sub-folders if possible. Follow this [Sphinx-Gallary guide](https://sphinx-gallery.github.io/stable/syntax.html#embed-rst-in-your-example-python-files) - Push to your fork and submit a pull request. + +### PyLint Settings + +Your code will be checked by [Pylint](https://github.com/PyCQA/pylint/) +with `pylint gstools` in the CI. +We made some generous default settings in `pyproject.toml` for the linter: + +- max-args = 20 +- max-locals = 50 +- max-branches = 30 +- max-statements = 80 +- max-attributes = 25 +- max-public-methods = 75 + +Since some classes in GSTools are quite huge and some function signatures are +somewhat longish. + +By default [R0801](https://vald-phoenix.github.io/pylint-errors/plerr/errors/similarities/R0801) +(duplicate-code) is disabled, since it produces a lot of false positive errors +for docstrings and `__init__.py` settings. + +We also disabled some pylint checks for some files by setting +comments like these at the beginning: +```python +# pylint: disable=C0103 +``` + +Here is a list of the occurring disabled errors: +- [C0103](https://vald-phoenix.github.io/pylint-errors/plerr/errors/basic/C0103) + (invalid-name) - `ax`, `r` etc. 
are marked as no valid names +- [C0302](https://vald-phoenix.github.io/pylint-errors/plerr/errors/format/C0302) + (too-many-lines) - namely the `CovModel` definition has more than 1000 lines +- [C0415](https://vald-phoenix.github.io/pylint-errors/plerr/errors/imports/C0415) + (import-outside-toplevel) - needed sometimes for deferred imports of optional + dependencies like `matplotlib` +- [R0201](https://vald-phoenix.github.io/pylint-errors/plerr/errors/classes/R0201) + (no-self-use) - methods with no `self` calls in some base-classes +- [W0212](https://vald-phoenix.github.io/pylint-errors/plerr/errors/classes/W0212) + (protected-access) - we didn't want to draw attention to `CovModel._prec` +- [W0221](https://vald-phoenix.github.io/pylint-errors/plerr/errors/classes/W0221) + (arguments-differ) - the `__call__` methods of `SRF` and `Krige` differ from `Field` +- [W0222](https://vald-phoenix.github.io/pylint-errors/plerr/errors/classes/W0222) + (signature-differ) - the `__call__` methods of `SRF` and `Krige` differ from `Field` +- [W0231](https://vald-phoenix.github.io/pylint-errors/plerr/errors/classes/W0231) + (super-init-not-called) - some child classes have their specialized `__init__` +- [W0613](https://vald-phoenix.github.io/pylint-errors/plerr/errors/variables/W0613) + (unused-argument) - needed sometimes to match required call signatures +- [W0632](https://vald-phoenix.github.io/pylint-errors/plerr/errors/variables/W0632) + (unbalanced-tuple-unpacking) - false positive for some call returns +- [E1101](https://vald-phoenix.github.io/pylint-errors/plerr/errors/typecheck/E1101) + (no-member) - some times false positive +- [E1102](https://vald-phoenix.github.io/pylint-errors/plerr/errors/typecheck/E1102) + (not-callable) - this is a false-positive result form some called properties +- [E1130](https://vald-phoenix.github.io/pylint-errors/plerr/errors/typecheck/E1130) + (invalid-unary-operand-type) - false positive at some points + +Although we disabled these 
errors at some points, we encourage you to prevent +disabling errors when it is possible. \ No newline at end of file diff --git a/MANIFEST.in b/MANIFEST.in index 2f67477f9..71c3bb1d5 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,9 +1,7 @@ -include README.md -include MANIFEST.in -include setup.py -include setup.cfg -recursive-include gstools *.py *.pyx *.c -recursive-include tests *.py -recursive-include docs/source * -include docs/Makefile docs/requirements.txt -include LICENSE +prune * +graft tests +recursive-include gstools *.py *.pyx +recursive-exclude gstools *.c *.cpp +include LICENSE README.md pyproject.toml setup.py setup.cfg +exclude CHANGELOG.md CONTRIBUTING.md AUTHORS.md +global-exclude __pycache__ *.py[cod] .* diff --git a/README.md b/README.md index 811823527..010ae06c8 100644 --- a/README.md +++ b/README.md @@ -3,9 +3,9 @@ [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.1313628.svg)](https://doi.org/10.5281/zenodo.1313628) [![PyPI version](https://badge.fury.io/py/gstools.svg)](https://badge.fury.io/py/gstools) [![Conda Version](https://img.shields.io/conda/vn/conda-forge/gstools.svg)](https://anaconda.org/conda-forge/gstools) -[![Build Status](https://travis-ci.com/GeoStat-Framework/GSTools.svg?branch=master)](https://travis-ci.com/GeoStat-Framework/GSTools) -[![Coverage Status](https://coveralls.io/repos/github/GeoStat-Framework/GSTools/badge.svg?branch=master)](https://coveralls.io/github/GeoStat-Framework/GSTools?branch=master) -[![Documentation Status](https://readthedocs.org/projects/gstools/badge/?version=stable)](https://geostat-framework.readthedocs.io/projects/gstools/en/stable/?badge=stable) +[![Build Status](https://github.com/GeoStat-Framework/GSTools/workflows/Continuous%20Integration/badge.svg?branch=develop)](https://github.com/GeoStat-Framework/GSTools/actions) +[![Coverage 
Status](https://coveralls.io/repos/github/GeoStat-Framework/GSTools/badge.svg?branch=develop)](https://coveralls.io/github/GeoStat-Framework/GSTools?branch=develop) +[![Documentation Status](https://readthedocs.org/projects/gstools/badge/?version=latest)](https://geostat-framework.readthedocs.io/projects/gstools/en/stable/?badge=stable) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/ambv/black)

@@ -19,11 +19,14 @@ GeoStatTools provides geostatistical tools for various purposes: - random field generation +- simple, ordinary, universal and external drift kriging - conditioned field generation - incompressible random vector field generation -- simple and ordinary kriging -- variogram estimation and fitting +- (automated) variogram estimation and fitting +- directional variogram estimation and modelling +- data normalization and transformation - many readily provided and even user-defined covariance models +- metric spatio-temporal modelling +- plotting and exporting routines @@ -81,6 +84,9 @@ The documentation also includes some [tutorials][tut_link], showing the most imp - [Kriging][tut5_link] - [Conditioned random field generation][tut6_link] - [Field transformations][tut7_link] +- [Geographic Coordinates][tut8_link] +- [Spatio-Temporal Modelling][tut9_link] +- [Normalizing Data][tut10_link] - [Miscellaneous examples][tut0_link] The associated python scripts are provided in the `examples` folder. @@ -112,23 +118,47 @@ srf.plot() Random field

+GSTools also provides support for [geographic coordinates](https://en.wikipedia.org/wiki/Geographic_coordinate_system). +This works perfectly well with [cartopy](https://scitools.org.uk/cartopy/docs/latest/index.html). + +```python +import matplotlib.pyplot as plt +import cartopy.crs as ccrs +import gstools as gs +# define a structured field by latitude and longitude +lat = lon = range(-80, 81) +model = gs.Gaussian(latlon=True, len_scale=777, rescale=gs.EARTH_RADIUS) +srf = gs.SRF(model, seed=12345) +field = srf.structured((lat, lon)) +# Orthographic plotting with cartopy +ax = plt.subplot(projection=ccrs.Orthographic(-45, 45)) +cont = ax.contourf(lon, lat, field, transform=ccrs.PlateCarree()) +ax.coastlines() +ax.set_global() +plt.colorbar(cont) +``` + +

+lat-lon random field +

+ A similar example but for a three dimensional field is exported to a [VTK](https://vtk.org/) file, which can be visualized with [ParaView](https://www.paraview.org/) or [PyVista](https://docs.pyvista.org) in Python: ```python import gstools as gs # structured field with a size 100x100x100 and a grid-size of 1x1x1 x = y = z = range(100) -model = gs.Gaussian(dim=3, var=0.6, len_scale=20) +model = gs.Gaussian(dim=3, len_scale=[16, 8, 4], angles=(0.8, 0.4, 0.2)) srf = gs.SRF(model) srf((x, y, z), mesh_type='structured') srf.vtk_export('3d_field') # Save to a VTK file for ParaView mesh = srf.to_pyvista() # Create a PyVista mesh for plotting in Python -mesh.threshold_percent(0.5).plot() +mesh.contour(isosurfaces=8).plot() ```

-3d Random field +3d Random field

@@ -152,26 +182,25 @@ y = np.random.RandomState(20011012).rand(1000) * 100. model = gs.Exponential(dim=2, var=2, len_scale=8) srf = gs.SRF(model, mean=0, seed=19970221) field = srf((x, y)) -# estimate the variogram of the field with 40 bins -bins = np.arange(40) -bin_center, gamma = gs.vario_estimate_unstructured((x, y), field, bins) +# estimate the variogram of the field +bin_center, gamma = gs.vario_estimate((x, y), field) # fit the variogram with a stable model. (no nugget fitted) fit_model = gs.Stable(dim=2) fit_model.fit_variogram(bin_center, gamma, nugget=False) # output -ax = fit_model.plot(x_max=40) -ax.plot(bin_center, gamma) +ax = fit_model.plot(x_max=max(bin_center)) +ax.scatter(bin_center, gamma) print(fit_model) ``` Which gives: ```python -Stable(dim=2, var=1.92, len_scale=8.15, nugget=0.0, anis=[1.], angles=[0.], alpha=1.05) +Stable(dim=2, var=1.85, len_scale=7.42, nugget=0.0, anis=[1.0], angles=[0.0], alpha=1.09) ```

-Variogram +Variogram

@@ -194,15 +223,15 @@ cond_val = [0.47, 0.56, 0.74, 1.47, 1.74] gridx = np.linspace(0.0, 15.0, 151) -# spatial random field class +# conditioned spatial random field class model = gs.Gaussian(dim=1, var=0.5, len_scale=2) -srf = gs.SRF(model) -srf.set_condition(cond_pos, cond_val, "ordinary") +krige = gs.krige.Ordinary(model, cond_pos, cond_val) +cond_srf = gs.CondSRF(krige) # generate the ensemble of field realizations fields = [] for i in range(100): - fields.append(srf(gridx, seed=i)) + fields.append(cond_srf(gridx, seed=i)) plt.plot(gridx, fields[i], color="k", alpha=0.1) plt.scatter(cond_pos, cond_val, color="k") plt.show() @@ -253,8 +282,8 @@ import gstools as gs x = np.arange(100) y = np.arange(100) model = gs.Gaussian(dim=2, var=1, len_scale=10) -srf = gs.SRF(model, generator='VectorField') -srf((x, y), mesh_type='structured', seed=19841203) +srf = gs.SRF(model, generator='VectorField', seed=19841203) +srf((x, y), mesh_type='structured') srf.plot() ``` @@ -300,6 +329,7 @@ in memory for immediate 3D plotting in Python. - [hankel >= 1.0.2](https://github.com/steven-murray/hankel) - [emcee >= 3.0.0](https://github.com/dfm/emcee) - [pyevtk >= 1.1.1](https://github.com/pyscience-projects/pyevtk) +- [meshio>=4.0.3, <5.0](https://github.com/nschloe/meshio) ### Optional @@ -314,7 +344,7 @@ You can contact us via . ## License -[LGPLv3][license_link] © 2018-2020 +[LGPLv3][license_link] © 2018-2021 [pip_link]: https://pypi.org/project/gstools [conda_link]: https://docs.conda.io/en/latest/miniconda.html @@ -335,6 +365,9 @@ You can contact us via . 
[tut5_link]: https://geostat-framework.readthedocs.io/projects/gstools/en/stable/examples/05_kriging/index.html [tut6_link]: https://geostat-framework.readthedocs.io/projects/gstools/en/stable/examples/06_conditioned_fields/index.html [tut7_link]: https://geostat-framework.readthedocs.io/projects/gstools/en/stable/examples/07_transformations/index.html +[tut8_link]: https://geostat-framework.readthedocs.io/projects/gstools/en/stable/examples/08_geo_coordinates/index.html +[tut9_link]: https://geostat-framework.readthedocs.io/projects/gstools/en/stable/examples/09_spatio_temporal/index.html +[tut10_link]: https://geostat-framework.readthedocs.io/projects/gstools/en/stable/examples/10_normalizer/index.html [tut0_link]: https://geostat-framework.readthedocs.io/projects/gstools/en/stable/examples/00_misc/index.html [cor_link]: https://en.wikipedia.org/wiki/Autocovariance#Normalization [vtk_link]: https://www.vtk.org/ diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index c5a6a232c..000000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ --r requirements_doc.txt --r ../requirements_setup.txt --r ../requirements.txt diff --git a/docs/requirements_doc.txt b/docs/requirements_doc.txt deleted file mode 100755 index c9d3ee24e..000000000 --- a/docs/requirements_doc.txt +++ /dev/null @@ -1,5 +0,0 @@ -numpydoc -sphinx-gallery -matplotlib -pyvista -pykrige diff --git a/docs/source/_templates/layout.html b/docs/source/_templates/layout.html index 27d23fe03..f5a6014e7 100644 --- a/docs/source/_templates/layout.html +++ b/docs/source/_templates/layout.html @@ -11,6 +11,7 @@ GeoStat Website GeoStat Github + GeoStat Examples GeoStat ReadTheDocs GeoStat PyPI
diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst new file mode 100644 index 000000000..ab37940f4 --- /dev/null +++ b/docs/source/changelog.rst @@ -0,0 +1 @@ +.. mdinclude:: ../../CHANGELOG.md diff --git a/docs/source/conf.py b/docs/source/conf.py index b92146696..bc06b3649 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -25,6 +25,7 @@ import datetime import warnings + warnings.filterwarnings( "ignore", category=UserWarning, @@ -68,6 +69,7 @@ def setup(app): "sphinx.ext.napoleon", # parameters look better than with numpydoc only "numpydoc", "sphinx_gallery.gen_gallery", + "m2r2", ] # autosummaries from source-files @@ -94,8 +96,8 @@ def setup(app): # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: -# source_suffix = ['.rst', '.md'] -source_suffix = ".rst" +source_suffix = [".rst", ".md"] +# source_suffix = ".rst" # The master toctree document. # --> this is the sitemap (or content-list in latex -> needs a heading) @@ -106,8 +108,8 @@ def setup(app): # General information about the project. curr_year = datetime.datetime.now().year project = "GSTools" -copyright = "2018 - {}, Lennart Schueler, Sebastian Mueller".format(curr_year) -author = "Lennart Schueler, Sebastian Mueller" +copyright = "2018 - {}, Sebastian Müller, Lennart Schüler".format(curr_year) +author = "Sebastian Müller, Lennart Schüler" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -201,8 +203,8 @@ def setup(app): "pointsize": "10pt", "papersize": "a4paper", "fncychap": "\\usepackage[Glenn]{fncychap}", + # 'inputenc': r'\usepackage[utf8]{inputenc}', } - # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
@@ -211,7 +213,7 @@ def setup(app): master_doc, "GeoStatTools.tex", "GeoStatTools Documentation", - "Lennart Schueler, Sebastian Mueller", + "Sebastian Müller, Lennart Schüler", "manual", ) ] @@ -260,11 +262,23 @@ def setup(app): "emcee": ("https://emcee.readthedocs.io/en/latest/", None), } - # -- Sphinx Gallery Options from sphinx_gallery.sorting import FileNameSortKey +# Use pyvista's image scraper for example gallery +# import pyvista +# https://github.com/tkoyama010/pyvista-doc-translations/blob/85c835a3ada3a2adefac06ba70e15a101ffa9162/conf.py#L21 +# https://github.com/simpeg/discretize/blob/f414dd7ee7c5ba9a141cb2c37d4b71fdc531eae8/docs/conf.py#L334 +# Make sure off screen is set to true when building locally +# pyvista.OFF_SCREEN = True +# # necessary when building the sphinx gallery +# pyvista.BUILDING_GALLERY = True +# # Optional - set parameters like theme or window size +# pyvista.set_plot_theme("document") + sphinx_gallery_conf = { + # "image_scrapers": ("pyvista", "matplotlib"), + "remove_config_comments": True, # only show "print" output as output "capture_repr": (), # path to your examples scripts @@ -277,6 +291,9 @@ def setup(app): "../../examples/05_kriging/", "../../examples/06_conditioned_fields/", "../../examples/07_transformations/", + "../../examples/08_geo_coordinates/", + "../../examples/09_spatio_temporal/", + "../../examples/10_normalizer/", ], # path where to save gallery generated examples "gallery_dirs": [ @@ -288,6 +305,9 @@ def setup(app): "examples/05_kriging/", "examples/06_conditioned_fields/", "examples/07_transformations/", + "examples/08_geo_coordinates/", + "examples/09_spatio_temporal/", + "examples/10_normalizer/", ], # Pattern to search for example files "filename_pattern": r"\.py", @@ -299,8 +319,10 @@ def setup(app): "backreferences_dir": None, # Modules for which function level galleries are created. 
In "doc_module": "gstools", - # "image_scrapers": ('pyvista', 'matplotlib'), - # "first_notebook_cell": ("%matplotlib inline\n" - # "from pyvista import set_plot_theme\n" - # "set_plot_theme('document')"), + # "first_notebook_cell": ( + # "%matplotlib inline\n" + # "from pyvista import set_plot_theme\n" + # "set_plot_theme('document')" + # ), + "matplotlib_animations": True, } diff --git a/docs/source/contents.rst b/docs/source/contents.rst index a168096b2..afd0d9bc6 100644 --- a/docs/source/contents.rst +++ b/docs/source/contents.rst @@ -9,3 +9,4 @@ Contents index tutorials package + changelog diff --git a/docs/source/field.base.rst b/docs/source/field.base.rst deleted file mode 100755 index 5e89c99ec..000000000 --- a/docs/source/field.base.rst +++ /dev/null @@ -1,10 +0,0 @@ -gstools.field.base ------------------- - -.. automodule:: gstools.field.base - :members: - :undoc-members: - -.. raw:: latex - - \clearpage diff --git a/docs/source/field.rst b/docs/source/field.rst index 24ef6357f..c37b49660 100644 --- a/docs/source/field.rst +++ b/docs/source/field.rst @@ -2,10 +2,6 @@ gstools.field ============= .. automodule:: gstools.field - :members: - :undoc-members: - :inherited-members: - :show-inheritance: .. raw:: latex @@ -16,4 +12,3 @@ gstools.field field.generator.rst field.upscaling.rst - field.base.rst diff --git a/docs/source/index.rst b/docs/source/index.rst index bc3d23e99..bb06049af 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -6,9 +6,18 @@ GSTools Quickstart :width: 150px :align: center -GeoStatTools provides geostatistical tools for random field generation and -variogram estimation based on many readily provided and even user-defined -covariance models. 
+GeoStatTools provides geostatistical tools for various purposes: + +- random field generation +- simple, ordinary, universal and external drift kriging +- conditioned field generation +- incompressible random vector field generation +- (automated) variogram estimation and fitting +- directional variogram estimation and modelling +- data normalization and transformation +- many readily provided and even user-defined covariance models +- metric spatio-temporal modelling +- plotting and exporting routines Installation @@ -52,24 +61,24 @@ To get the latest development version you can install it directly from GitHub: If something went wrong during installation, try the :code:`-I` `flag from pip `_. -To enable the OpenMP support, you have to provide a C compiler, Cython and OpenMP. -To get all other dependencies, it is recommended to first install gstools once -in the standard way just decribed. -Simply use the following commands: +To enable the OpenMP support, you have to provide a C compiler and OpenMP. +Parallel support is controlled by an environment variable ``GSTOOLS_BUILD_PARALLEL``, +that can be ``0`` or ``1`` (interpreted as ``0`` if not present). +GSTools then needs to be installed from source: .. code-block:: none - pip install gstools - pip install -I --no-deps --global-option="--openmp" gstools + export GSTOOLS_BUILD_PARALLEL=1 + pip install --no-binary=gstools gstools -Or for the development version: +Note, that the ``--no-binary=gstools`` option forces pip to not use a wheel for GSTools. + +For the development version, you can do almost the same: .. code-block:: none + export GSTOOLS_BUILD_PARALLEL=1 pip install git+git://github.com/GeoStat-Framework/GSTools.git@develop - pip install -I --no-deps --global-option="--openmp" git+git://github.com/GeoStat-Framework/GSTools.git@develop - -The flags :code:`-I --no-deps` force pip to reinstall gstools but not the dependencies.
Citation @@ -97,10 +106,11 @@ showing the most important use cases of GSTools, which are - `Kriging `__ - `Conditioned random field generation `__ - `Field transformations `__ +- `Geographic Coordinates `__ +- `Spatio-Temporal Modelling `__ +- `Normalizing Data `__ - `Miscellaneous examples `__ -Some more examples are provided in the examples folder. - Spatial Random Field Generation =============================== @@ -133,6 +143,30 @@ with a :any:`Gaussian` covariance model. :width: 400px :align: center +GSTools also provides support for `geographic coordinates `_. +This works perfectly well with `cartopy `_. + +.. code-block:: python + + import matplotlib.pyplot as plt + import cartopy.crs as ccrs + import gstools as gs + # define a structured field by latitude and longitude + lat = lon = range(-80, 81) + model = gs.Gaussian(latlon=True, len_scale=777, rescale=gs.EARTH_RADIUS) + srf = gs.SRF(model, seed=12345) + field = srf.structured((lat, lon)) + # Orthographic plotting with cartopy + ax = plt.subplot(projection=ccrs.Orthographic(-45, 45)) + cont = ax.contourf(lon, lat, field, transform=ccrs.PlateCarree()) + ax.coastlines() + ax.set_global() + plt.colorbar(cont) + +.. 
image:: https://github.com/GeoStat-Framework/GeoStat-Framework.github.io/raw/master/img/GS_globe.png + :width: 400px + :align: center + A similar example but for a three dimensional field is exported to a `VTK `__ file, which can be visualized with `ParaView `_ or @@ -143,15 +177,15 @@ A similar example but for a three dimensional field is exported to a import gstools as gs # structured field with a size 100x100x100 and a grid-size of 1x1x1 x = y = z = range(100) - model = gs.Gaussian(dim=3, var=0.6, len_scale=20) + model = gs.Gaussian(dim=3, len_scale=[16, 8, 4], angles=(0.8, 0.4, 0.2)) srf = gs.SRF(model) srf((x, y, z), mesh_type='structured') srf.vtk_export('3d_field') # Save to a VTK file for ParaView mesh = srf.to_pyvista() # Create a PyVista mesh for plotting in Python - mesh.threshold_percent(0.5).plot() + mesh.contour(isosurfaces=8).plot() -.. image:: https://raw.githubusercontent.com/GeoStat-Framework/GSTools/master/docs/source/pics/3d_gau_field.png +.. image:: https://github.com/GeoStat-Framework/GeoStat-Framework.github.io/raw/master/img/GS_pyvista.png :width: 400px :align: center @@ -180,24 +214,23 @@ model again. model = gs.Exponential(dim=2, var=2, len_scale=8) srf = gs.SRF(model, mean=0, seed=19970221) field = srf((x, y)) - # estimate the variogram of the field with 40 bins - bins = np.arange(40) - bin_center, gamma = gs.vario_estimate_unstructured((x, y), field, bins) + # estimate the variogram of the field + bin_center, gamma = gs.vario_estimate((x, y), field) # fit the variogram with a stable model. (no nugget fitted) fit_model = gs.Stable(dim=2) fit_model.fit_variogram(bin_center, gamma, nugget=False) # output - ax = fit_model.plot(x_max=40) - ax.plot(bin_center, gamma) + ax = fit_model.plot(x_max=max(bin_center)) + ax.scatter(bin_center, gamma) print(fit_model) Which gives: .. 
code-block:: python - Stable(dim=2, var=1.92, len_scale=8.15, nugget=0.0, anis=[1.], angles=[0.], alpha=1.05) + Stable(dim=2, var=1.85, len_scale=7.42, nugget=0.0, anis=[1.0], angles=[0.0], alpha=1.09) -.. image:: https://raw.githubusercontent.com/GeoStat-Framework/GSTools/master/docs/source/pics/exp_vario_fit.png +.. image:: https://raw.githubusercontent.com/GeoStat-Framework/GeoStat-Framework.github.io/master/img/GS_vario_est.png :width: 400px :align: center @@ -228,15 +261,15 @@ generate 100 realizations and plot them: gridx = np.linspace(0.0, 15.0, 151) - # spatial random field class + # conditioned spatial random field class model = gs.Gaussian(dim=1, var=0.5, len_scale=2) - srf = gs.SRF(model) - srf.set_condition(cond_pos, cond_val, "ordinary") + krige = gs.krige.Ordinary(model, cond_pos, cond_val) + cond_srf = gs.CondSRF(krige) # generate the ensemble of field realizations fields = [] for i in range(100): - fields.append(srf(gridx, seed=i)) + fields.append(cond_srf(gridx, seed=i)) plt.plot(gridx, fields[i], color="k", alpha=0.1) plt.scatter(cond_pos, cond_val, color="k") plt.show() @@ -291,8 +324,8 @@ Example x = np.arange(100) y = np.arange(100) model = gs.Gaussian(dim=2, var=1, len_scale=10) - srf = gs.SRF(model, generator='VectorField') - srf((x, y), mesh_type='structured', seed=19841203) + srf = gs.SRF(model, generator='VectorField', seed=19841203) + srf((x, y), mesh_type='structured') srf.plot() yielding @@ -336,6 +369,7 @@ Requirements - `hankel >= 1.0.2 `_ - `emcee >= 3.0.0 `_ - `pyevtk >= 1.1.1 `_ +- `meshio>=4.0.3, <5.0 `_ Optional diff --git a/docs/source/normalizer.rst b/docs/source/normalizer.rst new file mode 100644 index 000000000..396c7ceaf --- /dev/null +++ b/docs/source/normalizer.rst @@ -0,0 +1,8 @@ +gstools.normalizer +================== + +.. automodule:: gstools.normalizer + +.. 
raw:: latex + + \clearpage diff --git a/docs/source/package.rst b/docs/source/package.rst index 73d3db961..792fa9731 100644 --- a/docs/source/package.rst +++ b/docs/source/package.rst @@ -18,3 +18,4 @@ GSTools API random.rst tools.rst transform.rst + normalizer.rst diff --git a/docs/source/pics/GS_3d_vector_field.png b/docs/source/pics/GS_3d_vector_field.png new file mode 100644 index 000000000..f1cf0883d Binary files /dev/null and b/docs/source/pics/GS_3d_vector_field.png differ diff --git a/docs/source/pics/GS_pyvista_cut.png b/docs/source/pics/GS_pyvista_cut.png new file mode 100644 index 000000000..bad73abc6 Binary files /dev/null and b/docs/source/pics/GS_pyvista_cut.png differ diff --git a/docs/source/pics/paraview.png b/docs/source/pics/paraview.png new file mode 100644 index 000000000..7c2a94947 Binary files /dev/null and b/docs/source/pics/paraview.png differ diff --git a/docs/source/tutorials.rst b/docs/source/tutorials.rst index 5c2b6787f..28d39e563 100644 --- a/docs/source/tutorials.rst +++ b/docs/source/tutorials.rst @@ -19,4 +19,7 @@ explore its whole beauty and power. examples/05_kriging/index examples/06_conditioned_fields/index examples/07_transformations/index + examples/08_geo_coordinates/index + examples/09_spatio_temporal/index + examples/10_normalizer/index examples/00_misc/index diff --git a/docs/source/variogram.rst b/docs/source/variogram.rst index cb6bd288d..2f50d2669 100644 --- a/docs/source/variogram.rst +++ b/docs/source/variogram.rst @@ -2,8 +2,6 @@ gstools.variogram ================= .. automodule:: gstools.variogram - :members: - :undoc-members: .. 
raw:: latex diff --git a/examples/00_misc/00_tpl_stable.py b/examples/00_misc/00_tpl_stable.py index 0b8b85878..a837fd0d4 100644 --- a/examples/00_misc/00_tpl_stable.py +++ b/examples/00_misc/00_tpl_stable.py @@ -1,6 +1,6 @@ r""" -TPL Stable ----------- +Truncated Power Law Variograms +------------------------------ GSTools also implements truncated power law variograms, which can be represented as a superposition of scale dependant modes diff --git a/examples/00_misc/01_export.py b/examples/00_misc/01_export.py index cbc89706b..ad754abf0 100644 --- a/examples/00_misc/01_export.py +++ b/examples/00_misc/01_export.py @@ -1,8 +1,13 @@ """ -Export ------- -""" +Exporting Fields +---------------- + +GSTools provides simple exporting routines to convert generated fields to +`VTK `__ files. +These can be viewed for example with `Paraview `__. +""" +# sphinx_gallery_thumbnail_path = 'pics/paraview.png' import gstools as gs x = y = range(100) @@ -10,3 +15,10 @@ srf = gs.SRF(model) field = srf((x, y), mesh_type="structured") srf.vtk_export(filename="field") + +############################################################################### +# The result displayed with Paraview: +# +# .. 
image:: https://raw.githubusercontent.com/GeoStat-Framework/GeoStat-Framework.github.io/master/img/paraview.png +# :width: 400px +# :align: center diff --git a/examples/00_misc/02_check_rand_meth_sampling.py b/examples/00_misc/02_check_rand_meth_sampling.py index c03213c65..e89c13408 100644 --- a/examples/00_misc/02_check_rand_meth_sampling.py +++ b/examples/00_misc/02_check_rand_meth_sampling.py @@ -58,7 +58,7 @@ def plot_rand_meth_samples(generator): ax.set_xlim([0, np.max(x)]) ax.set_title("Radius samples shown {}/{}".format(sample_in, len(rad))) ax.legend() - fig.show() + plt.show() model = gs.Stable(dim=3, alpha=1.5) diff --git a/examples/03_variogram/01_variogram_estimation.py b/examples/00_misc/04_herten.py similarity index 95% rename from examples/03_variogram/01_variogram_estimation.py rename to examples/00_misc/04_herten.py index 6685eb40d..4f27ed56d 100644 --- a/examples/03_variogram/01_variogram_estimation.py +++ b/examples/00_misc/04_herten.py @@ -1,6 +1,6 @@ """ -An Example with Actual Data ---------------------------- +Analyzing the Herten Aquifer with GSTools +----------------------------------------- This example is going to be a bit more extensive and we are going to do some basic data preprocessing for the actual variogram estimation. But this example @@ -145,8 +145,8 @@ def generate_transmissivity(): # results reproducible, we can also set a seed. 
-bins = np.linspace(0, 10, 50) -bin_center, gamma = gs.vario_estimate_unstructured( +bins = gs.standard_bins(pos=(x_u, y_u), max_dist=10) +bin_center, gamma = gs.vario_estimate( (x_u, y_u), herten_log_trans.reshape(-1), bins, @@ -209,8 +209,8 @@ def generate_transmissivity(): # With this much smaller data set, we can immediately estimate the variogram in # the x- and y-axis -gamma_x = gs.vario_estimate_structured(herten_trans_skip, direction="x") -gamma_y = gs.vario_estimate_structured(herten_trans_skip, direction="y") +gamma_x = gs.vario_estimate_axis(herten_trans_skip, direction="x") +gamma_y = gs.vario_estimate_axis(herten_trans_skip, direction="y") ############################################################################### # With these two estimated variograms, we can start fitting :any:`Exponential` @@ -230,7 +230,7 @@ def generate_transmissivity(): # dashed lines. plt.figure() # new figure -line, = plt.plot(bin_center, gamma, label="estimated variogram (isotropic)") +(line,) = plt.plot(bin_center, gamma, label="estimated variogram (isotropic)") plt.plot( bin_center, fit_model.variogram(bin_center), @@ -239,7 +239,7 @@ def generate_transmissivity(): label="exp. variogram (isotropic)", ) -line, = plt.plot(x_plot, gamma_x[:21], label="estimated variogram in x-dir") +(line,) = plt.plot(x_plot, gamma_x[:21], label="estimated variogram in x-dir") plt.plot( x_plot, fit_model_x.variogram(x_plot), @@ -248,7 +248,7 @@ def generate_transmissivity(): label="exp. 
variogram in x-dir", ) -line, = plt.plot(y_plot, gamma_y[:21], label="estimated variogram in y-dir") +(line,) = plt.plot(y_plot, gamma_y[:21], label="estimated variogram in y-dir") plt.plot( y_plot, fit_model_y.variogram(y_plot), diff --git a/examples/00_misc/README.rst b/examples/00_misc/README.rst index cf0212505..bef7ae572 100644 --- a/examples/00_misc/README.rst +++ b/examples/00_misc/README.rst @@ -1,7 +1,9 @@ -Miscellaneous -============= +Miscellaneous Tutorials +======================= -A few miscellaneous examples +More examples which do not really fit into other categories. Some are not more +than a code snippet, while others are more complex and more than one part of +GSTools is involved. -Gallery -------- +Examples +-------- diff --git a/examples/03_variogram/grid_dim_origin_spacing.txt b/examples/00_misc/grid_dim_origin_spacing.txt similarity index 100% rename from examples/03_variogram/grid_dim_origin_spacing.txt rename to examples/00_misc/grid_dim_origin_spacing.txt diff --git a/examples/03_variogram/herten_transmissivity.gz b/examples/00_misc/herten_transmissivity.gz similarity index 100% rename from examples/03_variogram/herten_transmissivity.gz rename to examples/00_misc/herten_transmissivity.gz diff --git a/examples/01_random_field/02_fancier.py b/examples/01_random_field/02_fancier.py index f27dccde1..9c090e60c 100644 --- a/examples/01_random_field/02_fancier.py +++ b/examples/01_random_field/02_fancier.py @@ -1,4 +1,4 @@ -""" +r""" Creating Fancier Fields ----------------------- diff --git a/examples/01_random_field/04_srf_merge.py b/examples/01_random_field/04_srf_merge.py index 366558da0..13890280e 100644 --- a/examples/01_random_field/04_srf_merge.py +++ b/examples/01_random_field/04_srf_merge.py @@ -6,6 +6,7 @@ to merge two unstructured rectangular fields. 
""" +# sphinx_gallery_thumbnail_number = 2 import numpy as np import gstools as gs diff --git a/examples/01_random_field/05_mesh_ensemble.py b/examples/01_random_field/05_mesh_ensemble.py new file mode 100755 index 000000000..81fbc16d9 --- /dev/null +++ b/examples/01_random_field/05_mesh_ensemble.py @@ -0,0 +1,93 @@ +""" +Generating Fields on Meshes +--------------------------- + +GSTools provides an interface for meshes, to support +`meshio `_ and +`ogs5py `_ meshes. + +When using `meshio`, the generated fields will be stored immediately in the +mesh container. + +There are two options to generate a field on a given mesh: + +- `points="points"` will generate a field on the mesh points +- `points="centroids"` will generate a field on the cell centroids + +In this example, we will generate a simple mesh with the aid of +`meshzoo `_. +""" + +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.tri as tri +import meshzoo +import meshio +import gstools as gs + +# generate a triangulated hexagon with meshzoo +points, cells = meshzoo.ngon(6, 4) +mesh = meshio.Mesh(points, {"triangle": cells}) + +############################################################################### +# Now we prepare the SRF class as always. We will generate an ensemble of +# fields on the generated mesh. + +# number of fields +fields_no = 12 +# model setup +model = gs.Gaussian(dim=2, len_scale=0.5) +srf = gs.SRF(model, mean=1) + +############################################################################### +# To generate fields on a mesh, we provide a separate method: :any:`SRF.mesh`. +# First we generate fields on the mesh-centroids controlled by a seed. +# You can specify the field name by the keyword `name`. + +for i in range(fields_no): + srf.mesh(mesh, points="centroids", name="c-field-{}".format(i), seed=i) + +############################################################################### +# Now we generate fields on the mesh-points again controlled by a seed. 
+ +for i in range(fields_no): + srf.mesh(mesh, points="points", name="p-field-{}".format(i), seed=i) + +############################################################################### +# To get an impression we now want to plot the generated fields. +# Luckily, matplotlib supports triangular meshes. + +triangulation = tri.Triangulation(points[:, 0], points[:, 1], cells) +# figure setup +cols = 4 +rows = int(np.ceil(fields_no / cols)) + +############################################################################### +# Cell data can be easily visualized with matplotlib's `tripcolor`. +# To highlight the cell structure, we use `triplot`. + +fig = plt.figure(figsize=[2 * cols, 2 * rows]) +for i, field in enumerate(mesh.cell_data, 1): + ax = fig.add_subplot(rows, cols, i) + ax.tripcolor(triangulation, mesh.cell_data[field][0]) + ax.triplot(triangulation, linewidth=0.5, color="k") + ax.set_aspect("equal") +fig.tight_layout() + +############################################################################### +# Point data is plotted via `tricontourf`. + +fig = plt.figure(figsize=[2 * cols, 2 * rows]) +for i, field in enumerate(mesh.point_data, 1): + ax = fig.add_subplot(rows, cols, i) + ax.tricontourf(triangulation, mesh.point_data[field]) + ax.triplot(triangulation, linewidth=0.5, color="k") + ax.set_aspect("equal") +fig.tight_layout() +plt.show() + +############################################################################### +# Last but not least, `meshio` can be used for what it does best: Exporting. +# Tada!
+ +mesh.write("mesh_ensemble.vtk") diff --git a/examples/01_random_field/06_pyvista_support.py b/examples/01_random_field/06_pyvista_support.py new file mode 100644 index 000000000..1077f3dd1 --- /dev/null +++ b/examples/01_random_field/06_pyvista_support.py @@ -0,0 +1,56 @@ +""" +Using PyVista meshes +-------------------- + +`PyVista `__ is a helper module for the +Visualization Toolkit (VTK) that takes a different approach on interfacing with +VTK through NumPy and direct array access. + +It provides mesh data structures and filtering methods for spatial datasets, +makes 3D plotting simple and is built for large/complex data geometries. + +The :any:`Field.mesh` method enables easy field creation on PyVista meshes +used by the :any:`SRF` or :any:`Krige` class. +""" +# sphinx_gallery_thumbnail_path = 'pics/GS_pyvista_cut.png' +import pyvista as pv +import gstools as gs + +############################################################################### +# We create a structured grid with PyVista containing 50 segments on all three +# axes each with a length of 2 (whatever unit). + +dim, spacing = (50, 50, 50), (2, 2, 2) +grid = pv.UniformGrid(dim, spacing) + +############################################################################### +# Now we set up the SRF class as always. We'll use an anisotropic model. + +model = gs.Gaussian(dim=3, len_scale=[16, 8, 4], angles=(0.8, 0.4, 0.2)) +srf = gs.SRF(model, seed=19970221) + +############################################################################### +# The PyVista mesh can now be directly passed to the :any:`SRF.mesh` method. +# When dealing with meshes, one can choose if the field should be generated +# on the mesh-points (`"points"`) or the cell-centroids (`"centroids"`). +# +# In addition we can set a name, under which the resulting field is stored +# in the mesh. 
+ +srf.mesh(grid, points="points", name="random-field") + +############################################################################### +# Now we have access to PyVista's abundancy of methods to explore the field. +# +# .. note:: +# PyVista is not working on readthedocs, but you can try it out yourself by +# uncommenting the following line of code. + +# grid.contour(isosurfaces=8).plot() + +############################################################################### +# The result should look like this: +# +# .. image:: https://github.com/GeoStat-Framework/GeoStat-Framework.github.io/raw/master/img/GS_pyvista_cut.png +# :width: 400px +# :align: center diff --git a/examples/01_random_field/07_higher_dimensions.py b/examples/01_random_field/07_higher_dimensions.py new file mode 100755 index 000000000..43f19912c --- /dev/null +++ b/examples/01_random_field/07_higher_dimensions.py @@ -0,0 +1,81 @@ +""" +Higher Dimensions +----------------- + +GSTools provides experimental support for higher dimensions. + +Anisotropy is the same as in lower dimensions: + +- in `n` dimensions we need `(n-1)` anisotropy ratios + +Rotation on the other hand is a bit more complex. +With increasing dimensions more and more rotation angles are added in order +to properply describe the rotated axes of anisotropy. + +By design the first rotation angles coincide with the lower ones: + +- 2D (rotation in x-y plane) -> 3D: first angle describes xy-plane rotation +- 3D (Tait-Bryan angles) -> 4D: first 3 angles coincide with Tait-Bryan angles + +By increasing the dimension from `n` to `(n+1)`, `n` angles are added: + +- 2D (1 angle) -> 3D: 3 angles (2 added) +- 3D (3 angles) -> 4D: 6 angles (3 added) + +the following list of rotation-planes are described by the list of +angles in the model: + +1. x-y plane +2. x-z plane +3. y-z plane +4. x-v plane +5. y-v plane +6. z-v plane +7. ... + +The rotation direction in these planes have alternating signs +in order to match Tait-Bryan in 3D. 
+ +Let's have a look at a 4D example, where we naively add a 4th dimension. +""" + +import matplotlib.pyplot as plt +import gstools as gs + +dim = 4 +size = 20 +pos = [range(size)] * dim +model = gs.Exponential(dim=dim, len_scale=5) +srf = gs.SRF(model, seed=20170519) +field = srf.structured(pos) + +############################################################################### +# In order to "prove" correctness, we can calculate an empirical variogram +# of the generated field and fit our model to it. + +bin_center, vario = gs.vario_estimate( + pos, field, sampling_size=2000, mesh_type="structured" +) +model.fit_variogram(bin_center, vario) +print(model) + +############################################################################### +# As you can see, the estimated variance and length scale match our input +# quite well. +# +# Let's have a look at the fit and a x-y cross-section of the 4D field: + +f, a = plt.subplots(1, 2, gridspec_kw={"width_ratios": [2, 1]}, figsize=[9, 3]) +model.plot(x_max=max(bin_center), ax=a[0]) +a[0].scatter(bin_center, vario) +a[1].imshow(field[:, :, 0, 0].T, origin="lower") +a[0].set_title("isotropic empirical variogram with fitted model") +a[1].set_title("x-y cross-section") +f.show() + +############################################################################### +# GSTools also provides plotting routines for higher dimensions. +# Fields are shown by 2D cross-sections, where other dimensions can be +# controlled via sliders. + +srf.plot() diff --git a/examples/01_random_field/README.rst b/examples/01_random_field/README.rst index a04525686..6b226b2f9 100644 --- a/examples/01_random_field/README.rst +++ b/examples/01_random_field/README.rst @@ -1,5 +1,5 @@ -Tutorial 1: Random Field Generation -=================================== +Random Field Generation +======================= The main feature of GSTools is the spatial random field generator :any:`SRF`, which can generate random fields following a given covariance model. 
@@ -13,5 +13,5 @@ and its discretised modes are evaluated at random frequencies. GSTools supports arbitrary and non-isotropic covariance models. -Gallery -------- +Examples +-------- diff --git a/examples/02_cov_model/00_intro.py b/examples/02_cov_model/00_intro.py index 55d18fdc6..ed69a0726 100644 --- a/examples/02_cov_model/00_intro.py +++ b/examples/02_cov_model/00_intro.py @@ -18,7 +18,7 @@ # use CovModel as the base-class class Gau(gs.CovModel): def cor(self, h): - return np.exp(-h ** 2) + return np.exp(-(h ** 2)) ############################################################################### diff --git a/examples/02_cov_model/02_aniso_rotation.py b/examples/02_cov_model/02_aniso_rotation.py index 58d7b7365..2a8bac788 100755 --- a/examples/02_cov_model/02_aniso_rotation.py +++ b/examples/02_cov_model/02_aniso_rotation.py @@ -52,3 +52,4 @@ # - in 3D: given by yaw, pitch, and roll (known as # `Tait–Bryan `_ # angles) +# - in nD: See the random field example about higher dimensions diff --git a/examples/02_cov_model/05_additional_para.py b/examples/02_cov_model/05_additional_para.py index dc9012a2b..7f2a70ace 100755 --- a/examples/02_cov_model/05_additional_para.py +++ b/examples/02_cov_model/05_additional_para.py @@ -19,7 +19,7 @@ def default_opt_arg(self): return {"alpha": 1.5} def cor(self, h): - return np.exp(-h ** self.alpha) + return np.exp(-(h ** self.alpha)) ############################################################################### diff --git a/examples/02_cov_model/06_fitting_para_ranges.py b/examples/02_cov_model/06_fitting_para_ranges.py index f73253ce9..c1a7db6fd 100755 --- a/examples/02_cov_model/06_fitting_para_ranges.py +++ b/examples/02_cov_model/06_fitting_para_ranges.py @@ -15,7 +15,7 @@ def default_opt_arg(self): return {"alpha": 1.5} def cor(self, h): - return np.exp(-h ** self.alpha) + return np.exp(-(h ** self.alpha)) # Exemplary variogram data (e.g. 
estimated from field observations) diff --git a/examples/02_cov_model/README.rst b/examples/02_cov_model/README.rst index 6ea0032af..6f8d3dad2 100644 --- a/examples/02_cov_model/README.rst +++ b/examples/02_cov_model/README.rst @@ -1,14 +1,13 @@ .. _tutorial_02_cov: -Tutorial 2: The Covariance Model -================================ +The Covariance Model +==================== One of the core-features of GSTools is the powerful :any:`CovModel` class, which allows you to easily define arbitrary covariance models by yourself. The resulting models provide a bunch of nice features to explore the covariance models. - A covariance model is used to characterize the `semi-variogram `_, denoted by :math:`\gamma`, of a spatial random field. @@ -16,24 +15,38 @@ In GSTools, we use the following form for an isotropic and stationary field: .. math:: \gamma\left(r\right)= - \sigma^2\cdot\left(1-\rho\left(r\right)\right)+n + \sigma^2\cdot\left(1-\mathrm{cor}\left(s\cdot\frac{r}{\ell}\right)\right)+n Where: - - :math:`\rho(r)` is the so called - `correlation `_ - function depending on the distance :math:`r` + - :math:`r` is the lag distance + - :math:`\ell` is the main correlation length + - :math:`s` is a scaling factor for unit conversion or normalization - :math:`\sigma^2` is the variance - :math:`n` is the nugget (subscale variance) + - :math:`\mathrm{cor}(h)` is the normalized correlation function depending on + the non-dimensional distance :math:`h=s\cdot\frac{r}{\ell}` + +Depending on the normalized correlation function, all covariance models in +GSTools are providing the following functions: + + - :math:`\rho(r)=\mathrm{cor}\left(s\cdot\frac{r}{\ell}\right)` + is the so called + `correlation `_ + function + - :math:`C(r)=\sigma^2\cdot\rho(r)` is the so called + `covariance `_ + function, which gives the name for our GSTools class .. note:: We are not limited to isotropic models. 
GSTools supports anisotropy ratios for length scales in orthogonal transversal directions like: - - :math:`x` (main direction) - - :math:`y` (1. transversal direction) - - :math:`z` (2. transversal direction) + - :math:`x_0` (main direction) + - :math:`x_1` (1. transversal direction) + - :math:`x_2` (2. transversal direction) + - ... These main directions can also be rotated. Just have a look at the corresponding examples. @@ -51,10 +64,14 @@ The following standard covariance models are provided by GSTools Matern Stable Rational + Cubic Linear Circular Spherical - Intersection + HyperSpherical + SuperSpherical + JBessel + TPLSimple As a special feature, we also provide truncated power law (TPL) covariance models @@ -63,5 +80,8 @@ As a special feature, we also provide truncated power law (TPL) covariance model TPLExponential TPLStable -Gallery -------- +These models provide a lower and upper length scale truncation +for superpositioned models. + +Examples +-------- diff --git a/examples/03_variogram/00_fit_variogram.py b/examples/03_variogram/00_fit_variogram.py index 8564209f9..fbaf5a8ea 100644 --- a/examples/03_variogram/00_fit_variogram.py +++ b/examples/03_variogram/00_fit_variogram.py @@ -18,7 +18,7 @@ # Estimate the variogram of the field with 40 bins. bins = np.arange(40) -bin_center, gamma = gs.vario_estimate_unstructured((x, y), field, bins) +bin_center, gamma = gs.vario_estimate((x, y), field, bins) ############################################################################### # Fit the variogram with a stable model (no nugget fitted). @@ -30,5 +30,5 @@ # Plot the fitting result. 
ax = fit_model.plot(x_max=40) -ax.plot(bin_center, gamma) +ax.scatter(bin_center, gamma) print(fit_model) diff --git a/examples/03_variogram/01_find_best_model.py b/examples/03_variogram/01_find_best_model.py new file mode 100755 index 000000000..269921e97 --- /dev/null +++ b/examples/03_variogram/01_find_best_model.py @@ -0,0 +1,65 @@ +""" +Finding the best fitting variogram model +---------------------------------------- +""" +import numpy as np +import gstools as gs +from matplotlib import pyplot as plt + +############################################################################### +# Generate a synthetic field with an exponential model. + +x = np.random.RandomState(19970221).rand(1000) * 100.0 +y = np.random.RandomState(20011012).rand(1000) * 100.0 +model = gs.Exponential(dim=2, var=2, len_scale=8) +srf = gs.SRF(model, mean=0, seed=19970221) +field = srf((x, y)) + +############################################################################### +# Estimate the variogram of the field with 40 bins and plot the result. + +bins = np.arange(40) +bin_center, gamma = gs.vario_estimate((x, y), field, bins) + +############################################################################### +# Define a set of models to test. + +models = { + "Gaussian": gs.Gaussian, + "Exponential": gs.Exponential, + "Matern": gs.Matern, + "Stable": gs.Stable, + "Rational": gs.Rational, + "Circular": gs.Circular, + "Spherical": gs.Spherical, + "SuperSpherical": gs.SuperSpherical, + "JBessel": gs.JBessel, +} +scores = {} + +############################################################################### +# Iterate over all models, fit their variogram and calculate the r2 score. 
+ +# plot the estimated variogram +plt.scatter(bin_center, gamma, color="k", label="data") +ax = plt.gca() + +# fit all models to the estimated variogram +for model in models: + fit_model = models[model](dim=2) + para, pcov, r2 = fit_model.fit_variogram(bin_center, gamma, return_r2=True) + fit_model.plot(x_max=40, ax=ax) + scores[model] = r2 + +############################################################################### +# Create a ranking based on the score and determine the best models + +ranking = [ + (k, v) + for k, v in sorted(scores.items(), key=lambda item: item[1], reverse=True) +] +print("RANKING") +for i, (model, score) in enumerate(ranking, 1): + print(i, model, score) + +plt.show() diff --git a/examples/03_variogram/02_multi_vario.py b/examples/03_variogram/02_multi_vario.py new file mode 100755 index 000000000..62eb3d5b4 --- /dev/null +++ b/examples/03_variogram/02_multi_vario.py @@ -0,0 +1,43 @@ +""" +Multi-field variogram estimation +-------------------------------- + +In this example, we demonstrate how to estimate a variogram from multiple +fields on the same point-set that should have the same statistical properties. +""" +import numpy as np +import gstools as gs +import matplotlib.pyplot as plt + + +x = np.random.RandomState(19970221).rand(1000) * 100.0 +y = np.random.RandomState(20011012).rand(1000) * 100.0 +model = gs.Exponential(dim=2, var=2, len_scale=8) +srf = gs.SRF(model, mean=0) + +############################################################################### +# Generate two synthetic fields with an exponential model. + +field1 = srf((x, y), seed=19970221) +field2 = srf((x, y), seed=20011012) +fields = [field1, field2] + +############################################################################### +# Now we estimate the variograms for both fields individually and then again +# simultaneously with only one call. 
+
+bins = np.arange(40)
+bin_center, gamma1 = gs.vario_estimate((x, y), field1, bins)
+bin_center, gamma2 = gs.vario_estimate((x, y), field2, bins)
+bin_center, gamma = gs.vario_estimate((x, y), fields, bins)
+
+###############################################################################
+# Now we demonstrate that the mean variogram from both fields coincides
+# with the joined estimated one.
+
+plt.plot(bin_center, gamma1, label="field 1")
+plt.plot(bin_center, gamma2, label="field 2")
+plt.plot(bin_center, gamma, label="joined fields")
+plt.plot(bin_center, 0.5 * (gamma1 + gamma2), ":", label="field 1+2 mean")
+plt.legend()
+plt.show() diff --git a/examples/03_variogram/03_directional_2d.py b/examples/03_variogram/03_directional_2d.py new file mode 100755 index 000000000..e33eab848 --- /dev/null +++ b/examples/03_variogram/03_directional_2d.py @@ -0,0 +1,64 @@ +"""
+Directional variogram estimation and fitting in 2D
+--------------------------------------------------
+
+In this example, we demonstrate how to estimate a directional variogram by
+setting the direction angles in 2D.
+
+Afterwards we will fit a model to this estimated variogram and show the result.
+"""
+import numpy as np
+import gstools as gs
+from matplotlib import pyplot as plt
+
+###############################################################################
+# Generating synthetic field with anisotropy and a rotation of 22.5 degrees.
+
+angle = np.pi / 8
+model = gs.Exponential(dim=2, len_scale=[10, 5], angles=angle)
+x = y = range(101)
+srf = gs.SRF(model, seed=123456)
+field = srf((x, y), mesh_type="structured")
+
+###############################################################################
+# Now we are going to estimate a directional variogram with an angular
+# tolerance of 11.25 degrees and a bandwidth of 8. 
+ +bins = range(0, 40, 2) +bin_center, dir_vario, counts = gs.vario_estimate( + *((x, y), field, bins), + direction=gs.rotated_main_axes(dim=2, angles=angle), + angles_tol=np.pi / 16, + bandwidth=8, + mesh_type="structured", + return_counts=True, +) + +############################################################################### +# Afterwards we can use the estimated variogram to fit a model to it: + +print("Original:") +print(model) +model.fit_variogram(bin_center, dir_vario) +print("Fitted:") +print(model) + +############################################################################### +# Plotting. + +fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10, 5]) + +ax1.scatter(bin_center, dir_vario[0], label="emp. vario: pi/8") +ax1.scatter(bin_center, dir_vario[1], label="emp. vario: pi*5/8") +ax1.legend(loc="lower right") + +model.plot("vario_axis", axis=0, ax=ax1, x_max=40, label="fit on axis 0") +model.plot("vario_axis", axis=1, ax=ax1, x_max=40, label="fit on axis 1") +ax1.set_title("Fitting an anisotropic model") + +srf.plot(ax=ax2) +plt.show() + +############################################################################### +# Without fitting a model, we see that the correlation length in the main +# direction is greater than the transversal one. diff --git a/examples/03_variogram/04_directional_3d.py b/examples/03_variogram/04_directional_3d.py new file mode 100755 index 000000000..d7b11de3e --- /dev/null +++ b/examples/03_variogram/04_directional_3d.py @@ -0,0 +1,96 @@ +""" +Directional variogram estimation and fitting in 3D +-------------------------------------------------- + +In this example, we demonstrate how to estimate a directional variogram by +setting the estimation directions in 3D. + +Afterwards we will fit a model to this estimated variogram and show the result. 
+""" +import numpy as np +import gstools as gs +import matplotlib.pyplot as plt +from mpl_toolkits.mplot3d import Axes3D + +############################################################################### +# Generating synthetic field with anisotropy and rotation by Tait-Bryan angles. + +dim = 3 +# rotation around z, y, x +angles = [np.deg2rad(90), np.deg2rad(45), np.deg2rad(22.5)] +model = gs.Gaussian(dim=3, len_scale=[16, 8, 4], angles=angles) +x = y = z = range(50) +pos = (x, y, z) +srf = gs.SRF(model, seed=1001) +field = srf.structured(pos) + +############################################################################### +# Here we generate the axes of the rotated coordinate system +# to get an impression what the rotation angles do. + +# All 3 axes of the rotated coordinate-system +main_axes = gs.rotated_main_axes(dim, angles) +axis1, axis2, axis3 = main_axes + +############################################################################### +# Now we estimate the variogram along the main axes. When the main axes are +# unknown, one would need to sample multiple directions and look for the one +# with the longest correlation length (flattest gradient). +# Then check the transversal directions and so on. + +bin_center, dir_vario, counts = gs.vario_estimate( + pos, + field, + direction=main_axes, + bandwidth=10, + sampling_size=2000, + sampling_seed=1001, + mesh_type="structured", + return_counts=True, +) + +############################################################################### +# Afterwards we can use the estimated variogram to fit a model to it. +# Note, that the rotation angles need to be set beforehand. + +print("Original:") +print(model) +model.fit_variogram(bin_center, dir_vario) +print("Fitted:") +print(model) + +############################################################################### +# Plotting main axes and the fitted directional variogram. 
+ +fig = plt.figure(figsize=[10, 5]) +ax1 = fig.add_subplot(121, projection=Axes3D.name) +ax2 = fig.add_subplot(122) + +ax1.plot([0, axis1[0]], [0, axis1[1]], [0, axis1[2]], label="0.") +ax1.plot([0, axis2[0]], [0, axis2[1]], [0, axis2[2]], label="1.") +ax1.plot([0, axis3[0]], [0, axis3[1]], [0, axis3[2]], label="2.") +ax1.set_xlim(-1, 1) +ax1.set_ylim(-1, 1) +ax1.set_zlim(-1, 1) +ax1.set_xlabel("X") +ax1.set_ylabel("Y") +ax1.set_zlabel("Z") +ax1.set_title("Tait-Bryan main axis") +ax1.legend(loc="lower left") + +x_max = max(bin_center) +ax2.scatter(bin_center, dir_vario[0], label="0. axis") +ax2.scatter(bin_center, dir_vario[1], label="1. axis") +ax2.scatter(bin_center, dir_vario[2], label="2. axis") +model.plot("vario_axis", axis=0, ax=ax2, x_max=x_max, label="fit on axis 0") +model.plot("vario_axis", axis=1, ax=ax2, x_max=x_max, label="fit on axis 1") +model.plot("vario_axis", axis=2, ax=ax2, x_max=x_max, label="fit on axis 2") +ax2.set_title("Fitting an anisotropic model") +ax2.legend() + +plt.show() + +############################################################################### +# Also, let's have a look at the field. + +srf.plot() diff --git a/examples/03_variogram/05_auto_fit_variogram.py b/examples/03_variogram/05_auto_fit_variogram.py new file mode 100644 index 000000000..53b113d6a --- /dev/null +++ b/examples/03_variogram/05_auto_fit_variogram.py @@ -0,0 +1,35 @@ +""" +Fit Variogram with automatic binning +------------------------------------ +""" +import numpy as np +import gstools as gs + +############################################################################### +# Generate a synthetic field with an exponential model. 
+
+x = np.random.RandomState(19970221).rand(1000) * 100.0
+y = np.random.RandomState(20011012).rand(1000) * 100.0
+model = gs.Exponential(dim=2, var=2, len_scale=8)
+srf = gs.SRF(model, mean=0, seed=19970221)
+field = srf((x, y))
+print(field.var())
+###############################################################################
+# Estimate the variogram of the field with automatic binning.
+
+bin_center, gamma = gs.vario_estimate((x, y), field)
+print("estimated bin number:", len(bin_center))
+print("maximal bin distance:", max(bin_center))
+
+###############################################################################
+# Fit the variogram with a stable model (no nugget fitted).
+
+fit_model = gs.Stable(dim=2)
+fit_model.fit_variogram(bin_center, gamma, nugget=False)
+print(fit_model)
+
+###############################################################################
+# Plot the fitting result.
+
+ax = fit_model.plot(x_max=max(bin_center))
+ax.scatter(bin_center, gamma) diff --git a/examples/03_variogram/06_auto_bin_latlon.py b/examples/03_variogram/06_auto_bin_latlon.py new file mode 100644 index 000000000..22ccc377b --- /dev/null +++ b/examples/03_variogram/06_auto_bin_latlon.py @@ -0,0 +1,88 @@ +"""
+Automatic binning with lat-lon data
+-----------------------------------
+
+In this example we demonstrate automatic binning for a tiny data set
+containing temperature records from Germany
+(See the detailed DWD example for more information on the data).
+
+We use a data set from 20 meteo-stations chosen randomly. 
+""" +import numpy as np +import gstools as gs + +# lat, lon, temperature +data = np.array( + [ + [52.9336, 8.237, 15.7], + [48.6159, 13.0506, 13.9], + [52.4853, 7.9126, 15.1], + [50.7446, 9.345, 17.0], + [52.9437, 12.8518, 21.9], + [53.8633, 8.1275, 11.9], + [47.8342, 10.8667, 11.4], + [51.0881, 12.9326, 17.2], + [48.406, 11.3117, 12.9], + [49.7273, 8.1164, 17.2], + [49.4691, 11.8546, 13.4], + [48.0197, 12.2925, 13.9], + [50.4237, 7.4202, 18.1], + [53.0316, 13.9908, 21.3], + [53.8412, 13.6846, 21.3], + [54.6792, 13.4343, 17.4], + [49.9694, 9.9114, 18.6], + [51.3745, 11.292, 20.2], + [47.8774, 11.3643, 12.7], + [50.5908, 12.7139, 15.8], + ] +) +pos = data.T[:2] # lat, lon +field = data.T[2] # temperature + +############################################################################### +# Since the overall range of these meteo-stations is too low, we can use the +# data-variance as additional information during the fit of the variogram. + +emp_v = gs.vario_estimate(pos, field, latlon=True) +sph = gs.Spherical(latlon=True, rescale=gs.EARTH_RADIUS) +sph.fit_variogram(*emp_v, sill=np.var(field)) +ax = sph.plot(x_max=2 * np.max(emp_v[0])) +ax.scatter(*emp_v, label="Empirical variogram") +ax.legend() +print(sph) + +############################################################################### +# As we can see, the variogram fitting was successful and providing the data +# variance helped finding the right length-scale. +# +# Now, we'll use this covariance model to interpolate the given data with +# ordinary kriging. 
+
+# enclosing box for data points
+grid_lat = np.linspace(np.min(pos[0]), np.max(pos[0]))
+grid_lon = np.linspace(np.min(pos[1]), np.max(pos[1]))
+# ordinary kriging
+krige = gs.krige.Ordinary(sph, pos, field)
+krige((grid_lat, grid_lon), mesh_type="structured")
+ax = krige.plot()
+# plotting lat on y-axis and lon on x-axis
+ax.scatter(pos[1], pos[0], 50, c=field, edgecolors="k", label="input")
+ax.legend()
+
+###############################################################################
+# Looks good, doesn't it?
+#
+# This workflow is also implemented in the :any:`Krige` class, by setting
+# ``fit_variogram=True``. Then the whole procedure shortens:

+krige = gs.krige.Ordinary(sph, pos, field, fit_variogram=True)
+krige.structured((grid_lat, grid_lon))
+
+# plot the result
+krige.plot()
+# show the fitting results
+print(krige.model)
+
+###############################################################################
+# This example shows that setting up variogram estimation and kriging routines
+# is straightforward with GSTools! diff --git a/examples/03_variogram/README.rst b/examples/03_variogram/README.rst index d16c16c2b..8eb42a8a8 100644 --- a/examples/03_variogram/README.rst +++ b/examples/03_variogram/README.rst @@ -1,5 +1,5 @@ -Tutorial 3: Variogram Estimation -================================ +Variogram Estimation +==================== Estimating the spatial correlations is an important part of geostatistics. These spatial correlations can be expressed by the variogram, which can be @@ -10,5 +10,5 @@ The same `(semi-)variogram `__, we provide an interface to -`PyKrige `__. +`PyKrige `__ (>v1.5), which means +you can pass a GSTools covariance model to the kriging routines of PyKrige. -In the future you can pass a GSTools Covariance Model -to the PyKrige routines as ``variogram_model``. - -At the moment we only provide prepared -keyword arguments for the pykrige routines. 
- -To demonstrate the general workflow, we compare the ordinary kriging of PyKrige -with GSTools in 2D: +To demonstrate the general workflow, we compare ordinary kriging of PyKrige +with the corresponding GSTools routine in 2D: """ import numpy as np import gstools as gs @@ -22,42 +17,46 @@ from matplotlib import pyplot as plt # conditioning data -data = np.array( - [ - [0.3, 1.2, 0.47], - [1.9, 0.6, 0.56], - [1.1, 3.2, 0.74], - [3.3, 4.4, 1.47], - [4.7, 3.8, 1.74], - ] -) +cond_x = [0.3, 1.9, 1.1, 3.3, 4.7] +cond_y = [1.2, 0.6, 3.2, 4.4, 3.8] +cond_val = [0.47, 0.56, 0.74, 1.47, 1.74] # grid definition for output field gridx = np.arange(0.0, 5.5, 0.1) gridy = np.arange(0.0, 6.5, 0.1) ############################################################################### -# A GSTools based covariance model. +# A GSTools based :any:`Gaussian` covariance model: -cov_model = gs.Gaussian( +model = gs.Gaussian( dim=2, len_scale=1, anis=0.2, angles=-0.5, var=0.5, nugget=0.1 ) ############################################################################### -# Ordinary kriging with pykrige. -# A dictionary containing keyword arguments for the pykrige routines is -# provided by the gstools covariance models. - -pk_kwargs = cov_model.pykrige_kwargs -OK1 = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], **pk_kwargs) +# Ordinary Kriging with PyKrige +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# +# One can pass the defined GSTools model as +# variogram model, which will `not` be fitted to the given data. +# By providing the GSTools model, rotation and anisotropy are also +# automatically defined: + +OK1 = OrdinaryKriging(cond_x, cond_y, cond_val, variogram_model=model) z1, ss1 = OK1.execute("grid", gridx, gridy) plt.imshow(z1, origin="lower") plt.show() ############################################################################### -# Ordinary kriging with gstools for comparison. 
- -OK2 = gs.krige.Ordinary(cov_model, [data[:, 0], data[:, 1]], data[:, 2]) +# Ordinary Kriging with GSTools +# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +# +# The :any:`Ordinary` kriging class is provided by GSTools as a shortcut to +# define ordinary kriging with the general :any:`Krige` class. +# +# PyKrige's routines are using exact kriging by default (when given a nugget). +# To reproduce this behavior in GSTools, we have to set ``exact=True``. + +OK2 = gs.krige.Ordinary(model, [cond_x, cond_y], cond_val, exact=True) OK2.structured([gridx, gridy]) ax = OK2.plot() ax.set_aspect("equal") diff --git a/examples/05_kriging/05_universal_kriging.py b/examples/05_kriging/05_universal_kriging.py index cf93da0c3..cae334880 100755 --- a/examples/05_kriging/05_universal_kriging.py +++ b/examples/05_kriging/05_universal_kriging.py @@ -1,6 +1,17 @@ """ Universal Kriging ----------------- + +You can give a polynomial order or a list of self defined +functions representing the internal drift of the given values. +This drift will be fitted internally during the kriging interpolation. + +In the following we are creating artificial data, where a linear drift +was added. The resulting samples are then used as input for Universal kriging. + +The "linear" drift is then estimated during the interpolation. +To access only the estimated mean/drift, we provide a switch `only_mean` +in the call routine. 
""" import numpy as np from gstools import SRF, Gaussian, krige @@ -21,4 +32,8 @@ ax.scatter(cond_pos, cond_val, color="k", zorder=10, label="Conditions") ax.plot(gridx, gridx * 0.1 + 1, ":", label="linear drift") ax.plot(gridx, drift_field, "--", label="original field") + +mean = krig(gridx, only_mean=True) +ax.plot(gridx, mean, label="estimated drift") + ax.legend() diff --git a/examples/05_kriging/07_detrended_ordinary_kriging.py b/examples/05_kriging/07_detrended_ordinary_kriging.py index 585fdcd10..9e468e634 100755 --- a/examples/05_kriging/07_detrended_ordinary_kriging.py +++ b/examples/05_kriging/07_detrended_ordinary_kriging.py @@ -21,7 +21,7 @@ def trend(x): drift_field = drift(gridx) + trend(gridx) # kriging model = Gaussian(dim=1, var=0.1, len_scale=2) -krig_trend = krige.Ordinary(model, cond_pos, cond_val, trend) +krig_trend = krige.Ordinary(model, cond_pos, cond_val, trend=trend) krig_trend(gridx) ax = krig_trend.plot() ax.scatter(cond_pos, cond_val, color="k", zorder=10, label="Conditions") diff --git a/examples/05_kriging/08_measurement_errors.py b/examples/05_kriging/08_measurement_errors.py new file mode 100755 index 000000000..578ccaee5 --- /dev/null +++ b/examples/05_kriging/08_measurement_errors.py @@ -0,0 +1,55 @@ +r""" +Incorporating measurement errors +-------------------------------- + +To incorporate the nugget effect and/or given measurement errors, +one can set `exact` to `False` and provide either individual measurement errors +for each point or set the nugget as a constant measurement error everywhere. + +In the following we will show the influence of the nugget and +measurement errors. 
+""" + +import numpy as np +import gstools as gs + +# condtions +cond_pos = [0.3, 1.1, 1.9, 3.3, 4.7] +cond_val = [0.47, 0.74, 0.56, 1.47, 1.74] +cond_err = [0.01, 0.0, 0.1, 0.05, 0] +# resulting grid +gridx = np.linspace(0.0, 15.0, 151) +# spatial random field class +model = gs.Gaussian(dim=1, var=0.9, len_scale=1, nugget=0.1) + +############################################################################### +# Here we will use Simple kriging (`unbiased=False`) to interpolate the given +# conditions. + +krig = gs.Krige( + model=model, + cond_pos=cond_pos, + cond_val=cond_val, + mean=1, + unbiased=False, + exact=False, + cond_err=cond_err, +) +krig(gridx) + +############################################################################### +# Let's plot the data. You can see, that the estimated values differ more from +# the input, when the given measurement errors get bigger. +# In addition we plot the standard deviation. + +ax = krig.plot() +ax.scatter(cond_pos, cond_val, color="k", zorder=10, label="Conditions") +ax.fill_between( + gridx, + # plus/minus standard deviation (70 percent confidence interval) + krig.field - np.sqrt(krig.krige_var), + krig.field + np.sqrt(krig.krige_var), + alpha=0.3, + label="Standard deviation", +) +ax.legend() diff --git a/examples/05_kriging/09_pseudo_inverse.py b/examples/05_kriging/09_pseudo_inverse.py new file mode 100755 index 000000000..c0e4e2746 --- /dev/null +++ b/examples/05_kriging/09_pseudo_inverse.py @@ -0,0 +1,38 @@ +r""" +Redundant data and pseudo-inverse +--------------------------------- + +It can happen, that the kriging system gets numerically unstable. +One reason could be, that the input data contains redundant conditioning points +that hold different values. + +To smoothly deal with such situations, you can use the pseudo +inverse for the kriging matrix, which is enabled by default. + +This will result in the average value for the redundant data. 
+ +Example +^^^^^^^ + +In the following we have two different values at the same location. +The resulting kriging field will hold the average at this point. +""" +import numpy as np +from gstools import Gaussian, krige + +# conditions +cond_pos = [0.3, 1.9, 1.1, 3.3, 1.1] +cond_val = [0.47, 0.56, 0.74, 1.47, 1.14] +# resulting grid +gridx = np.linspace(0.0, 8.0, 81) +# spatial random field class +model = Gaussian(dim=1, var=0.5, len_scale=1) + +############################################################################### +krig = krige.Ordinary(model, cond_pos=cond_pos, cond_val=cond_val) +krig(gridx) + +############################################################################### +ax = krig.plot() +ax.scatter(cond_pos, cond_val, color="k", zorder=10, label="Conditions") +ax.legend() diff --git a/examples/05_kriging/README.rst b/examples/05_kriging/README.rst index f0d9ef717..ef92e425f 100644 --- a/examples/05_kriging/README.rst +++ b/examples/05_kriging/README.rst @@ -1,9 +1,10 @@ .. _tutorial_05_kriging: -Tutorial 5: Kriging -=================== +Kriging +======= -The subpackage :py:mod:`gstools.krige` provides routines for Gaussian process regression, also known as kriging. +The subpackage :py:mod:`gstools.krige` provides routines for Gaussian process regression, +also known as kriging. Kriging is a method of data interpolation based on predefined covariance models. The aim of kriging is to derive the value of a field at some point :math:`x_0`, @@ -19,8 +20,49 @@ The weights :math:`W = (w_1,\ldots,w_n)` depent on the given covariance model an The different kriging approaches provide different ways of calculating :math:`W`. - -The routines for kriging are almost identical to the routines for spatial random fields. +The :any:`Krige` class provides everything in one place and you can switch on/off +the features you want: + +* `unbiased`: the weights have to sum up to `1`.
If true, this results in + :any:`Ordinary` kriging, where the mean is estimated, otherwise it will result in + :any:`Simple` kriging, where the mean has to be given. +* `drift_functions`: you can give a polynomial order or a list of self defined + functions representing the internal drift of the given values. This drift will + be fitted internally during the kriging interpolation. This results in :any:`Universal` kriging. +* `ext_drift`: You can also give an external drift per point to the routine. + In contrast to the internal drift, that is evaluated at the desired points with + the given functions, the external drift has to be given for each point from an "external" + source. This results in :any:`ExtDrift` kriging. +* `trend`, `mean`, `normalizer`: These are used to pre- and post-process data. + If you already have fitted a trend model that is provided as a callable function, + you can give it to the kriging routine. Normalizers are power-transformations + to gain normality. + `mean` behaves similar to `trend` but is applied at another position: + + 1. conditioning data is de-trended (subtracting trend) + 2. detrended conditioning data is then normalized (in order to follow a normal distribution) + 3. normalized conditioning data is set to zero mean (subtracting mean) + + Consequently, when there is no normalizer given, trend and mean are the same thing + and only one should be used. + :any:`Detrended` kriging is a shortcut to provide only a trend and simple kriging + with normal data. +* `exact` and `cond_err`: To incorporate the nugget effect and/or measurement errors, + one can set `exact` to `False` and provide either individual measurement errors + for each point or set the nugget as a constant measurement error everywhere. +* `pseudo_inv`: Sometimes the inversion of the kriging matrix can be numerically unstable. + This occurs for example in cases of redundant input values. In this case we provide a switch to + use the pseudo-inverse of the matrix.
Then redundant conditional values will automatically + be averaged. + +.. note:: + + All mentioned features can be combined within the :any:`Krige` class. + All other kriging classes are just shortcuts to this class with a limited list + of input parameters. + +The routines for kriging are almost identical to the routines for spatial random fields, +with regard to their handling. First you define a covariance model, as described in :ref:`tutorial_02_cov`, then you initialize the kriging class with this model: @@ -47,11 +89,12 @@ The following kriging methods are provided within the submodule :any:`gstools.krige`. .. autosummary:: + Krige Simple Ordinary Universal ExtDrift Detrended -Gallery -------- +Examples +-------- diff --git a/examples/06_conditioned_fields/00_condition_ensemble.py b/examples/06_conditioned_fields/00_condition_ensemble.py index ca9ab20fa..a622fb463 100644 --- a/examples/06_conditioned_fields/00_condition_ensemble.py +++ b/examples/06_conditioned_fields/00_condition_ensemble.py @@ -1,10 +1,10 @@ """ -Example: Conditioning with Ordinary Kriging -------------------------------------------- +Conditioning with Ordinary Kriging +---------------------------------- -Here we use ordinary kriging in 1D (for plotting reasons) with 5 given observations/conditions, +Here we use ordinary kriging in 1D (for plotting reasons) +with 5 given observations/conditions, to generate an ensemble of conditioned random fields. -The estimated mean can be accessed by ``srf.mean``. """ import numpy as np import matplotlib.pyplot as plt @@ -16,27 +16,41 @@ gridx = np.linspace(0.0, 15.0, 151) ############################################################################### +# The conditioned spatial random field class depends on a Krige class in order +# to handle the conditions. +# This is created as described in the kriging tutorial. +# +# Here we use a Gaussian covariance model and ordinary kriging for conditioning +# the spatial random field. 
-# spatial random field class -model = gs.Gaussian(dim=1, var=0.5, len_scale=2) -srf = gs.SRF(model) -srf.set_condition(cond_pos, cond_val, "ordinary") +model = gs.Gaussian(dim=1, var=0.5, len_scale=1.5) +krige = gs.krige.Ordinary(model, cond_pos, cond_val) +cond_srf = gs.CondSRF(krige) ############################################################################### fields = [] for i in range(100): - # print(i) if i % 10 == 0 else None - fields.append(srf(gridx, seed=i)) + fields.append(cond_srf(gridx, seed=i)) label = "Conditioned ensemble" if i == 0 else None plt.plot(gridx, fields[i], color="k", alpha=0.1, label=label) -plt.plot(gridx, np.full_like(gridx, srf.mean), label="estimated mean") +plt.plot(gridx, cond_srf.krige(gridx, only_mean=True), label="estimated mean") plt.plot(gridx, np.mean(fields, axis=0), linestyle=":", label="Ensemble mean") -plt.plot(gridx, srf.krige_field, linestyle="dashed", label="kriged field") +plt.plot(gridx, cond_srf.krige.field, linestyle="dashed", label="kriged field") plt.scatter(cond_pos, cond_val, color="k", zorder=10, label="Conditions") +# 99 percent confidence interval +conf = gs.tools.confidence_scaling(0.99) +plt.fill_between( + gridx, + cond_srf.krige.field - conf * np.sqrt(cond_srf.krige.krige_var), + cond_srf.krige.field + conf * np.sqrt(cond_srf.krige.krige_var), + alpha=0.3, + label="99% confidence interval", +) plt.legend() plt.show() ############################################################################### # As you can see, the kriging field coincides with the ensemble mean of the -# conditioned random fields and the estimated mean is the mean of the far-field. +# conditioned random fields and the estimated mean +# is the mean of the far-field. 
diff --git a/examples/06_conditioned_fields/01_2D_condition_ensemble.py b/examples/06_conditioned_fields/01_2D_condition_ensemble.py new file mode 100644 index 000000000..35d00bfd9 --- /dev/null +++ b/examples/06_conditioned_fields/01_2D_condition_ensemble.py @@ -0,0 +1,58 @@ +""" +Creating an Ensemble of conditioned 2D Fields +--------------------------------------------- + +Let's create an ensemble of conditioned random fields in 2D. +""" +import numpy as np +import matplotlib.pyplot as plt +import gstools as gs + + +# conditioning data (x, y, value) +cond_pos = [[0.3, 1.9, 1.1, 3.3, 4.7], [1.2, 0.6, 3.2, 4.4, 3.8]] +cond_val = [0.47, 0.56, 0.74, 1.47, 1.74] + +# grid definition for output field +x = np.arange(0, 5, 0.1) +y = np.arange(0, 5, 0.1) + +model = gs.Gaussian(dim=2, var=0.5, len_scale=5, anis=0.5, angles=-0.5) +krige = gs.Krige(model, cond_pos=cond_pos, cond_val=cond_val) +cond_srf = gs.CondSRF(krige) + +############################################################################### +# We create a list containing the generated conditioned fields. + +ens_no = 4 +field = [] +for i in range(ens_no): + field.append(cond_srf.structured([x, y], seed=i)) + +############################################################################### +# Now let's have a look at the pairwise differences between the generated +# fields. We will see, that they coincide at the given conditions. 
+ +fig, ax = plt.subplots(ens_no + 1, ens_no + 1, figsize=(8, 8)) +# plotting kwargs for scatter and image +sc_kwargs = dict(c=cond_val, edgecolors="k", vmin=0, vmax=np.max(field)) +im_kwargs = dict(extent=2 * [0, 5], origin="lower", vmin=0, vmax=np.max(field)) +for i in range(ens_no): + # conditioned fields and conditions + ax[i + 1, 0].imshow(field[i].T, **im_kwargs) + ax[i + 1, 0].scatter(*cond_pos, **sc_kwargs) + ax[i + 1, 0].set_ylabel(f"Field {i+1}", fontsize=10) + ax[0, i + 1].imshow(field[i].T, **im_kwargs) + ax[0, i + 1].scatter(*cond_pos, **sc_kwargs) + ax[0, i + 1].set_title(f"Field {i+1}", fontsize=10) + # absolute differences + for j in range(ens_no): + ax[i + 1, j + 1].imshow(np.abs(field[i] - field[j]).T, **im_kwargs) + +# beautify plots +ax[0, 0].axis("off") +for a in ax.flatten(): + a.set_xticklabels([]), a.set_yticklabels([]) + a.set_xticks([]), a.set_yticks([]) +fig.subplots_adjust(wspace=0, hspace=0) +fig.show() diff --git a/examples/06_conditioned_fields/README.rst b/examples/06_conditioned_fields/README.rst index bd5a6d9de..4d7e67981 100644 --- a/examples/06_conditioned_fields/README.rst +++ b/examples/06_conditioned_fields/README.rst @@ -1,34 +1,26 @@ -Tutorial 6: Conditioned Fields -============================== +Conditioned Fields +================== Kriged fields tend to approach the field mean outside the area of observations. To generate random fields, that coincide with given observations, but are still random according to a given covariance model away from the observations proximity, we provide the generation of conditioned random fields. - The idea behind conditioned random fields builds up on kriging. First we generate a field with a kriging method, then we generate a random field, -and finally we generate another kriged field to eliminate the error between -the random field and the kriged field of the given observations. - -To do so, you can choose between ordinary and simple kriging. 
-In case of ordinary kriging, the mean of the SRF will be overwritten by the -estimated mean. - -The setup of the spatial random field is the same as described in -:ref:`tutorial_02_cov`. -You just need to add the conditions as described in :ref:`tutorial_05_kriging`: - -.. code-block:: python +with 0 as mean and 1 as variance that will be multiplied with the kriging +standard deviation. - srf.set_condition(cond_pos, cond_val, "simple") +To do so, you can instantiate a :any:`CondSRF` class with a configured +:any:`Krige` class. -or: +The setup of a conditioned random field should be as follows: .. code-block:: python - srf.set_condition(cond_pos, cond_val, "ordinary") + krige = gs.Krige(model, cond_pos, cond_val) + cond_srf = gs.CondSRF(krige) + field = cond_srf(grid) -Gallery ------- +Examples +-------- diff --git a/examples/07_transformations/README.rst b/examples/07_transformations/README.rst index d3a2ba065..2be183ee0 100644 --- a/examples/07_transformations/README.rst +++ b/examples/07_transformations/README.rst @@ -1,5 +1,5 @@ -Tutorial 7: Field transformations -================================= +Field transformations +===================== The generated fields of gstools are ordinary Gaussian random fields. In application there are several transformations to describe real world @@ -33,5 +33,5 @@ Simply import the transform submodule and apply a transformation to the srf clas ... tf.normal_to_lognormal(srf) -Gallery ------- +Examples +-------- diff --git a/examples/08_geo_coordinates/00_field_generation.py b/examples/08_geo_coordinates/00_field_generation.py new file mode 100755 index 000000000..b5685c7d9 --- /dev/null +++ b/examples/08_geo_coordinates/00_field_generation.py @@ -0,0 +1,59 @@ +""" +Working with lat-lon random fields +---------------------------------- + +In this example, we demonstrate how to generate a random field on +geographical coordinates. + +First we setup a model, with ``latlon=True``, to get the associated +Yadrenko model.
+ +In addition, we will use the earth radius provided by :any:`EARTH_RADIUS`, +to have a meaningful length scale in km. + +To generate the field, we simply pass ``(lat, lon)`` as the position tuple +to the :any:`SRF` class. +""" +import gstools as gs + +model = gs.Gaussian(latlon=True, var=1, len_scale=777, rescale=gs.EARTH_RADIUS) + +lat = lon = range(-80, 81) +srf = gs.SRF(model, seed=1234) +field = srf.structured((lat, lon)) +srf.plot() + +############################################################################### +# This was easy as always! Now we can use this field to estimate the empirical +# variogram in order to prove, that the generated field has the correct +# geo-statistical properties. +# The :any:`vario_estimate` routine also provides a ``latlon`` switch to +# indicate, that the given field is defined on geographical variables. +# +# As we will see, everything went well... phew! + +bin_edges = [0.01 * i for i in range(30)] +bin_center, emp_vario = gs.vario_estimate( + (lat, lon), + field, + bin_edges, + latlon=True, + mesh_type="structured", + sampling_size=2000, + sampling_seed=12345, +) + +ax = model.plot("vario_yadrenko", x_max=0.3) +model.fit_variogram(bin_center, emp_vario, nugget=False) +model.plot("vario_yadrenko", ax=ax, label="fitted", x_max=0.3) +ax.scatter(bin_center, emp_vario, color="k") +print(model) + +############################################################################### +# .. note:: +# +# Note, that the estimated variogram coincides with the yadrenko variogram, +# which means it depends on the great-circle distance given in radians. +# +# Keep that in mind when defining bins: The range is at most +# :math:`\pi\approx 3.14`, which corresponds to the half globe.
diff --git a/examples/08_geo_coordinates/01_dwd_krige.py b/examples/08_geo_coordinates/01_dwd_krige.py new file mode 100755 index 000000000..bac4b31cb --- /dev/null +++ b/examples/08_geo_coordinates/01_dwd_krige.py @@ -0,0 +1,171 @@ +""" +Kriging geographical data +------------------------- + +In this example we are going to interpolate actual temperature data from +the German weather service `DWD `_. + +Data is retrieved utilizing the beautiful package +`wetterdienst `_, +which serves as an API for the DWD data. + +For better visualization, we also download a simple shapefile of the German +borderline with `cartopy `_. + +In order to keep the number of dependencies low, the calls of both functions +shown beneath are commented out. +""" +# sphinx_gallery_thumbnail_number = 2 +import numpy as np +import matplotlib.pyplot as plt +import gstools as gs + + +def get_borders_germany(): +    """Download simple german shape file with cartopy.""" +    from cartopy.io import shapereader as shp_read  # version 0.18.0 +    import geopandas as gp  # 0.8.1 + +    shpfile = shp_read.natural_earth("50m", "cultural", "admin_0_countries") +    df = gp.read_file(shpfile)  # only use the simplest polygon +    poly = df.loc[df["ADMIN"] == "Germany"]["geometry"].values[0][0] +    np.savetxt("de_borders.txt", list(poly.exterior.coords)) + + +def get_dwd_temperature(date="2020-06-09 12:00:00"): +    """Get air temperature from german weather stations from 9.6.20 12:00.""" +    from wetterdienst.dwd import observations as obs  # version 0.13.0 + +    settings = dict( +        resolution=obs.DWDObservationResolution.HOURLY, +        start_date=date, +        end_date=date, +    ) +    sites = obs.DWDObservationStations( +        parameter_set=obs.DWDObservationParameterSet.TEMPERATURE_AIR, +        period=obs.DWDObservationPeriod.RECENT, +        **settings, +    ) +    ids, lat, lon = sites.all().loc[:, ["STATION_ID", "LAT", "LON"]].values.T +    observations = obs.DWDObservationData( +        station_ids=ids, +        parameters=obs.DWDObservationParameter.HOURLY.TEMPERATURE_AIR_200, +
periods=obs.DWDObservationPeriod.RECENT, + **settings, + ) + temp = observations.all().VALUE.values + sel = np.isfinite(temp) + # select only valid temperature data + ids, lat, lon, temp = ids.astype(float)[sel], lat[sel], lon[sel], temp[sel] + head = "id, lat, lon, temp" # add a header to the file + np.savetxt("temp_obs.txt", np.array([ids, lat, lon, temp]).T, header=head) + + +############################################################################### +# If you want to download the data again, +# uncomment the two following lines. We will simply load the resulting +# files to gain the border polygon and the observed temperature along with +# the station locations given by lat-lon values. + +# get_borders_germany() +# get_dwd_temperature(date="2020-06-09 12:00:00") + +border = np.loadtxt("de_borders.txt") +ids, lat, lon, temp = np.loadtxt("temp_obs.txt").T + +############################################################################### +# First we will estimate the variogram of our temperature data. +# As the maximal bin distance we choose 8 degrees, which corresponds to a +# chordal length of about 900 km. + +bins = gs.standard_bins((lat, lon), max_dist=np.deg2rad(8), latlon=True) +bin_c, vario = gs.vario_estimate((lat, lon), temp, bins, latlon=True) + +############################################################################### +# Now we can use this estimated variogram to fit a model to it. +# Here we will use a :any:`Spherical` model. We select the ``latlon`` option +# to use the `Yadrenko` variant of the model to gain a valid model for lat-lon +# coordinates and we rescale it to the earth-radius. Otherwise the length +# scale would be given in radians representing the great-circle distance. +# +# We deselect the nugget from fitting and plot the result afterwards. +# +# .. note:: +# +# You need to plot the Yadrenko variogram, since the standard variogram +# still holds the ordinary routine that is not respecting the great-circle +# distance. 
+ +model = gs.Spherical(latlon=True, rescale=gs.EARTH_RADIUS) +model.fit_variogram(bin_c, vario, nugget=False) +ax = model.plot("vario_yadrenko", x_max=bins[-1]) +ax.scatter(bin_c, vario) +print(model) + +############################################################################### +# As we see, we have a rather large correlation length of 600 km. +# +# Now we want to interpolate the data using :any:`Universal` kriging. +# In order to tinker around with the data, we will use a north-south drift +# by assuming a linear correlation with the latitude. +# This can be done as follows: + + +def north_south_drift(lat, lon): + return lat + + +uk = gs.krige.Universal( + model=model, + cond_pos=(lat, lon), + cond_val=temp, + drift_functions=north_south_drift, +) + +############################################################################### +# Now we generate the kriging field, by defining a lat-lon grid that covers +# the whole of Germany. The :any:`Krige` class provides the option to only +# krige the mean field, so one can have a glimpse at the estimated drift. + +g_lat = np.arange(47, 56.1, 0.1) +g_lon = np.arange(5, 16.1, 0.1) + +field, k_var = uk((g_lat, g_lon), mesh_type="structured") +mean = uk((g_lat, g_lon), mesh_type="structured", only_mean=True) + +############################################################################### +# And that's it. 
Now let's have a look at the generated field and the input +# data along with the estimated mean: + +levels = np.linspace(5, 23, 64) +fig, ax = plt.subplots(1, 3, figsize=[10, 5], sharey=True) +sca = ax[0].scatter(lon, lat, c=temp, vmin=5, vmax=23, cmap="coolwarm") +co1 = ax[1].contourf(g_lon, g_lat, field, levels, cmap="coolwarm") +co2 = ax[2].contourf(g_lon, g_lat, mean, levels, cmap="coolwarm") + +[ax[i].plot(border[:, 0], border[:, 1], color="k") for i in range(3)] +[ax[i].set_xlim([5, 16]) for i in range(3)] +[ax[i].set_xlabel("Lon in deg") for i in range(3)] +ax[0].set_ylabel("Lat in deg") + +ax[0].set_title("Temperature observations at 2m\nfrom DWD (2020-06-09 12:00)") +ax[1].set_title("Interpolated temperature\nwith North-South drift") +ax[2].set_title("Estimated mean drift\nfrom Universal Kriging") + +fmt = dict(orientation="horizontal", shrink=0.5, fraction=0.1, pad=0.2) +fig.colorbar(co2, ax=ax, **fmt).set_label("T in [°C]") + +############################################################################### +# To get a better impression of the estimated north-south drift, we'll take +# a look at a cross-section at a longitude of 10 degree: + +fig, ax = plt.subplots() +ax.plot(g_lat, field[:, 50], label="Interpolated temperature") +ax.plot(g_lat, mean[:, 50], label="North-South mean drift") +ax.set_xlabel("Lat in deg") +ax.set_ylabel("T in [°C]") +ax.set_title("North-South cross-section at 10°") +ax.legend() + +############################################################################### +# Interpretation of the results is now up to you!
;-) diff --git a/examples/08_geo_coordinates/README.rst b/examples/08_geo_coordinates/README.rst new file mode 100644 index 000000000..87b419dfe --- /dev/null +++ b/examples/08_geo_coordinates/README.rst @@ -0,0 +1,67 @@ +Geographic Coordinates +====================== + +GSTools provides support for +`geographic coordinates `_ +given by: + +- latitude ``lat``: specifies the north–south position of a point on the Earth's surface +- longitude ``lon``: specifies the east–west position of a point on the Earth's surface + +If you want to use this feature for field generation or Kriging, you +have to set up a geographical covariance Model by setting ``latlon=True`` +in your desired model (see :any:`CovModel`): + +.. code-block:: python + +    import numpy as np +    import gstools as gs + +    model = gs.Gaussian(latlon=True, var=2, len_scale=np.pi / 16) + +By doing so, the model will use the associated `Yadrenko` model on a sphere +(see `here `_). +The `len_scale` is given in radians to scale the arc-length. +In order to have a more meaningful length scale, one can use the ``rescale`` +argument: + +.. code-block:: python + +    import gstools as gs + +    model = gs.Gaussian(latlon=True, var=2, len_scale=500, rescale=gs.EARTH_RADIUS) + +Then ``len_scale`` can be interpreted as given in km. + +A `Yadrenko` model :math:`C` is derived from a valid +isotropic covariance model in 3D :math:`C_{3D}` by the following relation: + +.. math:: +   C(\zeta)=C_{3D}\left(2 \cdot \sin\left(\frac{\zeta}{2}\right)\right) + +Where :math:`\zeta` is the +`great-circle distance `_. + +.. note:: + +   ``lat`` and ``lon`` are given in degree, whereas the great-circle distance +   :math:`\zeta` is given in radians. + +Note, that :math:`2 \cdot \sin(\frac{\zeta}{2})` is the +`chordal distance `_ +of two points on a sphere, which means we simply think of the earth surface +as a sphere, that is cut out of the surrounding three dimensional space, +when using the `Yadrenko` model. + +..
note:: + + Anisotropy is not available with the geographical models, since their + geometry is not euclidean. When passing values for :any:`CovModel.anis` + or :any:`CovModel.angles`, they will be ignored. + + Since the Yadrenko model comes from a 3D model, the model dimension will + be 3 (see :any:`CovModel.dim`) but the `field_dim` will be 2 in this case + (see :any:`CovModel.field_dim`). + +Examples +-------- diff --git a/examples/08_geo_coordinates/de_borders.txt b/examples/08_geo_coordinates/de_borders.txt new file mode 100644 index 000000000..c8cdb5a88 --- /dev/null +++ b/examples/08_geo_coordinates/de_borders.txt @@ -0,0 +1,492 @@ +9.524023437500005684e+00 4.752421874999999574e+01 +9.350000000000022737e+00 4.759892578124999574e+01 +9.182812500000011369e+00 4.767070312499999574e+01 +9.127539062500005684e+00 4.767070312499999574e+01 +8.881152343750017053e+00 4.765639648437499432e+01 +8.874023437500000000e+00 4.766269531249999858e+01 +8.831152343750005684e+00 4.770361328125000000e+01 +8.793066406250005684e+00 4.771655273437500000e+01 +8.770117187500005684e+00 4.770991210937499716e+01 +8.754785156250022737e+00 4.769804687499999574e+01 +8.728320312500017053e+00 4.770004882812499858e+01 +8.617871093750011369e+00 4.776611328125000000e+01 +8.572656250000022737e+00 4.777563476562500000e+01 +8.509863281250005684e+00 4.776689453124999574e+01 +8.435742187500011369e+00 4.773134765624999432e+01 +8.403417968750005684e+00 4.768779296874999574e+01 +8.413281250000011369e+00 4.766269531249999858e+01 +8.451757812500005684e+00 4.765180664062499716e+01 +8.552343750000005684e+00 4.765913085937499716e+01 +8.567089843750011369e+00 4.765190429687499574e+01 +8.570507812500011369e+00 4.763779296874999858e+01 +8.559472656250022737e+00 4.762402343750000000e+01 +8.477636718750005684e+00 4.761269531249999432e+01 +8.454003906250022737e+00 4.759619140625000000e+01 +8.430078125000022737e+00 4.759213867187499858e+01 +8.414746093750011369e+00 4.758959960937500000e+01 +8.327832031250011369e+00 
4.760693359375000000e+01 +8.198242187500000000e+00 4.760693359375000000e+01 +8.093750000000000000e+00 4.757617187500000000e+01 +7.927050781250017053e+00 4.756386718749999432e+01 +7.698046875000017053e+00 4.756987304687499574e+01 +7.615625000000022737e+00 4.759272460937499716e+01 +7.565429687500000000e+00 4.760654296874999858e+01 +7.529394531250005684e+00 4.767387695312499574e+01 +7.538574218750000000e+00 4.777363281249999716e+01 +7.593261718750000000e+00 4.790566406249999432e+01 +7.608496093750005684e+00 4.800258789062499432e+01 +7.584179687500011369e+00 4.806430664062499858e+01 +7.616601562500022737e+00 4.815678710937499574e+01 +7.705664062500005684e+00 4.828002929687500000e+01 +7.765136718750000000e+00 4.841000976562499858e+01 +7.794824218750022737e+00 4.854682617187499716e+01 +7.837988281250005684e+00 4.863603515624999574e+01 +7.922753906250022737e+00 4.869853515624999574e+01 +8.124023437500000000e+00 4.887329101562500000e+01 +8.140332031250011369e+00 4.888642578124999716e+01 +8.134863281250005684e+00 4.897358398437499716e+01 +8.080664062500005684e+00 4.898588867187499574e+01 +8.001269531250017053e+00 4.901093749999999716e+01 +7.799218750000022737e+00 4.904189453124999432e+01 +7.610937500000005684e+00 4.906176757812500000e+01 +7.525488281250005684e+00 4.908637695312499716e+01 +7.450585937500022737e+00 4.915219726562499858e+01 +7.404199218750022737e+00 4.915307617187500000e+01 +7.313378906250022737e+00 4.912954101562499432e+01 +7.199902343750011369e+00 4.911362304687499858e+01 +7.117382812500011369e+00 4.912753906249999858e+01 +7.065722656250017053e+00 4.912485351562499858e+01 +7.036718750000005684e+00 4.911269531249999432e+01 +7.022167968750011369e+00 4.912343749999999432e+01 +7.001464843750000000e+00 4.917988281249999716e+01 +6.958300781250017053e+00 4.919462890624999574e+01 +6.891210937500005684e+00 4.920751953125000000e+01 +6.849511718750022737e+00 4.920195312499999574e+01 +6.820703125000022737e+00 4.917392578124999858e+01 +6.776269531250022737e+00 
4.915415039062499858e+01 +6.735449218750005684e+00 4.916059570312499716e+01 +6.607617187500011369e+00 4.929086914062499858e+01 +6.574707031250000000e+00 4.931967773437499858e+01 +6.566308593750022737e+00 4.934619140625000000e+01 +6.534277343750005684e+00 4.939467773437499432e+01 +6.458105468750005684e+00 4.944287109375000000e+01 +6.382226562500022737e+00 4.945815429687499432e+01 +6.344335937500005684e+00 4.945273437499999858e+01 +6.348437500000017053e+00 4.951269531250000000e+01 +6.378320312500022737e+00 4.959960937500000000e+01 +6.406738281250000000e+00 4.964497070312499716e+01 +6.444628906250017053e+00 4.968203124999999432e+01 +6.484765625000022737e+00 4.970781249999999574e+01 +6.493750000000005684e+00 4.975439453125000000e+01 +6.487304687500000000e+00 4.979848632812499432e+01 +6.440917968750000000e+00 4.980532226562499432e+01 +6.324609375000022737e+00 4.983789062500000000e+01 +6.256054687500011369e+00 4.987216796874999858e+01 +6.204882812500017053e+00 4.991513671874999858e+01 +6.138183593750000000e+00 4.997431640624999716e+01 +6.109765625000022737e+00 5.003437499999999716e+01 +6.108300781250022737e+00 5.009423828125000000e+01 +6.116503906250017053e+00 5.012099609374999432e+01 +6.121289062500011369e+00 5.013935546874999716e+01 +6.175097656250017053e+00 5.023266601562500000e+01 +6.364453125000011369e+00 5.031616210937500000e+01 +6.343652343750022737e+00 5.040024414062499858e+01 +6.340917968750005684e+00 5.045175781249999858e+01 +6.294921875000000000e+00 5.048549804687499432e+01 +6.203027343750022737e+00 5.049912109374999858e+01 +6.178710937500000000e+00 5.052250976562499574e+01 +6.168457031250000000e+00 5.054536132812499716e+01 +6.235937500000005684e+00 5.059667968750000000e+01 +6.154492187500011369e+00 5.063725585937499574e+01 +6.119433593750017053e+00 5.067924804687499574e+01 +6.005957031250005684e+00 5.073222656249999574e+01 +5.993945312500017053e+00 5.075043945312499716e+01 +6.048437500000005684e+00 5.090488281249999858e+01 +6.006835937500000000e+00 
5.094995117187500000e+01 +5.955078125000000000e+00 5.097294921874999574e+01 +5.894726562500011369e+00 5.098422851562499858e+01 +5.867187500000000000e+00 5.100566406249999574e+01 +5.857519531250005684e+00 5.103012695312499858e+01 +5.868359375000011369e+00 5.104531249999999432e+01 +5.939257812500017053e+00 5.104082031249999574e+01 +5.961035156250005684e+00 5.105668945312499574e+01 +6.129980468750005684e+00 5.114741210937499716e+01 +6.136914062500011369e+00 5.116484374999999574e+01 +6.113378906250005684e+00 5.117470703124999432e+01 +6.082421875000022737e+00 5.117998046874999574e+01 +6.074804687500005684e+00 5.119902343749999574e+01 +6.075878906250011369e+00 5.122412109375000000e+01 +6.166210937500011369e+00 5.135483398437499858e+01 +6.192871093750000000e+00 5.141059570312499716e+01 +6.198828125000005684e+00 5.144999999999999574e+01 +6.193261718750022737e+00 5.148891601562499432e+01 +6.141601562500000000e+00 5.155009765624999574e+01 +6.091113281250017053e+00 5.159892578124999574e+01 +6.089355468750000000e+00 5.163779296874999858e+01 +6.052734375000000000e+00 5.165825195312499574e+01 +5.948535156250017053e+00 5.176240234374999716e+01 +5.948730468750000000e+00 5.180268554687499716e+01 +6.007617187500017053e+00 5.183398437500000000e+01 +6.089843750000000000e+00 5.185395507812499716e+01 +6.117187500000000000e+00 5.187041015624999574e+01 +6.166503906250000000e+00 5.188076171875000142e+01 +6.297070312500011369e+00 5.185073242187500142e+01 +6.355664062500011369e+00 5.182465820312499716e+01 +6.372167968750005684e+00 5.183002929687499716e+01 +6.425000000000011369e+00 5.185839843750000000e+01 +6.517578125000000000e+00 5.185395507812499716e+01 +6.741796875000005684e+00 5.191088867187500000e+01 +6.775195312500017053e+00 5.193828124999999574e+01 +6.800390625000005684e+00 5.196738281249999858e+01 +6.802441406250011369e+00 5.198017578124999716e+01 +6.715625000000017053e+00 5.203618164062499574e+01 +6.712988281250005684e+00 5.205688476562500000e+01 +6.724511718750022737e+00 
5.208022460937500142e+01 +6.749023437500000000e+00 5.209868164062499574e+01 +6.800390625000005684e+00 5.211123046875000142e+01 +6.855078125000005684e+00 5.213579101562499574e+01 +6.977246093750011369e+00 5.220551757812499716e+01 +7.019628906250005684e+00 5.226601562500000142e+01 +7.032617187500022737e+00 5.233149414062499716e+01 +7.035156250000000000e+00 5.238022460937499858e+01 +7.001855468750022737e+00 5.241899414062499574e+01 +6.968164062500022737e+00 5.244409179687500000e+01 +6.922070312500011369e+00 5.244028320312499858e+01 +6.832519531250000000e+00 5.244228515625000142e+01 +6.748828125000017053e+00 5.246401367187500142e+01 +6.702929687500017053e+00 5.249921874999999716e+01 +6.691601562500011369e+00 5.253017578125000142e+01 +6.712402343750000000e+00 5.254965820312499858e+01 +6.718750000000000000e+00 5.257358398437499858e+01 +6.705371093750017053e+00 5.259765625000000000e+01 +6.710742187500017053e+00 5.261787109374999716e+01 +6.748437500000022737e+00 5.263408203124999574e+01 +7.013183593750000000e+00 5.263354492187500000e+01 +7.033007812500017053e+00 5.265136718750000000e+01 +7.050878906250005684e+00 5.274477539062500142e+01 +7.117089843750022737e+00 5.288701171874999574e+01 +7.179492187500017053e+00 5.296621093750000142e+01 +7.189941406250000000e+00 5.299951171875000000e+01 +7.188964843750000000e+00 5.318720703124999716e+01 +7.197265625000000000e+00 5.328227539062499574e+01 +7.152050781250011369e+00 5.332695312499999574e+01 +7.053320312500005684e+00 5.337583007812499858e+01 +7.074316406250005684e+00 5.347763671874999858e+01 +7.107128906250011369e+00 5.355698242187499858e+01 +7.206445312500022737e+00 5.365454101562500000e+01 +7.285253906250005684e+00 5.368134765624999716e+01 +7.629199218750017053e+00 5.369726562500000000e+01 +8.009277343750000000e+00 5.369072265624999574e+01 +8.167089843750005684e+00 5.354340820312499716e+01 +8.108496093750005684e+00 5.346767578125000142e+01 +8.200781250000005684e+00 5.343242187499999574e+01 +8.245214843750005684e+00 
5.344531250000000000e+01 +8.279003906250011369e+00 5.351118164062499716e+01 +8.301562500000017053e+00 5.358413085937500142e+01 +8.333886718750022737e+00 5.360620117187500000e+01 +8.451367187500011369e+00 5.355170898437499716e+01 +8.492675781250000000e+00 5.351435546874999716e+01 +8.495214843750005684e+00 5.339423828124999716e+01 +8.538476562500022737e+00 5.355688476562500000e+01 +8.506250000000022737e+00 5.367075195312499858e+01 +8.528417968750005684e+00 5.378110351562499858e+01 +8.575585937500022737e+00 5.383847656249999858e+01 +8.618945312500017053e+00 5.387500000000000000e+01 +8.897753906250017053e+00 5.383569335937500000e+01 +9.205566406250000000e+00 5.385595703125000000e+01 +9.321972656250011369e+00 5.381347656250000000e+01 +9.585351562500022737e+00 5.360048828125000142e+01 +9.673144531250017053e+00 5.356562499999999716e+01 +9.783984375000017053e+00 5.355463867187499716e+01 +9.631250000000022737e+00 5.360019531249999858e+01 +9.312011718750000000e+00 5.385913085937500000e+01 +9.216406250000005684e+00 5.389121093749999858e+01 +9.069628906250017053e+00 5.390092773437499574e+01 +8.978125000000005684e+00 5.392622070312499716e+01 +8.920410156250000000e+00 5.396533203125000000e+01 +8.903515625000011369e+00 5.400029296874999574e+01 +8.906640625000022737e+00 5.426079101562499574e+01 +8.851562500000000000e+00 5.429956054687500000e+01 +8.780371093750005684e+00 5.431303710937499574e+01 +8.736035156250011369e+00 5.429521484374999574e+01 +8.644921875000022737e+00 5.429497070312499574e+01 +8.625781250000017053e+00 5.435395507812499716e+01 +8.648046875000005684e+00 5.439765624999999716e+01 +8.831152343750005684e+00 5.442753906249999574e+01 +8.951855468750011369e+00 5.446757812499999574e+01 +8.957226562500011369e+00 5.453833007812500000e+01 +8.880957031250005684e+00 5.459394531249999716e+01 +8.789648437500005684e+00 5.469594726562500142e+01 +8.682324218750011369e+00 5.479184570312499858e+01 +8.670312500000022737e+00 5.490341796874999858e+01 +8.670703125000017053e+00 
5.490332031250000000e+01 +8.857226562500017053e+00 5.490112304687500000e+01 +8.902929687500005684e+00 5.489692382812499716e+01 +9.185839843750017053e+00 5.484467773437499716e+01 +9.254980468750005684e+00 5.480800781250000142e+01 +9.341992187500011369e+00 5.480629882812500142e+01 +9.498730468750011369e+00 5.484042968749999858e+01 +9.615820312500005684e+00 5.485541992187499716e+01 +9.661230468750005684e+00 5.483437500000000142e+01 +9.725000000000022737e+00 5.482553710937499858e+01 +9.739746093750000000e+00 5.482553710937499858e+01 +9.745898437500017053e+00 5.480717773437499574e+01 +9.892285156250011369e+00 5.478061523437499858e+01 +9.953808593750011369e+00 5.473828125000000000e+01 +1.002216796875001137e+01 5.467392578124999858e+01 +1.002880859375000000e+01 5.458129882812500000e+01 +9.941308593750022737e+00 5.451464843750000000e+01 +9.868652343750000000e+00 5.447246093749999574e+01 +1.014345703125002274e+01 5.448842773437500142e+01 +1.017080078125002274e+01 5.445019531250000000e+01 +1.021240234375000000e+01 5.440893554687500000e+01 +1.036044921875000568e+01 5.443833007812499858e+01 +1.073154296875000568e+01 5.431625976562499858e+01 +1.095595703125002274e+01 5.437568359374999716e+01 +1.101337890625001137e+01 5.437915039062500000e+01 +1.106435546875002274e+01 5.428051757812500000e+01 +1.100859375000001705e+01 5.418115234375000000e+01 +1.081074218750001137e+01 5.407514648437499716e+01 +1.085458984375000568e+01 5.400981445312499574e+01 +1.091777343750001705e+01 5.399531249999999716e+01 +1.110429687500001705e+01 5.400917968750000142e+01 +1.139960937500001137e+01 5.394462890624999574e+01 +1.146113281250001137e+01 5.396474609375000142e+01 +1.170058593750002274e+01 5.411352539062500000e+01 +1.179628906250002274e+01 5.414545898437499716e+01 +1.211132812500000000e+01 5.416831054687499858e+01 +1.216865234375001137e+01 5.422587890624999574e+01 +1.229628906250002274e+01 5.428378906249999858e+01 +1.237851562500000568e+01 5.434702148437499858e+01 +1.257539062500001137e+01 
5.446738281249999858e+01 +1.277910156250001705e+01 5.444570312500000142e+01 +1.289804687500000568e+01 5.442265624999999574e+01 +1.302861328125001705e+01 5.441103515625000142e+01 +1.314746093750000000e+01 5.428271484375000000e+01 +1.344804687500001705e+01 5.414086914062500000e+01 +1.372421875000000568e+01 5.415322265625000142e+01 +1.382226562500000000e+01 5.401904296875000000e+01 +1.386552734375001705e+01 5.385336914062499858e+01 +1.395039062500001137e+01 5.380136718749999858e+01 +1.402500000000000568e+01 5.376743164062499858e+01 +1.425000000000000000e+01 5.373188476562499716e+01 +1.425888671875000568e+01 5.372963867187500142e+01 +1.426611328125000000e+01 5.370712890624999858e+01 +1.427988281250000568e+01 5.362475585937500000e+01 +1.429873046875002274e+01 5.355644531249999574e+01 +1.441455078125000000e+01 5.328349609374999574e+01 +1.441230468750001137e+01 5.321674804687499716e+01 +1.441093750000001705e+01 5.319902343749999574e+01 +1.436855468750002274e+01 5.310556640624999858e+01 +1.429316406250001137e+01 5.302675781250000142e+01 +1.419365234375001705e+01 5.298232421875000142e+01 +1.413886718750001137e+01 5.293286132812500000e+01 +1.412861328125001137e+01 5.287822265624999574e+01 +1.425371093750001705e+01 5.278251953124999574e+01 +1.451406250000002274e+01 5.264560546874999858e+01 +1.461943359375001705e+01 5.252851562499999716e+01 +1.456972656250002274e+01 5.243110351562499716e+01 +1.455458984375002274e+01 5.235966796874999574e+01 +1.457392578125001137e+01 5.231416015624999716e+01 +1.461562500000002274e+01 5.227763671874999574e+01 +1.467988281250001137e+01 5.225000000000000000e+01 +1.470537109375001705e+01 5.220747070312499716e+01 +1.469238281250000000e+01 5.215004882812500142e+01 +1.470458984375000000e+01 5.211020507812499858e+01 +1.475253906250000568e+01 5.208183593749999574e+01 +1.474814453125000568e+01 5.207080078125000000e+01 +1.472480468750001137e+01 5.203085937499999858e+01 +1.469296875000000568e+01 5.195800781250000000e+01 +1.467490234375000568e+01 
5.190483398437499574e+01 +1.460166015625000568e+01 5.183237304687499858e+01 +1.462392578125002274e+01 5.177080078124999574e+01 +1.468134765625001137e+01 5.169819335937499716e+01 +1.472490234375001705e+01 5.166171874999999858e+01 +1.473867187500002274e+01 5.162714843749999716e+01 +1.471093750000000000e+01 5.154492187500000000e+01 +1.472470703125000568e+01 5.152387695312499716e+01 +1.490595703125001137e+01 5.146333007812499716e+01 +1.493554687500000000e+01 5.143535156249999574e+01 +1.495312500000000000e+01 5.137714843749999716e+01 +1.501660156250000000e+01 5.125273437499999574e+01 +1.496386718750000000e+01 5.109511718749999432e+01 +1.491748046875000000e+01 5.100874023437499716e+01 +1.481425781250001705e+01 5.087163085937499574e+01 +1.480937500000001705e+01 5.085898437499999858e+01 +1.479746093750000568e+01 5.084233398437499574e+01 +1.476650390625002274e+01 5.081831054687499716e+01 +1.472333984375001137e+01 5.081469726562500000e+01 +1.465820312500000000e+01 5.083261718749999858e+01 +1.461357421875001705e+01 5.085556640624999858e+01 +1.462382812500001705e+01 5.091474609374999716e+01 +1.459521484375000000e+01 5.091860351562499432e+01 +1.455966796875000568e+01 5.095493164062499858e+01 +1.454570312500001705e+01 5.099394531249999574e+01 +1.450732421875000000e+01 5.100986328124999858e+01 +1.436728515625000568e+01 5.102626953124999432e+01 +1.431972656250002274e+01 5.103779296874999716e+01 +1.428320312500000000e+01 5.102949218749999716e+01 +1.425585937500000000e+01 5.100185546874999432e+01 +1.427333984375002274e+01 5.097690429687499858e+01 +1.429941406250000568e+01 5.095258789062499716e+01 +1.437705078125000568e+01 5.091406250000000000e+01 +1.436904296875002274e+01 5.089873046874999574e+01 +1.420175781250000568e+01 5.086123046874999432e+01 +1.409648437500001705e+01 5.082275390625000000e+01 +1.399843750000002274e+01 5.080112304687499858e+01 +1.389853515625000568e+01 5.076127929687499574e+01 +1.370136718750001137e+01 5.071650390624999716e+01 +1.355673828125000568e+01 
5.070463867187499574e+01 +1.352656250000001137e+01 5.069282226562499716e+01 +1.347255859375002274e+01 5.061694335937500000e+01 +1.343613281250000568e+01 5.060107421875000000e+01 +1.340117187500001705e+01 5.060932617187499716e+01 +1.337460937500000568e+01 5.062172851562499432e+01 +1.334101562500001137e+01 5.061142578124999858e+01 +1.330605468750002274e+01 5.058632812499999432e+01 +1.326953125000000000e+01 5.057641601562500000e+01 +1.323769531250002274e+01 5.057675781249999858e+01 +1.318115234375000000e+01 5.051049804687500000e+01 +1.301640625000001705e+01 5.049038085937499432e+01 +1.299707031250000000e+01 5.045605468750000000e+01 +1.296679687500000000e+01 5.041621093749999716e+01 +1.294267578125001705e+01 5.040644531249999716e+01 +1.286826171875000568e+01 5.042221679687499858e+01 +1.276542968750001705e+01 5.043095703124999574e+01 +1.270644531250002274e+01 5.040913085937499716e+01 +1.263554687500001705e+01 5.039707031249999858e+01 +1.254902343750001137e+01 5.039340820312499858e+01 +1.245263671875000000e+01 5.034980468749999716e+01 +1.235859375000001137e+01 5.027324218749999574e+01 +1.230566406250000000e+01 5.020571289062499432e+01 +1.227734375000000000e+01 5.018144531249999574e+01 +1.223115234375001137e+01 5.024487304687500000e+01 +1.217480468750000000e+01 5.028837890624999574e+01 +1.213486328125000568e+01 5.031093749999999432e+01 +1.209921875000000568e+01 5.031098632812499716e+01 +1.208984375000000000e+01 5.030175781250000000e+01 +1.208974609375002274e+01 5.026855468750000000e+01 +1.212783203125002274e+01 5.021342773437499574e+01 +1.217500000000001137e+01 5.017583007812499574e+01 +1.218251953125002274e+01 5.014804687499999858e+01 +1.220781250000001705e+01 5.009750976562499858e+01 +1.227646484375000568e+01 5.004233398437499858e+01 +1.238417968750002274e+01 4.999858398437499574e+01 +1.245761718750000568e+01 4.995551757812499716e+01 +1.251201171875001705e+01 4.989580078124999574e+01 +1.251250000000001705e+01 4.987744140625000000e+01 +1.249755859375000000e+01 
4.985307617187499574e+01 +1.247187500000001137e+01 4.983007812500000000e+01 +1.245019531250000000e+01 4.980014648437499858e+01 +1.239052734375002274e+01 4.973964843749999432e+01 +1.240820312500000000e+01 4.971318359374999574e+01 +1.245703125000000000e+01 4.967978515624999858e+01 +1.250029296875001705e+01 4.963969726562499574e+01 +1.255576171875000568e+01 4.957485351562499432e+01 +1.263203125000001137e+01 4.946123046874999574e+01 +1.268115234375000000e+01 4.941450195312499716e+01 +1.274785156250001705e+01 4.936621093750000000e+01 +1.281337890625002274e+01 4.932934570312500000e+01 +1.291669921875001137e+01 4.933046874999999432e+01 +1.302373046875001705e+01 4.926010742187499858e+01 +1.314052734375002274e+01 4.915834960937499432e+01 +1.322783203125001705e+01 4.911166992187499858e+01 +1.328876953125001137e+01 4.909746093749999574e+01 +1.333906250000001137e+01 4.906079101562500000e+01 +1.338369140625002274e+01 4.900810546874999574e+01 +1.340117187500001705e+01 4.897758789062499574e+01 +1.344072265625001705e+01 4.895556640625000000e+01 +1.354765625000001705e+01 4.895966796874999716e+01 +1.368496093750002274e+01 4.887670898437500000e+01 +1.376992187500002274e+01 4.881596679687499574e+01 +1.381474609375001705e+01 4.876694335937499858e+01 +1.380292968750001137e+01 4.874750976562499716e+01 +1.379746093750000568e+01 4.868642578124999432e+01 +1.379882812500000000e+01 4.862167968749999858e+01 +1.378535156250001137e+01 4.858745117187499574e+01 +1.372392578125001705e+01 4.854238281249999432e+01 +1.369218750000001705e+01 4.853276367187499574e+01 +1.367519531250002274e+01 4.852304687499999858e+01 +1.348662109375001705e+01 4.858183593749999574e+01 +1.347167968750000000e+01 4.857182617187499574e+01 +1.345986328125002274e+01 4.856455078124999858e+01 +1.340937500000001137e+01 4.839414062499999858e+01 +1.337460937500000568e+01 4.836137695312499574e+01 +1.332285156250000568e+01 4.833124999999999716e+01 +1.321523437500002274e+01 4.830190429687499432e+01 +1.314042968750001705e+01 
4.828994140624999432e+01 +1.308212890625000568e+01 4.827509765624999716e+01 +1.289746093750000000e+01 4.820371093749999858e+01 +1.281425781250001705e+01 4.816083984374999716e+01 +1.276035156250000568e+01 4.810698242187499574e+01 +1.276005859375001705e+01 4.807597656249999574e+01 +1.284990234375001705e+01 4.798481445312499716e+01 +1.295351562500002274e+01 4.789062500000000000e+01 +1.295419921875000568e+01 4.780776367187499432e+01 +1.290830078125000568e+01 4.774580078124999716e+01 +1.289765625000001137e+01 4.772187499999999716e+01 +1.292812500000002274e+01 4.771284179687499716e+01 +1.298554687500001137e+01 4.770942382812499716e+01 +1.303359375000002274e+01 4.769873046875000000e+01 +1.305410156250002274e+01 4.765512695312499858e+01 +1.304794921875000568e+01 4.757915039062499574e+01 +1.303154296875001705e+01 4.750800781249999716e+01 +1.301435546875001137e+01 4.747807617187499574e+01 +1.296806640625001705e+01 4.747568359374999858e+01 +1.287890625000000000e+01 4.750644531249999858e+01 +1.280937500000001705e+01 4.754218749999999716e+01 +1.278281250000000568e+01 4.756416015624999716e+01 +1.278115234375002274e+01 4.759042968749999858e+01 +1.279619140625001705e+01 4.760703124999999858e+01 +1.277138671875002274e+01 4.763940429687500000e+01 +1.268583984375001705e+01 4.766933593749999432e+01 +1.259423828125000000e+01 4.765629882812499574e+01 +1.252656250000001137e+01 4.763613281249999432e+01 +1.248291015625000000e+01 4.763730468749999858e+01 +1.243574218750001137e+01 4.766611328124999858e+01 +1.236318359375002274e+01 4.768818359374999716e+01 +1.226835937500001705e+01 4.770273437499999858e+01 +1.220927734375001705e+01 4.771826171875000000e+01 +1.219687500000000568e+01 4.770908203124999858e+01 +1.220380859375001137e+01 4.764672851562500000e+01 +1.218564453125000568e+01 4.761953124999999432e+01 +1.171679687500000000e+01 4.758349609375000000e+01 +1.157392578125001137e+01 4.754975585937499716e+01 +1.146992187500001137e+01 4.750610351562500000e+01 +1.139296875000002274e+01 
4.748715820312499858e+01 +1.137412109375000568e+01 4.746025390624999574e+01 +1.129794921875000568e+01 4.742490234374999858e+01 +1.121191406250000000e+01 4.741362304687499574e+01 +1.119121093750001705e+01 4.742519531249999432e+01 +1.113603515625001705e+01 4.740888671874999716e+01 +1.104199218750000000e+01 4.739311523437499574e+01 +1.098085937500002274e+01 4.739814453124999716e+01 +1.095214843750000000e+01 4.742670898437499716e+01 +1.089394531250002274e+01 4.747045898437500000e+01 +1.087060546875000000e+01 4.750078124999999574e+01 +1.087304687500000000e+01 4.752021484374999716e+01 +1.074160156250002274e+01 4.752412109374999716e+01 +1.065869140625000000e+01 4.754721679687499858e+01 +1.048281250000002274e+01 4.754179687499999574e+01 +1.043945312500000000e+01 4.755156249999999574e+01 +1.043037109375001137e+01 4.754106445312499574e+01 +1.040390625000000568e+01 4.741699218750000000e+01 +1.036914062500000000e+01 4.736606445312499858e+01 +1.031279296875001705e+01 4.731342773437499716e+01 +1.024062500000002274e+01 4.728413085937499716e+01 +1.018300781250002274e+01 4.727880859375000000e+01 +1.018574218750001137e+01 4.731718749999999574e+01 +1.020029296875000568e+01 4.736342773437499432e+01 +1.015878906250000568e+01 4.737426757812500000e+01 +1.009648437500001705e+01 4.737958984374999716e+01 +1.006630859375002274e+01 4.739335937499999574e+01 +1.007421875000000000e+01 4.742851562499999574e+01 +1.005986328125001705e+01 4.744907226562499858e+01 +1.003408203125002274e+01 4.747358398437499716e+01 +9.971582031250022737e+00 4.750532226562499716e+01 +9.839160156250017053e+00 4.755229492187499574e+01 +9.748925781250022737e+00 4.757553710937499858e+01 +9.715136718750017053e+00 4.755078125000000000e+01 +9.650585937500011369e+00 4.752587890625000000e+01 +9.548925781250005684e+00 4.753403320312499858e+01 +9.524023437500005684e+00 4.752421874999999574e+01 diff --git a/examples/08_geo_coordinates/temp_obs.txt b/examples/08_geo_coordinates/temp_obs.txt new file mode 100644 index 
000000000..aa8e60fc8 --- /dev/null +++ b/examples/08_geo_coordinates/temp_obs.txt @@ -0,0 +1,494 @@ +# id, lat, lon, temp +4.400000000000000000e+01 5.293359999999999843e+01 8.237000000000000099e+00 1.569999999999999929e+01 +7.300000000000000000e+01 4.861590000000000344e+01 1.305059999999999931e+01 1.390000000000000036e+01 +7.800000000000000000e+01 5.248530000000000229e+01 7.912600000000000300e+00 1.509999999999999964e+01 +9.100000000000000000e+01 5.074459999999999837e+01 9.345000000000000639e+00 1.700000000000000000e+01 +9.600000000000000000e+01 5.294369999999999976e+01 1.285180000000000078e+01 2.189999999999999858e+01 +1.020000000000000000e+02 5.386330000000000240e+01 8.127499999999999503e+00 1.190000000000000036e+01 +1.250000000000000000e+02 4.783420000000000272e+01 1.086669999999999980e+01 1.140000000000000036e+01 +1.310000000000000000e+02 5.108809999999999718e+01 1.293260000000000076e+01 1.719999999999999929e+01 +1.420000000000000000e+02 4.840599999999999881e+01 1.131170000000000009e+01 1.290000000000000036e+01 +1.500000000000000000e+02 4.972729999999999961e+01 8.116400000000000503e+00 1.719999999999999929e+01 +1.510000000000000000e+02 4.946909999999999741e+01 1.185459999999999958e+01 1.340000000000000036e+01 +1.540000000000000000e+02 4.801970000000000027e+01 1.229250000000000043e+01 1.390000000000000036e+01 +1.610000000000000000e+02 5.042369999999999663e+01 7.420200000000000351e+00 1.810000000000000142e+01 +1.640000000000000000e+02 5.303159999999999741e+01 1.399080000000000013e+01 2.130000000000000071e+01 +1.670000000000000000e+02 5.384120000000000061e+01 1.368459999999999965e+01 2.130000000000000071e+01 +1.830000000000000000e+02 5.467920000000000158e+01 1.343430000000000035e+01 1.739999999999999858e+01 +1.910000000000000000e+02 4.996940000000000026e+01 9.911400000000000432e+00 1.860000000000000142e+01 +1.980000000000000000e+02 5.137449999999999761e+01 1.129199999999999982e+01 2.019999999999999929e+01 +2.170000000000000000e+02 4.787740000000000151e+01 
1.136430000000000007e+01 1.269999999999999929e+01 +2.220000000000000000e+02 5.059080000000000155e+01 1.271390000000000065e+01 1.580000000000000071e+01 +2.320000000000000000e+02 4.842530000000000001e+01 1.094170000000000087e+01 1.340000000000000036e+01 +2.570000000000000000e+02 4.872699999999999676e+01 8.245699999999999363e+00 1.359999999999999964e+01 +2.590000000000000000e+02 4.780639999999999645e+01 7.638700000000000045e+00 1.440000000000000036e+01 +2.820000000000000000e+02 4.987429999999999808e+01 1.092060000000000031e+01 1.580000000000000071e+01 +2.940000000000000000e+02 5.231989999999999696e+01 9.429999999999999716e+00 2.150000000000000000e+01 +2.980000000000000000e+02 5.434060000000000201e+01 1.271080000000000076e+01 1.769999999999999929e+01 +3.030000000000000000e+02 5.206139999999999901e+01 1.349959999999999916e+01 2.119999999999999929e+01 +3.140000000000000000e+02 5.116040000000000276e+01 1.450420000000000087e+01 2.039999999999999858e+01 +3.200000000000000000e+02 4.996670000000000300e+01 1.151970000000000027e+01 1.469999999999999929e+01 +3.300000000000000000e+02 4.956170000000000186e+01 8.967299999999999827e+00 1.409999999999999964e+01 +3.420000000000000000e+02 5.231700000000000017e+01 8.169399999999999551e+00 1.639999999999999858e+01 +3.680000000000000000e+02 5.281519999999999726e+01 9.924799999999999400e+00 1.919999999999999929e+01 +3.770000000000000000e+02 4.910699999999999932e+01 7.996699999999999697e+00 1.669999999999999929e+01 +3.790000000000000000e+02 5.090740000000000265e+01 1.126650000000000063e+01 1.810000000000000142e+01 +3.900000000000000000e+02 5.098369999999999891e+01 8.368299999999999628e+00 1.480000000000000071e+01 +4.000000000000000000e+02 5.263089999999999691e+01 1.350220000000000020e+01 2.250000000000000000e+01 +4.030000000000000000e+02 5.245369999999999777e+01 1.330170000000000030e+01 1.919999999999999929e+01 +4.100000000000000000e+02 5.240400000000000347e+01 1.373090000000000011e+01 2.080000000000000071e+01 +4.200000000000000000e+02 
5.254469999999999885e+01 1.355979999999999919e+01 2.169999999999999929e+01 +4.270000000000000000e+02 5.238069999999999737e+01 1.353059999999999974e+01 2.250000000000000000e+01 +4.300000000000000000e+02 5.256439999999999912e+01 1.330879999999999974e+01 2.060000000000000142e+01 +4.330000000000000000e+02 5.246750000000000114e+01 1.340210000000000079e+01 2.060000000000000142e+01 +4.450000000000000000e+02 5.182180000000000319e+01 1.171100000000000030e+01 2.200000000000000000e+01 +4.600000000000000000e+02 4.926409999999999911e+01 6.686799999999999855e+00 1.519999999999999929e+01 +5.350000000000000000e+02 5.003719999999999857e+01 7.307900000000000063e+00 1.769999999999999929e+01 +5.910000000000000000e+02 5.339110000000000156e+01 1.068779999999999930e+01 1.889999999999999858e+01 +5.960000000000000000e+02 5.400280000000000058e+01 1.119079999999999941e+01 1.630000000000000071e+01 +6.030000000000000000e+02 5.072930000000000206e+01 7.203999999999999737e+00 1.760000000000000142e+01 +6.170000000000000000e+02 5.187299999999999756e+01 6.886300000000000310e+00 1.559999999999999964e+01 +6.560000000000000000e+02 5.172339999999999804e+01 1.060210000000000008e+01 1.580000000000000071e+01 +6.620000000000000000e+02 5.229149999999999920e+01 1.044640000000000057e+01 1.939999999999999858e+01 +6.910000000000000000e+02 5.304500000000000171e+01 8.797900000000000276e+00 1.650000000000000000e+01 +7.010000000000000000e+02 5.353320000000000078e+01 8.576100000000000279e+00 1.380000000000000071e+01 +7.040000000000000000e+02 5.344509999999999650e+01 9.138999999999999346e+00 1.610000000000000142e+01 +7.220000000000000000e+02 5.179860000000000042e+01 1.061829999999999963e+01 1.009999999999999964e+01 +7.550000000000000000e+02 4.951820000000000022e+01 9.321300000000000807e+00 1.600000000000000000e+01 +7.570000000000000000e+02 4.796249999999999858e+01 7.998300000000000409e+00 1.280000000000000071e+01 +7.600000000000000000e+02 5.336290000000000333e+01 9.943500000000000227e+00 1.750000000000000000e+01 
+7.660000000000000000e+02 5.017459999999999809e+01 7.059499999999999886e+00 1.680000000000000071e+01 +7.690000000000000000e+02 5.228170000000000073e+01 9.088900000000000645e+00 1.889999999999999858e+01 +8.170000000000000000e+02 5.103059999999999974e+01 8.814600000000000435e+00 1.850000000000000000e+01 +8.400000000000000000e+02 5.043130000000000024e+01 1.261139999999999972e+01 1.080000000000000071e+01 +8.500000000000000000e+02 5.259590000000000032e+01 1.002960000000000029e+01 1.939999999999999858e+01 +8.530000000000000000e+02 5.079129999999999967e+01 1.287199999999999989e+01 1.710000000000000142e+01 +8.560000000000000000e+02 4.788430000000000319e+01 1.254039999999999999e+01 1.330000000000000071e+01 +8.670000000000000000e+02 5.030660000000000309e+01 1.096790000000000020e+01 1.739999999999999858e+01 +8.800000000000000000e+02 5.177600000000000335e+01 1.431680000000000064e+01 1.989999999999999858e+01 +8.910000000000000000e+02 5.387129999999999797e+01 8.705799999999999983e+00 1.469999999999999929e+01 +8.960000000000000000e+02 5.107780000000000342e+01 1.086190000000000033e+01 1.869999999999999929e+01 +9.170000000000000000e+02 4.988089999999999691e+01 8.677899999999999281e+00 1.639999999999999858e+01 +9.530000000000000000e+02 4.976189999999999714e+01 7.054199999999999804e+00 1.660000000000000142e+01 +9.540000000000000000e+02 5.417960000000000065e+01 7.458700000000000330e+00 1.250000000000000000e+01 +9.630000000000000000e+02 5.258809999999999718e+01 8.342399999999999594e+00 1.660000000000000142e+01 +9.790000000000000000e+02 5.073640000000000327e+01 8.267200000000000770e+00 2.060000000000000142e+01 +9.830000000000000000e+02 4.855619999999999692e+01 1.055990000000000073e+01 1.350000000000000000e+01 +9.910000000000000000e+02 5.091159999999999997e+01 1.370870000000000033e+01 1.869999999999999929e+01 +1.001000000000000000e+03 5.164509999999999934e+01 1.357469999999999999e+01 2.110000000000000142e+01 +1.048000000000000000e+03 5.112800000000000011e+01 1.375430000000000064e+01 
1.869999999999999929e+01 +1.050000000000000000e+03 5.102210000000000178e+01 1.384699999999999953e+01 2.000000000000000000e+01 +1.051000000000000000e+03 5.102479999999999905e+01 1.377500000000000036e+01 1.980000000000000071e+01 +1.052000000000000000e+03 5.221739999999999782e+01 1.216409999999999947e+01 2.150000000000000000e+01 +1.072000000000000000e+03 4.947189999999999799e+01 8.192899999999999849e+00 1.669999999999999929e+01 +1.078000000000000000e+03 5.129599999999999937e+01 6.768600000000000172e+00 1.660000000000000142e+01 +1.103000000000000000e+03 4.810029999999999717e+01 1.198719999999999963e+01 1.340000000000000036e+01 +1.107000000000000000e+03 4.985199999999999676e+01 1.049910000000000032e+01 1.559999999999999964e+01 +1.161000000000000000e+03 4.887769999999999726e+01 1.123489999999999966e+01 1.450000000000000000e+01 +1.197000000000000000e+03 4.898949999999999960e+01 1.013119999999999976e+01 1.459999999999999964e+01 +1.200000000000000000e+03 5.406909999999999883e+01 9.010500000000000398e+00 1.519999999999999929e+01 +1.207000000000000000e+03 5.027049999999999841e+01 1.227420000000000044e+01 1.450000000000000000e+01 +1.214000000000000000e+03 4.820120000000000005e+01 8.108800000000000452e+00 1.250000000000000000e+01 +1.224000000000000000e+03 4.813779999999999859e+01 7.835099999999999731e+00 1.400000000000000000e+01 +1.228000000000000000e+03 5.416510000000000247e+01 6.346000000000000085e+00 1.180000000000000071e+01 +1.246000000000000000e+03 5.184179999999999922e+01 8.060700000000000642e+00 1.819999999999999929e+01 +1.262000000000000000e+03 4.834770000000000323e+01 1.181339999999999968e+01 1.350000000000000000e+01 +1.266000000000000000e+03 5.429919999999999902e+01 9.316200000000000259e+00 1.559999999999999964e+01 +1.270000000000000000e+03 5.098290000000000077e+01 1.096080000000000076e+01 1.639999999999999858e+01 +1.279000000000000000e+03 4.964970000000000283e+01 1.100740000000000052e+01 1.480000000000000071e+01 +1.297000000000000000e+03 5.120409999999999684e+01 
1.001379999999999981e+01 1.639999999999999858e+01 +1.300000000000000000e+03 5.125399999999999778e+01 8.156499999999999417e+00 1.309999999999999964e+01 +1.303000000000000000e+03 5.140409999999999968e+01 6.967699999999999783e+00 1.559999999999999964e+01 +1.327000000000000000e+03 5.071189999999999998e+01 6.790499999999999758e+00 1.440000000000000036e+01 +1.332000000000000000e+03 4.848319999999999652e+01 1.272409999999999997e+01 1.290000000000000036e+01 +1.339000000000000000e+03 5.291570000000000107e+01 1.018849999999999945e+01 1.869999999999999929e+01 +1.346000000000000000e+03 4.787489999999999668e+01 8.003800000000000026e+00 4.700000000000000178e+00 +1.357000000000000000e+03 4.998069999999999879e+01 1.183760000000000012e+01 1.340000000000000036e+01 +1.358000000000000000e+03 5.042830000000000013e+01 1.295350000000000001e+01 9.400000000000000355e+00 +1.411000000000000000e+03 5.053090000000000259e+01 1.004800000000000004e+01 1.300000000000000000e+01 +1.420000000000000000e+03 5.002590000000000003e+01 8.521300000000000097e+00 1.800000000000000000e+01 +1.424000000000000000e+03 5.012689999999999912e+01 8.669399999999999551e+00 1.819999999999999929e+01 +1.443000000000000000e+03 4.802320000000000277e+01 7.834299999999999820e+00 1.380000000000000071e+01 +1.451000000000000000e+03 5.382770000000000010e+01 9.249299999999999855e+00 1.619999999999999929e+01 +1.468000000000000000e+03 4.845380000000000109e+01 8.409000000000000696e+00 9.599999999999999645e+00 +1.503000000000000000e+03 5.306430000000000291e+01 7.902199999999999669e+00 1.559999999999999964e+01 +1.504000000000000000e+03 5.111899999999999977e+01 9.279899999999999594e+00 1.869999999999999929e+01 +1.526000000000000000e+03 5.056680000000000064e+01 9.653299999999999770e+00 1.819999999999999929e+01 +1.544000000000000000e+03 5.251290000000000191e+01 1.139409999999999989e+01 2.100000000000000000e+01 +1.550000000000000000e+03 4.748299999999999699e+01 1.106209999999999916e+01 1.440000000000000036e+01 +1.580000000000000000e+03 
4.998590000000000089e+01 7.954799999999999649e+00 1.989999999999999858e+01 +1.584000000000000000e+03 4.792419999999999902e+01 8.647299999999999542e+00 1.130000000000000071e+01 +1.587000000000000000e+03 4.894809999999999661e+01 1.142890000000000050e+01 1.309999999999999964e+01 +1.590000000000000000e+03 5.149419999999999931e+01 6.246299999999999741e+00 1.680000000000000071e+01 +1.602000000000000000e+03 4.843299999999999983e+01 7.993000000000000327e+00 1.450000000000000000e+01 +1.605000000000000000e+03 5.238750000000000284e+01 1.216009999999999991e+01 2.230000000000000071e+01 +1.612000000000000000e+03 5.088130000000000308e+01 1.212889999999999979e+01 1.830000000000000071e+01 +1.639000000000000000e+03 5.060170000000000101e+01 8.643900000000000361e+00 1.760000000000000142e+01 +1.645000000000000000e+03 5.096560000000000201e+01 9.050000000000000711e+00 1.930000000000000071e+01 +1.666000000000000000e+03 5.482730000000000103e+01 9.505800000000000693e+00 1.469999999999999929e+01 +1.684000000000000000e+03 5.116219999999999857e+01 1.495059999999999967e+01 1.869999999999999929e+01 +1.691000000000000000e+03 5.150019999999999953e+01 9.950699999999999434e+00 1.580000000000000071e+01 +1.694000000000000000e+03 5.360600000000000165e+01 1.210330000000000084e+01 1.960000000000000142e+01 +1.721000000000000000e+03 4.966400000000000148e+01 1.122390000000000043e+01 1.259999999999999964e+01 +1.735000000000000000e+03 4.878940000000000055e+01 1.362899999999999956e+01 1.280000000000000071e+01 +1.736000000000000000e+03 5.357309999999999661e+01 1.067970000000000041e+01 1.800000000000000000e+01 +1.757000000000000000e+03 5.409669999999999845e+01 1.340559999999999974e+01 1.919999999999999929e+01 +1.759000000000000000e+03 5.424369999999999692e+01 1.391019999999999968e+01 1.850000000000000000e+01 +1.766000000000000000e+03 5.213439999999999941e+01 7.696900000000000297e+00 1.610000000000000142e+01 +1.832000000000000000e+03 4.911290000000000333e+01 1.313380000000000081e+01 7.000000000000000000e+00 
+1.863000000000000000e+03 5.026670000000000016e+01 9.185399999999999565e+00 1.730000000000000071e+01 +1.869000000000000000e+03 5.331530000000000058e+01 1.393379999999999974e+01 2.019999999999999929e+01 +1.886000000000000000e+03 4.848780000000000001e+01 1.026079999999999970e+01 1.290000000000000036e+01 +1.964000000000000000e+03 4.994449999999999790e+01 6.382100000000000328e+00 1.730000000000000071e+01 +1.975000000000000000e+03 5.363320000000000221e+01 9.988099999999999312e+00 1.739999999999999858e+01 +1.981000000000000000e+03 5.347769999999999868e+01 9.895699999999999719e+00 1.860000000000000142e+01 +2.014000000000000000e+03 5.246439999999999770e+01 9.677899999999999281e+00 2.039999999999999858e+01 +2.023000000000000000e+03 4.879180000000000206e+01 1.070620000000000083e+01 1.390000000000000036e+01 +2.039000000000000000e+03 5.190019999999999811e+01 1.056990000000000052e+01 1.789999999999999858e+01 +2.044000000000000000e+03 5.165200000000000102e+01 1.113669999999999938e+01 1.750000000000000000e+01 +2.074000000000000000e+03 4.837519999999999953e+01 8.980000000000000426e+00 1.130000000000000071e+01 +2.110000000000000000e+03 5.104110000000000014e+01 6.104199999999999626e+00 1.490000000000000036e+01 +2.115000000000000000e+03 5.417499999999999716e+01 7.892000000000000348e+00 1.400000000000000000e+01 +2.171000000000000000e+03 5.085199999999999676e+01 9.737700000000000244e+00 1.869999999999999929e+01 +2.174000000000000000e+03 5.162550000000000239e+01 1.036950000000000038e+01 1.619999999999999929e+01 +2.201000000000000000e+03 5.457500000000000284e+01 1.310440000000000005e+01 1.680000000000000071e+01 +2.211000000000000000e+03 5.073709999999999809e+01 7.652800000000000047e+00 1.430000000000000071e+01 +2.252000000000000000e+03 5.089900000000000091e+01 1.474569999999999936e+01 1.880000000000000071e+01 +2.261000000000000000e+03 5.031230000000000047e+01 1.187599999999999945e+01 1.590000000000000036e+01 +2.290000000000000000e+03 4.780089999999999861e+01 1.101079999999999970e+01 
9.800000000000000711e+00 +2.303000000000000000e+03 5.431459999999999866e+01 9.538999999999999702e+00 1.630000000000000071e+01 +2.306000000000000000e+03 5.431940000000000168e+01 1.067319999999999958e+01 1.480000000000000071e+01 +2.315000000000000000e+03 5.176570000000000249e+01 1.316660000000000075e+01 1.930000000000000071e+01 +2.319000000000000000e+03 4.788230000000000075e+01 1.169609999999999950e+01 1.309999999999999964e+01 +2.323000000000000000e+03 5.185289999999999822e+01 9.495300000000000296e+00 1.960000000000000142e+01 +2.362000000000000000e+03 5.056510000000000105e+01 7.484300000000000175e+00 1.650000000000000000e+01 +2.385000000000000000e+03 4.969270000000000209e+01 7.326399999999999579e+00 1.630000000000000071e+01 +2.410000000000000000e+03 4.871119999999999806e+01 1.153619999999999912e+01 1.350000000000000000e+01 +2.429000000000000000e+03 5.398969999999999914e+01 9.569599999999999440e+00 1.689999999999999858e+01 +2.437000000000000000e+03 5.445700000000000074e+01 9.520300000000000651e+00 1.580000000000000071e+01 +2.444000000000000000e+03 5.092510000000000048e+01 1.158300000000000018e+01 2.019999999999999929e+01 +2.480000000000000000e+03 5.006430000000000291e+01 8.993000000000000327e+00 1.810000000000000142e+01 +2.483000000000000000e+03 5.118030000000000257e+01 8.489100000000000534e+00 1.380000000000000071e+01 +2.485000000000000000e+03 4.891700000000000159e+01 9.687099999999999156e+00 1.380000000000000071e+01 +2.486000000000000000e+03 4.942620000000000147e+01 7.755700000000000038e+00 1.669999999999999929e+01 +2.497000000000000000e+03 5.050139999999999674e+01 6.526399999999999757e+00 1.269999999999999929e+01 +2.559000000000000000e+03 4.772330000000000183e+01 1.033479999999999954e+01 1.240000000000000036e+01 +2.564000000000000000e+03 5.437760000000000105e+01 1.014240000000000030e+01 1.530000000000000071e+01 +2.575000000000000000e+03 4.918039999999999878e+01 9.980000000000000426e+00 1.469999999999999929e+01 +2.578000000000000000e+03 5.399949999999999761e+01 
1.143410000000000082e+01 1.639999999999999858e+01 +2.597000000000000000e+03 5.022399999999999665e+01 1.007920000000000016e+01 1.730000000000000071e+01 +2.600000000000000000e+03 4.973629999999999995e+01 1.017810000000000059e+01 1.769999999999999929e+01 +2.601000000000000000e+03 5.022180000000000177e+01 8.446899999999999409e+00 1.259999999999999964e+01 +2.618000000000000000e+03 5.084579999999999700e+01 1.048029999999999973e+01 1.409999999999999964e+01 +2.627000000000000000e+03 5.155539999999999878e+01 1.388449999999999918e+01 2.030000000000000071e+01 +2.629000000000000000e+03 5.176120000000000232e+01 6.095399999999999707e+00 1.580000000000000071e+01 +2.638000000000000000e+03 4.810540000000000305e+01 8.754799999999999471e+00 9.000000000000000000e+00 +2.641000000000000000e+03 5.151850000000000307e+01 1.290649999999999942e+01 2.000000000000000000e+01 +2.667000000000000000e+03 5.086460000000000292e+01 7.157499999999999751e+00 1.830000000000000071e+01 +2.680000000000000000e+03 5.028399999999999892e+01 1.044560000000000066e+01 1.800000000000000000e+01 +2.700000000000000000e+03 4.883019999999999783e+01 1.148719999999999963e+01 1.380000000000000071e+01 +2.704000000000000000e+03 5.175110000000000099e+01 1.200939999999999941e+01 2.069999999999999929e+01 +2.708000000000000000e+03 4.766519999999999868e+01 1.108050000000000068e+01 1.269999999999999929e+01 +2.712000000000000000e+03 4.769519999999999982e+01 9.130699999999999150e+00 1.450000000000000000e+01 +2.750000000000000000e+03 5.025229999999999819e+01 1.132089999999999996e+01 1.719999999999999929e+01 +2.773000000000000000e+03 4.942830000000000013e+01 1.190160000000000018e+01 1.269999999999999929e+01 +2.794000000000000000e+03 5.293630000000000280e+01 1.240930000000000000e+01 2.150000000000000000e+01 +2.796000000000000000e+03 5.391559999999999775e+01 1.227899999999999991e+01 1.880000000000000071e+01 +2.812000000000000000e+03 4.836469999999999914e+01 7.828000000000000291e+00 1.450000000000000000e+01 +2.814000000000000000e+03 
4.851209999999999667e+01 9.764499999999999957e+00 1.119999999999999929e+01 +2.856000000000000000e+03 5.191729999999999734e+01 1.308779999999999966e+01 1.900000000000000000e+01 +2.878000000000000000e+03 5.139090000000000202e+01 1.187860000000000049e+01 1.900000000000000000e+01 +2.886000000000000000e+03 4.821759999999999735e+01 9.909700000000000841e+00 1.200000000000000000e+01 +2.905000000000000000e+03 4.818489999999999895e+01 1.085069999999999979e+01 1.230000000000000071e+01 +2.907000000000000000e+03 5.479030000000000200e+01 8.951399999999999579e+00 1.340000000000000036e+01 +2.925000000000000000e+03 5.139330000000000354e+01 1.031230000000000047e+01 1.739999999999999858e+01 +2.928000000000000000e+03 5.131510000000000105e+01 1.244619999999999926e+01 2.110000000000000142e+01 +2.932000000000000000e+03 5.143480000000000274e+01 1.223959999999999937e+01 2.030000000000000071e+01 +2.947000000000000000e+03 5.113329999999999842e+01 8.034800000000000608e+00 1.540000000000000036e+01 +2.951000000000000000e+03 5.310070000000000334e+01 1.148639999999999972e+01 1.989999999999999858e+01 +2.953000000000000000e+03 4.785969999999999658e+01 8.230800000000000338e+00 9.599999999999999645e+00 +2.961000000000000000e+03 5.449960000000000093e+01 1.027369999999999983e+01 1.380000000000000071e+01 +2.968000000000000000e+03 5.098940000000000339e+01 6.977699999999999569e+00 1.730000000000000071e+01 +2.985000000000000000e+03 5.093829999999999814e+01 1.420930000000000071e+01 1.939999999999999858e+01 +3.015000000000000000e+03 5.220850000000000080e+01 1.411800000000000033e+01 2.010000000000000142e+01 +3.028000000000000000e+03 5.178540000000000276e+01 8.838800000000000878e+00 1.780000000000000071e+01 +3.031000000000000000e+03 5.163360000000000127e+01 8.394500000000000739e+00 1.900000000000000000e+01 +3.032000000000000000e+03 5.501100000000000279e+01 8.412499999999999645e+00 1.340000000000000036e+01 +3.034000000000000000e+03 5.045049999999999812e+01 1.163499999999999979e+01 1.639999999999999858e+01 
+3.042000000000000000e+03 5.056170000000000186e+01 8.238599999999999923e+00 1.930000000000000071e+01 +3.083000000000000000e+03 5.192669999999999675e+01 1.387969999999999970e+01 2.119999999999999929e+01 +3.086000000000000000e+03 5.380250000000000199e+01 1.069890000000000008e+01 1.789999999999999858e+01 +3.093000000000000000e+03 5.297240000000000038e+01 1.113739999999999952e+01 1.860000000000000142e+01 +3.098000000000000000e+03 5.124519999999999698e+01 7.642500000000000071e+00 1.530000000000000071e+01 +3.126000000000000000e+03 5.210289999999999822e+01 1.158270000000000088e+01 2.050000000000000000e+01 +3.137000000000000000e+03 4.996560000000000201e+01 8.213900000000000645e+00 1.689999999999999858e+01 +3.147000000000000000e+03 4.877250000000000085e+01 1.221790000000000020e+01 1.330000000000000071e+01 +3.155000000000000000e+03 5.010150000000000148e+01 6.800900000000000389e+00 1.610000000000000142e+01 +3.158000000000000000e+03 5.254679999999999751e+01 1.454519999999999946e+01 2.089999999999999858e+01 +3.164000000000000000e+03 5.084920000000000329e+01 8.774599999999999511e+00 1.960000000000000142e+01 +3.166000000000000000e+03 5.065100000000000335e+01 1.314690000000000047e+01 1.450000000000000000e+01 +3.167000000000000000e+03 5.066210000000000235e+01 7.960300000000000153e+00 1.559999999999999964e+01 +3.196000000000000000e+03 5.332229999999999848e+01 1.193190000000000062e+01 2.180000000000000071e+01 +3.204000000000000000e+03 5.073349999999999937e+01 1.088150000000000084e+01 1.660000000000000142e+01 +3.226000000000000000e+03 5.172590000000000288e+01 1.151089999999999947e+01 2.050000000000000000e+01 +3.231000000000000000e+03 5.056119999999999948e+01 1.037710000000000043e+01 1.540000000000000036e+01 +3.234000000000000000e+03 5.112939999999999685e+01 1.343280000000000030e+01 1.880000000000000071e+01 +3.244000000000000000e+03 4.798199999999999932e+01 1.013840000000000074e+01 1.200000000000000000e+01 +3.254000000000000000e+03 5.271560000000000201e+01 7.317599999999999660e+00 
1.610000000000000142e+01 +3.257000000000000000e+03 4.947729999999999961e+01 9.762199999999999989e+00 1.669999999999999929e+01 +3.268000000000000000e+03 4.816940000000000310e+01 8.943300000000000693e+00 9.500000000000000000e+00 +3.271000000000000000e+03 4.885479999999999734e+01 1.291890000000000072e+01 1.469999999999999929e+01 +3.278000000000000000e+03 4.853770000000000095e+01 9.273400000000000531e+00 1.240000000000000036e+01 +3.284000000000000000e+03 4.966910000000000025e+01 9.008499999999999730e+00 1.630000000000000071e+01 +3.287000000000000000e+03 4.971759999999999735e+01 9.099700000000000344e+00 1.490000000000000036e+01 +3.289000000000000000e+03 5.072809999999999775e+01 1.178379999999999939e+01 1.789999999999999858e+01 +3.307000000000000000e+03 4.747789999999999822e+01 1.126529999999999987e+01 1.250000000000000000e+01 +3.319000000000000000e+03 4.976440000000000197e+01 9.253000000000000114e+00 1.630000000000000071e+01 +3.340000000000000000e+03 5.043829999999999814e+01 7.806099999999999817e+00 1.789999999999999858e+01 +3.362000000000000000e+03 4.897209999999999752e+01 8.873400000000000176e+00 1.390000000000000036e+01 +3.366000000000000000e+03 4.827900000000000347e+01 1.250239999999999974e+01 1.400000000000000000e+01 +3.376000000000000000e+03 5.251760000000000161e+01 1.412320000000000064e+01 2.069999999999999929e+01 +3.379000000000000000e+03 4.816320000000000334e+01 1.154289999999999949e+01 1.390000000000000036e+01 +3.402000000000000000e+03 4.838510000000000133e+01 9.483700000000000685e+00 1.059999999999999964e+01 +3.426000000000000000e+03 5.156600000000000250e+01 1.470079999999999920e+01 1.989999999999999858e+01 +3.442000000000000000e+03 5.035739999999999839e+01 8.750600000000000378e+00 1.610000000000000142e+01 +3.484000000000000000e+03 4.870859999999999701e+01 1.121470000000000056e+01 1.580000000000000071e+01 +3.485000000000000000e+03 4.831150000000000233e+01 1.037729999999999997e+01 1.340000000000000036e+01 +3.490000000000000000e+03 5.053459999999999752e+01 
7.085300000000000153e+00 1.730000000000000071e+01 +3.509000000000000000e+03 5.310199999999999676e+01 1.304209999999999958e+01 2.069999999999999929e+01 +3.513000000000000000e+03 5.050019999999999953e+01 1.113439999999999941e+01 1.280000000000000071e+01 +3.527000000000000000e+03 5.089229999999999876e+01 9.404999999999999361e+00 1.590000000000000036e+01 +3.540000000000000000e+03 5.084459999999999980e+01 7.371999999999999886e+00 1.789999999999999858e+01 +3.545000000000000000e+03 4.934400000000000119e+01 7.229700000000000237e+00 1.839999999999999858e+01 +3.571000000000000000e+03 4.981739999999999924e+01 1.186379999999999946e+01 1.369999999999999929e+01 +3.591000000000000000e+03 5.067430000000000234e+01 6.424000000000000377e+00 1.240000000000000036e+01 +3.603000000000000000e+03 4.938949999999999818e+01 9.966699999999999449e+00 1.459999999999999964e+01 +3.612000000000000000e+03 5.267110000000000269e+01 9.222899999999999210e+00 1.919999999999999929e+01 +3.621000000000000000e+03 4.882529999999999859e+01 1.050670000000000037e+01 1.390000000000000036e+01 +3.623000000000000000e+03 5.082939999999999969e+01 6.660199999999999676e+00 1.459999999999999964e+01 +3.631000000000000000e+03 5.371229999999999905e+01 7.151900000000000368e+00 1.380000000000000071e+01 +3.639000000000000000e+03 5.376469999999999771e+01 8.658300000000000551e+00 1.480000000000000071e+01 +3.660000000000000000e+03 5.036019999999999897e+01 6.869699999999999918e+00 1.469999999999999929e+01 +3.667000000000000000e+03 4.942580000000000240e+01 1.125380000000000003e+01 1.340000000000000036e+01 +3.668000000000000000e+03 4.950300000000000011e+01 1.105489999999999995e+01 1.419999999999999929e+01 +3.679000000000000000e+03 4.761869999999999692e+01 1.216649999999999920e+01 1.569999999999999929e+01 +3.730000000000000000e+03 4.739840000000000231e+01 1.027590000000000003e+01 1.450000000000000000e+01 +3.734000000000000000e+03 4.912800000000000011e+01 9.352499999999999147e+00 1.700000000000000000e+01 +3.739000000000000000e+03 
4.945210000000000150e+01 1.243650000000000055e+01 1.180000000000000071e+01 +3.761000000000000000e+03 4.920700000000000074e+01 9.517599999999999838e+00 1.660000000000000142e+01 +3.811000000000000000e+03 5.129599999999999937e+01 1.309280000000000044e+01 1.950000000000000000e+01 +3.821000000000000000e+03 5.108729999999999905e+01 1.192919999999999980e+01 1.950000000000000000e+01 +3.836000000000000000e+03 5.045380000000000109e+01 1.022109999999999985e+01 1.700000000000000000e+01 +3.857000000000000000e+03 4.763620000000000232e+01 1.038920000000000066e+01 1.159999999999999964e+01 +3.875000000000000000e+03 4.915100000000000335e+01 1.168960000000000043e+01 1.230000000000000071e+01 +3.897000000000000000e+03 5.408930000000000149e+01 1.087729999999999997e+01 1.669999999999999929e+01 +3.904000000000000000e+03 4.953540000000000276e+01 6.378899999999999793e+00 1.880000000000000071e+01 +3.925000000000000000e+03 4.893289999999999651e+01 8.697300000000000253e+00 1.290000000000000036e+01 +3.927000000000000000e+03 4.793449999999999989e+01 9.286899999999999267e+00 1.130000000000000071e+01 +3.939000000000000000e+03 4.919120000000000203e+01 7.587900000000000311e+00 1.569999999999999929e+01 +3.946000000000000000e+03 5.048190000000000310e+01 1.213000000000000078e+01 1.719999999999999929e+01 +3.975000000000000000e+03 4.947769999999999868e+01 1.153570000000000029e+01 1.269999999999999929e+01 +3.987000000000000000e+03 5.238130000000000308e+01 1.306220000000000070e+01 1.989999999999999858e+01 +4.024000000000000000e+03 5.436430000000000007e+01 1.347710000000000008e+01 1.800000000000000000e+01 +4.032000000000000000e+03 5.179529999999999745e+01 1.113199999999999967e+01 1.950000000000000000e+01 +4.036000000000000000e+03 5.138949999999999818e+01 1.154119999999999990e+01 1.930000000000000071e+01 +4.039000000000000000e+03 5.373310000000000031e+01 9.877599999999999270e+00 1.719999999999999929e+01 +4.063000000000000000e+03 5.244610000000000127e+01 8.590600000000000236e+00 1.730000000000000071e+01 
+4.094000000000000000e+03 4.780619999999999692e+01 9.620599999999999596e+00 1.390000000000000036e+01 +4.104000000000000000e+03 4.904249999999999687e+01 1.210190000000000055e+01 1.430000000000000071e+01 +4.127000000000000000e+03 5.099060000000000059e+01 7.695800000000000196e+00 1.639999999999999858e+01 +4.160000000000000000e+03 4.874249999999999972e+01 8.923999999999999488e+00 1.130000000000000071e+01 +4.169000000000000000e+03 4.867029999999999745e+01 7.993900000000000006e+00 1.440000000000000036e+01 +4.175000000000000000e+03 4.755899999999999750e+01 7.772100000000000009e+00 1.430000000000000071e+01 +4.177000000000000000e+03 4.897259999999999991e+01 8.330099999999999838e+00 1.480000000000000071e+01 +4.189000000000000000e+03 4.814789999999999992e+01 9.459600000000000009e+00 1.209999999999999964e+01 +4.261000000000000000e+03 4.787530000000000285e+01 1.212800000000000011e+01 1.469999999999999929e+01 +4.271000000000000000e+03 5.418030000000000257e+01 1.208079999999999998e+01 1.669999999999999929e+01 +4.275000000000000000e+03 5.312879999999999825e+01 9.339800000000000324e+00 1.719999999999999929e+01 +4.280000000000000000e+03 4.921620000000000061e+01 1.110350000000000037e+01 1.380000000000000071e+01 +4.287000000000000000e+03 4.938479999999999848e+01 1.017319999999999958e+01 1.500000000000000000e+01 +4.300000000000000000e+03 4.818139999999999645e+01 8.635600000000000165e+00 1.140000000000000036e+01 +4.301000000000000000e+03 4.985020000000000095e+01 7.871000000000000441e+00 2.089999999999999858e+01 +4.323000000000000000e+03 4.964679999999999893e+01 7.883700000000000152e+00 1.509999999999999964e+01 +4.336000000000000000e+03 4.921280000000000143e+01 7.107700000000000351e+00 1.700000000000000000e+01 +4.349000000000000000e+03 4.895689999999999742e+01 9.070999999999999730e+00 1.369999999999999929e+01 +4.354000000000000000e+03 4.878320000000000078e+01 1.331460000000000043e+01 1.400000000000000000e+01 +4.371000000000000000e+03 5.210419999999999874e+01 8.752100000000000435e+00 
1.860000000000000142e+01 +4.377000000000000000e+03 5.035179999999999723e+01 1.000339999999999918e+01 1.550000000000000000e+01 +4.393000000000000000e+03 5.432789999999999964e+01 8.603099999999999525e+00 1.469999999999999929e+01 +4.411000000000000000e+03 4.991949999999999932e+01 8.967100000000000293e+00 1.789999999999999858e+01 +4.445000000000000000e+03 5.176579999999999870e+01 1.065329999999999977e+01 1.569999999999999929e+01 +4.464000000000000000e+03 5.056790000000000163e+01 1.180410000000000004e+01 1.580000000000000071e+01 +4.466000000000000000e+03 5.452750000000000341e+01 9.548700000000000188e+00 1.569999999999999929e+01 +4.480000000000000000e+03 5.034470000000000312e+01 9.553399999999999892e+00 1.810000000000000142e+01 +4.501000000000000000e+03 5.065460000000000207e+01 1.076929999999999943e+01 1.180000000000000071e+01 +4.508000000000000000e+03 5.029679999999999751e+01 6.419400000000000439e+00 1.300000000000000000e+01 +4.548000000000000000e+03 5.018469999999999942e+01 1.207910000000000039e+01 1.490000000000000036e+01 +4.559000000000000000e+03 4.916440000000000055e+01 1.261749999999999972e+01 1.359999999999999964e+01 +4.560000000000000000e+03 5.049249999999999972e+01 9.122600000000000264e+00 1.730000000000000071e+01 +4.592000000000000000e+03 4.932780000000000342e+01 1.208709999999999951e+01 1.330000000000000071e+01 +4.605000000000000000e+03 5.064410000000000167e+01 1.119359999999999999e+01 1.830000000000000071e+01 +4.625000000000000000e+03 5.364249999999999829e+01 1.138719999999999999e+01 1.860000000000000142e+01 +4.642000000000000000e+03 5.289110000000000156e+01 1.172969999999999935e+01 2.050000000000000000e+01 +4.651000000000000000e+03 5.190400000000000347e+01 1.018849999999999945e+01 1.839999999999999858e+01 +4.703000000000000000e+03 4.807189999999999941e+01 9.194300000000000139e+00 1.200000000000000000e+01 +4.706000000000000000e+03 4.827179999999999893e+01 1.302730000000000032e+01 1.400000000000000000e+01 +4.709000000000000000e+03 4.999960000000000093e+01 
7.598099999999999632e+00 1.540000000000000036e+01 +4.745000000000000000e+03 5.296039999999999992e+01 9.792999999999999261e+00 1.819999999999999929e+01 +4.763000000000000000e+03 5.106069999999999709e+01 9.926600000000000534e+00 1.789999999999999858e+01 +4.841000000000000000e+03 5.369460000000000122e+01 8.873499999999999943e+00 1.509999999999999964e+01 +4.857000000000000000e+03 5.355340000000000344e+01 9.609700000000000131e+00 1.750000000000000000e+01 +4.878000000000000000e+03 5.166460000000000008e+01 1.088109999999999999e+01 1.680000000000000071e+01 +4.887000000000000000e+03 4.866559999999999775e+01 9.864800000000000679e+00 1.140000000000000036e+01 +4.896000000000000000e+03 5.466539999999999822e+01 9.804999999999999716e+00 1.519999999999999929e+01 +4.911000000000000000e+03 4.882750000000000057e+01 1.255969999999999942e+01 1.350000000000000000e+01 +4.928000000000000000e+03 4.882809999999999917e+01 9.199999999999999289e+00 1.269999999999999929e+01 +4.931000000000000000e+03 4.868829999999999814e+01 9.223499999999999588e+00 1.230000000000000071e+01 +4.978000000000000000e+03 5.063900000000000290e+01 1.002280000000000015e+01 1.610000000000000142e+01 +4.997000000000000000e+03 5.097710000000000008e+01 1.234190000000000076e+01 1.989999999999999858e+01 +5.009000000000000000e+03 5.376100000000000279e+01 1.255739999999999945e+01 1.810000000000000142e+01 +5.014000000000000000e+03 5.327579999999999671e+01 8.985699999999999577e+00 1.650000000000000000e+01 +5.017000000000000000e+03 5.040019999999999811e+01 1.138889999999999958e+01 1.440000000000000036e+01 +5.029000000000000000e+03 4.947370000000000090e+01 7.038499999999999979e+00 1.639999999999999858e+01 +5.046000000000000000e+03 4.985759999999999792e+01 1.235420000000000051e+01 1.380000000000000071e+01 +5.064000000000000000e+03 5.128970000000000340e+01 6.443699999999999761e+00 1.680000000000000071e+01 +5.097000000000000000e+03 5.406609999999999872e+01 1.276750000000000007e+01 1.919999999999999929e+01 +5.099000000000000000e+03 
4.973259999999999792e+01 6.613100000000000200e+00 1.880000000000000071e+01 +5.100000000000000000e+03 4.974790000000000134e+01 6.658299999999999663e+00 1.860000000000000142e+01 +5.109000000000000000e+03 5.359969999999999857e+01 1.330390000000000050e+01 2.000000000000000000e+01 +5.111000000000000000e+03 4.803110000000000213e+01 1.253960000000000008e+01 1.359999999999999964e+01 +5.133000000000000000e+03 5.133440000000000225e+01 8.913199999999999790e+00 1.860000000000000142e+01 +5.142000000000000000e+03 5.374439999999999884e+01 1.406969999999999921e+01 1.950000000000000000e+01 +5.146000000000000000e+03 5.294140000000000157e+01 1.052890000000000015e+01 2.010000000000000142e+01 +5.149000000000000000e+03 4.957410000000000139e+01 1.019149999999999956e+01 1.630000000000000071e+01 +5.158000000000000000e+03 5.216009999999999991e+01 1.117590000000000039e+01 1.919999999999999929e+01 +5.229000000000000000e+03 4.804529999999999745e+01 8.460800000000000765e+00 1.059999999999999964e+01 +5.275000000000000000e+03 4.924450000000000216e+01 8.537399999999999878e+00 1.710000000000000142e+01 +5.279000000000000000e+03 5.161939999999999884e+01 9.574899999999999523e+00 1.500000000000000000e+01 +5.280000000000000000e+03 5.392240000000000322e+01 1.022669999999999924e+01 1.780000000000000071e+01 +5.300000000000000000e+03 5.025959999999999894e+01 8.360699999999999577e+00 1.660000000000000142e+01 +5.335000000000000000e+03 5.089629999999999654e+01 1.054840000000000089e+01 1.739999999999999858e+01 +5.347000000000000000e+03 5.150390000000000157e+01 9.111800000000000566e+00 1.789999999999999858e+01 +5.349000000000000000e+03 5.351959999999999695e+01 1.266539999999999999e+01 2.130000000000000071e+01 +5.371000000000000000e+03 5.049730000000000274e+01 9.942700000000000315e+00 1.159999999999999964e+01 +5.397000000000000000e+03 4.966629999999999967e+01 1.218449999999999989e+01 1.369999999999999929e+01 +5.404000000000000000e+03 4.840240000000000009e+01 1.169459999999999944e+01 1.350000000000000000e+01 
+5.424000000000000000e+03 5.101769999999999783e+01 1.135440000000000005e+01 1.980000000000000071e+01 +5.426000000000000000e+03 4.937579999999999814e+01 8.121299999999999741e+00 1.300000000000000000e+01 +5.433000000000000000e+03 4.955340000000000344e+01 6.812000000000000277e+00 1.780000000000000071e+01 +5.440000000000000000e+03 4.901149999999999807e+01 1.093079999999999963e+01 1.400000000000000000e+01 +5.480000000000000000e+03 5.157630000000000337e+01 7.887900000000000134e+00 1.800000000000000000e+01 +5.490000000000000000e+03 5.184539999999999793e+01 1.076859999999999928e+01 1.810000000000000142e+01 +5.516000000000000000e+03 5.452830000000000155e+01 1.106060000000000088e+01 1.500000000000000000e+01 +5.538000000000000000e+03 4.788269999999999982e+01 1.115760000000000041e+01 1.269999999999999929e+01 +5.541000000000000000e+03 5.013199999999999790e+01 8.317000000000000171e+00 1.689999999999999858e+01 +5.546000000000000000e+03 5.212069999999999936e+01 1.245850000000000080e+01 1.910000000000000142e+01 +5.562000000000000000e+03 4.865160000000000196e+01 8.680099999999999483e+00 1.109999999999999964e+01 +5.629000000000000000e+03 5.188920000000000243e+01 1.264450000000000074e+01 2.060000000000000142e+01 +5.640000000000000000e+03 5.355040000000000333e+01 7.667200000000000237e+00 1.409999999999999964e+01 +5.643000000000000000e+03 5.318639999999999901e+01 1.249489999999999945e+01 2.200000000000000000e+01 +5.664000000000000000e+03 4.829529999999999745e+01 8.239100000000000534e+00 1.340000000000000036e+01 +5.676000000000000000e+03 5.239620000000000033e+01 1.068919999999999959e+01 1.969999999999999929e+01 +5.688000000000000000e+03 4.770029999999999859e+01 8.105700000000000571e+00 1.059999999999999964e+01 +5.692000000000000000e+03 4.960510000000000019e+01 8.365899999999999892e+00 1.689999999999999858e+01 +5.705000000000000000e+03 4.977040000000000219e+01 9.957599999999999341e+00 1.719999999999999929e+01 +5.715000000000000000e+03 5.246050000000000324e+01 9.431100000000000705e+00 
2.000000000000000000e+01 +5.717000000000000000e+03 5.122560000000000002e+01 7.105199999999999960e+00 1.639999999999999858e+01 +5.731000000000000000e+03 4.767830000000000013e+01 8.380100000000000549e+00 1.390000000000000036e+01 +5.745000000000000000e+03 5.296640000000000015e+01 1.332680000000000042e+01 2.100000000000000000e+01 +5.750000000000000000e+03 5.103139999999999787e+01 1.214949999999999974e+01 2.019999999999999929e+01 +5.779000000000000000e+03 5.073140000000000072e+01 1.375159999999999982e+01 1.309999999999999964e+01 +5.792000000000000000e+03 4.742099999999999937e+01 1.098479999999999990e+01 2.799999999999999822e+00 +5.797000000000000000e+03 5.068789999999999907e+01 1.243290000000000006e+01 1.750000000000000000e+01 +5.800000000000000000e+03 4.902799999999999869e+01 1.323850000000000016e+01 1.250000000000000000e+01 +5.822000000000000000e+03 5.286299999999999955e+01 8.698800000000000310e+00 1.689999999999999858e+01 +5.825000000000000000e+03 5.261979999999999791e+01 1.278669999999999973e+01 2.089999999999999858e+01 +5.839000000000000000e+03 5.338810000000000144e+01 7.228699999999999903e+00 1.509999999999999964e+01 +5.856000000000000000e+03 4.854509999999999792e+01 1.335319999999999929e+01 1.319999999999999929e+01 +5.871000000000000000e+03 4.994619999999999749e+01 7.264499999999999957e+00 1.650000000000000000e+01 +5.906000000000000000e+03 4.950619999999999976e+01 8.558500000000000441e+00 1.630000000000000071e+01 +5.930000000000000000e+03 5.464099999999999824e+01 1.002379999999999960e+01 1.490000000000000036e+01 +5.941000000000000000e+03 4.767540000000000333e+01 1.246979999999999933e+01 1.440000000000000036e+01 +6.093000000000000000e+03 5.321390000000000242e+01 1.047039999999999971e+01 1.839999999999999858e+01 +6.105000000000000000e+03 5.431940000000000168e+01 9.805099999999999483e+00 1.569999999999999929e+01 +6.109000000000000000e+03 5.338369999999999749e+01 1.437279999999999980e+01 2.089999999999999858e+01 +6.129000000000000000e+03 5.105930000000000035e+01 
1.442660000000000053e+01 1.939999999999999858e+01 +6.157000000000000000e+03 5.364099999999999824e+01 8.080799999999999983e+00 1.430000000000000071e+01 +6.158000000000000000e+03 4.922469999999999857e+01 1.060839999999999961e+01 1.369999999999999929e+01 +6.159000000000000000e+03 5.295420000000000016e+01 7.319600000000000328e+00 1.519999999999999929e+01 +6.163000000000000000e+03 5.416539999999999822e+01 1.035190000000000055e+01 1.619999999999999929e+01 +6.170000000000000000e+03 5.201919999999999789e+01 1.472540000000000049e+01 1.960000000000000142e+01 +6.197000000000000000e+03 5.186639999999999873e+01 9.271000000000000796e+00 1.719999999999999929e+01 +6.199000000000000000e+03 5.424839999999999662e+01 1.304180000000000028e+01 1.930000000000000071e+01 +6.217000000000000000e+03 4.924060000000000059e+01 6.935100000000000264e+00 1.860000000000000142e+01 +6.258000000000000000e+03 4.768449999999999989e+01 9.440899999999999181e+00 1.530000000000000071e+01 +6.259000000000000000e+03 4.902100000000000080e+01 9.603300000000000836e+00 1.430000000000000071e+01 +6.260000000000000000e+03 4.933279999999999887e+01 9.704000000000000625e+00 1.559999999999999964e+01 +6.262000000000000000e+03 4.876950000000000074e+01 9.873699999999999477e+00 1.430000000000000071e+01 +6.263000000000000000e+03 4.777380000000000138e+01 8.821899999999999409e+00 1.359999999999999964e+01 +6.264000000000000000e+03 5.141400000000000148e+01 8.650000000000000355e+00 1.580000000000000071e+01 +6.265000000000000000e+03 5.236129999999999995e+01 1.238669999999999938e+01 2.219999999999999929e+01 +6.266000000000000000e+03 5.203040000000000020e+01 1.096260000000000012e+01 1.860000000000000142e+01 +6.272000000000000000e+03 5.084259999999999735e+01 1.025179999999999936e+01 1.630000000000000071e+01 +6.273000000000000000e+03 5.250750000000000028e+01 1.185510000000000019e+01 2.050000000000000000e+01 +6.275000000000000000e+03 4.867049999999999699e+01 9.462699999999999889e+00 1.369999999999999929e+01 +6.305000000000000000e+03 
5.120609999999999928e+01 1.049779999999999980e+01 1.930000000000000071e+01 +6.310000000000000000e+03 5.410490000000000066e+01 1.382390000000000008e+01 1.869999999999999929e+01 +6.312000000000000000e+03 4.953139999999999787e+01 1.064179999999999993e+01 1.480000000000000071e+01 +6.314000000000000000e+03 5.105069999999999908e+01 1.330030000000000001e+01 1.769999999999999929e+01 +6.336000000000000000e+03 5.001319999999999766e+01 9.653999999999999915e+00 1.590000000000000036e+01 +6.337000000000000000e+03 5.176630000000000109e+01 7.519400000000000084e+00 1.730000000000000071e+01 +6.344000000000000000e+03 5.039399999999999835e+01 8.142300000000000537e+00 1.950000000000000000e+01 +6.346000000000000000e+03 4.820700000000000074e+01 1.120350000000000001e+01 1.290000000000000036e+01 +6.347000000000000000e+03 5.005789999999999651e+01 1.029720000000000013e+01 1.710000000000000142e+01 +7.075000000000000000e+03 4.870199999999999818e+01 1.184929999999999950e+01 1.309999999999999964e+01 +7.099000000000000000e+03 5.201109999999999900e+01 1.039659999999999940e+01 1.660000000000000142e+01 +7.105000000000000000e+03 4.783500000000000085e+01 1.265479999999999983e+01 1.350000000000000000e+01 +7.106000000000000000e+03 5.207139999999999702e+01 8.456500000000000128e+00 1.789999999999999858e+01 +7.187000000000000000e+03 4.976359999999999673e+01 9.406299999999999883e+00 1.710000000000000142e+01 +7.298000000000000000e+03 5.452680000000000149e+01 9.042500000000000426e+00 1.509999999999999964e+01 +7.319000000000000000e+03 4.873740000000000094e+01 1.073930000000000007e+01 1.450000000000000000e+01 +7.321000000000000000e+03 5.115070000000000050e+01 1.133210000000000051e+01 1.939999999999999858e+01 +7.329000000000000000e+03 5.054670000000000130e+01 1.228630000000000067e+01 1.600000000000000000e+01 +7.330000000000000000e+03 5.146329999999999671e+01 7.977999999999999758e+00 1.700000000000000000e+01 +7.331000000000000000e+03 4.860990000000000322e+01 1.026740000000000030e+01 1.269999999999999929e+01 
+7.341000000000000000e+03 5.009000000000000341e+01 8.786199999999999122e+00 1.750000000000000000e+01 +7.343000000000000000e+03 5.061990000000000123e+01 1.348160000000000025e+01 1.390000000000000036e+01 +7.350000000000000000e+03 4.910880000000000223e+01 1.282310000000000016e+01 1.190000000000000036e+01 +7.351000000000000000e+03 5.331750000000000256e+01 1.341750000000000043e+01 2.110000000000000142e+01 +7.364000000000000000e+03 5.168200000000000216e+01 1.230419999999999980e+01 2.010000000000000142e+01 +7.367000000000000000e+03 5.196430000000000149e+01 9.807199999999999918e+00 1.889999999999999858e+01 +7.368000000000000000e+03 5.100070000000000192e+01 1.036209999999999987e+01 1.600000000000000000e+01 +7.369000000000000000e+03 4.916230000000000189e+01 1.036609999999999943e+01 1.319999999999999929e+01 +7.370000000000000000e+03 4.939099999999999824e+01 1.268379999999999974e+01 1.219999999999999929e+01 +7.373000000000000000e+03 5.359839999999999804e+01 6.702399999999999913e+00 1.430000000000000071e+01 +7.374000000000000000e+03 5.208129999999999882e+01 6.940900000000000070e+00 1.469999999999999929e+01 +7.389000000000000000e+03 5.274609999999999843e+01 1.384270000000000067e+01 2.139999999999999858e+01 +7.393000000000000000e+03 5.144930000000000092e+01 1.425329999999999941e+01 1.989999999999999858e+01 +7.394000000000000000e+03 5.003150000000000119e+01 1.197450000000000081e+01 1.359999999999999964e+01 +7.395000000000000000e+03 4.865950000000000131e+01 1.253880000000000017e+01 1.380000000000000071e+01 +7.396000000000000000e+03 5.050840000000000174e+01 9.224700000000000344e+00 1.350000000000000000e+01 +7.403000000000000000e+03 4.779549999999999699e+01 1.003240000000000087e+01 1.230000000000000071e+01 +7.410000000000000000e+03 5.075130000000000052e+01 9.022399999999999309e+00 1.650000000000000000e+01 +7.412000000000000000e+03 5.000829999999999842e+01 9.423799999999999955e+00 1.509999999999999964e+01 +7.419000000000000000e+03 5.066100000000000136e+01 1.207559999999999967e+01 
1.810000000000000142e+01 +7.420000000000000000e+03 5.110439999999999827e+01 1.171119999999999983e+01 1.960000000000000142e+01 +7.424000000000000000e+03 4.777239999999999753e+01 1.290729999999999933e+01 1.639999999999999858e+01 +7.427000000000000000e+03 5.401879999999999882e+01 9.925499999999999545e+00 1.660000000000000142e+01 +7.428000000000000000e+03 5.041669999999999874e+01 1.081559999999999988e+01 1.700000000000000000e+01 +7.431000000000000000e+03 4.801299999999999812e+01 1.155240000000000045e+01 1.290000000000000036e+01 +7.432000000000000000e+03 5.264229999999999876e+01 1.066269999999999918e+01 1.919999999999999929e+01 +1.367000000000000000e+04 5.150880000000000081e+01 6.701800000000000423e+00 1.730000000000000071e+01 +1.367400000000000000e+04 4.929429999999999978e+01 8.905300000000000438e+00 1.550000000000000000e+01 +1.367500000000000000e+04 5.208180000000000121e+01 9.407700000000000173e+00 2.039999999999999858e+01 +1.369600000000000000e+04 5.159660000000000224e+01 7.404799999999999827e+00 1.710000000000000142e+01 +1.370000000000000000e+04 5.133290000000000219e+01 7.341099999999999959e+00 1.580000000000000071e+01 +1.371000000000000000e+04 4.857339999999999947e+01 1.225760000000000005e+01 1.269999999999999929e+01 +1.371100000000000000e+04 5.068200000000000216e+01 1.151500000000000057e+01 1.839999999999999858e+01 +1.371300000000000000e+04 5.108990000000000009e+01 7.628899999999999793e+00 1.650000000000000000e+01 +1.377700000000000000e+04 5.224669999999999703e+01 1.095919999999999916e+01 1.980000000000000071e+01 +1.396500000000000000e+04 4.826389999999999958e+01 8.813399999999999679e+00 1.040000000000000036e+01 +1.500000000000000000e+04 5.079829999999999757e+01 6.024399999999999977e+00 1.290000000000000036e+01 +1.520700000000000000e+04 5.128349999999999653e+01 9.358999999999999986e+00 1.710000000000000142e+01 +1.544400000000000000e+04 4.844180000000000064e+01 9.921599999999999753e+00 1.169999999999999929e+01 +1.555500000000000000e+04 4.787610000000000099e+01 
1.058489999999999931e+01 1.080000000000000071e+01 diff --git a/examples/09_spatio_temporal/01_precip_1d.py b/examples/09_spatio_temporal/01_precip_1d.py new file mode 100644 index 000000000..7bf585590 --- /dev/null +++ b/examples/09_spatio_temporal/01_precip_1d.py @@ -0,0 +1,128 @@ +""" +Creating a 1D Synthetic Precipitation Field +------------------------------------------- + +In this example we will create a time series of a 1D synthetic precipitation +field. + +We'll start off by creating a Gaussian random field with an exponential +variogram, which seems to reproduce the spatial correlations of precipitation +fields quite well. We'll create a daily timeseries over a one dimensional cross +section of 50km. This workflow is suited for sub daily precipitation time +series. +""" + +import copy +import numpy as np +import matplotlib.pyplot as plt +import gstools as gs + +# fix the seed for reproducibility +seed = 20170521 +# spatial axis of 50km with a resolution of 1km +x = np.arange(0, 50, 1.0) +# half daily timesteps over three months +t = np.arange(0.0, 90.0, 0.5) + +# total spatio-temporal dimension +st_dim = 1 + 1 +# space-time anisotropy ratio given in units d / km +st_anis = 0.4 + +# an exponential variogram with a corr. lengths of 2d and 5km +model = gs.Exponential(dim=st_dim, var=1.0, len_scale=5.0, anis=st_anis) +# create a spatial random field instance +srf = gs.SRF(model, seed=seed) + +pos, time = [x], [t] + +# a Gaussian random field which is also saved internally for the transformations +srf.structured(pos + time) +P_gau = copy.deepcopy(srf.field) + +############################################################################### +# Next, we could take care of the dry periods. Therefore we would simply +# introduce a lower threshold value. But we will combine this step with the +# next one. Anyway, for demonstration purposes, we will also do it with the +# threshold value now. 
+ +threshold = 0.85 +P_cut = copy.deepcopy(srf.field) +P_cut[P_cut <= threshold] = 0.0 + +############################################################################### +# With the above lines of code we have created a cut off Gaussian spatial +# random field with an exponential variogram. But precipitation fields are not +# distributed Gaussian. Thus, we will now transform the field with an inverse +# box-cox transformation (create a non-Gaussian field) , which is often used to +# account for the skewness of precipitation fields. Different values have been +# suggested for the transformation parameter lambda, but we will stick to 1/2. +# As already mentioned, we will perform the cutoff for the dry periods with +# this transformation implicitly with the shift. The warning will tell you +# that values have indeed been cut off and it can be ignored. We call the +# resulting field Gaussian anamorphosis. + +# the lower this value, the more will be cut off, a value of 0.2 cuts off +# nearly everything in this example. +cutoff = 0.55 +gs.transform.boxcox(srf, lmbda=0.5, shift=-1.0 / cutoff) + +############################################################################### +# As a last step, the amount of precipitation is set. This should of course be +# calibrated towards observations (the same goes for the threshold, the +# variance, correlation length, and so on). + +amount = 2.0 +srf.field *= amount +P_ana = srf.field + +############################################################################### +# Finally we can have a look at the fields resulting from each step. Note, that +# the cutoff of the cut Gaussian only approximates the cutoff values from the +# box-cox transformation. For a closer look, we will examine a cross section +# at an arbitrary location. And afterwards we will create a contour plot for +# visual candy. 
+ +fig, axs = plt.subplots(2, 2, sharex=True, sharey=True) + +axs[0, 0].set_title("Gaussian") +axs[0, 0].plot(t, P_gau[20, :]) +axs[0, 0].set_ylabel(r"$P$ / mm") + +axs[0, 1].set_title("Cut Gaussian") +axs[0, 1].plot(t, P_cut[20, :]) + +axs[1, 0].set_title("Cut Gaussian Anamorphosis") +axs[1, 0].plot(t, P_ana[20, :]) +axs[1, 0].set_xlabel(r"$t$ / d") +axs[1, 0].set_ylabel(r"$P$ / mm") + +axs[1, 1].set_title("Different Cross Section") +axs[1, 1].plot(t, P_ana[10, :]) +axs[1, 1].set_xlabel(r"$t$ / d") + +plt.tight_layout() + +fig, axs = plt.subplots(2, 2, sharex=True, sharey=True) + +axs[0, 0].set_title("Gaussian") +cont = axs[0, 0].contourf(t, x, P_gau, cmap="PuBu", levels=10) +cbar = fig.colorbar(cont, ax=axs[0, 0]) +cbar.ax.set_ylabel(r"$P$ / mm") +axs[0, 0].set_ylabel(r"$x$ / km") + +axs[0, 1].set_title("Cut Gaussian") +cont = axs[0, 1].contourf(t, x, P_cut, cmap="PuBu", levels=10) +cbar = fig.colorbar(cont, ax=axs[0, 1]) +cbar.ax.set_ylabel(r"$P$ / mm") +axs[0, 1].set_xlabel(r"$t$ / d") + +axs[1, 0].set_title("Cut Gaussian Anamorphosis") +cont = axs[1, 0].contourf(t, x, P_ana, cmap="PuBu", levels=10) +cbar = fig.colorbar(cont, ax=axs[1, 0]) +cbar.ax.set_ylabel(r"$P$ / mm") +axs[1, 0].set_xlabel(r"$t$ / d") +axs[1, 0].set_ylabel(r"$x$ / km") + +fig.delaxes(axs[1, 1]) +plt.tight_layout() diff --git a/examples/09_spatio_temporal/02_precip_2d.py b/examples/09_spatio_temporal/02_precip_2d.py new file mode 100644 index 000000000..a026d5504 --- /dev/null +++ b/examples/09_spatio_temporal/02_precip_2d.py @@ -0,0 +1,75 @@ +""" +Creating a 2D Synthetic Precipitation Field +------------------------------------------- + +In this example we'll create a time series of a 2D synthetic precipitation +field. + +Very similar to the previous tutorial, we'll start off by creating a Gaussian +random field with an exponential variogram, which seems to reproduce the +spatial correlations of precipitation fields quite well. 
We'll create a daily +timeseries over a two dimensional domain of 50km x 40km. This workflow is +suited for sub daily precipitation time series. +""" + +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.animation as animation +import gstools as gs + +# fix the seed for reproducibility +seed = 20170521 +# 1st spatial axis of 50km with a resolution of 1km +x = np.arange(0, 50, 1.0) +# 2nd spatial axis of 40km with a resolution of 1km +y = np.arange(0, 40, 1.0) +# half daily timesteps over three months +t = np.arange(0.0, 90.0, 0.5) + +# total spatio-temporal dimension +st_dim = 2 + 1 +# space-time anisotropy ratio given in units d / km +st_anis = 0.4 + +# an exponential variogram with a corr. lengths of 5km, 5km, and 2d +model = gs.Exponential(dim=st_dim, var=1.0, len_scale=5.0, anis=st_anis) +# create a spatial random field instance +srf = gs.SRF(model, seed=seed) + +pos, time = [x, y], [t] + +# the Gaussian random field +srf.structured(pos + time) + +# account for the skewness and the dry periods +cutoff = 0.55 +gs.transform.boxcox(srf, lmbda=0.5, shift=-1.0 / cutoff) + +# adjust the amount of precipitation +amount = 4.0 +srf.field *= amount + +############################################################################### +# plot the 2d precipitation field over time as an animation. 
+ + +def _update_ani(time_step): + im.set_array(srf.field[:, :, time_step].T) + return (im,) + + +fig, ax = plt.subplots() +im = ax.imshow( + srf.field[:, :, 0].T, + cmap="Blues", + interpolation="bicubic", + origin="lower", +) +cbar = fig.colorbar(im) +cbar.ax.set_ylabel(r"Precipitation $P$ / mm") +ax.set_xlabel(r"$x$ / km") +ax.set_ylabel(r"$y$ / km") + +ani = animation.FuncAnimation( + fig, _update_ani, len(t), interval=100, blit=True +) diff --git a/examples/09_spatio_temporal/README.rst b/examples/09_spatio_temporal/README.rst new file mode 100644 index 000000000..07aa5faaf --- /dev/null +++ b/examples/09_spatio_temporal/README.rst @@ -0,0 +1,46 @@ +Spatio-Temporal Modeling +======================== + +Spatio-Temporal modelling can provide insights into time dependent processes +like rainfall, air temperature or crop yield. + +GSTools provides the metric spatio-temporal model for all covariance models +by enhancing the spatial model dimension with a time dimension to result in +the spatio-temporal dimension ``st_dim`` and setting a +spatio-temporal anisotropy ratio with ``st_anis``: + +.. code-block:: python + + import gstools as gs + dim = 3 # spatial dimension + st_dim = dim + 1 + st_anis = 0.4 + st_model = gs.Exponential(dim=st_dim, anis=st_anis) + +Since it is given in the name "spatio-temporal", +we will always treat the time as last dimension. +This enables us to have spatial anisotropy and rotation defined as in +non-temporal models, without altering the behavior in the time dimension: + +.. code-block:: python + + anis = [0.4, 0.2] # spatial anisotropy in 3D + angles = [0.5, 0.4, 0.3] # spatial rotation in 3D + st_model = gs.Exponential(dim=st_dim, anis=anis+[st_anis], angles=angles) + +In order to generate spatio-temporal position tuples, GSTools provides a +convenient function :any:`generate_st_grid`. The output can be used for +spatio-temporal random field generation (or kriging resp. conditioned fields): + +.. 
code-block:: python + + pos = dim * [1, 2, 3] # 3 points in space (1,1,1), (2,2,2) and (3,3,3) + time = range(10) # 10 time steps + st_grid = gs.generate_st_grid(pos, time) + st_rf = gs.SRF(st_model) + st_field = st_rf(st_grid).reshape(-1, len(time)) + +Then we can access the different time-steps by the last array index. + +Examples +-------- diff --git a/examples/10_normalizer/00_lognormal_kriging.py b/examples/10_normalizer/00_lognormal_kriging.py new file mode 100644 index 000000000..2ce268f00 --- /dev/null +++ b/examples/10_normalizer/00_lognormal_kriging.py @@ -0,0 +1,53 @@ +r""" +Log-Normal Kriging +------------------ + +Log Normal kriging is a term to describe a special workflow for kriging to +deal with log-normal data, like conductivity or transmissivity in hydrogeology. + +It simply means to first convert the input data to a normal distribution, i.e. +applying a logarithic function, then interpolating these values with kriging +and transforming the result back with the exponential function. + +The resulting kriging variance describes the error variance of the log-values +of the target variable. + +In this example we will use ordinary kriging. +""" +import numpy as np +import gstools as gs + +# condtions +cond_pos = [0.3, 1.9, 1.1, 3.3, 4.7] +cond_val = [0.47, 0.56, 0.74, 1.47, 1.74] +# resulting grid +gridx = np.linspace(0.0, 15.0, 151) +# stable covariance model +model = gs.Stable(dim=1, var=0.5, len_scale=2.56, alpha=1.9) + +############################################################################### +# In order to result in log-normal kriging, we will use the :any:`LogNormal` +# Normalizer. This is a parameter-less normalizer, so we don't have to fit it. +normalizer = gs.normalizer.LogNormal + +############################################################################### +# Now we generate the interpolated field as well as the mean field. +# This can be done by setting `only_mean=True` in :any:`Krige.__call__`. 
+# The result is then stored as `mean_field`. +# +# In terms of log-normal kriging, this mean represents the geometric mean of +# the field. +krige = gs.krige.Ordinary(model, cond_pos, cond_val, normalizer=normalizer) +# interpolate the field +krige(gridx) +# also generate the mean field +krige(gridx, only_mean=True) + +############################################################################### +# And that's it. Let's have a look at the results. +ax = krige.plot() +# plotting the geometric mean +krige.plot("mean_field", ax=ax) +# plotting the conditioning data +ax.scatter(cond_pos, cond_val, color="k", zorder=10, label="Conditions") +ax.legend() diff --git a/examples/10_normalizer/01_auto_fit.py b/examples/10_normalizer/01_auto_fit.py new file mode 100644 index 000000000..f8c2fede8 --- /dev/null +++ b/examples/10_normalizer/01_auto_fit.py @@ -0,0 +1,105 @@ +""" +Automatic fitting +----------------- + +In order to demonstrate how to automatically fit normalizer and variograms, +we generate synthetic log-normal data, that should be interpolated with +ordinary kriging. + +Normalizers are fitted by minimizing the likelihood function and variograms +are fitted by estimating the empirical variogram with automatic binning and +fitting the theoretical model to it. Thereby the sill is constrained to match +the field variance. + +Artificial data +^^^^^^^^^^^^^^^ + +Here we generate log-normal data following a Gaussian covariance model. +We will generate the "original" field on a 60x60 mesh, from which we will take +samples in order to pretend a situation of data-scarcity. 
+""" +import numpy as np +import gstools as gs +import matplotlib.pyplot as plt + +# structured field with edge length of 50 +x = y = range(51) +pos = gs.generate_grid([x, y]) +model = gs.Gaussian(dim=2, var=1, len_scale=10) +srf = gs.SRF(model, seed=20170519, normalizer=gs.normalizer.LogNormal()) +# generate the original field +srf(pos) + +############################################################################### +# Here, we sample 60 points and set the conditioning points and values. + +ids = np.arange(srf.field.size) +samples = np.random.RandomState(20210201).choice(ids, size=60, replace=False) + +# sample conditioning points from generated field +cond_pos = pos[:, samples] +cond_val = srf.field[samples] + +############################################################################### +# Fitting and Interpolation +# ^^^^^^^^^^^^^^^^^^^^^^^^^ +# +# Now we want to interpolate the "measured" samples +# and we want to normalize the given data with the BoxCox transformation. +# +# Here we set up the kriging routine and use a :any:`Stable` model, that should +# be fitted automatically to the given data +# and we pass the :any:`BoxCox` normalizer in order to gain normality. +# +# The normalizer will be fitted automatically to the data, +# by setting ``fit_normalizer=True``. +# +# The covariance/variogram model will be fitted by an automatic workflow +# by setting ``fit_variogram=True``. + +krige = gs.krige.Ordinary( + model=gs.Stable(dim=2), + cond_pos=cond_pos, + cond_val=cond_val, + normalizer=gs.normalizer.BoxCox(), + fit_normalizer=True, + fit_variogram=True, +) + +############################################################################### +# First, let's have a look at the fitting results: + +print(krige.model) +print(krige.normalizer) + +############################################################################### +# As we see, it went quite well. Variance is a bit underestimated, but +# length scale and nugget are good. 
The shape parameter of the stable model +# is correctly estimated to be close to `2`, +# so we result in a Gaussian like model. +# +# The BoxCox parameter `lmbda` was estimated to be almost 0, which means, +# the log-normal distribution was correctly fitted. +# +# Now let's run the kriging interpolation. + +krige(pos) + +############################################################################### +# Plotting +# ^^^^^^^^ +# +# Finally let's compare the original, sampled and interpolated fields. +# As we'll see, there is a lot of information in the covariance structure +# of the measurement samples and the field is reconstructed quite accurately. + +fig, ax = plt.subplots(1, 3, figsize=[8, 3]) +ax[0].imshow(srf.field.reshape(len(x), len(y)).T, origin="lower") +ax[1].scatter(*cond_pos, c=cond_val) +ax[2].imshow(krige.field.reshape(len(x), len(y)).T, origin="lower") +# titles +ax[0].set_title("original field") +ax[1].set_title("sampled field") +ax[2].set_title("interpolated field") +# set aspect ratio to equal in all plots +[ax[i].set_aspect("equal") for i in range(3)] diff --git a/examples/10_normalizer/02_compare.py b/examples/10_normalizer/02_compare.py new file mode 100644 index 000000000..fe0bff510 --- /dev/null +++ b/examples/10_normalizer/02_compare.py @@ -0,0 +1,65 @@ +""" +Normalizer Comparison +--------------------- + +Let's compare the transformation behavior of the provided normalizers. + +But first, we define a convenience routine and make some imports as always. +""" +import numpy as np +import gstools as gs +import matplotlib.pyplot as plt + + +def dashes(i=1, max_n=12, width=1): + """Return line dashes.""" + return i * [width, width] + [max_n * 2 * width - 2 * i * width, width] + + +############################################################################### +# We select 4 normalizers depending on a single parameter lambda and +# plot their transformation behavior within the interval [-5, 5]. 
+# +# For the shape parameter lambda, we create a list of 8 values ranging from +# -1 to 2.5. + +lmbdas = [i * 0.5 for i in range(-2, 6)] +normalizers = [ + gs.normalizer.BoxCox, + gs.normalizer.YeoJohnson, + gs.normalizer.Modulus, + gs.normalizer.Manly, +] + +############################################################################### +# Let's plot them! + +fig, ax = plt.subplots(2, 2, figsize=[8, 8]) +for i, norm in enumerate(normalizers): + # correctly setting the data range + x_rng = norm().normalize_range + x = np.linspace(max(-5, x_rng[0] + 0.01), min(5, x_rng[1] - 0.01)) + for j, lmbda in enumerate(lmbdas): + ax.flat[i].plot( + x, + norm(lmbda=lmbda).normalize(x), + label=r"$\lambda=" + str(lmbda) + "$", + color="k", + alpha=0.2 + j * 0.1, + dashes=dashes(j), + ) + # axis formatting + ax.flat[i].grid(which="both", color="grey", linestyle="-", alpha=0.2) + ax.flat[i].set_ylim((-5, 5)) + ax.flat[i].set_xlim((-5, 5)) + ax.flat[i].set_title(norm().name) +# figure formatting +handles, labels = ax.flat[-1].get_legend_handles_labels() +fig.legend(handles, labels, loc="lower center", ncol=4, handlelength=3.0) +fig.suptitle("Normalizer Comparison", fontsize=20) +fig.show() + +############################################################################### +# The missing :any:`LogNormal` transformation is covered by the :any:`BoxCox` +# transformation for lambda=0. The :any:`BoxCoxShift` transformation is +# simply the :any:`BoxCox` transformation shifted on the X-axis. diff --git a/examples/10_normalizer/README.rst b/examples/10_normalizer/README.rst new file mode 100644 index 000000000..930756be9 --- /dev/null +++ b/examples/10_normalizer/README.rst @@ -0,0 +1,55 @@ +Normalizing Data +================ + +When dealing with real-world data, one can't assume it to be normal distributed. 
+In fact, many properties are modeled by applying different transformations, +for example conductivity is often assumed to be log-normal or precipitation +is transformed using the famous box-cox power transformation. + +These "normalizers" are often represented as parameteric power transforms and +one is interested in finding the best parameter to gain normality in the input +data. + +This is of special interest when kriging should be applied, since the target +variable of the kriging interpolation is assumed to be normal distributed. + +GSTools provides a set of Normalizers and routines to automatically fit these +to input data by minimizing the likelihood function. + +Mean, Trend and Normalizers +--------------------------- + +All Field classes (:any:`SRF`, :any:`Krige` or :any:`CondSRF`) provide the input +of `mean`, `normalizer` and `trend`: + +* A `trend` can be a callable function, that represents a trend in input data. + For example a linear decrease of temperature with height. + +* The `normalizer` will be applied after the data was detrended, i.e. the trend + was substracted from the data, in order to gain normality. + +* The `mean` is now interpreted as the mean of the normalized data. The user + could also provide a callable mean, but it is mostly meant to be constant. + +When no normalizer is given, `trend` and `mean` basically behave the same. +We just decided that a trend is associated with raw data and a mean is used +in the context of normally distributed data. + +Provided Normalizers +-------------------- + +The following normalizers can be passed to all Field-classes and variogram +estimation routines or can be used as standalone tools to analyse data. + +.. currentmodule:: gstools.normalizer + +.. 
autosummary:: + LogNormal + BoxCox + BoxCoxShift + YeoJohnson + Modulus + Manly + +Examples +-------- diff --git a/gstools/__init__.py b/gstools/__init__.py index 1bafdfe4e..e2871cfcd 100644 --- a/gstools/__init__.py +++ b/gstools/__init__.py @@ -21,24 +21,35 @@ random tools transform + normalizer Classes ======= +Kriging +^^^^^^^ +Swiss-Army-Knife for Kriging. For short cut classes see: :any:`gstools.krige` + +.. currentmodule:: gstools.krige + +.. autosummary:: + Krige + Spatial Random Field ^^^^^^^^^^^^^^^^^^^^ -Class for random field generation +Classes for (conditioned) random field generation .. currentmodule:: gstools.field .. autosummary:: SRF + CondSRF Covariance Base-Class ^^^^^^^^^^^^^^^^^^^^^ Class to construct user defined covariance models -.. currentmodule:: gstools.covmodel.base +.. currentmodule:: gstools.covmodel .. autosummary:: CovModel @@ -49,27 +60,28 @@ Standard Covariance Models ~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. currentmodule:: gstools.covmodel.models - .. autosummary:: Gaussian Exponential Matern - Rational Stable + Rational + Cubic Linear Circular Spherical - Intersection + HyperSpherical + SuperSpherical + JBessel Truncated Power Law Covariance Models ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. currentmodule:: gstools.covmodel.tpl_models .. autosummary:: TPLGaussian TPLExponential TPLStable + TPLSimple Functions ========= @@ -82,26 +94,55 @@ .. autosummary:: vtk_export - vtk_export_structured - vtk_export_unstructured to_vtk - to_vtk_structured - to_vtk_unstructured -variogram estimation +Geometric +^^^^^^^^^ +Some convenient functions for geometric operations + +.. autosummary:: + rotated_main_axes + generate_grid + generate_st_grid + +Variogram Estimation ^^^^^^^^^^^^^^^^^^^^ -Estimate the variogram of a given field +Estimate the variogram of a given field with these routines .. currentmodule:: gstools.variogram .. 
autosummary:: - vario_estimate_structured - vario_estimate_unstructured -""" + vario_estimate + vario_estimate_axis + standard_bins + +Misc +==== -from gstools import field, variogram, random, covmodel, tools, krige, transform -from gstools.field import SRF +.. currentmodule:: gstools.tools + +.. autosummary:: + EARTH_RADIUS + +""" +# Hooray! +from gstools import ( + field, + variogram, + random, + covmodel, + tools, + krige, + transform, + normalizer, +) +from gstools.krige import Krige +from gstools.field import SRF, CondSRF from gstools.tools import ( + rotated_main_axes, + generate_grid, + generate_st_grid, + EARTH_RADIUS, vtk_export, vtk_export_structured, vtk_export_unstructured, @@ -110,54 +151,77 @@ to_vtk_unstructured, ) from gstools.variogram import ( + vario_estimate, + vario_estimate_axis, vario_estimate_structured, vario_estimate_unstructured, + standard_bins, ) from gstools.covmodel import ( CovModel, Gaussian, Exponential, Matern, - Rational, Stable, + Rational, + Cubic, Linear, Circular, Spherical, - Intersection, + HyperSpherical, + SuperSpherical, + JBessel, TPLGaussian, TPLExponential, TPLStable, + TPLSimple, ) try: from gstools._version import __version__ -except ImportError: # pragma: nocover +except ModuleNotFoundError: # pragma: nocover # package is not installed __version__ = "0.0.0.dev0" __all__ = ["__version__"] __all__ += ["covmodel", "field", "variogram", "krige", "random", "tools"] -__all__ += ["transform"] +__all__ += ["transform", "normalizer"] __all__ += [ "CovModel", "Gaussian", "Exponential", "Matern", - "Rational", "Stable", + "Rational", + "Cubic", "Linear", "Circular", "Spherical", - "Intersection", + "HyperSpherical", + "SuperSpherical", + "JBessel", "TPLGaussian", "TPLExponential", "TPLStable", + "TPLSimple", ] -__all__ += ["vario_estimate_structured", "vario_estimate_unstructured"] +__all__ += [ + "vario_estimate", + "vario_estimate_axis", + "vario_estimate_structured", + "vario_estimate_unstructured", + "standard_bins", +] 
__all__ += [ + "Krige", "SRF", + "CondSRF", + "rotated_main_axes", + "generate_grid", + "generate_st_grid", + "EARTH_RADIUS", "vtk_export", "vtk_export_structured", "vtk_export_unstructured", diff --git a/gstools/covmodel/__init__.py b/gstools/covmodel/__init__.py index f638f2cf7..d672e769a 100644 --- a/gstools/covmodel/__init__.py +++ b/gstools/covmodel/__init__.py @@ -29,12 +29,15 @@ Gaussian Exponential Matern - Rational Stable + Rational + Cubic Linear Circular Spherical - Intersection + HyperSpherical + SuperSpherical + JBessel Truncated Power Law Covariance Models @@ -44,6 +47,7 @@ TPLGaussian TPLExponential TPLStable + TPLSimple """ from gstools.covmodel.base import CovModel @@ -51,27 +55,39 @@ Gaussian, Exponential, Matern, - Rational, Stable, + Rational, + Cubic, Linear, Circular, Spherical, - Intersection, + HyperSpherical, + SuperSpherical, + JBessel, +) +from gstools.covmodel.tpl_models import ( + TPLGaussian, + TPLExponential, + TPLStable, + TPLSimple, ) -from gstools.covmodel.tpl_models import TPLGaussian, TPLExponential, TPLStable __all__ = [ "CovModel", "Gaussian", "Exponential", "Matern", - "Rational", "Stable", + "Rational", + "Cubic", "Linear", "Circular", "Spherical", - "Intersection", + "HyperSpherical", + "SuperSpherical", + "JBessel", "TPLGaussian", "TPLExponential", "TPLStable", + "TPLSimple", ] diff --git a/gstools/covmodel/base.py b/gstools/covmodel/base.py index cbac3892b..b36a45c89 100644 --- a/gstools/covmodel/base.py +++ b/gstools/covmodel/base.py @@ -9,24 +9,35 @@ .. 
autosummary:: CovModel """ -# pylint: disable=C0103, R0201 - -import warnings +# pylint: disable=C0103, R0201, E1101, C0302, W0613 import copy import numpy as np from scipy.integrate import quad as integral -from scipy.optimize import curve_fit, root from hankel import SymmetricFourierTransform as SFT -from gstools.field.tools import make_isotropic, unrotate_mesh -from gstools.tools.geometric import pos2xyz +from gstools.tools.geometric import ( + set_angles, + matrix_anisometrize, + matrix_isometrize, + rotated_main_axes, + latlon2pos, + pos2latlon, +) from gstools.covmodel.tools import ( - InitSubclassMeta, - rad_fac, + _init_subclass, + set_opt_args, set_len_anis, - set_angles, check_bounds, + spectral_rad_pdf, + percentile_scale, + set_arg_bounds, + check_arg_bounds, + set_dim, + compare, + model_repr, + default_arg_from_bounds, ) from gstools.covmodel import plot +from gstools.covmodel.fit import fit_variogram __all__ = ["CovModel"] @@ -34,14 +45,7 @@ HANKEL_DEFAULT = {"a": -1, "b": 1, "N": 200, "h": 0.001, "alt": True} -class AttributeWarning(UserWarning): - pass - - -# The CovModel Base-Class ##################################################### - - -class CovModel(metaclass=InitSubclassMeta): +class CovModel: r"""Base class for the GSTools covariance models. Parameters @@ -80,7 +84,24 @@ class CovModel(metaclass=InitSubclassMeta): integral_scale : :class:`float` or :class:`list` or :any:`None`, optional If given, ``len_scale`` will be ignored and recalculated, so that the integral scale of the model matches the given one. - Default: ``None`` + Default: :any:`None` + rescale : :class:`float` or :any:`None`, optional + Optional rescaling factor to divide the length scale with. + This could be used for unit convertion or rescaling the length scale + to coincide with e.g. the integral scale. + Will be set by each model individually. 
+ Default: :any:`None` + latlon : :class:`bool`, optional + Whether the model is describing 2D fields on earths surface described + by latitude and longitude. When using this, the model will internally + use the associated 'Yadrenko' model to represent a valid model. + This means, the spatial distance :math:`r` will be replaced by + :math:`2\sin(\alpha/2)`, where :math:`\alpha` is the great-circle + distance, which is equal to the spatial distance of two points in 3D. + As a consequence, `dim` will be set to `3` and anisotropy will be + disabled. `rescale` can be set to e.g. earth's radius, + to have a meaningful `len_scale` parameter. + Default: False var_raw : :class:`float` or :any:`None`, optional raw variance of the model which will be multiplied with :any:`CovModel.var_factor` to result in the actual variance. @@ -93,18 +114,9 @@ class CovModel(metaclass=InitSubclassMeta): used for the spectrum calculation. Use with caution (Better: Don't!). ``None`` is equivalent to ``{"a": -1, "b": 1, "N": 1000, "h": 0.001}``. Default: :any:`None` - - Examples - -------- - >>> from gstools import CovModel - >>> import numpy as np - >>> class Gau(CovModel): - ... def cor(self, h): - ... return np.exp(-h**2) - ... - >>> model = Gau() - >>> model.spectrum(2) - 0.00825830126008459 + **opt_arg + Optional arguments are covered by these keyword arguments. + If present, they are described in the section `Other Parameters`. 
""" def __init__( @@ -116,9 +128,11 @@ def __init__( anis=1.0, angles=0.0, integral_scale=None, + rescale=None, + latlon=False, var_raw=None, hankel_kw=None, - **opt_arg + **opt_arg, ): # assert, that we use a subclass # this is the case, if __init_subclass__ is called, which creates @@ -126,63 +140,52 @@ def __init__( if not hasattr(self, "variogram"): raise TypeError("Don't instantiate 'CovModel' directly!") - # optional arguments for the variogram-model - # look up the defaults for the optional arguments (defined by the user) - default = self.default_opt_arg() - # add the default vaules if not specified - for def_arg in default: - if def_arg not in opt_arg: - opt_arg[def_arg] = default[def_arg] - # save names of the optional arguments - self._opt_arg = list(opt_arg.keys()) - # add the optional arguments as attributes to the class - for opt_name in opt_arg: - if opt_name in dir(self): # "dir" also respects properties - raise ValueError( - "parameter '" - + opt_name - + "' has a 'bad' name, since it is already present in " - + "the class. It could not be added to the model" - ) - if opt_name not in self.default_opt_arg().keys(): - warnings.warn( - "The given optional argument '{}' ".format(opt_name) - + "is unknown or has at least no defined standard value. " - + "Or you made a Typo... 
hehe.", - AttributeWarning, - ) - # Magic happens here - setattr(self, opt_name, opt_arg[opt_name]) - - # set standard boundaries for variance, len_scale, nugget and opt_arg - self._var_bounds = None - self._len_scale_bounds = None - self._nugget_bounds = None - self._opt_arg_bounds = {} - bounds = self.default_arg_bounds() - bounds.update(self.default_opt_arg_bounds()) - self.set_arg_bounds(**bounds) - # prepare dim setting self._dim = None + self._hankel_kw = None + self._sft = None + # prepare parameters (they are checked in dim setting) + self._rescale = None self._len_scale = None self._anis = None self._angles = None + # prepare parameters boundaries + self._var_bounds = None + self._len_scale_bounds = None + self._nugget_bounds = None + self._anis_bounds = None + self._opt_arg_bounds = {} + # Set latlon first + self._latlon = bool(latlon) # SFT class will be created within dim.setter but needs hankel_kw - self._hankel_kw = None - self._sft = None self.hankel_kw = hankel_kw self.dim = dim + + # optional arguments for the variogram-model + set_opt_args(self, opt_arg) + + # set standard boundaries for variance, len_scale, nugget and opt_arg + bounds = self.default_arg_bounds() + bounds.update(self.default_opt_arg_bounds()) + self.set_arg_bounds(check_args=False, **bounds) + # set parameters - self._nugget = nugget - self._angles = set_angles(self.dim, angles) - self._len_scale, self._anis = set_len_anis(self.dim, len_scale, anis) + self.rescale = rescale + self._nugget = float(nugget) + # set anisotropy and len_scale, disable anisotropy for latlon models + self._len_scale, anis = set_len_anis(self.dim, len_scale, anis) + if self.latlon: + self._anis = np.array((self.dim - 1) * [1], dtype=np.double) + self._angles = np.array(self.dim * [0], dtype=np.double) + else: + self._anis = anis + self._angles = set_angles(self.dim, angles) # set var at last, because of the var_factor (to be right initialized) if var_raw is None: self._var = None self.var = var else: - 
self._var = var_raw + self._var = float(var_raw) self._integral_scale = None self.integral_scale = integral_scale # set var again, if int_scale affects var_factor @@ -190,147 +193,64 @@ def __init__( self._var = None self.var = var else: - self._var = var_raw + self._var = float(var_raw) # final check for parameter bounds self.check_arg_bounds() # additional checks for the optional arguments (provided by user) self.check_opt_arg() + # precision for printing + self._prec = 3 - ########################################################################### - ### one of these functions needs to be overridden ######################### - ########################################################################### - + # one of these functions needs to be overridden def __init_subclass__(cls): - r"""Initialize gstools covariance model. - - Warnings - -------- - Don't instantiate ``CovModel`` directly. You need to inherit a - child class which overrides one of the following methods: - - * ``model.variogram(r)`` - :math:`\gamma\left(r\right)= - \sigma^2\cdot\left(1-\rho\left(r\right)\right)+n` - * ``model.covariance(r)`` - :math:`C\left(r\right)= - \sigma^2\cdot\rho\left(r\right)` - * ``model.correlation(r)`` - :math:`\rho\left(r\right)` - - Best practice is to use the ``correlation`` function, or the ``cor`` - function. The latter one takes the dimensionles distance h=r/l. - """ - # override one of these ############################################### - - def variogram(self, r): - r"""Isotropic variogram of the model. - - Given by: :math:`\gamma\left(r\right)= - \sigma^2\cdot\left(1-\rho\left(r\right)\right)+n` - - Where :math:`\rho(r)` is the correlation function. - """ - return self.var - self.covariance(r) + self.nugget - - def covariance(self, r): - r"""Covariance of the model. + """Initialize gstools covariance model.""" + _init_subclass(cls) - Given by: :math:`C\left(r\right)= - \sigma^2\cdot\rho\left(r\right)` - - Where :math:`\rho(r)` is the correlation function. 
- """ - return self.var * self.correlation(r) - - def correlation(self, r): - r"""Correlation function (or normalized covariance) of the model. - - Given by: :math:`\rho\left(r\right)` - - It has to be a monotonic decreasing function with - :math:`\rho(0)=1` and :math:`\rho(\infty)=0`. - """ - return 1.0 - (self.variogram(r) - self.nugget) / self.var - - def correlation_from_cor(self, r): - r"""Correlation function (or normalized covariance) of the model. - - Given by: :math:`\rho\left(r\right)` - - It has to be a monotonic decreasing function with - :math:`\rho(0)=1` and :math:`\rho(\infty)=0`. - """ - r = np.array(np.abs(r), dtype=np.double) - return self.cor(r / self.len_scale) - - def cor_from_correlation(self, h): - r"""Normalziled correlation function taking a normalized range. - - Given by: :math:`\mathrm{cor}\left(r/\ell\right) = \rho(r)` - """ - h = np.array(np.abs(h), dtype=np.double) - return self.correlation(h * self.len_scale) - - ####################################################################### - - abstract = True - if hasattr(cls, "cor"): - cls.correlation = correlation_from_cor - abstract = False - else: - cls.cor = cor_from_correlation - if not hasattr(cls, "variogram"): - cls.variogram = variogram - else: - abstract = False - if not hasattr(cls, "covariance"): - cls.covariance = covariance - else: - abstract = False - if not hasattr(cls, "correlation"): - cls.correlation = correlation - else: - abstract = False - if abstract: - raise TypeError( - "Can't instantiate class '" - + cls.__name__ - + "', " - + "without overriding at least on of the methods " - + "'variogram', 'covariance' or 'correlation'." 
- ) - - # modify the docstrings ############################################### - - # class docstring gets attributes added + # modify the docstrings: class docstring gets attributes added if cls.__doc__ is None: - cls.__doc__ = ( - "User defined GSTools Covariance-Model " - + CovModel.__doc__[44:-296] - ) - else: - cls.__doc__ += CovModel.__doc__[44:-296] + cls.__doc__ = "User defined GSTools Covariance-Model." + cls.__doc__ += CovModel.__doc__[45:] # overridden functions get standard doc if no new doc was created - ignore = ["__", "variogram", "covariance", "correlation"] - for attr in cls.__dict__: - if any( - [attr.startswith(ign) for ign in ignore] - ) or attr not in dir(CovModel): + ign = ["__", "variogram", "covariance", "cor"] + for att in cls.__dict__: + if any(att.startswith(i) for i in ign) or att not in dir(CovModel): continue - attr_doc = getattr(CovModel, attr).__doc__ - attr_cls = cls.__dict__[attr] + attr_doc = getattr(CovModel, att).__doc__ + attr_cls = cls.__dict__[att] if attr_cls.__doc__ is None: attr_cls.__doc__ = attr_doc - ### special variogram functions ########################################### + # special variogram functions - def _get_iso_rad(self, pos): - x, y, z = pos2xyz(pos, max_dim=self.dim) - if self.do_rotation: - x, y, z = unrotate_mesh(self.dim, self.angles, x, y, z) - if not self.is_isotropic: - y, z = make_isotropic(self.dim, self.anis, y, z) - return np.linalg.norm((x, y, z)[: self.dim], axis=0) + def vario_axis(self, r, axis=0): + r"""Variogram along axis of anisotropy.""" + if axis == 0: + return self.variogram(r) + return self.variogram(np.abs(r) / self.anis[axis - 1]) + + def cov_axis(self, r, axis=0): + r"""Covariance along axis of anisotropy.""" + if axis == 0: + return self.covariance(r) + return self.covariance(np.abs(r) / self.anis[axis - 1]) + + def cor_axis(self, r, axis=0): + r"""Correlation along axis of anisotropy.""" + if axis == 0: + return self.correlation(r) + return self.correlation(np.abs(r) / 
self.anis[axis - 1]) + + def vario_yadrenko(self, zeta): + r"""Yadrenko variogram for great-circle distance from latlon-pos.""" + return self.variogram(2 * np.sin(zeta / 2)) + + def cov_yadrenko(self, zeta): + r"""Yadrenko covariance for great-circle distance from latlon-pos.""" + return self.covariance(2 * np.sin(zeta / 2)) + + def cor_yadrenko(self, zeta): + r"""Yadrenko correlation for great-circle distance from latlon-pos.""" + return self.correlation(2 * np.sin(zeta / 2)) def vario_spatial(self, pos): r"""Spatial variogram respecting anisotropy and rotation.""" @@ -344,34 +264,22 @@ def cor_spatial(self, pos): r"""Spatial correlation respecting anisotropy and rotation.""" return self.correlation(self._get_iso_rad(pos)) - def cov_nugget(self, r): - r"""Covariance of the model respecting the nugget at r=0. - - Given by: :math:`C\left(r\right)= - \sigma^2\cdot\rho\left(r\right)` - - Where :math:`\rho(r)` is the correlation function. - """ + def vario_nugget(self, r): + """Isotropic variogram of the model respecting the nugget at r=0.""" r = np.array(np.abs(r), dtype=np.double) r_gz = np.logical_not(np.isclose(r, 0)) res = np.empty_like(r, dtype=np.double) - res[r_gz] = self.covariance(r[r_gz]) - res[np.logical_not(r_gz)] = self.sill + res[r_gz] = self.variogram(r[r_gz]) + res[np.logical_not(r_gz)] = 0.0 return res - def vario_nugget(self, r): - r"""Isotropic variogram of the model respecting the nugget at r=0. - - Given by: :math:`\gamma\left(r\right)= - \sigma^2\cdot\left(1-\rho\left(r\right)\right)+n` - - Where :math:`\rho(r)` is the correlation function. 
- """ + def cov_nugget(self, r): + """Isotropic covariance of the model respecting the nugget at r=0.""" r = np.array(np.abs(r), dtype=np.double) r_gz = np.logical_not(np.isclose(r, 0)) res = np.empty_like(r, dtype=np.double) - res[r_gz] = self.variogram(r[r_gz]) - res[np.logical_not(r_gz)] = 0.0 + res[r_gz] = self.covariance(r[r_gz]) + res[np.logical_not(r_gz)] = self.sill return res def plot(self, func="variogram", **kwargs): # pragma: no cover @@ -389,6 +297,12 @@ def plot(self, func="variogram", **kwargs): # pragma: no cover * "vario_spatial" * "cov_spatial" * "cor_spatial" + * "vario_yadrenko" + * "cov_yadrenko" + * "cor_yadrenko" + * "vario_axis" + * "cov_axis" + * "cor_axis" * "spectrum" * "spectral_density" * "spectral_rad_pdf" @@ -404,68 +318,60 @@ def plot(self, func="variogram", **kwargs): # pragma: no cover routine = getattr(plot, "plot_" + func) return routine(self, **kwargs) - ########################################################################### - ### pykrige functions ##################################################### - ########################################################################### + # pykrige functions def pykrige_vario(self, args=None, r=0): - r"""Isotropic variogram of the model for pykrige. - - Given by: :math:`\gamma\left(r\right)= - \sigma^2\cdot\left(1-\rho\left(r\right)\right)+n` - - Where :math:`\rho(r)` is the correlation function. 
- """ - return self.variogram(r) + """Isotropic variogram of the model for pykrige.""" + return self.variogram(r) # pragma: no cover @property def pykrige_anis(self): """2D anisotropy ratio for pykrige.""" if self.dim == 2: return 1 / self.anis[0] - return 1.0 + return 1.0 # pragma: no cover @property def pykrige_anis_y(self): """3D anisotropy ratio in y direction for pykrige.""" if self.dim >= 2: return 1 / self.anis[0] - return 1.0 + return 1.0 # pragma: no cover @property def pykrige_anis_z(self): """3D anisotropy ratio in z direction for pykrige.""" if self.dim == 3: return 1 / self.anis[1] - return 1.0 + return 1.0 # pragma: no cover @property def pykrige_angle(self): """2D rotation angle for pykrige.""" if self.dim == 2: return self.angles[0] / np.pi * 180 - return 0.0 + return 0.0 # pragma: no cover @property def pykrige_angle_z(self): """3D rotation angle around z for pykrige.""" if self.dim >= 2: return self.angles[0] / np.pi * 180 - return 0.0 + return 0.0 # pragma: no cover @property def pykrige_angle_y(self): """3D rotation angle around y for pykrige.""" if self.dim == 3: return self.angles[1] / np.pi * 180 - return 0.0 + return 0.0 # pragma: no cover @property def pykrige_angle_x(self): """3D rotation angle around x for pykrige.""" if self.dim == 3: return self.angles[2] / np.pi * 180 - return 0.0 + return 0.0 # pragma: no cover @property def pykrige_kwargs(self): @@ -493,16 +399,17 @@ def pykrige_kwargs(self): kwargs.update(add_kwargs) return kwargs - ########################################################################### - ### methods for optional arguments (can be overridden) #################### - ########################################################################### + # methods for optional/default arguments (can be overridden) def default_opt_arg(self): """Provide default optional arguments by the user. - Should be given as a dictionary. + Should be given as a dictionary when overridden. 
""" - return {} + return { + opt: default_arg_from_bounds(bnd) + for (opt, bnd) in self.default_opt_arg_bounds().items() + } def default_opt_arg_bounds(self): """Provide default boundaries for optional arguments.""" @@ -522,7 +429,10 @@ def check_opt_arg(self): * Any return value will be ignored * This method will only be run once, when the class is initialized """ - pass + + def check_dim(self, dim): + """Check the given dimension.""" + return True def fix_dim(self): """Set a fix dimension for the model.""" @@ -532,7 +442,11 @@ def var_factor(self): """Factor for the variance.""" return 1.0 - ### calculation of different scales ####################################### + def default_rescale(self): + """Provide default rescaling factor.""" + return 1.0 + + # calculation of different scales def calc_integral_scale(self): """Calculate the integral scale of the isotrope model.""" @@ -545,22 +459,9 @@ def percentile_scale(self, per=0.9): This is the distance, where the given percentile of the variance is reached by the variogram """ - # check the given percentile - if not 0.0 < per < 1.0: - raise ValueError( - "percentile needs to be within (0, 1), got: " + str(per) - ) - - # define a curve, that has its root at the wanted point - def curve(x): - return 1.0 - self.correlation(x) - per + return percentile_scale(self, per) - # take 'per * len_scale' as initial guess - return root(curve, per * self.len_scale)["x"][0] - - ########################################################################### - ### spectrum methods (can be overridden for speedup) ###################### - ########################################################################### + # spectrum methods (can be overridden for speedup) def spectrum(self, k): r""" @@ -568,8 +469,8 @@ def spectrum(self, k): This is given by: - .. math:: S(k) = \left(\frac{1}{2\pi}\right)^n - \int C(r) e^{i b\mathbf{k}\cdot\mathbf{r}} d^n\mathbf{r} + .. 
math:: S(\mathbf{k}) = \left(\frac{1}{2\pi}\right)^n + \int C(r) e^{i \mathbf{k}\cdot\mathbf{r}} d^n\mathbf{r} Internally, this is calculated by the hankel transformation: @@ -606,21 +507,7 @@ def spectral_density(self, k): def spectral_rad_pdf(self, r): """Radial spectral density of the model.""" - r = np.array(np.abs(r), dtype=np.double) - if self.dim > 1: - r_gz = np.logical_not(np.isclose(r, 0)) - # to prevent numerical errors, we just calculate where r>0 - res = np.zeros_like(r, dtype=np.double) - res[r_gz] = rad_fac(self.dim, r[r_gz]) * self.spectral_density( - r[r_gz] - ) - else: - res = rad_fac(self.dim, r) * self.spectral_density(r) - # prevent numerical errors in hankel for small r values (set 0) - res[np.logical_not(np.isfinite(res))] = 0.0 - # prevent numerical errors in hankel for big r (set non-negative) - res = np.maximum(res, 0.0) - return res + return spectral_rad_pdf(self, r) def ln_spectral_rad_pdf(self, r): """Log radial spectral density of the model.""" @@ -635,25 +522,157 @@ def _has_ppf(self): """State if a ppf is defined with 'spectral_rad_ppf'.""" return hasattr(self, "spectral_rad_ppf") - ### fitting routine ####################################################### + # spatial routines + + def isometrize(self, pos): + """Make a position tuple ready for isotropic operations.""" + pos = np.array(pos, dtype=np.double).reshape((self.field_dim, -1)) + if self.latlon: + return latlon2pos(pos) + return np.dot(matrix_isometrize(self.dim, self.angles, self.anis), pos) + + def anisometrize(self, pos): + """Bring a position tuple into the anisotropic coordinate-system.""" + pos = np.array(pos, dtype=np.double).reshape((self.dim, -1)) + if self.latlon: + return pos2latlon(pos) + return np.dot( + matrix_anisometrize(self.dim, self.angles, self.anis), pos + ) + + def main_axes(self): + """Axes of the rotated coordinate-system.""" + return rotated_main_axes(self.dim, self.angles) + + def _get_iso_rad(self, pos): + """Isometrized radians.""" + return 
np.linalg.norm(self.isometrize(pos), axis=0) + + # fitting routine - def fit_variogram(self, x_data, y_data, maxfev=1000, **para_deselect): + def fit_variogram( + self, + x_data, + y_data, + anis=True, + sill=None, + init_guess="default", + weights=None, + method="trf", + loss="soft_l1", + max_eval=None, + return_r2=False, + curve_fit_kwargs=None, + **para_select, + ): """ - Fiting the isotropic variogram-model to given data. + Fitting the variogram-model to an empirical variogram. Parameters ---------- x_data : :class:`numpy.ndarray` - The radii of the meassured variogram. + The bin-centers of the empirical variogram. y_data : :class:`numpy.ndarray` The messured variogram - maxfev : int, optional - The maximum number of calls to the function in scipy curvefit. - Default: 1000 - **para_deselect - You can deselect the parameters to be fitted, by setting - them "False" as keywords. By default, all parameters are - fitted. + If multiple are given, they are interpreted as the directional + variograms along the main axis of the associated rotated + coordinate system. + Anisotropy ratios will be estimated in that case. + anis : :class:`bool`, optional + In case of a directional variogram, you can control anisotropy + by this argument. Deselect the parameter from fitting, by setting + it "False". + You could also pass a fixed value to be set in the model. + Then the anisotropy ratios won't be altered during fitting. + Default: True + sill : :class:`float` or :class:`bool`, optional + Here you can provide a fixed sill for the variogram. + It needs to be in a fitting range for the var and nugget bounds. + If variance or nugget are not selected for estimation, + the nugget will be recalculated to fulfill: + + * sill = var + nugget + * if the variance is bigger than the sill, + nugget will be set to its lower bound + and the variance will be set to the fitting partial sill. + + If variance is deselected, it needs to be less than the sill, + otherwise a ValueError comes up.
Same for nugget. + If sill=False, it will be deselected from estimation + and set to the current sill of the model. + Then, the procedure above is applied. + Default: None + init_guess : :class:`str` or :class:`dict`, optional + Initial guess for the estimation. Either: + + * "default": using the default values of the covariance model + ("len_scale" will be mean of given bin centers; + "var" and "nugget" will be mean of given variogram values + (if in given bounds)) + * "current": using the current values of the covariance model + * dict: dictionary with parameter names and given value + (separate "default" can be set to "default" or "current" for + unspecified values to get same behavior as given above + ("default" by default)) + Example: ``{"len_scale": 10, "default": "current"}`` + + Default: "default" + weights : :class:`str`, :class:`numpy.ndarray`, :class:`callable`, optional + Weights applied to each point in the estimation. Either: + + * 'inv': inverse distance ``1 / (x_data + 1)`` + * list: weights given per bin + * callable: function applied to x_data + + If callable, it must take a 1-d ndarray. + Then ``weights = f(x_data)``. + Default: None + method : {'trf', 'dogbox'}, optional + Algorithm to perform minimization. + + * 'trf' : Trust Region Reflective algorithm, + particularly suitable for large sparse problems with bounds. + Generally robust method. + * 'dogbox' : dogleg algorithm with rectangular trust regions, + typical use case is small problems with bounds. + Not recommended for problems with rank-deficient Jacobian. + + Default: 'trf' + loss : :class:`str` or :class:`callable`, optional + Determines the loss function in scipy's curve_fit. + The following keyword values are allowed: + + * 'linear' (default) : ``rho(z) = z``. Gives a standard + least-squares problem. + * 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth + approximation of l1 (absolute value) loss. Usually a good + choice for robust least squares.
+ * 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works + similarly to 'soft_l1'. + * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers + influence, but may cause difficulties in optimization process. + * 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on + a single residual, has properties similar to 'cauchy'. + + If callable, it must take a 1-d ndarray ``z=f**2`` and return an + array_like with shape (3, m) where row 0 contains function values, + row 1 contains first derivatives and row 2 contains second + derivatives. Default: 'soft_l1' + max_eval : :class:`int` or :any:`None`, optional + Maximum number of function evaluations before the termination. + If None (default), the value is chosen automatically: 100 * n. + return_r2 : :class:`bool`, optional + Whether to return the r2 score of the estimation. + Default: False + curve_fit_kwargs : :class:`dict`, optional + Other keyword arguments passed to scipy's curve_fit. Default: None + **para_select + You can deselect parameters from fitting, by setting + them "False" using their names as keywords. + You could also pass fixed values for each parameter. + Then these values will be applied and the involved parameters won't + be fitted. + By default, all parameters are fitted. Returns ------- @@ -661,7 +680,11 @@ def fit_variogram(self, x_data, y_data, maxfev=1000, **para_deselect): Dictonary with the fitted parameter values pcov : :class:`numpy.ndarray` The estimated covariance of `popt` from - :any:`scipy.optimize.curve_fit` + :any:`scipy.optimize.curve_fit`. + To compute one standard deviation errors + on the parameters use ``perr = np.sqrt(np.diag(pcov))``. + r2_score : :class:`float`, optional + r2 score of the curve fitting results. Only if return_r2 is True. Notes ----- @@ -670,95 +693,23 @@ def fit_variogram(self, x_data, y_data, maxfev=1000, **para_deselect): The fitted parameters will be instantly set in the model.
""" - # select all parameters to be fitted - para = {"var": True, "len_scale": True, "nugget": True} - for opt in self.opt_arg: - para[opt] = True - # deselect unwanted parameters - para.update(para_deselect) - - # we need arg1, otherwise curve_fit throws an error (bug?!) - def curve(x, arg1, *args): - """Adapted Variogram function.""" - args = (arg1,) + args - para_skip = 0 - opt_skip = 0 - if para["var"]: - var_tmp = args[para_skip] - para_skip += 1 - if para["len_scale"]: - self.len_scale = args[para_skip] - para_skip += 1 - if para["nugget"]: - self.nugget = args[para_skip] - para_skip += 1 - for opt in self.opt_arg: - if para[opt]: - setattr(self, opt, args[para_skip + opt_skip]) - opt_skip += 1 - # set var at last because of var_factor (other parameter needed) - if para["var"]: - self.var = var_tmp - return self.variogram(x) - - # set the lower/upper boundaries for the variogram-parameters - low_bounds = [] - top_bounds = [] - if para["var"]: - low_bounds.append(self.var_bounds[0]) - top_bounds.append(self.var_bounds[1]) - if para["len_scale"]: - low_bounds.append(self.len_scale_bounds[0]) - top_bounds.append(self.len_scale_bounds[1]) - if para["nugget"]: - low_bounds.append(self.nugget_bounds[0]) - top_bounds.append(self.nugget_bounds[1]) - for opt in self.opt_arg: - if para[opt]: - low_bounds.append(self.opt_arg_bounds[opt][0]) - top_bounds.append(self.opt_arg_bounds[opt][1]) - # fit the variogram - popt, pcov = curve_fit( - curve, - x_data, - y_data, - bounds=(low_bounds, top_bounds), - maxfev=maxfev, + return fit_variogram( + model=self, + x_data=x_data, + y_data=y_data, + anis=anis, + sill=sill, + init_guess=init_guess, + weights=weights, + method=method, + loss=loss, + max_eval=max_eval, + return_r2=return_r2, + curve_fit_kwargs=curve_fit_kwargs, + **para_select, ) - fit_para = {} - para_skip = 0 - opt_skip = 0 - if para["var"]: - var_tmp = popt[para_skip] - fit_para["var"] = popt[para_skip] - para_skip += 1 - else: - fit_para["var"] = self.var - if 
para["len_scale"]: - self.len_scale = popt[para_skip] - fit_para["len_scale"] = popt[para_skip] - para_skip += 1 - else: - fit_para["len_scale"] = self.len_scale - if para["nugget"]: - self.nugget = popt[para_skip] - fit_para["nugget"] = popt[para_skip] - para_skip += 1 - else: - fit_para["nugget"] = self.nugget - for opt in self.opt_arg: - if para[opt]: - setattr(self, opt, popt[para_skip + opt_skip]) - fit_para[opt] = popt[para_skip + opt_skip] - opt_skip += 1 - else: - fit_para[opt] = getattr(self, opt) - # set var at last because of var_factor (other parameter needed) - if para["var"]: - self.var = var_tmp - return fit_para, pcov - - ### bounds setting and checks ############################################# + + # bounds setting and checks def default_arg_bounds(self): """Provide default boundaries for arguments. @@ -769,87 +720,32 @@ def default_arg_bounds(self): "var": (0.0, np.inf, "oo"), "len_scale": (0.0, np.inf, "oo"), "nugget": (0.0, np.inf, "co"), + "anis": (0.0, np.inf, "oo"), } return res - def set_arg_bounds(self, **kwargs): + def set_arg_bounds(self, check_args=True, **kwargs): r"""Set bounds for the parameters of the model. Parameters ---------- + check_args : bool, optional + Whether to check if the arguments are in their valid bounds. + In case not, a proper default value will be determined. + Default: True **kwargs Parameter name as keyword ("var", "len_scale", "nugget", ) - and a list of 2 or 3 values as value: - - * ``[a, b]`` or - * ``[a, b, ]`` - + and a list of 2 or 3 values: ``[a, b]`` or ``[a, b, ]`` where is one of ``"oo"``, ``"cc"``, ``"oc"`` or ``"co"`` to define if the bounds are open ("o") or closed ("c").
""" - for opt in kwargs: - if opt in self.opt_arg: - if not check_bounds(kwargs[opt]): - raise ValueError( - "Given bounds for '" - + opt - + "' are not valid, got: " - + str(kwargs[opt]) - ) - self._opt_arg_bounds[opt] = kwargs[opt] - if opt == "var": - self.var_bounds = kwargs[opt] - if opt == "len_scale": - self.len_scale_bounds = kwargs[opt] - if opt == "nugget": - self.nugget_bounds = kwargs[opt] + return set_arg_bounds(self, check_args, **kwargs) def check_arg_bounds(self): - """Check arguments to be within the given bounds.""" - # check var, len_scale, nugget and optional-arguments - for arg in self.arg_bounds: - bnd = list(self.arg_bounds[arg]) - val = getattr(self, arg) - if len(bnd) == 2: - bnd.append("cc") # use closed intervals by default - if bnd[2][0] == "c": - if val < bnd[0]: - raise ValueError( - str(arg) - + " needs to be >= " - + str(bnd[0]) - + ", got: " - + str(val) - ) - else: - if val <= bnd[0]: - raise ValueError( - str(arg) - + " needs to be > " - + str(bnd[0]) - + ", got: " - + str(val) - ) - if bnd[2][1] == "c": - if val > bnd[1]: - raise ValueError( - str(arg) - + " needs to be <= " - + str(bnd[1]) - + ", got: " - + str(val) - ) - else: - if val >= bnd[1]: - raise ValueError( - str(arg) - + " needs to be < " - + str(bnd[1]) - + ", got: " - + str(val) - ) - - ### bounds properties ##################################################### + """Check arguments to be within their given bounds.""" + return check_arg_bounds(self) + + # bounds properties @property def var_bounds(self): @@ -857,11 +753,7 @@ def var_bounds(self): Notes ----- - Is a list of 2 or 3 values: - - * ``[a, b]`` or - * ``[a, b, ]`` - + Is a list of 2 or 3 values: ``[a, b]`` or ``[a, b, ]`` where is one of ``"oo"``, ``"cc"``, ``"oc"`` or ``"co"`` to define if the bounds are open ("o") or closed ("c"). 
""" @@ -871,9 +763,7 @@ def var_bounds(self, bounds): if not check_bounds(bounds): raise ValueError( - "Given bounds for 'var' are not " - + "valid, got: " - + str(bounds) + f"Given bounds for 'var' are not valid, got: {bounds}" ) self._var_bounds = bounds @@ -883,11 +773,7 @@ def len_scale_bounds(self): Notes ----- - Is a list of 2 or 3 values: - - * ``[a, b]`` or - * ``[a, b, ]`` - + Is a list of 2 or 3 values: ``[a, b]`` or ``[a, b, ]`` where is one of ``"oo"``, ``"cc"``, ``"oc"`` or ``"co"`` to define if the bounds are open ("o") or closed ("c"). """ @@ -897,9 +783,7 @@ def len_scale_bounds(self, bounds): if not check_bounds(bounds): raise ValueError( - "Given bounds for 'len_scale' are not " - + "valid, got: " - + str(bounds) + f"Given bounds for 'len_scale' are not valid, got: {bounds}" ) self._len_scale_bounds = bounds @@ -909,11 +793,7 @@ def nugget_bounds(self): Notes ----- - Is a list of 2 or 3 values: - - * ``[a, b]`` or - * ``[a, b, ]`` - + Is a list of 2 or 3 values: ``[a, b]`` or ``[a, b, ]`` where is one of ``"oo"``, ``"cc"``, ``"oc"`` or ``"co"`` to define if the bounds are open ("o") or closed ("c"). """ @@ -923,12 +803,30 @@ def nugget_bounds(self, bounds): if not check_bounds(bounds): raise ValueError( - "Given bounds for 'nugget' are not " - + "valid, got: " - + str(bounds) + f"Given bounds for 'nugget' are not valid, got: {bounds}" ) self._nugget_bounds = bounds + @property + def anis_bounds(self): + """:class:`list`: Bounds for the anisotropy ratios. + + Notes + ----- + Is a list of 2 or 3 values: ``[a, b]`` or ``[a, b, ]`` where + is one of ``"oo"``, ``"cc"``, ``"oc"`` or ``"co"`` + to define if the bounds are open ("o") or closed ("c").
+ """ + return self._anis_bounds + + @anis_bounds.setter + def anis_bounds(self, bounds): + if not check_bounds(bounds): + raise ValueError( + f"Given bounds for 'anis' are not valid, got: {bounds}" + ) + self._anis_bounds = bounds + @property def opt_arg_bounds(self): """:class:`dict`: Bounds for the optional arguments. @@ -936,10 +834,7 @@ def opt_arg_bounds(self): Notes ----- Keys are the opt-arg names and values are lists of 2 or 3 values: - - * ``[a, b]`` or - * ``[a, b, ]`` - + ``[a, b]`` or ``[a, b, ]`` where is one of ``"oo"``, ``"cc"``, ``"oc"`` or ``"co"`` to define if the bounds are open ("o") or closed ("c"). """ @@ -951,11 +846,8 @@ def arg_bounds(self): Notes ----- - Keys are the opt-arg names and values are lists of 2 or 3 values: - - * ``[a, b]`` or - * ``[a, b, ]`` - + Keys are the arg names and values are lists of 2 or 3 values: + ``[a, b]`` or ``[a, b, ]`` where is one of ``"oo"``, ``"cc"``, ``"oc"`` or ``"co"`` to define if the bounds are open ("o") or closed ("c"). """ @@ -963,11 +855,24 @@ def arg_bounds(self): "var": self.var_bounds, "len_scale": self.len_scale_bounds, "nugget": self.nugget_bounds, + "anis": self.anis_bounds, } res.update(self.opt_arg_bounds) return res - ### standard parameters ################################################### + # geographical coordinates related + + @property + def latlon(self): + """:class:`bool`: Whether the model depends on geographical coords.""" + return self._latlon + + @property + def field_dim(self): + """:class:`int`: The field dimension of the model.""" + return 2 if self.latlon else self.dim + + # standard parameters @property def dim(self): @@ -976,23 +881,7 @@ def dim(self): @dim.setter def dim(self, dim): - # check if a fixed dimension should be used - if self.fix_dim() is not None: - print(self.name + ": using fixed dimension " + str(self.fix_dim())) - dim = self.fix_dim() - # set the dimension - if dim < 1 or dim > 3: - raise ValueError("Only dimensions of 1 <= d <= 3 are supported.") - 
self._dim = int(dim) - # create fourier transform just once (recreate for dim change) - self._sft = SFT(ndim=self.dim, **self.hankel_kw) - # recalculate dimension related parameters - if self._anis is not None: - self._len_scale, self._anis = set_len_anis( - self.dim, self._len_scale, self._anis - ) - if self._angles is not None: - self._angles = set_angles(self.dim, self._angles) + set_dim(self, dim) @property def var(self): @@ -1001,7 +890,7 @@ def var(self): @var.setter def var(self, var): - self._var = var / self.var_factor() + self._var = float(var) / self.var_factor() self.check_arg_bounds() @property @@ -1014,7 +903,7 @@ def var_raw(self): @var_raw.setter def var_raw(self, var_raw): - self._var = var_raw + self._var = float(var_raw) self.check_arg_bounds() @property @@ -1024,7 +913,7 @@ def nugget(self): @nugget.setter def nugget(self, nugget): - self._nugget = nugget + self._nugget = float(nugget) self.check_arg_bounds() @property @@ -1034,11 +923,28 @@ def len_scale(self): @len_scale.setter def len_scale(self, len_scale): - self._len_scale, self._anis = set_len_anis( - self.dim, len_scale, self.anis - ) + self._len_scale, anis = set_len_anis(self.dim, len_scale, self.anis) + if self.latlon: + self._anis = np.array((self.dim - 1) * [1], dtype=np.double) + else: + self._anis = anis self.check_arg_bounds() + @property + def rescale(self): + """:class:`float`: Rescale factor for the length scale of the model.""" + return self._rescale + + @rescale.setter + def rescale(self, rescale): + rescale = self.default_rescale() if rescale is None else rescale + self._rescale = abs(float(rescale)) + + @property + def len_rescaled(self): + """:class:`float`: The rescaled main length scale of the model.""" + return self._len_scale / self._rescale + @property def anis(self): """:class:`numpy.ndarray`: The anisotropy factors of the model.""" @@ -1046,9 +952,12 @@ def anis(self): @anis.setter def anis(self, anis): - self._len_scale, self._anis = set_len_anis( - self.dim, 
self.len_scale, anis - ) + if self.latlon: + self._anis = np.array((self.dim - 1) * [1], dtype=np.double) + else: + self._len_scale, self._anis = set_len_anis( + self.dim, self.len_scale, anis + ) self.check_arg_bounds() @property @@ -1058,7 +967,10 @@ def angles(self): @angles.setter def angles(self, angles): - self._angles = set_angles(self.dim, angles) + if self.latlon: + self._angles = np.array(self.dim * [0], dtype=np.double) + else: + self._angles = set_angles(self.dim, angles) self.check_arg_bounds() @property @@ -1085,9 +997,8 @@ def integral_scale(self, integral_scale): self.len_scale = integral_scale / int_tmp if not np.isclose(self.integral_scale, integral_scale, rtol=1e-3): raise ValueError( - self.name - + ": Integral scale could not be set correctly!" - + " Please just give a len_scale!" + f"{self.name}: Integral scale could not be set correctly! " + "Please just provide a 'len_scale'!" ) @property @@ -1145,6 +1056,27 @@ def arg(self): """:class:`list` of :class:`str`: Names of all arguments.""" return ["var", "len_scale", "nugget", "anis", "angles"] + self._opt_arg + @property + def arg_list(self): + """:class:`list` of :class:`float`: Values of all arguments.""" + alist = [self.var, self.len_scale, self.nugget, self.anis, self.angles] + for opt in self.opt_arg: + alist.append(getattr(self, opt)) + return alist + + @property + def iso_arg(self): + """:class:`list` of :class:`str`: Names of isotropic arguments.""" + return ["var", "len_scale", "nugget"] + self._opt_arg + + @property + def iso_arg_list(self): + """:class:`list` of :class:`float`: Values of isotropic arguments.""" + alist = [self.var, self.len_scale, self.nugget] + for opt in self.opt_arg: + alist.append(getattr(self, opt)) + return alist + @property def opt_arg(self): """:class:`list` of :class:`str`: Names of the optional arguments.""" @@ -1192,9 +1124,7 @@ def name(self): @property def do_rotation(self): """:any:`bool`: State if a rotation is performed.""" - return ( - not 
np.all(np.isclose(self.angles, 0.0)) and not self.is_isotropic - ) + return not np.all(np.isclose(self.angles, 0.0)) @property def is_isotropic(self): @@ -1205,54 +1135,8 @@ def __eq__(self, other): """Compare CovModels.""" if not isinstance(other, CovModel): return False - # prevent attribute error in opt_arg if the are not equal - if set(self.opt_arg) != set(other.opt_arg): - return False - # prevent dim error in anis and angles - if self.dim != other.dim: - return False - equal = True - equal &= self.name == other.name - equal &= np.isclose(self.var, other.var) - equal &= np.isclose(self.var_raw, other.var_raw) # ?! needless? - equal &= np.isclose(self.nugget, other.nugget) - equal &= np.isclose(self.len_scale, other.len_scale) - equal &= np.all(np.isclose(self.anis, other.anis)) - equal &= np.all(np.isclose(self.angles, other.angles)) - for opt in self.opt_arg: - equal &= np.isclose(getattr(self, opt), getattr(other, opt)) - return equal - - def __ne__(self, other): - """Compare CovModels.""" - return not self.__eq__(other) - - def __str__(self): - """Return String representation.""" - return self.__repr__() + return compare(self, other) def __repr__(self): """Return String representation.""" - opt_str = "" - for opt in self.opt_arg: - opt_str += ", " + opt + "={}".format(getattr(self, opt)) - return ( - "{0}(dim={1}, var={2}, len_scale={3}, " - "nugget={4}, anis={5}, angles={6}".format( - self.name, - self.dim, - self.var, - self.len_scale, - self.nugget, - self.anis, - self.angles, - ) - + opt_str - + ")" - ) - - -if __name__ == "__main__": # pragma: no cover - import doctest - - doctest.testmod() + return model_repr(self) diff --git a/gstools/covmodel/fit.py b/gstools/covmodel/fit.py new file mode 100755 index 000000000..c67c9839b --- /dev/null +++ b/gstools/covmodel/fit.py @@ -0,0 +1,537 @@ +# -*- coding: utf-8 -*- +""" +GStools subpackage providing tools for the covariance-model. + +.. 
currentmodule:: gstools.covmodel.fit + +The following classes and functions are provided + +.. autosummary:: + fit_variogram +""" +# pylint: disable=C0103, W0632 +import numpy as np +from scipy.optimize import curve_fit +from gstools.covmodel.tools import check_arg_in_bounds, default_arg_from_bounds +from gstools.tools.geometric import set_anis + + +__all__ = ["fit_variogram"] + + +DEFAULT_PARA = ["var", "len_scale", "nugget"] + + +def fit_variogram( + model, + x_data, + y_data, + anis=True, + sill=None, + init_guess="default", + weights=None, + method="trf", + loss="soft_l1", + max_eval=None, + return_r2=False, + curve_fit_kwargs=None, + **para_select, +): + """ + Fitting a variogram-model to an empirical variogram. + + Parameters + ---------- + model : :any:`CovModel` + Covariance Model to fit. + x_data : :class:`numpy.ndarray` + The bin-centers of the empirical variogram. + y_data : :class:`numpy.ndarray` + The messured variogram + If multiple are given, they are interpreted as the directional + variograms along the main axis of the associated rotated + coordinate system. + Anisotropy ratios will be estimated in that case. + anis : :class:`bool`, optional + In case of a directional variogram, you can control anisotropy + by this argument. Deselect the parameter from fitting, by setting + it "False". + You could also pass a fixed value to be set in the model. + Then the anisotropy ratios won't be altered during fitting. + Default: True + sill : :class:`float` or :class:`bool` or :any:`None`, optional + Here you can provide a fixed sill for the variogram. + It needs to be in a fitting range for the var and nugget bounds. + If variance or nugget are not selected for estimation, + the nugget will be recalculated to fulfill: + + * sill = var + nugget + * if the variance is bigger than the sill, + nugget will bet set to its lower bound + and the variance will be set to the fitting partial sill. 
+ + If variance is deselected, it needs to be less than the sill, + otherwise a ValueError comes up. Same for nugget. + If sill=False, it will be deslected from estimation + and set to the current sill of the model. + Then, the procedure above is applied. + Default: None + init_guess : :class:`str` or :class:`dict`, optional + Initial guess for the estimation. Either: + + * "default": using the default values of the covariance model + ("len_scale" will be mean of given bin centers; + "var" and "nugget" will be mean of given variogram values + (if in given bounds)) + * "current": using the current values of the covariance model + * dict: dictionary with parameter names and given value + (separate "default" can bet set to "default" or "current" for + unspecified values to get same behavior as given above + ("default" by default)) + Example: ``{"len_scale": 10, "default": "current"}`` + + Default: "default" + weights : :class:`str`, :class:`numpy.ndarray`, :class:`callable`optional + Weights applied to each point in the estimation. Either: + + * 'inv': inverse distance ``1 / (x_data + 1)`` + * list: weights given per bin + * callable: function applied to x_data + + If callable, it must take a 1-d ndarray. Then ``weights = f(x_data)``. + Default: None + method : {'trf', 'dogbox'}, optional + Algorithm to perform minimization. + + * 'trf' : Trust Region Reflective algorithm, particularly suitable + for large sparse problems with bounds. Generally robust method. + * 'dogbox' : dogleg algorithm with rectangular trust regions, + typical use case is small problems with bounds. Not recommended + for problems with rank-deficient Jacobian. + + Default: 'trf' + loss : :class:`str` or :class:`callable`, optional + Determines the loss function in scipys curve_fit. + The following keyword values are allowed: + + * 'linear' (default) : ``rho(z) = z``. Gives a standard + least-squares problem. + * 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. 
The smooth + approximation of l1 (absolute value) loss. Usually a good + choice for robust least squares. + * 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works + similarly to 'soft_l1'. + * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers + influence, but may cause difficulties in optimization process. + * 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on + a single residual, has properties similar to 'cauchy'. + + If callable, it must take a 1-d ndarray ``z=f**2`` and return an + array_like with shape (3, m) where row 0 contains function values, + row 1 contains first derivatives and row 2 contains second + derivatives. Default: 'soft_l1' + max_eval : :class:`int` or :any:`None`, optional + Maximum number of function evaluations before the termination. + If None (default), the value is chosen automatically: 100 * n. + return_r2 : :class:`bool`, optional + Whether to return the r2 score of the estimation. + Default: False + curve_fit_kwargs : :class:`dict`, optional + Other keyword arguments passed to scipys curve_fit. Default: None + **para_select + You can deselect parameters from fitting, by setting + them "False" using their names as keywords. + You could also pass fixed values for each parameter. + Then these values will be applied and the involved parameters wont + be fitted. + By default, all parameters are fitted. + + Returns + ------- + fit_para : :class:`dict` + Dictonary with the fitted parameter values + pcov : :class:`numpy.ndarray` + The estimated covariance of `popt` from + :any:`scipy.optimize.curve_fit`. + To compute one standard deviation errors + on the parameters use ``perr = np.sqrt(np.diag(pcov))``. + r2_score : :class:`float`, optional + r2 score of the curve fitting results. Only if return_r2 is True. + + Notes + ----- + You can set the bounds for each parameter by accessing + :any:`CovModel.set_arg_bounds`. + + The fitted parameters will be instantly set in the model. 
+ """ + # preprocess selected parameters + para, sill, constrain_sill, anis = _pre_para( + model, para_select, sill, anis + ) + # check curve_fit kwargs + curve_fit_kwargs = {} if curve_fit_kwargs is None else curve_fit_kwargs + # check method + if method not in ["trf", "dogbox"]: + raise ValueError("fit: method needs to be either 'trf' or 'dogbox'") + # prepare variogram data + # => concatenate directional variograms to have a 1D array for x and y + x_data, y_data, is_dir_vario = _check_vario(model, x_data, y_data) + # prepare init guess dictionary + init_guess = _pre_init_guess( + model, init_guess, np.mean(x_data), np.mean(y_data) + ) + # only fit anisotropy if a directional variogram was given + anis &= is_dir_vario + # set weights + _set_weights(model, weights, x_data, curve_fit_kwargs, is_dir_vario) + # set the lower/upper boundaries for the variogram-parameters + bounds, init_guess_list = _init_curve_fit_para( + model, para, init_guess, constrain_sill, sill, anis + ) + # create the fitting curve + curve_fit_kwargs["f"] = _get_curve( + model, para, constrain_sill, sill, anis, is_dir_vario + ) + # set the remaining kwargs for curve_fit + curve_fit_kwargs["bounds"] = bounds + curve_fit_kwargs["p0"] = init_guess_list + curve_fit_kwargs["xdata"] = x_data + curve_fit_kwargs["ydata"] = y_data + curve_fit_kwargs["loss"] = loss + curve_fit_kwargs["max_nfev"] = max_eval + curve_fit_kwargs["method"] = method + # fit the variogram + popt, pcov = curve_fit(**curve_fit_kwargs) + # convert the results + fit_para = _post_fitting(model, para, popt, anis, is_dir_vario) + # calculate the r2 score if wanted + if return_r2: + return fit_para, pcov, _r2_score(model, x_data, y_data, is_dir_vario) + return fit_para, pcov + + +def _pre_para(model, para_select, sill, anis): + """Preprocess selected parameters.""" + var_last = False + for par in para_select: + if par not in model.arg_bounds: + raise ValueError(f"fit: unknown parameter in selection: {par}") + if not 
isinstance(para_select[par], bool): + if par == "var": + var_last = True + var_tmp = float(para_select[par]) + else: + setattr(model, par, float(para_select[par])) + para_select[par] = False + # set variance last due to possible recalculations + if var_last: + model.var = var_tmp + # remove those that were set to True + para_select = {k: v for k, v in para_select.items() if not v} + # handling the sill + sill = None if (isinstance(sill, bool) and sill) else sill + if sill is not None: + sill = model.sill if isinstance(sill, bool) else float(sill) + constrain_sill = True + sill_low = model.arg_bounds["var"][0] + model.arg_bounds["nugget"][0] + sill_up = model.arg_bounds["var"][1] + model.arg_bounds["nugget"][1] + if not sill_low <= sill <= sill_up: + raise ValueError("fit: sill out of bounds.") + if "var" in para_select and "nugget" in para_select: + if model.var > sill: + model.nugget = model.arg_bounds["nugget"][0] + model.var = sill - model.nugget + else: + model.nugget = sill - model.var + elif "var" in para_select: + if model.var > sill: + raise ValueError( + "fit: if sill is fixed and variance deselected, " + "the set variance should be less than the given sill." + ) + para_select["nugget"] = False + model.nugget = sill - model.var + elif "nugget" in para_select: + if model.nugget > sill: + raise ValueError( + "fit: if sill is fixed and nugget deselected, " + "the set nugget should be less than the given sill." 
+ ) + para_select["var"] = False + model.var = sill - model.nugget + else: + # deselect the nugget, to recalculate it accordingly + # nugget = sill - var + para_select["nugget"] = False + else: + constrain_sill = False + # select all parameters to be fitted + para = {par: True for par in DEFAULT_PARA} + para.update({opt: True for opt in model.opt_arg}) + # now deselect unwanted parameters + para.update(para_select) + # check if anisotropy should be fitted or set + if not isinstance(anis, bool): + model.anis = anis + anis = False + return para, sill, constrain_sill, anis + + +def _pre_init_guess(model, init_guess, mean_x=1.0, mean_y=1.0): + # init guess should be a dict + if not isinstance(init_guess, dict): + init_guess = {"default": init_guess} + # "default" init guess is the respective default value + default_guess = init_guess.pop("default", "default") + if default_guess not in ["default", "current"]: + raise ValueError(f"fit_variogram: unknown def. guess: {default_guess}") + default = default_guess == "default" + # check invalid names for given init guesses + invalid_para = set(init_guess) - set(model.iso_arg + ["anis"]) + if invalid_para: + raise ValueError(f"fit_variogram: unknown init guess: {invalid_para}") + bnd = model.arg_bounds + # default length scale is mean of given bin centers (respecting "rescale") + init_guess.setdefault( + "len_scale", mean_x * model.rescale if default else model.len_scale + ) + # init guess for variance and nugget is mean of given variogram + for par in ["var", "nugget"]: + init_guess.setdefault(par, mean_y if default else getattr(model, par)) + # anis setting + init_guess.setdefault( + "anis", default_arg_from_bounds(bnd["anis"]) if default else model.anis + ) + # correctly handle given values for anis (need a list of values) + init_guess["anis"] = list(set_anis(model.dim, init_guess["anis"])) + # set optional arguments + for opt in model.opt_arg: + init_guess.setdefault( + opt, + default_arg_from_bounds(bnd[opt]) + if default 
+ else getattr(model, opt), + ) + # convert all init guesses to float (except "anis") + for arg in model.iso_arg: + init_guess[arg] = float(init_guess[arg]) + return init_guess + + +def _check_vario(model, x_data, y_data): + # prepare variogram data + x_data = np.array(x_data).reshape(-1) + y_data = np.array(y_data).reshape(-1) + # if multiple variograms are given, they will be interpreted + # as directional variograms along the main rotated axes of the model + is_dir_vario = False + if model.dim > 1 and x_data.size * model.dim == y_data.size: + is_dir_vario = True + # concatenate multiple variograms + x_data = np.tile(x_data, model.dim) + elif x_data.size != y_data.size: + raise ValueError( + "CovModel.fit_variogram: Wrong number of empirical variograms! " + "Either provide only one variogram to fit an isotropic model, " + "or directional ones for all main axes to fit anisotropy." + ) + if is_dir_vario and model.latlon: + raise ValueError( + "CovModel.fit_variogram: lat-lon models don't support anisotropy." 
+ ) + if model.latlon: + # convert to yadrenko model + x_data = 2 * np.sin(x_data / 2) + return x_data, y_data, is_dir_vario + + +def _set_weights(model, weights, x_data, curve_fit_kwargs, is_dir_vario): + if weights is not None: + if callable(weights): + weights = 1.0 / weights(x_data) + elif isinstance(weights, str) and weights == "inv": + weights = 1.0 + x_data + elif is_dir_vario: + if weights.size * model.dim == x_data.size: + weights = np.tile(weights, model.dim) + weights = 1.0 / np.array(weights).reshape(-1) + else: + weights = 1.0 / np.array(weights).reshape(-1) + curve_fit_kwargs["sigma"] = weights + curve_fit_kwargs["absolute_sigma"] = True + + +def _init_curve_fit_para(model, para, init_guess, constrain_sill, sill, anis): + """Create initial guess and bounds for fitting.""" + low_bounds = [] + top_bounds = [] + init_guess_list = [] + for par in DEFAULT_PARA: + if para[par]: + low_bounds.append(model.arg_bounds[par][0]) + if par == "var" and constrain_sill: # var <= sill in this case + top_bounds.append(sill) + else: + top_bounds.append(model.arg_bounds[par][1]) + init_guess_list.append( + _init_guess( + bounds=[low_bounds[-1], top_bounds[-1]], + default=init_guess[par], + ) + ) + for opt in model.opt_arg: + if para[opt]: + low_bounds.append(model.arg_bounds[opt][0]) + top_bounds.append(model.arg_bounds[opt][1]) + init_guess_list.append( + _init_guess( + bounds=[low_bounds[-1], top_bounds[-1]], + default=init_guess[opt], + ) + ) + if anis: + for i in range(model.dim - 1): + low_bounds.append(model.anis_bounds[0]) + top_bounds.append(model.anis_bounds[1]) + init_guess_list.append( + _init_guess( + bounds=[low_bounds[-1], top_bounds[-1]], + default=init_guess["anis"][i], + ) + ) + return (low_bounds, top_bounds), init_guess_list + + +def _init_guess(bounds, default): + """Proper determination of initial guess.""" + if bounds[0] < default < bounds[1]: + return default + return default_arg_from_bounds(bounds) + + +def _get_curve(model, para, constrain_sill, 
sill, anis, is_dir_vario): + """Create the curve for scipys curve_fit.""" + var_save = model.var + + # we need arg1, otherwise curve_fit throws an error (bug?!) + def curve(x, arg1, *args): + """Adapted Variogram function.""" + args = (arg1,) + args + para_skip = 0 + opt_skip = 0 + if para["var"]: + var_tmp = args[para_skip] + if constrain_sill: + nugget_tmp = sill - var_tmp + # punishment, if resulting nugget out of range for fixed sill + if check_arg_in_bounds(model, "nugget", nugget_tmp) > 0: + return np.full_like(x, np.inf) + # nugget estimation deselected in this case + model.nugget = nugget_tmp + para_skip += 1 + if para["len_scale"]: + model.len_scale = args[para_skip] + para_skip += 1 + if para["nugget"]: + model.nugget = args[para_skip] + para_skip += 1 + for opt in model.opt_arg: + if para[opt]: + setattr(model, opt, args[para_skip + opt_skip]) + opt_skip += 1 + # set var at last because of var_factor (other parameter needed) + if para["var"]: + model.var = var_tmp + # needs to be reset for TPL models when len_scale was changed + else: + model.var = var_save + if is_dir_vario: + if anis: + model.anis = args[1 - model.dim :] + xs = x[: x.size // model.dim] + out = np.array([], dtype=np.double) + for i in range(model.dim): + out = np.concatenate((out, model.vario_axis(xs, axis=i))) + return out + return model.variogram(x) + + return curve + + +def _post_fitting(model, para, popt, anis, is_dir_vario): + """Postprocess fitting results and application to model.""" + fit_para = {} + para_skip = 0 + opt_skip = 0 + for par in DEFAULT_PARA: + if para[par]: + if par == "var": # set variance last + var_tmp = popt[para_skip] + else: + setattr(model, par, popt[para_skip]) + fit_para[par] = popt[para_skip] + para_skip += 1 + else: + fit_para[par] = getattr(model, par) + for opt in model.opt_arg: + if para[opt]: + setattr(model, opt, popt[para_skip + opt_skip]) + fit_para[opt] = popt[para_skip + opt_skip] + opt_skip += 1 + else: + fit_para[opt] = getattr(model, opt) + 
if is_dir_vario: + if anis: + model.anis = popt[1 - model.dim :] + fit_para["anis"] = model.anis + # set var at last because of var_factor (other parameter needed) + if para["var"]: + model.var = var_tmp + return fit_para + + +def _r2_score(model, x_data, y_data, is_dir_vario): + """Calculate the R2 score.""" + if is_dir_vario: + xs = x_data[: x_data.size // model.dim] + vario = np.array([], dtype=np.double) + for i in range(model.dim): + vario = np.concatenate((vario, model.vario_axis(xs, axis=i))) + else: + vario = model.variogram(x_data) + residuals = y_data - vario + ss_res = np.sum(residuals ** 2) + ss_tot = np.sum((y_data - np.mean(y_data)) ** 2) + return 1.0 - (ss_res / ss_tot) + + +def logistic_weights(p=0.1, mean=0.7): # pragma: no cover + """ + Return a logistic weights function. + + Parameters + ---------- + p : :class:`float`, optional + Parameter for the growth rate. + Within this percentage of the data range, the function will + be in the upper resp. lower percentile p. The default is 0.1. + mean : :class:`float`, optional + Percentage of the data range, where this function has its + sigmoid's midpoint. The default is 0.7. + + Returns + ------- + callable + Weighting function. + """ + # define the callable weights function + def func(x_data): + """Callable function for the weights.""" + x_range = np.amax(x_data) - np.amin(x_data) + # logit function for growth rate + growth = np.log(p / (1 - p)) / (p * x_range) + x_mean = mean * x_range + np.amin(x_data) + return 1.0 / (1.0 + np.exp(growth * (x_mean - x_data))) + + return func diff --git a/gstools/covmodel/models.py b/gstools/covmodel/models.py index 72d270ad0..c05a92dcb 100644 --- a/gstools/covmodel/models.py +++ b/gstools/covmodel/models.py @@ -4,7 +4,7 @@ .. currentmodule:: gstools.covmodel.models -The following classes and functions are provided +The following classes are provided .. 
autosummary:: Gaussian @@ -12,17 +12,20 @@ Matern Stable Rational + Cubic Linear Circular Spherical - Intersection + HyperSpherical + SuperSpherical + JBessel """ -# pylint: disable=C0103, E1101, E1137 - +# pylint: disable=C0103, E1101, R0201 import warnings import numpy as np from scipy import special as sps -from gstools.covmodel import CovModel +from gstools.covmodel.tools import AttributeWarning +from gstools.covmodel.base import CovModel __all__ = [ "Gaussian", @@ -30,62 +33,68 @@ "Matern", "Stable", "Rational", + "Cubic", "Linear", "Circular", "Spherical", - "Intersection", + "HyperSpherical", + "SuperSpherical", + "JBessel", ] -# Gaussian Model ############################################################## - - class Gaussian(CovModel): r"""The Gaussian covariance model. Notes ----- - This model is given by the following correlation function: + This model is given by the following variogram [Webster2007]_: .. math:: - \rho(r) = - \exp\left(- \frac{\pi}{4} \cdot \left(\frac{r}{\ell}\right)^2\right) + \gamma(r)= + \sigma^{2} + \left(1-\exp\left(-\left(s\cdot\frac{r}{\ell}\right)^{2}\right)\right)+n + + Where the standard rescale factor is :math:`s=\frac{\sqrt{\pi}}{2}`. + References + ---------- + .. [Webster2007] Webster, R. and Oliver, M. A. + "Geostatistics for environmental scientists.", + John Wiley & Sons. (2007) """ - def correlation(self, r): - r"""Gaussian correlation function. + def cor(self, h): + """Gaussian normalized correlation function.""" + return np.exp(-(h ** 2)) - .. 
math:: - \rho(r) = - \exp\left(- \frac{\pi}{4}\cdot \left(\frac{r}{\ell}\right)^2\right) - """ - r = np.array(np.abs(r), dtype=np.double) - return np.exp(-np.pi / 4 * (r / self.len_scale) ** 2) + def default_rescale(self): + """Gaussian rescaling factor to result in integral scale.""" + return np.sqrt(np.pi) / 2.0 def spectral_density(self, k): # noqa: D102 k = np.array(k, dtype=np.double) - return (self.len_scale / np.pi) ** self.dim * np.exp( - -((k * self.len_scale) ** 2) / np.pi + return (self.len_rescaled / 2.0 / np.sqrt(np.pi)) ** self.dim * np.exp( + -((k * self.len_rescaled / 2.0) ** 2) ) def spectral_rad_cdf(self, r): - """Radial spectral cdf.""" + """Gaussian radial spectral cdf.""" r = np.array(r, dtype=np.double) if self.dim == 1: - return sps.erf(self.len_scale * r / np.sqrt(np.pi)) + return sps.erf(r * self.len_rescaled / 2.0) if self.dim == 2: - return 1.0 - np.exp(-((r * self.len_scale) ** 2) / np.pi) + return 1.0 - np.exp(-((r * self.len_rescaled / 2.0) ** 2)) if self.dim == 3: return sps.erf( - self.len_scale * r / np.sqrt(np.pi) - ) - 2 * r * self.len_scale / np.pi * np.exp( - -((r * self.len_scale) ** 2) / np.pi + r * self.len_rescaled / 2.0 + ) - r * self.len_rescaled / np.sqrt(np.pi) * np.exp( + -((r * self.len_rescaled / 2.0) ** 2) ) - return None + return None # pragma: no cover def spectral_rad_ppf(self, u): - """Radial spectral ppf. + """Gaussian radial spectral ppf. 
Notes ----- @@ -93,20 +102,19 @@ def spectral_rad_ppf(self, u): """ u = np.array(u, dtype=np.double) if self.dim == 1: - return sps.erfinv(u) * np.sqrt(np.pi) / self.len_scale + return 2.0 / self.len_rescaled * sps.erfinv(u) if self.dim == 2: - return np.sqrt(np.pi) / self.len_scale * np.sqrt(-np.log(1.0 - u)) - return None + return 2.0 / self.len_rescaled * np.sqrt(-np.log(1.0 - u)) + return None # pragma: no cover + + def _has_cdf(self): + return self.dim in [1, 2, 3] def _has_ppf(self): - # since the ppf is not analytical for dim=3, we have to state that - return False if self.dim == 3 else True + return self.dim in [1, 2] def calc_integral_scale(self): # noqa: D102 - return self.len_scale - - -# Exponential Model ########################################################### + return self.len_rescaled * np.sqrt(np.pi) / 2.0 class Exponential(CovModel): @@ -114,53 +122,57 @@ class Exponential(CovModel): Notes ----- - This model is given by the following correlation function: + This model is given by the following variogram [Webster2007]_: .. math:: - \rho(r) = - \exp\left(- \frac{r}{\ell} \right) + \gamma(r)= + \sigma^{2} + \left(1-\exp\left(-s\cdot\frac{r}{\ell}\right)\right)+n - """ + Where the standard rescale factor is :math:`s=1`. - def correlation(self, r): - r"""Exponential correlation function. + References + ---------- + .. [Webster2007] Webster, R. and Oliver, M. A. + "Geostatistics for environmental scientists.", + John Wiley & Sons. (2007) + """ - .. 
math:: - \rho(r) = - \exp\left(- \frac{r}{\ell} \right) - """ - r = np.array(np.abs(r), dtype=np.double) - return np.exp(-1 * r / self.len_scale) + def cor(self, h): + """Exponential normalized correlation function.""" + return np.exp(-h) def spectral_density(self, k): # noqa: D102 k = np.array(k, dtype=np.double) return ( - self.len_scale ** self.dim - * sps.gamma((self.dim + 1) / 2) - / (np.pi * (1.0 + (k * self.len_scale) ** 2)) - ** ((self.dim + 1) / 2) + self.len_rescaled ** self.dim + * sps.gamma((self.dim + 1) / 2.0) + / (np.pi * (1.0 + (k * self.len_rescaled) ** 2)) + ** ((self.dim + 1) / 2.0) ) def spectral_rad_cdf(self, r): - """Radial spectral cdf.""" + """Exponential radial spectral cdf.""" r = np.array(r, dtype=np.double) if self.dim == 1: - return np.arctan(r * self.len_scale) * 2 / np.pi + return np.arctan(r * self.len_rescaled) * 2.0 / np.pi if self.dim == 2: - return 1.0 - 1 / np.sqrt(1 + (r * self.len_scale) ** 2) + return 1.0 - 1.0 / np.sqrt(1.0 + (r * self.len_rescaled) ** 2) if self.dim == 3: return ( ( - np.arctan(r * self.len_scale) - - r * self.len_scale / (1 + (r * self.len_scale) ** 2) + np.arctan(r * self.len_rescaled) + - r + * self.len_rescaled + / (1.0 + (r * self.len_rescaled) ** 2) ) - * 2 + * 2.0 / np.pi ) - return None + return None # pragma: no cover def spectral_rad_ppf(self, u): - """Radial spectral ppf. + """Exponential radial spectral ppf. 
Notes ----- @@ -168,7 +180,7 @@ def spectral_rad_ppf(self, u): """ u = np.array(u, dtype=np.double) if self.dim == 1: - return np.tan(np.pi / 2 * u) / self.len_scale + return np.tan(np.pi / 2 * u) / self.len_rescaled if self.dim == 2: u_power = np.divide( 1, @@ -176,90 +188,17 @@ def spectral_rad_ppf(self, u): out=np.full_like(u, np.inf), where=np.logical_not(np.isclose(u, 0)), ) - return np.sqrt(u_power - 1.0) / self.len_scale - return None - - def _has_ppf(self): - # since the ppf is not analytical for dim=3, we have to state that - return False if self.dim == 3 else True - - def calc_integral_scale(self): # noqa: D102 - return self.len_scale + return np.sqrt(u_power - 1.0) / self.len_rescaled + return None # pragma: no cover + def _has_cdf(self): + return self.dim in [1, 2, 3] -# Rational Model ############################################################## - - -class Rational(CovModel): - r"""The rational quadratic covariance model. - - Notes - ----- - This model is given by the following correlation function: - - .. math:: - \rho(r) = - \left(1 + \frac{1}{2\alpha} \cdot - \left(\frac{r}{\ell}\right)^2\right)^{-\alpha} - - :math:`\alpha` is a shape parameter and should be > 0.5. - - Other Parameters - ---------------- - **opt_arg - The following parameters are covered by these keyword arguments - alpha : :class:`float`, optional - Shape parameter. Standard range: ``(0, inf)`` - Default: ``1.0`` - """ - - def default_opt_arg(self): - """Defaults for the optional arguments. - - * ``{"alpha": 1.0}`` - - Returns - ------- - :class:`dict` - Defaults for optional arguments - """ - return {"alpha": 1.0} - - def default_opt_arg_bounds(self): - """Defaults for boundaries of the optional arguments. - - * ``{"alpha": [0.5, inf]}`` - - Returns - ------- - :class:`dict` - Boundaries for optional arguments - """ - return {"alpha": [0.5, np.inf]} - - def correlation(self, r): - r"""Rational correlation function. - - .. 
math:: - \rho(r) = - \left(1 + \frac{1}{2\alpha} \cdot - \left(\frac{r}{\ell}\right)^2\right)^{-\alpha} - """ - r = np.array(np.abs(r), dtype=np.double) - return np.power( - 1 + 0.5 / self.alpha * (r / self.len_scale) ** 2, -self.alpha - ) + def _has_ppf(self): + return self.dim in [1, 2] def calc_integral_scale(self): # noqa: D102 - return ( - self.len_scale - * np.sqrt(np.pi * self.alpha * 0.5) - * sps.gamma(self.alpha - 0.5) - / sps.gamma(self.alpha) - ) - - -# Stable Model ################################################################ + return self.len_rescaled class Stable(CovModel): @@ -267,18 +206,23 @@ class Stable(CovModel): Notes ----- - This model is given by the following correlation function: + This model is given by the following correlation function + [Wackernagel2003]_: .. math:: \rho(r) = - \exp\left(- \left(\frac{r}{\ell}\right)^{\alpha}\right) + \exp\left(- \left(s\cdot\frac{r}{\ell}\right)^{\alpha}\right) + Where the standard rescale factor is :math:`s=1`. :math:`\alpha` is a shape parameter with :math:`\alpha\in(0,2]` + References + ---------- + .. [Wackernagel2003] Wackernagel, H. "Multivariate geostatistics", + Springer, Berlin, Heidelberg (2003) + Other Parameters ---------------- - **opt_arg - The following parameters are covered by these keyword arguments alpha : :class:`float`, optional Shape parameter. Standard range: ``(0, 2]`` Default: ``1.5`` @@ -320,24 +264,16 @@ def check_opt_arg(self): if self.alpha < 0.3: warnings.warn( "Stable: parameter 'alpha' is < 0.3, " - + "count with unstable results" + "count with unstable results", + AttributeWarning, ) - def correlation(self, r): - r"""Stable correlation function. - - .. 
math:: - \rho(r) = - \exp\left(- \left(\frac{r}{\ell}\right)^{\alpha}\right) - """ - r = np.array(np.abs(r), dtype=np.double) - return np.exp(-np.power(r / self.len_scale, self.alpha)) + def cor(self, h): + r"""Stable normalized correlation function.""" + return np.exp(-np.power(h, self.alpha)) def calc_integral_scale(self): # noqa: D102 - return self.len_scale * sps.gamma(1.0 + 1.0 / self.alpha) - - -# Matérn Model ################################################################ + return self.len_rescaled * sps.gamma(1.0 + 1.0 / self.alpha) class Matern(CovModel): @@ -345,30 +281,35 @@ class Matern(CovModel): Notes ----- - This model is given by the following correlation function: + This model is given by the following correlation function [Rasmussen2003]_: .. math:: \rho(r) = \frac{2^{1-\nu}}{\Gamma\left(\nu\right)} \cdot - \left(\sqrt{\nu}\cdot\frac{r}{\ell}\right)^{\nu} \cdot - \mathrm{K}_{\nu}\left(\sqrt{\nu}\cdot\frac{r}{\ell}\right) + \left(\sqrt{\nu}\cdot s\cdot\frac{r}{\ell}\right)^{\nu} \cdot + \mathrm{K}_{\nu}\left(\sqrt{\nu}\cdot s\cdot\frac{r}{\ell}\right) - Where :math:`\Gamma` is the gamma function and :math:`\mathrm{K}_{\nu}` + Where the standard rescale factor is :math:`s=1`. + :math:`\Gamma` is the gamma function and :math:`\mathrm{K}_{\nu}` is the modified Bessel function of the second kind. :math:`\nu` is a shape parameter and should be >= 0.2. - If :math:`\nu > 20`, a gaussian model is used, since it is the limit - case: + If :math:`\nu > 20`, a gaussian model is used, since it represents + the limiting case: .. math:: \rho(r) = - \exp\left(- \frac{1}{4} \cdot \left(\frac{r}{\ell}\right)^2\right) + \exp\left(-\left(s\cdot\frac{r}{2\ell}\right)^2\right) + + References + ---------- + .. [Rasmussen2003] Rasmussen, C. E., + "Gaussian processes in machine learning." Summer school on + machine learning. 
Springer, Berlin, Heidelberg, (2003) Other Parameters ---------------- - **opt_arg - The following parameters are covered by these keyword arguments nu : :class:`float`, optional Shape parameter. Standard range: ``[0.2, 30]`` Default: ``1.0`` @@ -389,7 +330,7 @@ def default_opt_arg(self): def default_opt_arg_bounds(self): """Defaults for boundaries of the optional arguments. - * ``{"nu": [0.5, 30.0, "cc"]}`` + * ``{"nu": [0.2, 30.0, "cc"]}`` Returns ------- @@ -398,28 +339,20 @@ def default_opt_arg_bounds(self): """ return {"nu": [0.2, 30.0, "cc"]} - def correlation(self, r): - r"""Matérn correlation function. - - .. math:: - \rho(r) = - \frac{2^{1-\nu}}{\Gamma\left(\nu\right)} \cdot - \left(\sqrt{\nu}\cdot\frac{r}{\ell}\right)^{\nu} \cdot - \mathrm{K}_{\nu}\left(\sqrt{\nu}\cdot\frac{r}{\ell}\right) - """ - r = np.array(np.abs(r), dtype=np.double) + def cor(self, h): + """Matérn normalized correlation function.""" + h = np.array(np.abs(h), dtype=np.double) # for nu > 20 we just use the gaussian model if self.nu > 20.0: - return np.exp(-((r / self.len_scale) ** 2) / 4) + return np.exp(-((h / 2.0) ** 2)) # calculate by log-transformation to prevent numerical errors - r_gz = r[r > 0.0] - res = np.ones_like(r) - # with np.errstate(over="ignore", invalid="ignore"): - res[r > 0.0] = np.exp( + h_gz = h[h > 0.0] + res = np.ones_like(h) + res[h > 0.0] = np.exp( (1.0 - self.nu) * np.log(2) - sps.loggamma(self.nu) - + self.nu * np.log(np.sqrt(self.nu) * r_gz / self.len_scale) - ) * sps.kv(self.nu, np.sqrt(self.nu) * r_gz / self.len_scale) + + self.nu * np.log(np.sqrt(self.nu) * h_gz) + ) * sps.kv(self.nu, np.sqrt(self.nu) * h_gz) # if nu >> 1 we get errors for the farfield, there 0 is approached res[np.logical_not(np.isfinite(res))] = 0.0 # covariance is positiv @@ -431,20 +364,20 @@ def spectral_density(self, k): # noqa: D102 # for nu > 20 we just use an approximation of the gaussian model if self.nu > 20.0: return ( - (self.len_scale / np.sqrt(np.pi)) ** self.dim - * 
np.exp(-((k * self.len_scale) ** 2)) + (self.len_rescaled / np.sqrt(np.pi)) ** self.dim + * np.exp(-((k * self.len_rescaled) ** 2)) * ( 1 + ( - ((k * self.len_scale) ** 2 - self.dim / 2.0) ** 2 + ((k * self.len_rescaled) ** 2 - self.dim / 2.0) ** 2 - self.dim / 2.0 ) / self.nu ) ) - return (self.len_scale / np.sqrt(np.pi)) ** self.dim * np.exp( + return (self.len_rescaled / np.sqrt(np.pi)) ** self.dim * np.exp( -(self.nu + self.dim / 2.0) - * np.log(1.0 + (k * self.len_scale) ** 2 / self.nu) + * np.log(1.0 + (k * self.len_rescaled) ** 2 / self.nu) + sps.loggamma(self.nu + self.dim / 2.0) - sps.loggamma(self.nu) - self.dim * np.log(np.sqrt(self.nu)) @@ -452,11 +385,119 @@ def spectral_density(self, k): # noqa: D102 def calc_integral_scale(self): # noqa: D102 return ( - self.len_scale * np.pi / np.sqrt(self.nu) / sps.beta(self.nu, 0.5) + self.len_rescaled + * np.pi + / np.sqrt(self.nu) + / sps.beta(self.nu, 0.5) ) -# Bounded linear Model ######################################################## +class Rational(CovModel): + r"""The rational quadratic covariance model. + + Notes + ----- + This model is given by the following correlation function [Rasmussen2003]_: + + .. math:: + \rho(r) = + \left(1 + \frac{1}{\alpha} \cdot + \left(s\cdot\frac{r}{\ell}\right)^2\right)^{-\alpha} + + Where the standard rescale factor is :math:`s=1`. + :math:`\alpha` is a shape parameter and should be > 0.5. + + For :math:`\alpha\to\infty` this model converges to the Gaussian model: + + .. math:: + \rho(r)= + \exp\left(-\left(s\cdot\frac{r}{\ell}\right)^{2}\right) + + References + ---------- + .. [Rasmussen2003] Rasmussen, C. E., + "Gaussian processes in machine learning." Summer school on + machine learning. Springer, Berlin, Heidelberg, (2003) + + Other Parameters + ---------------- + alpha : :class:`float`, optional + Shape parameter. Standard range: ``[0.5, 50]`` + Default: ``1.0`` + """ + + def default_opt_arg(self): + """Defaults for the optional arguments. 
+ + * ``{"alpha": 1.0}`` + + Returns + ------- + :class:`dict` + Defaults for optional arguments + """ + return {"alpha": 1.0} + + def default_opt_arg_bounds(self): + """Defaults for boundaries of the optional arguments. + + * ``{"alpha": [0.5, 50.0]}`` + + Returns + ------- + :class:`dict` + Boundaries for optional arguments + """ + return {"alpha": [0.5, 50.0]} + + def cor(self, h): + """Rational normalized correlation function.""" + return np.power(1 + h ** 2 / self.alpha, -self.alpha) + + def calc_integral_scale(self): # noqa: D102 + return ( + self.len_rescaled + * np.sqrt(np.pi * self.alpha) + * sps.gamma(self.alpha - 0.5) + / sps.gamma(self.alpha) + / 2.0 + ) + + +class Cubic(CovModel): + r"""The Cubic covariance model. + + A model with reverse curvature near the origin and a finite range of + correlation. + + Notes + ----- + This model is given by the following correlation function [Chiles2009]_: + + .. math:: + \rho(r) = + \begin{cases} + 1- 7 \left(s\cdot\frac{r}{\ell}\right)^{2} + + \frac{35}{4} \left(s\cdot\frac{r}{\ell}\right)^{3} + - \frac{7}{2} \left(s\cdot\frac{r}{\ell}\right)^{5} + + \frac{3}{4} \left(s\cdot\frac{r}{\ell}\right)^{7} + & r<\frac{\ell}{s}\\ + 0 & r\geq\frac{\ell}{s} + \end{cases} + + Where the standard rescale factor is :math:`s=1`. + + References + ---------- + .. [Chiles2009] Chiles, J. P., & Delfiner, P., + "Geostatistics: modeling spatial uncertainty" (Vol. 497), + John Wiley & Sons. (2009) + """ + + def cor(self, h): + """Spherical normalized correlation function.""" + h = np.minimum(np.abs(h, dtype=np.double), 1.0) + return 1.0 - 7 * h ** 2 + 8.75 * h ** 3 - 3.5 * h ** 5 + 0.75 * h ** 7 class Linear(CovModel): @@ -468,38 +509,31 @@ class Linear(CovModel): Notes ----- - This model is given by the following correlation function: + This model is given by the following correlation function [Webster2007]_: .. 
math:: \rho(r) = \begin{cases} - 1-\frac{r}{\ell} - & r<\ell\\ - 0 & r\geq\ell + 1-s\cdot\frac{r}{\ell} & r<\frac{\ell}{s}\\ + 0 & r\geq\frac{\ell}{s} \end{cases} - """ - - def correlation(self, r): - r"""Linear correlation function. + Where the standard rescale factor is :math:`s=1`. - .. math:: - \rho(r) = - \begin{cases} - 1-\frac{r}{\ell} - & r<\ell\\ - 0 & r\geq\ell - \end{cases} - """ - r = np.array(np.abs(r), dtype=np.double) - res = np.zeros_like(r) - r_ll = r < self.len_scale - r_low = r[r_ll] - res[r_ll] = 1.0 - r_low / self.len_scale - return res + References + ---------- + .. [Webster2007] Webster, R. and Oliver, M. A. + "Geostatistics for environmental scientists.", + John Wiley & Sons. (2007) + """ + def cor(self, h): + """Linear normalized correlation function.""" + return np.maximum(1 - np.abs(h, dtype=np.double), 0.0) -# Circular Model ############################################################## + def check_dim(self, dim): + """Linear model is only valid in 1D.""" + return dim < 2 class Circular(CovModel): @@ -511,54 +545,44 @@ class Circular(CovModel): Notes ----- - This model is given by the following correlation function: + This model is given by the following correlation function [Webster2007]_: .. math:: \rho(r) = \begin{cases} - \frac{2}{\pi}\cdot\left( - \cos^{-1}\left(\frac{r}{\ell}\right) - - \frac{r}{\ell}\cdot\sqrt{1-\left(\frac{r}{\ell}\right)^{2}} + \frac{2}{\pi}\cdot + \left( + \cos^{-1}\left(s\cdot\frac{r}{\ell}\right) - + s\cdot\frac{r}{\ell}\cdot\sqrt{1-\left(s\cdot\frac{r}{\ell}\right)^{2}} \right) - & r<\ell\\ - 0 & r\geq\ell + & r<\frac{\ell}{s}\\ + 0 & r\geq\frac{\ell}{s} \end{cases} - """ - - def correlation(self, r): - r"""Circular correlation function. + Where the standard rescale factor is :math:`s=1`. - .. 
math:: - \rho(r) = - \begin{cases} - \frac{2}{\pi}\cdot\left( - \cos^{-1}\left(\frac{r}{\ell}\right) - - \frac{r}{\ell}\cdot\sqrt{1-\left(\frac{r}{\ell}\right)^{2}} - \right) - & r<\ell\\ - 0 & r\geq\ell - \end{cases} + References + ---------- + .. [Webster2007] Webster, R. and Oliver, M. A. + "Geostatistics for environmental scientists.", + John Wiley & Sons. (2007) + """ - """ - r = np.array(np.abs(r), dtype=np.double) - res = np.zeros_like(r) - r_ll = r < self.len_scale - r_low = r[r_ll] - res[r_ll] = ( - 2 - / np.pi - * ( - np.arccos(r_low / self.len_scale) - - r_low - / self.len_scale - * np.sqrt(1 - (r_low / self.len_scale) ** 2) - ) + def cor(self, h): + """Circular normalized correlation function.""" + h = np.array(np.abs(h), dtype=np.double) + res = np.zeros_like(h) + # arccos is instable around h=1 + h_l1 = h < 1.0 + h_low = h[h_l1] + res[h_l1] = ( + 2 / np.pi * (np.arccos(h_low) - h_low * np.sqrt(1 - h_low ** 2)) ) return res - -# Spherical Model ############################################################# + def check_dim(self, dim): + """Circular model is only valid in 1D and 2D.""" + return dim < 3 class Spherical(CovModel): @@ -570,142 +594,277 @@ class Spherical(CovModel): Notes ----- - This model is given by the following correlation function: + This model is given by the following correlation function [Webster2007]_: .. math:: \rho(r) = \begin{cases} - 1-\frac{3}{2}\cdot\frac{r}{\ell} + - \frac{1}{2}\cdot\left(\frac{r}{\ell}\right)^{3} - & r<\ell\\ - 0 & r\geq\ell + 1-\frac{3}{2}\cdot s\cdot\frac{r}{\ell} + + \frac{1}{2}\cdot\left(s\cdot\frac{r}{\ell}\right)^{3} + & r<\frac{\ell}{s}\\ + 0 & r\geq\frac{\ell}{s} \end{cases} + Where the standard rescale factor is :math:`s=1`. + + References + ---------- + .. [Webster2007] Webster, R. and Oliver, M. A. + "Geostatistics for environmental scientists.", + John Wiley & Sons. (2007) """ - def correlation(self, r): - r"""Spherical correlation function. - - .. 
math:: - \rho(r) = - \begin{cases} - 1-\frac{3}{2}\cdot\frac{r}{\ell} + - \frac{1}{2}\cdot\left(\frac{r}{\ell}\right)^{3} - & r<\ell\\ - 0 & r\geq\ell - \end{cases} - """ - r = np.array(np.abs(r), dtype=np.double) - res = np.zeros_like(r) - r_ll = r < self.len_scale - r_low = r[r_ll] - res[r_ll] = ( - 1.0 - - 3.0 / 2.0 * r_low / self.len_scale - + 1.0 / 2.0 * (r_low / self.len_scale) ** 3 - ) - return res + def cor(self, h): + """Spherical normalized correlation function.""" + h = np.minimum(np.abs(h, dtype=np.double), 1.0) + return 1.0 - 1.5 * h + 0.5 * h ** 3 + def check_dim(self, dim): + """Spherical model is only valid in 1D, 2D and 3D.""" + return dim < 4 -class Intersection(CovModel): - r"""The Intersection covariance model. + +class HyperSpherical(CovModel): + r"""The Hyper-Spherical covariance model. This model is derived from the relative intersection area of - two d-dimensional spheres, + two d-dimensional hyperspheres, where the middle points have a distance of :math:`r` and the diameters are given by :math:`\ell`. - In 1D this is the Linear model, in 2D this is the Circular model - and in 3D this is the Spherical model. + In 1D this is the Linear model, in 2D the Circular model + and in 3D the Spherical model. Notes ----- - This model is given by the following correlation functions. - - In 1D: + This model is given by the following correlation function [Matern1960]_: .. math:: \rho(r) = \begin{cases} - 1-\frac{r}{\ell} - & r<\ell\\ - 0 & r\geq\ell + 1-s\cdot\frac{r}{\ell}\cdot\frac{ + _{2}F_{1}\left(\frac{1}{2},-\frac{d-1}{2},\frac{3}{2}, + \left(s\cdot\frac{r}{\ell}\right)^{2}\right)} + {_{2}F_{1}\left(\frac{1}{2},-\frac{d-1}{2},\frac{3}{2},1\right)} + & r<\frac{\ell}{s}\\ + 0 & r\geq\frac{\ell}{s} \end{cases} - In 2D: + Where the standard rescale factor is :math:`s=1`. + :math:`d` is the dimension. + + References + ---------- + .. 
[Matern1960] Matern B., "Spatial Variation", + Swedish National Institute for Forestry Research, (1960) + """ + + def cor(self, h): + """Hyper-Spherical normalized correlation function.""" + h = np.array(h, dtype=np.double) + res = np.zeros_like(h) + h_l1 = h < 1 + nu = (self.dim - 1.0) / 2.0 + fac = 1.0 / sps.hyp2f1(0.5, -nu, 1.5, 1) + res[h_l1] = 1 - h[h_l1] * fac * sps.hyp2f1(0.5, -nu, 1.5, h[h_l1] ** 2) + return res + + def spectral_density(self, k): # noqa: D102 + k = np.array(k, dtype=np.double) + res = np.empty_like(k) + kl = k * self.len_rescaled + kl_gz = np.logical_not(np.isclose(k, 0)) + res[kl_gz] = sps.gamma(self.dim / 2 + 1) / np.sqrt(np.pi) ** self.dim + res[kl_gz] *= sps.jv(self.dim / 2, kl[kl_gz] / 2) ** 2 + res[kl_gz] /= k[kl_gz] ** self.dim + res[np.logical_not(kl_gz)] = ( + (self.len_rescaled / 4) ** self.dim + / sps.gamma(self.dim / 2 + 1) + / np.sqrt(np.pi) ** self.dim + ) + return res + + +class SuperSpherical(CovModel): + r"""The Super-Spherical covariance model. + + This model is derived from the relative intersection area of + two d-dimensional hyperspheres, + where the middle points have a distance of :math:`r` + and the diameters are given by :math:`\ell`. + It is than valid in all lower dimensions. + By default it coincides with the Hyper-Spherical model. + + Notes + ----- + This model is given by the following correlation function [Matern1960]_: .. math:: \rho(r) = \begin{cases} - \frac{2}{\pi}\cdot\left( - \cos^{-1}\left(\frac{r}{\ell}\right) - - \frac{r}{\ell}\cdot\sqrt{1-\left(\frac{r}{\ell}\right)^{2}} - \right) - & r<\ell\\ - 0 & r\geq\ell + 1-s\cdot\frac{r}{\ell}\cdot\frac{ + _{2}F_{1}\left(\frac{1}{2},-\nu,\frac{3}{2}, + \left(s\cdot\frac{r}{\ell}\right)^{2}\right)} + {_{2}F_{1}\left(\frac{1}{2},-\nu,\frac{3}{2},1\right)} + & r<\frac{\ell}{s}\\ + 0 & r\geq\frac{\ell}{s} \end{cases} - In 3D: + Where the standard rescale factor is :math:`s=1`. + :math:`\nu\geq\frac{d-1}{2}` is a shape parameter. + + References + ---------- + .. 
[Matern1960] Matern B., "Spatial Variation", + Swedish National Institute for Forestry Research, (1960) + + Other Parameters + ---------------- + nu : :class:`float`, optional + Shape parameter. Standard range: ``[(dim-1)/2, 50]`` + Default: ``(dim-1)/2`` + """ + + def default_opt_arg(self): + """Defaults for the optional arguments. + + * ``{"nu": (dim-1)/2}`` + + Returns + ------- + :class:`dict` + Defaults for optional arguments + """ + return {"nu": (self.dim - 1) / 2} + + def default_opt_arg_bounds(self): + """Defaults for boundaries of the optional arguments. + + * ``{"nu": [(dim-1)/2, 50.0]}`` + + Returns + ------- + :class:`dict` + Boundaries for optional arguments + """ + return {"nu": [(self.dim - 1) / 2, 50.0]} + + def cor(self, h): + """Super-Spherical normalized correlation function.""" + h = np.array(h, dtype=np.double) + res = np.zeros_like(h) + h_l1 = h < 1 + fac = 1.0 / sps.hyp2f1(0.5, -self.nu, 1.5, 1.0) + res[h_l1] = 1.0 - h[h_l1] * fac * sps.hyp2f1( + 0.5, -self.nu, 1.5, h[h_l1] ** 2 + ) + return res + + +class JBessel(CovModel): + r"""The J-Bessel hole model. + + This covariance model is a valid hole model, meaning it has areas + of negative correlation but a valid spectral density. + + Notes + ----- + This model is given by the following correlation function [Chiles2009]_: .. math:: \rho(r) = - \begin{cases} - 1-\frac{3}{2}\cdot\frac{r}{\ell} + - \frac{1}{2}\cdot\left(\frac{r}{\ell}\right)^{3} - & r<\ell\\ - 0 & r\geq\ell - \end{cases} + \Gamma(\nu+1) \cdot + \frac{\mathrm{J}_{\nu}\left(s\cdot\frac{r}{\ell}\right)} + {\left(s\cdot\frac{r}{2\ell}\right)^{\nu}} + + Where the standard rescale factor is :math:`s=1`. + :math:`\Gamma` is the gamma function and :math:`\mathrm{J}_{\nu}` + is the Bessel functions of the first kind. + :math:`\nu\geq\frac{d}{2}-1` is a shape parameter, + which defaults to :math:`\nu=\frac{d}{2}`, + since the spectrum of the model gets instable for + :math:`\nu\to\frac{d}{2}-1`. 
+ + For :math:`\nu=\frac{1}{2}` (valid in d=1,2,3) + we get the so-called 'Wave' model: + + .. math:: + \rho(r) = + \frac{\sin\left(s\cdot\frac{r}{\ell}\right)}{s\cdot\frac{r}{\ell}} + References + ---------- + .. [Chiles2009] Chiles, J. P., & Delfiner, P., + "Geostatistics: modeling spatial uncertainty" (Vol. 497), + John Wiley & Sons. (2009) + + Other Parameters + ---------------- + nu : :class:`float`, optional + Shape parameter. Standard range: ``[dim/2 - 1, 50]`` + Default: ``dim/2`` """ - def correlation(self, r): # noqa: D102 - r = np.array(np.abs(r), dtype=np.double) - res = np.zeros_like(r) - r_ll = r < self.len_scale - r_low = r[r_ll] - if self.dim == 1: - res[r_ll] = 1.0 - r_low / self.len_scale - elif self.dim == 2: - res[r_ll] = ( - 2 - / np.pi - * ( - np.arccos(r_low / self.len_scale) - - r_low - / self.len_scale - * np.sqrt(1 - (r_low / self.len_scale) ** 2) - ) - ) - else: - res[r_ll] = ( - 1.0 - - 3.0 / 2.0 * r_low / self.len_scale - + 1.0 / 2.0 * (r_low / self.len_scale) ** 3 + def default_opt_arg(self): + """Defaults for the optional arguments. + + * ``{"nu": dim/2}`` + + Returns + ------- + :class:`dict` + Defaults for optional arguments + """ + return {"nu": self.dim / 2} + + def default_opt_arg_bounds(self): + """Defaults for boundaries of the optional arguments. + + * ``{"nu": [dim/2 - 1, 50.0]}`` + + Returns + ------- + :class:`dict` + Boundaries for optional arguments + """ + return {"nu": [self.dim / 2 - 1, 50.0]} + + def check_opt_arg(self): + """Check the optional arguments. + + Warns + ----- + nu + If nu is close to dim/2 - 1, the model tends to get unstable. 
+ """ + if abs(self.nu - self.dim / 2 + 1) < 0.01: + warnings.warn( + "JBessel: parameter 'nu' is close to d/2-1, " + "count with unstable results", + AttributeWarning, ) + + def cor(self, h): + """J-Bessel correlation.""" + h = np.array(h, dtype=np.double) + h_gz = np.logical_not(np.isclose(h, 0)) + hh = h[h_gz] + res = np.ones_like(h) + nu = self.nu + res[h_gz] = sps.gamma(nu + 1) * sps.jv(nu, hh) / (hh / 2.0) ** nu return res def spectral_density(self, k): # noqa: D102 k = np.array(k, dtype=np.double) - res = np.empty_like(k) - kl = k * self.len_scale - kl_gz = kl > 0 - # for k=0 we calculate the limit by hand - if self.dim == 1: - res[kl_gz] = (1.0 - np.cos(kl[kl_gz])) / ( - np.pi * k[kl_gz] * kl[kl_gz] - ) - res[np.logical_not(kl_gz)] = self.len_scale / 2.0 / np.pi - elif self.dim == 2: - res[kl_gz] = sps.j1(kl[kl_gz] / 2.0) ** 2 / np.pi / k[kl_gz] ** 2 - res[np.logical_not(kl_gz)] = self.len_scale ** 2 / 16.0 / np.pi - else: - res[kl_gz] = -( - 12 * kl[kl_gz] * np.sin(kl[kl_gz]) - + (12 - 3 * kl[kl_gz] ** 2) * np.cos(kl[kl_gz]) - - 3 * kl[kl_gz] ** 2 - - 12 - ) / (2 * np.pi ** 2 * kl[kl_gz] ** 3 * k[kl_gz] ** 3) - res[np.logical_not(kl_gz)] = ( - self.len_scale ** 3 / 48.0 / np.pi ** 2 - ) + k_ll = k < 1.0 / self.len_rescaled + kk = k[k_ll] + res = np.zeros_like(k) + # the model is degenerated for nu=d/2-1, so we tweak the spectral pdf + # and cut of the divisor at nu-(d/2-1)=0.01 (gamma(0.01) about 100) + res[k_ll] = ( + (self.len_rescaled / np.sqrt(np.pi)) ** self.dim + * sps.gamma(self.nu + 1.0) + / np.minimum(sps.gamma(self.nu - self.dim / 2 + 1), 100.0) + * (1.0 - (kk * self.len_rescaled) ** 2) ** (self.nu - self.dim / 2) + ) return res diff --git a/gstools/covmodel/plot.py b/gstools/covmodel/plot.py index c2a3f68b8..8f170ed3a 100644 --- a/gstools/covmodel/plot.py +++ b/gstools/covmodel/plot.py @@ -10,6 +10,12 @@ plot_variogram plot_covariance plot_correlation + plot_vario_yadrenko + plot_cov_yadrenko + plot_cor_yadrenko + plot_vario_axis + plot_cov_axis 
+ plot_cor_axis plot_vario_spatial plot_cov_spatial plot_cor_spatial @@ -17,16 +23,21 @@ plot_spectral_density plot_spectral_rad_pdf """ -# pylint: disable=C0103 +# pylint: disable=C0103, C0415, E1130 import numpy as np - -import gstools -from gstools.field.tools import reshape_axis_from_struct_to_unstruct +from gstools.tools.geometric import generate_grid +from gstools.tools.misc import get_fig_ax __all__ = [ "plot_variogram", "plot_covariance", "plot_correlation", + "plot_vario_yadrenko", + "plot_cov_yadrenko", + "plot_cor_yadrenko", + "plot_vario_axis", + "plot_cov_axis", + "plot_cor_axis", "plot_vario_spatial", "plot_cov_spatial", "plot_cor_spatial", @@ -39,168 +50,228 @@ # plotting routines ####################################################### -def _get_fig_ax(fig, ax, ax_name="rectilinear"): # pragma: no cover - from matplotlib import pyplot as plt +def _plot_spatial(dim, pos, field, fig, ax, latlon, **kwargs): + from gstools.field.plot import plot_1d, plot_nd - if fig is None and ax is None: - fig = plt.figure() - ax = fig.add_subplot(111, projection=ax_name) - elif ax is None: - ax = fig.add_subplot(111, projection=ax_name) - elif fig is None: - fig = ax.get_figure() - assert ax.name == ax_name - else: - assert ax.name == ax_name - assert ax.get_figure() == fig - return fig, ax + if dim == 1: + return plot_1d(pos, field, fig, ax, **kwargs) + return plot_nd(pos, field, "structured", fig, ax, latlon, **kwargs) def plot_vario_spatial( - model, x_min=0.0, x_max=None, fig=None, ax=None + model, x_min=0.0, x_max=None, fig=None, ax=None, **kwargs ): # pragma: no cover """Plot spatial variogram of a given CovModel.""" - field = gstools.field.base.Field(model) - field._value_type = "scalar" if x_max is None: - x_max = 3 * model.integral_scale - field.mesh_type = "structured" + x_max = 3 * model.len_scale x_s = np.linspace(-x_max, x_max) + x_min pos = [x_s] * model.dim - x, y, z, shape = reshape_axis_from_struct_to_unstruct(model.dim, *pos) - vario = 
model.vario_spatial([x, y, z][: model.dim]).reshape(shape) - field.pos = pos - field.field = vario - return field.plot(fig=fig, ax=ax) + shp = tuple(len(p) for p in pos) + fld = model.vario_spatial(generate_grid(pos)).reshape(shp) + return _plot_spatial(model.dim, pos, fld, fig, ax, model.latlon, **kwargs) def plot_cov_spatial( - model, x_min=0.0, x_max=None, fig=None, ax=None + model, x_min=0.0, x_max=None, fig=None, ax=None, **kwargs ): # pragma: no cover """Plot spatial covariance of a given CovModel.""" - field = gstools.field.base.Field(model) - field._value_type = "scalar" if x_max is None: - x_max = 3 * model.integral_scale - field.mesh_type = "structured" + x_max = 3 * model.len_scale x_s = np.linspace(-x_max, x_max) + x_min pos = [x_s] * model.dim - x, y, z, shape = reshape_axis_from_struct_to_unstruct(model.dim, *pos) - vario = model.cov_spatial([x, y, z][: model.dim]).reshape(shape) - field.pos = pos - field.field = vario - return field.plot(fig=fig, ax=ax) + shp = tuple(len(p) for p in pos) + fld = model.cov_spatial(generate_grid(pos)).reshape(shp) + return _plot_spatial(model.dim, pos, fld, fig, ax, model.latlon, **kwargs) def plot_cor_spatial( - model, x_min=0.0, x_max=None, fig=None, ax=None + model, x_min=0.0, x_max=None, fig=None, ax=None, **kwargs ): # pragma: no cover """Plot spatial correlation of a given CovModel.""" - field = gstools.field.base.Field(model) - field._value_type = "scalar" if x_max is None: - x_max = 3 * model.integral_scale - field.mesh_type = "structured" + x_max = 3 * model.len_scale x_s = np.linspace(-x_max, x_max) + x_min pos = [x_s] * model.dim - x, y, z, shape = reshape_axis_from_struct_to_unstruct(model.dim, *pos) - vario = model.cor_spatial([x, y, z][: model.dim]).reshape(shape) - field.pos = pos - field.field = vario - return field.plot(fig=fig, ax=ax) + shp = tuple(len(p) for p in pos) + fld = model.cor_spatial(generate_grid(pos)).reshape(shp) + return _plot_spatial(model.dim, pos, fld, fig, ax, model.latlon, 
**kwargs) def plot_variogram( - model, x_min=0.0, x_max=None, fig=None, ax=None + model, x_min=0.0, x_max=None, fig=None, ax=None, **kwargs ): # pragma: no cover """Plot variogram of a given CovModel.""" - fig, ax = _get_fig_ax(fig, ax) + fig, ax = get_fig_ax(fig, ax) if x_max is None: - x_max = 3 * model.integral_scale + x_max = 3 * model.len_scale x_s = np.linspace(x_min, x_max) - ax.plot(x_s, model.variogram(x_s), label=model.name + " variogram") + kwargs.setdefault("label", f"{model.name} variogram") + ax.plot(x_s, model.variogram(x_s), **kwargs) ax.legend() fig.show() return ax def plot_covariance( - model, x_min=0.0, x_max=None, fig=None, ax=None + model, x_min=0.0, x_max=None, fig=None, ax=None, **kwargs ): # pragma: no cover """Plot covariance of a given CovModel.""" - fig, ax = _get_fig_ax(fig, ax) + fig, ax = get_fig_ax(fig, ax) if x_max is None: - x_max = 3 * model.integral_scale + x_max = 3 * model.len_scale x_s = np.linspace(x_min, x_max) - ax.plot(x_s, model.covariance(x_s), label=model.name + " covariance") + kwargs.setdefault("label", f"{model.name} covariance") + ax.plot(x_s, model.covariance(x_s), **kwargs) ax.legend() fig.show() return ax def plot_correlation( - model, x_min=0.0, x_max=None, fig=None, ax=None + model, x_min=0.0, x_max=None, fig=None, ax=None, **kwargs ): # pragma: no cover """Plot correlation function of a given CovModel.""" - fig, ax = _get_fig_ax(fig, ax) + fig, ax = get_fig_ax(fig, ax) + if x_max is None: + x_max = 3 * model.len_scale + x_s = np.linspace(x_min, x_max) + kwargs.setdefault("label", f"{model.name} correlation") + ax.plot(x_s, model.correlation(x_s), **kwargs) + ax.legend() + fig.show() + return ax + + +def plot_vario_yadrenko( + model, x_min=0.0, x_max=None, fig=None, ax=None, **kwargs +): # pragma: no cover + """Plot Yadrenko variogram of a given CovModel.""" + fig, ax = get_fig_ax(fig, ax) + if x_max is None: + x_max = min(3 * model.len_rescaled, np.pi) + x_s = np.linspace(x_min, x_max) + 
kwargs.setdefault("label", f"{model.name} Yadrenko variogram") + ax.plot(x_s, model.vario_yadrenko(x_s), **kwargs) + ax.legend() + fig.show() + return ax + + +def plot_cov_yadrenko( + model, x_min=0.0, x_max=None, fig=None, ax=None, **kwargs +): # pragma: no cover + """Plot Yadrenko covariance of a given CovModel.""" + fig, ax = get_fig_ax(fig, ax) + if x_max is None: + x_max = min(3 * model.len_rescaled, np.pi) + x_s = np.linspace(x_min, x_max) + kwargs.setdefault("label", f"{model.name} Yadrenko covariance") + ax.plot(x_s, model.cov_yadrenko(x_s), **kwargs) + ax.legend() + fig.show() + return ax + + +def plot_cor_yadrenko( + model, x_min=0.0, x_max=None, fig=None, ax=None, **kwargs +): # pragma: no cover + """Plot Yadrenko correlation function of a given CovModel.""" + fig, ax = get_fig_ax(fig, ax) + if x_max is None: + x_max = min(3 * model.len_rescaled, np.pi) + x_s = np.linspace(x_min, x_max) + kwargs.setdefault("label", f"{model.name} Yadrenko correlation") + ax.plot(x_s, model.cor_yadrenko(x_s), **kwargs) + ax.legend() + fig.show() + return ax + + +def plot_vario_axis( + model, axis=0, x_min=0.0, x_max=None, fig=None, ax=None, **kwargs +): # pragma: no cover + """Plot variogram of a given CovModel.""" + fig, ax = get_fig_ax(fig, ax) + if x_max is None: + x_max = 3 * model.len_scale + x_s = np.linspace(x_min, x_max) + kwargs.setdefault("label", f"{model.name} variogram on axis {axis}") + ax.plot(x_s, model.vario_axis(x_s, axis), **kwargs) + ax.legend() + fig.show() + return ax + + +def plot_cov_axis( + model, axis=0, x_min=0.0, x_max=None, fig=None, ax=None, **kwargs +): # pragma: no cover + """Plot variogram of a given CovModel.""" + fig, ax = get_fig_ax(fig, ax) + if x_max is None: + x_max = 3 * model.len_scale + x_s = np.linspace(x_min, x_max) + kwargs.setdefault("label", f"{model.name} covariance on axis {axis}") + ax.plot(x_s, model.cov_axis(x_s, axis), **kwargs) + ax.legend() + fig.show() + return ax + + +def plot_cor_axis( + model, axis=0, x_min=0.0, 
x_max=None, fig=None, ax=None, **kwargs +): # pragma: no cover + """Plot variogram of a given CovModel.""" + fig, ax = get_fig_ax(fig, ax) if x_max is None: - x_max = 3 * model.integral_scale + x_max = 3 * model.len_scale x_s = np.linspace(x_min, x_max) - ax.plot(x_s, model.correlation(x_s), label=model.name + " correlation") + kwargs.setdefault("label", f"{model.name} correlation on axis {axis}") + ax.plot(x_s, model.cor_axis(x_s, axis), **kwargs) ax.legend() fig.show() return ax def plot_spectrum( - model, x_min=0.0, x_max=None, fig=None, ax=None + model, x_min=0.0, x_max=None, fig=None, ax=None, **kwargs ): # pragma: no cover """Plot specturm of a given CovModel.""" - fig, ax = _get_fig_ax(fig, ax) + fig, ax = get_fig_ax(fig, ax) if x_max is None: - x_max = 3 / model.integral_scale + x_max = 3 / model.len_scale x_s = np.linspace(x_min, x_max) - ax.plot( - x_s, - model.spectrum(x_s), - label=model.name + " " + str(model.dim) + "D spectrum", - ) + kwargs.setdefault("label", f"{model.name} {model.dim}D spectrum") + ax.plot(x_s, model.spectrum(x_s), **kwargs) ax.legend() fig.show() return ax def plot_spectral_density( - model, x_min=0.0, x_max=None, fig=None, ax=None + model, x_min=0.0, x_max=None, fig=None, ax=None, **kwargs ): # pragma: no cover """Plot spectral density of a given CovModel.""" - fig, ax = _get_fig_ax(fig, ax) + fig, ax = get_fig_ax(fig, ax) if x_max is None: - x_max = 3 / model.integral_scale + x_max = 3 / model.len_scale x_s = np.linspace(x_min, x_max) - ax.plot( - x_s, - model.spectral_density(x_s), - label=model.name + " " + str(model.dim) + "D spectral-density", - ) + kwargs.setdefault("label", f"{model.name} {model.dim}D spectral-density") + ax.plot(x_s, model.spectral_density(x_s), **kwargs) ax.legend() fig.show() return ax def plot_spectral_rad_pdf( - model, x_min=0.0, x_max=None, fig=None, ax=None + model, x_min=0.0, x_max=None, fig=None, ax=None, **kwargs ): # pragma: no cover """Plot radial spectral pdf of a given CovModel.""" - fig, ax 
= _get_fig_ax(fig, ax) + fig, ax = get_fig_ax(fig, ax) if x_max is None: - x_max = 3 / model.integral_scale + x_max = 3 / model.len_scale x_s = np.linspace(x_min, x_max) - ax.plot( - x_s, - model.spectral_rad_pdf(x_s), - label=model.name + " " + str(model.dim) + "D spectral-rad-pdf", - ) + kwargs.setdefault("label", f"{model.name} {model.dim}D spectral-rad-pdf") + ax.plot(x_s, model.spectral_rad_pdf(x_s), **kwargs) ax.legend() fig.show() return ax diff --git a/gstools/covmodel/tools.py b/gstools/covmodel/tools.py index 953631fcd..01dc5db99 100644 --- a/gstools/covmodel/tools.py +++ b/gstools/covmodel/tools.py @@ -7,55 +7,103 @@ The following classes and functions are provided .. autosummary:: - InitSubclassMeta + AttributeWarning rad_fac + set_opt_args set_len_anis check_bounds - inc_gamma - exp_int - inc_beta + check_arg_in_bounds + default_arg_from_bounds + spectral_rad_pdf + percentile_scale + set_arg_bounds + check_arg_bounds + set_dim + compare + model_repr """ -# pylint: disable=C0103 +# pylint: disable=C0103, W0212 +import warnings import numpy as np +from scipy.optimize import root from scipy import special as sps - -__all__ = ["InitSubclassMeta", "rad_fac", "set_len_anis", "check_bounds"] - - -# __init_subclass__ hack ###################################################### - -if hasattr(object, "__init_subclass__"): - InitSubclassMeta = type -else: - - class InitSubclassMeta(type): # pragma: no cover - """Metaclass that implements PEP 487 protocol. 
- - Notes - ----- - See : - https://www.python.org/dev/peps/pep-0487 - - taken from : - https://github.com/graphql-python/graphene/blob/master/graphene/pyutils/init_subclass.py - """ - - def __new__(cls, name, bases, ns, **kwargs): - """Create a new subclass.""" - __init_subclass__ = ns.pop("__init_subclass__", None) - if __init_subclass__: - __init_subclass__ = classmethod(__init_subclass__) - ns["__init_subclass__"] = __init_subclass__ - return super(InitSubclassMeta, cls).__new__( - cls, name, bases, ns, **kwargs - ) - - def __init__(cls, name, bases, ns, **kwargs): - super(InitSubclassMeta, cls).__init__(name, bases, ns) - super_class = super(cls, cls) - if hasattr(super_class, "__init_subclass__"): - super_class.__init_subclass__.__func__(cls, **kwargs) +from hankel import SymmetricFourierTransform as SFT +from gstools.tools.misc import list_format +from gstools.tools.geometric import set_anis, set_angles + +__all__ = [ + "AttributeWarning", + "rad_fac", + "set_opt_args", + "set_len_anis", + "check_bounds", + "check_arg_in_bounds", + "default_arg_from_bounds", + "spectral_rad_pdf", + "percentile_scale", + "set_arg_bounds", + "check_arg_bounds", + "set_dim", + "compare", + "model_repr", +] + + +class AttributeWarning(UserWarning): + """Attribute warning for CovModel class.""" + + +def _init_subclass(cls): + """Initialize gstools covariance model.""" + + def variogram(self, r): + """Isotropic variogram of the model.""" + return self.var - self.covariance(r) + self.nugget + + def covariance(self, r): + """Covariance of the model.""" + return self.var * self.correlation(r) + + def correlation(self, r): + """Correlation function of the model.""" + return 1.0 - (self.variogram(r) - self.nugget) / self.var + + def correlation_from_cor(self, r): + """Correlation function of the model.""" + r = np.array(np.abs(r), dtype=np.double) + return self.cor(r / self.len_rescaled) + + def cor_from_correlation(self, h): + """Correlation taking a non-dimensional range.""" 
+ h = np.array(np.abs(h), dtype=np.double) + return self.correlation(h * self.len_rescaled) + + abstract = True + if hasattr(cls, "cor"): + if not hasattr(cls, "correlation"): + cls.correlation = correlation_from_cor + abstract = False + else: + cls.cor = cor_from_correlation + if not hasattr(cls, "variogram"): + cls.variogram = variogram + else: + abstract = False + if not hasattr(cls, "covariance"): + cls.covariance = covariance + else: + abstract = False + if not hasattr(cls, "correlation"): + cls.correlation = correlation + else: + abstract = False + if abstract: + raise TypeError( + f"Can't instantiate class '{cls.__name__}', " + "without providing at least one of the methods " + "'cor', 'variogram', 'covariance' or 'correlation'." + ) # Helping functions ########################################################### @@ -79,16 +127,61 @@ def rad_fac(dim, r): fac = 2 * np.pi * r elif dim == 3: fac = 4 * np.pi * r ** 2 - else: # general solution ( for the record :D ) + else: # pragma: no cover fac = ( dim * r ** (dim - 1) * np.sqrt(np.pi) ** dim - / sps.gamma(dim / 2.0 + 1.0) + / sps.gamma(dim / 2 + 1) ) return fac +def set_opt_args(model, opt_arg): + """ + Set optional arguments in the model class. + + Parameters + ---------- + model : :any:`CovModel` + The covariance model in use. + opt_arg : :class:`dict` + Dictionary with optional arguments. + + Raises + ------ + ValueError + When an optional argument has an already taken name. + """ + model._opt_arg = [] + # look up the defaults for the optional arguments (defined by the user) + default = model.default_opt_arg() + for opt_name in opt_arg: + if opt_name not in default: + warnings.warn( + f"The given optional argument '{opt_name}' " + "is unknown or has at least no defined standard value. " + "Or you made a Typo... 
hehe.", + AttributeWarning, + ) + # add the default vaules if not specified + for def_arg in default: + if def_arg not in opt_arg: + opt_arg[def_arg] = default[def_arg] + # save names of the optional arguments (sort them by name) + model._opt_arg = sorted(opt_arg) + # add the optional arguments as attributes to the class + for opt_name in opt_arg: + if opt_name in dir(model): # "dir" also respects properties + raise ValueError( + f"parameter '{opt_name}' has a 'bad' name, " + "since it is already present in " + "the class. It could not be added to the model." + ) + # Magic happens here + setattr(model, opt_name, float(opt_arg[opt_name])) + + def set_len_anis(dim, len_scale, anis): """Set the length scale and anisotropy factors for the given dimension. @@ -97,16 +190,16 @@ def set_len_anis(dim, len_scale, anis): dim : :class:`int` spatial dimension len_scale : :class:`float` or :class:`list` - the length scale of the SRF in x direction or in x- (y-, z-) direction - anis : :class:`float`/list - the anisotropy of length scales along the y- and z-directions + the length scale of the SRF in x direction or in x- (y-, ...) direction + anis : :class:`float` or :class:`list` + the anisotropy of length scales along the transversal axes Returns ------- len_scale : :class:`float` the main length scale of the SRF in x direction - anis : :class:`float`/list, optional - the anisotropy of length scales along the y- and z-directions + anis : :class:`list`, optional + the anisotropy of length scales along the transversal axes Notes ----- @@ -120,29 +213,22 @@ def set_len_anis(dim, len_scale, anis): If to few ``anis`` values are given, the first dimensions will be filled up with 1. (eg. 
anis=[e] in 3D is equal to anis=[1, e]) """ - ls_tmp = np.atleast_1d(len_scale)[:dim] + ls_tmp = np.array(len_scale, dtype=np.double) + ls_tmp = np.atleast_1d(ls_tmp)[:dim] # use just one length scale (x-direction) out_len_scale = ls_tmp[0] # set the anisotropies in y- and z-direction according to the input if len(ls_tmp) == 1: - out_anis = np.atleast_1d(anis)[: dim - 1] - if len(out_anis) < dim - 1: - # fill up the anisotropies with ones, such that len()==dim-1 - out_anis = np.pad( - out_anis, - (dim - len(out_anis) - 1, 0), - "constant", - constant_values=1.0, - ) + out_anis = set_anis(dim, anis) else: - # fill up length-scales with main len_scale, such that len()==dim + # fill up length-scales with the latter len_scale, such that len()==dim if len(ls_tmp) < dim: ls_tmp = np.pad(ls_tmp, (0, dim - len(ls_tmp)), "edge") # if multiple length-scales are given, calculate the anisotropies out_anis = np.zeros(dim - 1, dtype=np.double) for i in range(1, dim): out_anis[i - 1] = ls_tmp[i] / ls_tmp[0] - + # sanity check for ani in out_anis: if not ani > 0.0: raise ValueError( @@ -151,47 +237,6 @@ def set_len_anis(dim, len_scale, anis): return out_len_scale, out_anis -def set_angles(dim, angles): - """Set the angles for the given dimension. 
- - Parameters - ---------- - dim : :class:`int` - spatial dimension (anything different from 1 and 2 is interpreted as 3) - angles : :class:`float`/list - the angles of the SRF - - Returns - ------- - angles : :class:`float` - the angles fitting to the dimension - """ - if dim == 1: - # no rotation in 1D - out_angles = np.empty(0) - elif dim == 2: - # one rotation axis in 2D - out_angles = np.atleast_1d(angles)[:1] - # fill up the rotation angle array with zeros - out_angles = np.pad( - out_angles, - (0, 1 - len(out_angles)), - "constant", - constant_values=0.0, - ) - else: - # three rotation axis in 3D - out_angles = np.atleast_1d(angles)[:3] - # fill up the rotation angle array with zeros - out_angles = np.pad( - out_angles, - (0, 3 - len(out_angles)), - "constant", - constant_values=0.0, - ) - return out_angles - - def check_bounds(bounds): """ Check if given bounds are valid. @@ -217,3 +262,339 @@ def check_bounds(bounds): if len(bounds) == 3 and bounds[2] not in ("oo", "oc", "co", "cc"): return False return True + + +def check_arg_in_bounds(model, arg, val=None): + """Check if given argument value is in bounds of the given model.""" + if arg not in model.arg_bounds: + raise ValueError("check bounds: unknown argument: {}".format(arg)) + bnd = list(model.arg_bounds[arg]) + val = getattr(model, arg) if val is None else val + val = np.array(val) + error_case = 0 + if len(bnd) == 2: + bnd.append("cc") # use closed intervals by default + if bnd[2][0] == "c": + if np.any(val < bnd[0]): + error_case = 1 + else: + if np.any(val <= bnd[0]): + error_case = 2 + if bnd[2][1] == "c": + if np.any(val > bnd[1]): + error_case = 3 + else: + if np.any(val >= bnd[1]): + error_case = 4 + return error_case + + +def default_arg_from_bounds(bounds): + """ + Determine a default value from given bounds. + + Parameters + ---------- + bounds : list + bounds for the value. + + Returns + ------- + float + Default value in the given bounds. 
+ """ + if bounds[0] > -np.inf and bounds[1] < np.inf: + return (bounds[0] + bounds[1]) / 2.0 + if bounds[0] > -np.inf: + return bounds[0] + 1.0 + if bounds[1] < np.inf: + return bounds[1] - 1.0 + return 0.0 # pragma: no cover + + +# outsourced routines + + +def spectral_rad_pdf(model, r): + """ + Spectral radians PDF of a model. + + Parameters + ---------- + model : :any:`CovModel` + The covariance model in use. + r : :class:`numpy.ndarray` + Given radii. + + Returns + ------- + :class:`numpy.ndarray` + PDF values. + + """ + r = np.array(np.abs(r), dtype=np.double) + if model.dim > 1: + r_gz = np.logical_not(np.isclose(r, 0)) + # to prevent numerical errors, we just calculate where r>0 + res = np.zeros_like(r, dtype=np.double) + res[r_gz] = rad_fac(model.dim, r[r_gz]) * np.abs( + model.spectral_density(r[r_gz]) + ) + else: + res = rad_fac(model.dim, r) * np.abs(model.spectral_density(r)) + # prevent numerical errors in hankel for small r values (set 0) + res[np.logical_not(np.isfinite(res))] = 0.0 + # prevent numerical errors in hankel for big r (set non-negative) + res = np.maximum(res, 0.0) + return res + + +def percentile_scale(model, per=0.9): + """ + Calculate the percentile scale of the isotrope model. + + This is the distance, where the given percentile of the variance + is reached by the variogram + + + Parameters + ---------- + model : :any:`CovModel` + The covariance model in use. + per : float, optional + Percentile to use. The default is 0.9. + + Raises + ------ + ValueError + When percentile is not in (0, 1). + + Returns + ------- + float + Percentile scale. 
+ + """ + # check the given percentile + if not 0.0 < per < 1.0: + raise ValueError( + "percentile needs to be within (0, 1), got: " + str(per) + ) + + # define a curve, that has its root at the wanted point + def curve(x): + return 1.0 - model.correlation(x) - per + + # take 'per * len_rescaled' as initial guess + return root(curve, per * model.len_rescaled)["x"][0] + + +def set_arg_bounds(model, check_args=True, **kwargs): + r"""Set bounds for the parameters of the model. + + Parameters + ---------- + model : :any:`CovModel` + The covariance model in use. + check_args : bool, optional + Whether to check if the arguments are in their valid bounds. + In case not, a propper default value will be determined. + Default: True + **kwargs + Parameter name as keyword ("var", "len_scale", "nugget", ) + and a list of 2 or 3 values as value: + + * ``[a, b]`` or + * ``[a, b, ]`` + + is one of ``"oo"``, ``"cc"``, ``"oc"`` or ``"co"`` + to define if the bounds are open ("o") or closed ("c"). + """ + # if variance needs to be resetted, do this at last + var_bnds = [] + for arg in kwargs: + if not check_bounds(kwargs[arg]): + raise ValueError( + "Given bounds for '{0}' are not valid, got: {1}".format( + arg, kwargs[arg] + ) + ) + if arg in model.opt_arg: + model._opt_arg_bounds[arg] = kwargs[arg] + elif arg == "var": + var_bnds = kwargs[arg] + continue + elif arg == "len_scale": + model.len_scale_bounds = kwargs[arg] + elif arg == "nugget": + model.nugget_bounds = kwargs[arg] + elif arg == "anis": + model.anis_bounds = kwargs[arg] + else: + raise ValueError( + "set_arg_bounds: unknown argument '{}'".format(arg) + ) + if check_args and check_arg_in_bounds(model, arg) > 0: + def_arg = default_arg_from_bounds(kwargs[arg]) + if arg == "anis": + setattr(model, arg, [def_arg] * (model.dim - 1)) + else: + setattr(model, arg, def_arg) + # set var last like allways + if var_bnds: + model.var_bounds = var_bnds + if check_args and check_arg_in_bounds(model, "var") > 0: + model.var = 
default_arg_from_bounds(var_bnds) + + +def check_arg_bounds(model): + """ + Check arguments to be within their given bounds. + + Parameters + ---------- + model : :any:`CovModel` + The covariance model in use. + + Raises + ------ + ValueError + When an argument is not in its valid bounds. + """ + # check var, len_scale, nugget and optional-arguments + for arg in model.arg_bounds: + if not model.arg_bounds[arg]: + continue # no bounds given during init (called from self.dim) + bnd = list(model.arg_bounds[arg]) + val = getattr(model, arg) + error_case = check_arg_in_bounds(model, arg) + if error_case == 1: + raise ValueError( + "{0} needs to be >= {1}, got: {2}".format(arg, bnd[0], val) + ) + if error_case == 2: + raise ValueError( + "{0} needs to be > {1}, got: {2}".format(arg, bnd[0], val) + ) + if error_case == 3: + raise ValueError( + "{0} needs to be <= {1}, got: {2}".format(arg, bnd[1], val) + ) + if error_case == 4: + raise ValueError( + "{0} needs to be < {1}, got: {2}".format(arg, bnd[1], val) + ) + + +def set_dim(model, dim): + """ + Set the dimension in the given model. + + Parameters + ---------- + model : :any:`CovModel` + The covariance model in use. + dim : :class:`int` + dimension of the model. + + Raises + ------ + ValueError + When dimension is < 1. + """ + # check if a fixed dimension should be used + if model.fix_dim() is not None and model.fix_dim() != dim: + warnings.warn( + model.name + ": using fixed dimension " + str(model.fix_dim()), + AttributeWarning, + ) + dim = model.fix_dim() + if model.latlon and dim != 3: + raise ValueError( + f"{model.name}: using fixed dimension {model.fix_dim()}, " + "which is not compatible with a latlon model." 
+ ) + # force dim=3 for latlon models + dim = 3 if model.latlon else dim + # set the dimension + if dim < 1: + raise ValueError("Only dimensions of d >= 1 are supported.") + if not model.check_dim(dim): + warnings.warn( + f"Dimension {dim} is not appropriate for this model.", + AttributeWarning, + ) + model._dim = int(dim) + # create fourier transform just once (recreate for dim change) + model._sft = SFT(ndim=model.dim, **model.hankel_kw) + # recalculate dimension related parameters + if model._anis is not None: + model._len_scale, model._anis = set_len_anis( + model.dim, model._len_scale, model._anis + ) + if model._angles is not None: + model._angles = set_angles(model.dim, model._angles) + model.check_arg_bounds() + + +def compare(this, that): + """ + Compare CovModels. + + Parameters + ---------- + this / that : :any:`CovModel` + The covariance models to compare. + """ + # prevent attribute error in opt_arg if the are not equal + if set(this.opt_arg) != set(that.opt_arg): + return False + # prevent dim error in anis and angles + if this.dim != that.dim: + return False + equal = True + equal &= this.name == that.name + equal &= np.isclose(this.var, that.var) + equal &= np.isclose(this.var_raw, that.var_raw) # ?! needless? + equal &= np.isclose(this.nugget, that.nugget) + equal &= np.isclose(this.len_scale, that.len_scale) + equal &= np.all(np.isclose(this.anis, that.anis)) + equal &= np.all(np.isclose(this.angles, that.angles)) + equal &= np.isclose(this.rescale, that.rescale) + equal &= this.latlon == that.latlon + for opt in this.opt_arg: + equal &= np.isclose(getattr(this, opt), getattr(that, opt)) + return equal + + +def model_repr(model): # pragma: no cover + """ + Generate the model string representation. + + Parameters + ---------- + model : :any:`CovModel` + The covariance model in use. 
+ """ + m = model + p = model._prec + opt_str = "" + if not np.isclose(m.rescale, m.default_rescale()): + opt_str += f", rescale={m.rescale:.{p}}" + for opt in m.opt_arg: + opt_str += f", {opt}={getattr(m, opt):.{p}}" + # only print anis and angles if model is anisotropic or rotated + ani_str = "" if m.is_isotropic else f", anis={list_format(m.anis, p)}" + ang_str = f", angles={list_format(m.angles, p)}" if m.do_rotation else "" + if m.latlon: + repr_str = ( + f"{m.name}(latlon={m.latlon}, var={m.var:.{p}}, " + f"len_scale={m.len_scale:.{p}}, nugget={m.nugget:.{p}}{opt_str})" + ) + else: + repr_str = ( + f"{m.name}(dim={m.dim}, var={m.var:.{p}}, " + f"len_scale={m.len_scale:.{p}}, nugget={m.nugget:.{p}}" + f"{ani_str}{ang_str}{opt_str})" + ) + return repr_str diff --git a/gstools/covmodel/tpl_models.py b/gstools/covmodel/tpl_models.py index 27a9e0dcf..165111fb2 100644 --- a/gstools/covmodel/tpl_models.py +++ b/gstools/covmodel/tpl_models.py @@ -10,12 +10,13 @@ TPLGaussian TPLExponential TPLStable + TPLSimple """ # pylint: disable=C0103, E1101 - import warnings import numpy as np from gstools.covmodel.base import CovModel +from gstools.covmodel.tools import AttributeWarning from gstools.tools.special import ( tplstable_cor, tpl_gau_spec_dens, @@ -23,19 +24,60 @@ ) -__all__ = ["TPLGaussian", "TPLExponential", "TPLStable"] +__all__ = ["TPLGaussian", "TPLExponential", "TPLStable", "TPLSimple"] + + +class TPLCovModel(CovModel): + """Truncated-Power-Law Covariance Model base class for super-position.""" + + @property + def len_up(self): + """:class:`float`: Upper length scale truncation of the model. + + * ``len_up = len_low + len_scale`` + """ + return self.len_low + self.len_scale + + @property + def len_up_rescaled(self): + """:class:`float`: Upper length scale truncation rescaled. 
+ + * ``len_up_rescaled = (len_low + len_scale) / rescale`` + """ + return self.len_up / self.rescale + + @property + def len_low_rescaled(self): + """:class:`float`: Lower length scale truncation rescaled. + + * ``len_low_rescaled = len_low / rescale`` + """ + return self.len_low / self.rescale + + def var_factor(self): + """Factor for C (intensity of variation) to result in variance.""" + return ( + self.len_up_rescaled ** (2 * self.hurst) + - self.len_low_rescaled ** (2 * self.hurst) + ) / (2 * self.hurst) + + def cor(self, h): + """TPL - normalized correlation function.""" + + def correlation(self, r): + """TPL - correlation function.""" # Truncated power law ######################################################### -class TPLGaussian(CovModel): +class TPLGaussian(TPLCovModel): r"""Truncated-Power-Law with Gaussian modes. Notes ----- The truncated power law is given by a superposition of scale-dependent - variograms: + variograms [Federico1997]_: .. math:: \gamma_{\ell_{\mathrm{low}},\ell_{\mathrm{up}}}(r) = @@ -77,9 +119,10 @@ class TPLGaussian(CovModel): If you want to define an upper scale truncation, you should set ``len_low`` and ``len_scale`` accordingly. - The following Parameters occure: + The following Parameters occur: - * :math:`C>0` : scaling factor from the Power-Law + * :math:`C>0` : + scaling factor from the Power-Law (intensity of variation) This parameter will be calculated internally by the given variance. You can access C directly by ``model.var_raw`` * :math:`00` : scaling factor from the Power-Law + * :math:`C>0` : + scaling factor from the Power-Law (intensity of variation) This parameter will be calculated internally by the given variance. You can access C directly by ``model.var_raw`` * :math:`00` : scaling factor from the Power-Law + * :math:`C>0` : + scaling factor from the Power-Law (intensity of variation) This parameter will be calculated internally by the given variance. 
You can access C directly by ``model.var_raw`` * :math:`0 1 and value.size != dim: # vector mean + raise ValueError(f"Mean/Trend: Wrong size ({value})") + return value if value.size > 1 else value.item() + class Field: - """A field base class for random and kriging fields ect. + """A base class for random fields, kriging fields, etc. Parameters ---------- model : :any:`CovModel` Covariance Model related to the field. - mean : :class:`float`, optional - Mean value of the field. + value_type : :class:`str`, optional + Value type of the field. Either "scalar" or "vector". + The default is "scalar". + mean : :any:`None` or :class:`float` or :any:`callable`, optional + Mean of the field if wanted. Could also be a callable. + The default is None. + normalizer : :any:`None` or :any:`Normalizer`, optional + Normalizer to be applied to the field. + The default is None. + trend : :any:`None` or :class:`float` or :any:`callable`, optional + Trend of the denormalized fields. If no normalizer is applied, + this behaves equal to 'mean'. + The default is None. """ - def __init__(self, model, mean=0.0): + def __init__( + self, + model, + value_type="scalar", + mean=None, + normalizer=None, + trend=None, + ): # initialize attributes self.pos = None self.mesh_type = None self.field = None # initialize private attributes - self._mean = None self._model = None - self.mean = mean - self.model = model self._value_type = None + self._mean = None + self._normalizer = None + self._trend = None + # set properties + self.model = model + self.value_type = value_type + self.mean = mean + self.normalizer = normalizer + self.trend = trend - def __call__(*args, **kwargs): + def __call__(self, *args, **kwargs): """Generate the field.""" - pass def structured(self, *args, **kwargs): """Generate a field on a structured mesh. 
@@ -73,88 +103,46 @@ def unstructured(self, *args, **kwargs): return call(*args, **kwargs) def mesh( - self, mesh, points="centroids", direction="xyz", name="field", **kwargs - ): # pragma: no cover + self, mesh, points="centroids", direction="all", name="field", **kwargs + ): """Generate a field on a given meshio or ogs5py mesh. Parameters ---------- - mesh : meshio.Mesh or ogs5py.MSH - The given meshio or ogs5py mesh + mesh : meshio.Mesh or ogs5py.MSH or PyVista mesh + The given meshio, ogs5py, or PyVista mesh points : :class:`str`, optional The points to evaluate the field at. Either the "centroids" of the mesh cells (calculated as mean of the cell vertices) or the "points" of the given mesh. Default: "centroids" - direction : :class:`str`, optional + direction : :class:`str` or :class:`list`, optional Here you can state which direction should be choosen for lower dimension. For example, if you got a 2D mesh in xz direction, - you have to pass "xz" - Default: "xyz" - name : :class:`str`, optional - Name to store the field in the given mesh as point_data or - cell_data. Default: "field" + you have to pass "xz". By default, all directions are used. + One can also pass a list of indices. + Default: "all" + name : :class:`str` or :class:`list` of :class:`str`, optional + Name(s) to store the field(s) in the given mesh as point_data or + cell_data. If to few names are given, digits will be appended. + Default: "field" **kwargs Keyword arguments forwareded to `Field.__call__`. Notes ----- This will store the field in the given mesh under the given name, - if a meshio mesh was given. + if a meshio or PyVista mesh was given. 
See: https://github.com/nschloe/meshio + See: https://github.com/pyvista/pyvista See: :any:`Field.__call__` """ - select = _get_select(direction) - if len(select) < self.model.dim: - raise ValueError( - "Field.mesh: need at least {} direction(s), got '{}'".format( - self.model.dim, direction - ) - ) - if hasattr(mesh, "centroids_flat"): - if points == "centroids": - pnts = mesh.centroids_flat.T[select] - else: - pnts = mesh.NODES.T[select] - out = self.unstructured(pos=pnts, **kwargs) - else: - if points == "centroids": - # define unique order of cells - cells = list(mesh.cells) - offset = [] - length = [] - pnts = np.empty((0, 3), dtype=np.double) - for cell in cells: - pnt = np.mean(mesh.points[mesh.cells[cell]], axis=1) - offset.append(pnts.shape[0]) - length.append(pnt.shape[0]) - pnts = np.vstack((pnts, pnt)) - # generate pos for __call__ - pnts = pnts.T[select] - out = self.unstructured(pos=pnts, **kwargs) - if isinstance(out, np.ndarray): - field = out - else: - # if multiple values are returned, take the first one - field = out[0] - field_dict = {} - for i, cell in enumerate(cells): - field_dict[cell] = field[offset[i] : offset[i] + length[i]] - mesh.cell_data[name] = field_dict - else: - out = self.unstructured(pos=mesh.points.T[select], **kwargs) - if isinstance(out, np.ndarray): - field = out - else: - # if multiple values are returned, take the first one - field = out[0] - mesh.point_data[name] = field - return out - - def _pre_pos(self, pos, mesh_type="unstructured", make_unstruct=False): + return generate_on_mesh(self, mesh, points, direction, name, **kwargs) + + def pre_pos(self, pos, mesh_type="unstructured"): """ Preprocessing positions and mesh_type. 
@@ -163,116 +151,76 @@ def _pre_pos(self, pos, mesh_type="unstructured", make_unstruct=False): pos : :any:`iterable` the position tuple, containing main direction and transversal directions - mesh_type : :class:`str` + mesh_type : :class:`str`, optional 'structured' / 'unstructured' - make_unstruct: :class:`bool` - State if mesh_type should be made unstructured. + Default: `"unstructured"` Returns ------- - x : :class:`numpy.ndarray` - first components of unrotated and isotropic position vectors - y : :class:`numpy.ndarray` or None - analog to x - z : :class:`numpy.ndarray` or None - analog to x - pos : :class:`tuple` of :class:`numpy.ndarray` - the normalized position tuple - mesh_type_gen : :class:`str` - 'structured' / 'unstructured' for the generator - mesh_type_changed : :class:`bool` - State if the mesh_type was changed. - axis_lens : :class:`tuple` or :any:`None` - axis lengths of the structured mesh if mesh type was changed. + iso_pos : (d, n), :class:`numpy.ndarray` + the isometrized position tuple + shape : :class:`tuple` + Shape of the resulting field. 
""" - x, y, z = pos2xyz(pos, max_dim=self.model.dim) - pos = xyz2pos(x, y, z) - mesh_type_gen = mesh_type - # format the positional arguments of the mesh - check_mesh(self.model.dim, x, y, z, mesh_type) - mesh_type_changed = False - axis_lens = None - if ( - self.model.do_rotation or make_unstruct - ) and mesh_type == "structured": - mesh_type_changed = True - mesh_type_gen = "unstructured" - x, y, z, axis_lens = reshape_axis_from_struct_to_unstruct( - self.model.dim, x, y, z - ) - if self.model.do_rotation: - x, y, z = unrotate_mesh(self.model.dim, self.model.angles, x, y, z) - if not self.model.is_isotropic: - y, z = make_isotropic(self.model.dim, self.model.anis, y, z) - return x, y, z, pos, mesh_type_gen, mesh_type_changed, axis_lens - - def _to_vtk_helper( - self, filename=None, field_select="field", fieldname="field" - ): # pragma: no cover - """Create a VTK/PyVista grid of the field or save it as a VTK file. - - This is an internal helper that will handle saving or creating objects + # save mesh-type + self.mesh_type = mesh_type + # save pos tuple + if mesh_type != "unstructured": + pos, shape = format_struct_pos_dim(pos, self.dim) + self.pos = pos + pos = generate_grid(pos) + else: + pos = np.array(pos, dtype=np.double).reshape(self.dim, -1) + self.pos = pos + shape = np.shape(pos[0]) + # prepend dimension if we have a vector field + if self.value_type == "vector": + shape = (self.dim,) + shape + if self.model.latlon: + raise ValueError("Field: Vector fields not allowed for latlon") + # return isometrized pos tuple and resulting field shape + return self.model.isometrize(pos), shape + + def post_field(self, field, name="field", process=True, save=True): + """ + Postprocessing field values. Parameters ---------- - filename : :class:`str` - Filename of the file to be saved, including the path. Note that an - ending (.vtr or .vtu) will be added to the name. If ``None`` is - passed, a PyVista dataset of the appropriate type will be returned. 
- field_select : :class:`str`, optional - Field that should be stored. Can be: - "field", "raw_field", "krige_field", "err_field" or "krige_var". - Default: "field" - fieldname : :class:`str`, optional - Name of the field in the VTK file. Default: "field" + field : :class:`numpy.ndarray` + Field values. + name : :class:`str`, optional + Name. to store the field. + The default is "field". + process : :class:`bool`, optional + Whether to process field to apply mean, normalizer and trend. + The default is True. + save : :class:`bool`, optional + Whether to store the field under the given name. + The default is True. + + Returns + ------- + field : :class:`numpy.ndarray` + Processed field values. """ - if self.value_type is None: - raise ValueError( - "Field value type not set! " - + "Specify 'scalar' or 'vector' before plotting." - ) - elif self.value_type == "vector": - if hasattr(self, field_select): - field = getattr(self, field_select) - else: - field = None - if not ( - self.pos is None or field is None or self.mesh_type is None - ): - suf = ["_X", "_Y", "_Z"] - fields = {} - for i in range(self.model.dim): - fields[fieldname + suf[i]] = field[i] - if filename is None: - return to_vtk(self.pos, fields, self.mesh_type) - else: - return vtk_export( - filename, self.pos, fields, self.mesh_type - ) - elif self.value_type == "scalar": - if hasattr(self, field_select): - field = getattr(self, field_select) - else: - field = None - if not ( - self.pos is None or field is None or self.mesh_type is None - ): - if filename is None: - return to_vtk(self.pos, {fieldname: field}, self.mesh_type) - else: - return vtk_export( - filename, self.pos, {fieldname: field}, self.mesh_type - ) - else: - print( - "Field.to_vtk: No " - + field_select - + " stored in the class." 
- ) - else: - raise ValueError( - "Unknown field value type: {}".format(self.value_type) + if process: + if self.pos is None: + raise ValueError("post_field: no 'pos' tuple set for field.") + field = apply_mean_norm_trend( + pos=self.pos, + field=field, + mesh_type=self.mesh_type, + value_type=self.value_type, + mean=self.mean, + normalizer=self.normalizer, + trend=self.trend, + check_shape=False, + stacked=False, ) + if save: + setattr(self, name, field) + return field def to_pyvista( self, field_select="field", fieldname="field" @@ -288,8 +236,8 @@ def to_pyvista( fieldname : :class:`str`, optional Name of the field in the VTK file. Default: "field" """ - grid = self._to_vtk_helper( - filename=None, field_select=field_select, fieldname=fieldname + grid = to_vtk_helper( + self, filename=None, field_select=field_select, fieldname=fieldname ) return grid @@ -312,19 +260,23 @@ def vtk_export( """ if not isinstance(filename, str): raise TypeError("Please use a string filename.") - return self._to_vtk_helper( - filename=filename, field_select=field_select, fieldname=fieldname + return to_vtk_helper( + self, + filename=filename, + field_select=field_select, + fieldname=fieldname, ) - def plot(self, field="field", fig=None, ax=None): # pragma: no cover + def plot( + self, field="field", fig=None, ax=None, **kwargs + ): # pragma: no cover """ Plot the spatial random field. Parameters ---------- field : :class:`str`, optional - Field that should be plotted. Can be: - "field", "raw_field", "krige_field", "err_field" or "krige_var". + Field that should be plotted. Default: "field" fig : :class:`Figure` or :any:`None` Figure to plot the axes on. If `None`, a new one will be created. @@ -332,6 +284,8 @@ def plot(self, field="field", fig=None, ax=None): # pragma: no cover ax : :class:`Axes` or :any:`None` Axes to plot on. If `None`, a new one will be added to the figure. Default: `None` + **kwargs + Forwarded to the plotting routine. 
""" # just import if needed; matplotlib is not required by setup from gstools.field.plot import plot_field, plot_vec_field @@ -339,35 +293,23 @@ def plot(self, field="field", fig=None, ax=None): # pragma: no cover if self.value_type is None: raise ValueError( "Field value type not set! " - + "Specify 'scalar' or 'vector' before plotting." + "Specify 'scalar' or 'vector' before plotting." ) - elif self.value_type == "scalar": - r = plot_field(self, field, fig, ax) - + if self.value_type == "scalar": + r = plot_field(self, field, fig, ax, **kwargs) elif self.value_type == "vector": if self.model.dim == 2: - r = plot_vec_field(self, field, fig, ax) + r = plot_vec_field(self, field, fig, ax, **kwargs) else: raise NotImplementedError( "Streamflow plotting only supported for 2d case." ) else: - raise ValueError( - "Unknown field value type: {}".format(self.value_type) - ) + raise ValueError(f"Unknown field value type: {self.value_type}") return r - @property - def mean(self): - """:class:`float`: The mean of the field.""" - return self._mean - - @mean.setter - def mean(self, mean): - self._mean = float(mean) - @property def model(self): """:any:`CovModel`: The covariance model of the field.""" @@ -382,21 +324,63 @@ def model(self, model): "Field: 'model' is not an instance of 'gstools.CovModel'" ) + @property + def mean(self): + """:class:`float` or :any:`callable`: The mean of the field.""" + return self._mean + + @mean.setter + def mean(self, mean): + self._mean = _set_mean_trend(mean, self.dim) + + @property + def normalizer(self): + """:any:`Normalizer`: Normalizer of the field.""" + return self._normalizer + + @normalizer.setter + def normalizer(self, normalizer): + self._normalizer = _check_normalizer(normalizer) + + @property + def trend(self): + """:class:`float` or :any:`callable`: The trend of the field.""" + return self._trend + + @trend.setter + def trend(self, trend): + self._trend = _set_mean_trend(trend, self.dim) + @property def value_type(self): 
""":class:`str`: Type of the field values (scalar, vector).""" return self._value_type - def __str__(self): - """Return String representation.""" - return self.__repr__() + @value_type.setter + def value_type(self, value_type): + if value_type not in VALUE_TYPES: + raise ValueError(f"Field: value type not in {VALUE_TYPES}") + self._value_type = value_type - def __repr__(self): - """Return String representation.""" - return "Field(model={0}, mean={1})".format(self.model, self.mean) + @property + def dim(self): + """:class:`int`: Dimension of the field.""" + return self.model.field_dim + @property + def name(self): + """:class:`str`: The name of the class.""" + return self.__class__.__name__ -if __name__ == "__main__": # pragma: no cover - import doctest + def _fmt_mean_norm_trend(self): + # fmt_mean_norm_trend for all child classes + return fmt_mean_norm_trend(self) - doctest.testmod() + def __repr__(self): + """Return String representation.""" + return "{0}(model={1}, value_type='{2}'{3})".format( + self.name, + self.model.name, + self.value_type, + self._fmt_mean_norm_trend(), + ) diff --git a/gstools/field/cond_srf.py b/gstools/field/cond_srf.py new file mode 100644 index 000000000..5e1eb85dc --- /dev/null +++ b/gstools/field/cond_srf.py @@ -0,0 +1,205 @@ +# -*- coding: utf-8 -*- +""" +GStools subpackage providing a class for conditioned spatial random fields. + +.. currentmodule:: gstools.field.cond_srf + +The following classes are provided + +.. autosummary:: + CondSRF +""" +# pylint: disable=C0103, W0231, W0221, E1102 +import numpy as np +from gstools.field.generator import RandMeth +from gstools.field.base import Field +from gstools.krige import Krige + +__all__ = ["CondSRF"] + +GENERATOR = { + "RandMeth": RandMeth, +} +"""dict: Standard generators for conditioned spatial random fields.""" + + +class CondSRF(Field): + """A class to generate conditioned spatial random fields (SRF). 
+ + Parameters + ---------- + krige : :any:`Krige` + Kriging setup to condition the spatial random field. + generator : :class:`str`, optional + Name of the field generator to be used. + At the moment, only the following generator is provided: + + * "RandMeth" : The Randomization Method. + See: :any:`RandMeth` + + Default: "RandMeth" + **generator_kwargs + Keyword arguments that are forwarded to the generator in use. + Have a look at the provided generators for further information. + """ + + def __init__(self, krige, generator="RandMeth", **generator_kwargs): + if not isinstance(krige, Krige): + raise ValueError("CondSRF: krige should be an instance of Krige.") + self._krige = krige + # initialize attributes + self.pos = None + self.mesh_type = None + self.field = None + self.raw_field = None + # initialize private attributes + self._generator = None + # initialize attributes + self.set_generator(generator, **generator_kwargs) + + def __call__(self, pos, seed=np.nan, mesh_type="unstructured", **kwargs): + """Generate the conditioned spatial random field. + + The field is saved as `self.field` and is also returned. + + Parameters + ---------- + pos : :class:`list` + the position tuple, containing main direction and transversal + directions + seed : :class:`int`, optional + seed for RNG for reseting. Default: keep seed from generator + mesh_type : :class:`str` + 'structured' / 'unstructured' + **kwargs + keyword arguments that are forwarded to the kriging routine in use. 
+ + Returns + ------- + field : :class:`numpy.ndarray` + the conditioned SRF + """ + kwargs["mesh_type"] = mesh_type + kwargs["only_mean"] = False # overwrite if given + kwargs["return_var"] = True # overwrite if given + kwargs["post_process"] = False # overwrite if given + # update the model/seed in the generator if any changes were made + self.generator.update(self.model, seed) + # get isometrized positions and the resulting field-shape + iso_pos, shape = self.pre_pos(pos, mesh_type) + # generate the field + self.raw_field = np.reshape( + self.generator(iso_pos, add_nugget=False), shape + ) + field, krige_var = self.krige(pos, **kwargs) + var_scale, nugget = self.get_scaling(krige_var, shape) + # need to use a copy to not alter "field" by reference + self.krige.post_field(self.krige.field.copy()) + return self.post_field(field + var_scale * self.raw_field + nugget) + + def get_scaling(self, krige_var, shape): + """ + Get scaling coefficients for the random field. + + Parameters + ---------- + krige_var : :class:`numpy.ndarray` + Kriging variance. + shape : :class:`tuple` of :class:`int` + Field shape. + + Returns + ------- + var_scale : :class:`numpy.ndarray` + Variance scaling factor for the random field. + nugget : :class:`numpy.ndarray` or :class:`int` + Nugget to be added to the field. + """ + if self.model.nugget > 0: + var_scale = np.maximum(krige_var - self.model.nugget, 0) + nug_scale = np.sqrt((krige_var - var_scale) / self.model.nugget) + var_scale = np.sqrt(var_scale / self.model.var) + nugget = nug_scale * self.generator.get_nugget(shape) + else: + var_scale = np.sqrt(krige_var / self.model.var) + nugget = 0 + return var_scale, nugget + + def set_generator(self, generator, **generator_kwargs): + """Set the generator for the field. + + Parameters + ---------- + generator : :class:`str`, optional + Name of the generator to use for field generation. + Default: "RandMeth" + **generator_kwargs + keyword arguments that are forwarded to the generator in use. 
+ """ + if generator in GENERATOR: + gen = GENERATOR[generator] + self._generator = gen(self.model, **generator_kwargs) + self.value_type = self.generator.value_type + else: + raise ValueError(f"gstools.CondSRF: Unknown generator {generator}") + + @property + def krige(self): + """:any:`Krige`: The underlying kriging class.""" + return self._krige + + @property + def generator(self): + """:any:`callable`: The generator of the field.""" + return self._generator + + @property + def model(self): + """:any:`CovModel`: The covariance model of the field.""" + return self.krige.model + + @model.setter + def model(self, model): + self.krige.model = model + + @property + def mean(self): + """:class:`float` or :any:`callable`: The mean of the field.""" + return self.krige.mean + + @mean.setter + def mean(self, mean): + self.krige.mean = mean + + @property + def normalizer(self): + """:any:`Normalizer`: Normalizer of the field.""" + return self.krige.normalizer + + @normalizer.setter + def normalizer(self, normalizer): + self.krige.normalizer = normalizer + + @property + def trend(self): + """:class:`float` or :any:`callable`: The trend of the field.""" + return self.krige.trend + + @trend.setter + def trend(self, trend): + self.krige.trend = trend + + @property + def value_type(self): + """:class:`str`: Type of the field values (scalar, vector).""" + return self.krige.value_type + + @value_type.setter + def value_type(self, value_type): + self.krige.value_type = value_type + + def __repr__(self): + """Return String representation.""" + return "CondSRF(krige={0}, generator={1})".format( + self.krige, self.generator.name + ) diff --git a/gstools/field/condition.py b/gstools/field/condition.py deleted file mode 100644 index cbb0f076d..000000000 --- a/gstools/field/condition.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- -""" -GStools subpackage providing routines for conditioned random fields. - -.. 
currentmodule:: gstools.field.condition - -The following functions are provided - -.. autosummary:: - ordinary - simple -""" -# pylint: disable=C0103 -from gstools.field.tools import make_isotropic, unrotate_mesh -from gstools.tools.geometric import pos2xyz -from gstools.krige import Ordinary, Simple - - -def ordinary(srf): - """Condition a given spatial random field with ordinary kriging. - - Parameters - ---------- - srf : :any:`SRF` - The spatial random field class containing all information - - Returns - ------- - cond_field : :class:`numpy.ndarray` - the conditioned field - krige_field : :class:`numpy.ndarray` - the kriged field - err_field : :class:`numpy.ndarray` - the error field to set the given random field to zero at the conditions - krige_var : :class:`numpy.ndarray` - the variance of the kriged field - """ - if srf._value_type != "scalar": - raise ValueError("Conditioned SRF: only scalar fields allowed.") - krige_ok = Ordinary( - model=srf.model, cond_pos=srf.cond_pos, cond_val=srf.cond_val - ) - krige_field, krige_var = krige_ok(srf.pos, srf.mesh_type) - - # evaluate the field at the conditional points - x, y, z = pos2xyz(srf.cond_pos, max_dim=srf.model.dim) - if srf.model.do_rotation: - x, y, z = unrotate_mesh(srf.model.dim, srf.model.angles, x, y, z) - y, z = make_isotropic(srf.model.dim, srf.model.anis, y, z) - err_data = srf.generator.__call__(x, y, z, "unstructured") - - err_ok = Ordinary( - model=srf.model, cond_pos=srf.cond_pos, cond_val=err_data - ) - err_field, __ = err_ok(srf.pos, srf.mesh_type) - cond_field = srf.raw_field + krige_field - err_field - info = {"mean": krige_ok.mean} - return cond_field, krige_field, err_field, krige_var, info - - -def simple(srf): - """Condition a given spatial random field with simple kriging. 
- - Parameters - ---------- - srf : :any:`SRF` - The spatial random field class containing all information - - Returns - ------- - cond_field : :class:`numpy.ndarray` - the conditioned field - krige_field : :class:`numpy.ndarray` - the kriged field - err_field : :class:`numpy.ndarray` - the error field to set the given random field to zero at the conditions - krige_var : :class:`numpy.ndarray` - the variance of the kriged field - """ - if srf._value_type != "scalar": - raise ValueError("Conditioned SRF: only scalar fields allowed.") - krige_sk = Simple( - model=srf.model, - mean=srf.mean, - cond_pos=srf.cond_pos, - cond_val=srf.cond_val, - ) - krige_field, krige_var = krige_sk(srf.pos, srf.mesh_type) - - # evaluate the field at the conditional points - x, y, z = pos2xyz(srf.cond_pos, max_dim=srf.model.dim) - if srf.model.do_rotation: - x, y, z = unrotate_mesh(srf.model.dim, srf.model.angles, x, y, z) - y, z = make_isotropic(srf.model.dim, srf.model.anis, y, z) - err_data = srf.generator.__call__(x, y, z, "unstructured") + srf.mean - - err_sk = Simple( - model=srf.model, - mean=srf.mean, - cond_pos=srf.cond_pos, - cond_val=err_data, - ) - err_field, __ = err_sk(srf.pos, srf.mesh_type) - cond_field = srf.raw_field + krige_field - err_field + srf.mean - info = {} - return cond_field, krige_field, err_field, krige_var, info diff --git a/gstools/field/generator.py b/gstools/field/generator.py index 3bbe7a585..9b3eeca67 100644 --- a/gstools/field/generator.py +++ b/gstools/field/generator.py @@ -10,24 +10,23 @@ RandMeth IncomprRandMeth """ -# pylint: disable=C0103 - +# pylint: disable=C0103, W0222 +import warnings from copy import deepcopy as dcp import numpy as np from gstools.covmodel.base import CovModel from gstools.random.rng import RNG -from gstools.field.summator import ( - summate_unstruct, - summate_struct, - summate_incompr_unstruct, - summate_incompr_struct, -) +from gstools.field.summator import summate, summate_incompr + __all__ = ["RandMeth", 
"IncomprRandMeth"] +SAMPLING = ["auto", "inversion", "mcmc"] + + class RandMeth: - r"""Randomization method for calculating isotropic spatial random fields. + r"""Randomization method for calculating isotropic random fields. Parameters ---------- @@ -41,6 +40,13 @@ class RandMeth: verbose : :class:`bool`, optional Be chatty during the generation. Default: :any:`False` + sampling : :class:`str`, optional + Sampling strategy. Either + + * "auto": select best strategy depending on given model + * "inversion": use inversion method + * "mcmc": use mcmc sampling + **kwargs Placeholder for keyword-args @@ -48,7 +54,7 @@ class RandMeth: ----- The Randomization method is used to generate isotropic spatial random fields characterized by a given covariance model. - The calculation looks like: + The calculation looks like [Hesse2014]_: .. math:: u\left(x\right)= @@ -64,13 +70,26 @@ class RandMeth: * :math:`Z_{j,i}` : random samples from a normal distribution * :math:`k_i` : samples from the spectral density distribution of the covariance model + + References + ---------- + .. 
[Hesse2014] Heße, F., Prykhodko, V., Schlüter, S., and Attinger, S., + "Generating random fields with a truncated power-law variogram: + A comparison of several numerical methods", + Environmental Modelling & Software, 55, 32-48., (2014) """ def __init__( - self, model, mode_no=1000, seed=None, verbose=False, **kwargs + self, + model, + mode_no=1000, + seed=None, + verbose=False, + sampling="auto", + **kwargs, ): if kwargs: - print("gstools.RandMeth: **kwargs are ignored") + warnings.warn("gstools.RandMeth: **kwargs are ignored") # initialize atributes self._mode_no = int(mode_no) self._verbose = bool(verbose) @@ -82,10 +101,13 @@ def __init__( self._z_2 = None self._cov_sample = None self._value_type = "scalar" + # set sampling strategy + self._sampling = None + self.sampling = sampling # set model and seed self.update(model, seed) - def __call__(self, x, y=None, z=None, mesh_type="unstructured"): + def __call__(self, pos, add_nugget=True): """Calculate the random modes for the randomization method. This method calls the `summate_*` Cython methods, which are the @@ -93,37 +115,22 @@ def __call__(self, x, y=None, z=None, mesh_type="unstructured"): Parameters ---------- - x : :class:`float`, :class:`numpy.ndarray` - The x components of the pos. tuple. - y : :class:`float`, :class:`numpy.ndarray`, optional - The y components of the pos. tuple. - z : :class:`float`, :class:`numpy.ndarray`, optional - The z components of the pos. tuple. - mesh_type : :class:`str`, optional - 'structured' / 'unstructured' + pos : (d, n), :class:`numpy.ndarray` + the position tuple with d dimensions and n points. + add_nugget : :class:`bool` + Whether to add nugget noise to the field. 
Returns ------- :class:`numpy.ndarray` the random modes """ - if mesh_type == "unstructured": - pos = _reshape_pos(x, y, z, dtype=np.double) - - summed_modes = summate_unstruct( - self._cov_sample, self._z_1, self._z_2, pos - ) - else: - x, y, z = _set_dtype(x, y, z, dtype=np.double) - summed_modes = summate_struct( - self._cov_sample, self._z_1, self._z_2, x, y, z - ) - - nugget = self._set_nugget(summed_modes.shape) - + pos = np.array(pos, dtype=np.double) + summed_modes = summate(self._cov_sample, self._z_1, self._z_2, pos) + nugget = self.get_nugget(summed_modes.shape) if add_nugget else 0.0 return np.sqrt(self.model.var / self._mode_no) * summed_modes + nugget - def _set_nugget(self, shape): + def get_nugget(self, shape): """ Generate normal distributed values for the nugget simulation. @@ -131,6 +138,7 @@ def _set_nugget(self, shape): ---------- shape : :class:`tuple` the shape of the summed modes + Returns ------- nugget : :class:`numpy.ndarray` @@ -190,13 +198,13 @@ def update(self, model=None, seed=np.nan): else: raise ValueError( "gstools.field.generator.RandMeth: " - + "neither 'model' nor 'seed' given!" + "neither 'model' nor 'seed' given!" 
) # wrong model type else: raise ValueError( "gstools.field.generator.RandMeth: 'model' is not an " - + "instance of 'gstools.CovModel'" + "instance of 'gstools.CovModel'" ) def reset_seed(self, seed=np.nan): @@ -223,7 +231,9 @@ def reset_seed(self, seed=np.nan): # sample uniform on a sphere sphere_coord = self._rng.sample_sphere(self.model.dim, self._mode_no) # sample radii acording to radial spectral density of the model - if self.model.has_ppf: + if self.sampling == "inversion" or ( + self.sampling == "auto" and self.model.has_ppf + ): pdf, cdf, ppf = self.model.dist_func rad = self._rng.sample_dist( size=self._mode_no, pdf=pdf, cdf=cdf, ppf=ppf, a=0 @@ -232,11 +242,22 @@ def reset_seed(self, seed=np.nan): rad = self._rng.sample_ln_pdf( ln_pdf=self.model.ln_spectral_rad_pdf, size=self._mode_no, - sample_around=1.0 / self.model.len_scale, + sample_around=1.0 / self.model.len_rescaled, ) # get fully spatial samples by multiplying sphere samples and radii self._cov_sample = rad * sphere_coord + @property + def sampling(self): + """:class:`str`: Sampling strategy.""" + return self._sampling + + @sampling.setter + def sampling(self, sampling): + if sampling not in ["auto", "inversion", "mcmc"]: + raise ValueError(f"RandMeth: sampling not in {SAMPLING}.") + self._sampling = sampling + @property def seed(self): """:class:`int`: Seed of the master RNG. @@ -292,14 +313,10 @@ def value_type(self): """:class:`str`: Type of the field values (scalar, vector).""" return self._value_type - def __str__(self): - """Return String representation.""" - return self.__repr__() - def __repr__(self): """Return String representation.""" return "RandMeth(model={0}, mode_no={1}, seed={2})".format( - repr(self.model), self._mode_no, self.seed + self.model, self._mode_no, self.seed ) @@ -320,6 +337,13 @@ class IncomprRandMeth(RandMeth): verbose : :class:`bool`, optional State if there should be output during the generation. 
Default: :any:`False` + sampling : :class:`str`, optional + Sampling strategy. Either + + * "auto": select best strategy depending on given model + * "inversion": use inversion method + * "mcmc": use mcmc sampling + **kwargs Placeholder for keyword-args @@ -327,7 +351,7 @@ class IncomprRandMeth(RandMeth): ----- The Randomization method is used to generate isotropic spatial incompressible random vector fields characterized - by a given covariance model. The equation is: + by a given covariance model. The equation is [Kraichnan1970]_: .. math:: u_i\left(x\right)= \bar{u_i} \delta_{i1} + @@ -346,6 +370,12 @@ class IncomprRandMeth(RandMeth): the covariance model * :math:`p_i(k_j) = e_1 - \frac{k_i k_1}{k^2}` : the projector ensuring the incompressibility + + References + ---------- + .. [Kraichnan1970] Kraichnan, R. H., + "Diffusion by a random velocity field.", + The physics of fluids, 13(1), 22-31., (1970) """ def __init__( @@ -355,19 +385,19 @@ def __init__( mode_no=1000, seed=None, verbose=False, - **kwargs + sampling="auto", + **kwargs, ): - if model.dim < 2: + if model.dim < 2 or model.dim > 3: raise ValueError( - "Only 2- and 3-dimensional incompressible fields " - + "can be generated." + "Only 2D and 3D incompressible fields can be generated." ) - super().__init__(model, mode_no, seed, verbose, **kwargs) + super().__init__(model, mode_no, seed, verbose, sampling, **kwargs) self.mean_u = mean_velocity self._value_type = "vector" - def __call__(self, x, y=None, z=None, mesh_type="unstructured"): + def __call__(self, pos): """Calculate the random modes for the randomization method. 
This method calls the `summate_incompr_*` Cython methods, @@ -377,35 +407,19 @@ def __call__(self, x, y=None, z=None, mesh_type="unstructured"): Parameters ---------- - x : :class:`float`, :class:`numpy.ndarray` - the x components of the position tuple, the shape has to be - (len(x), 1, 1) for 3d and accordingly shorter for lower - dimensions - y : :class:`float`, :class:`numpy.ndarray`, optional - the y components of the pos. tuples. Default: ``None`` - z : :class:`float`, :class:`numpy.ndarray`, optional - the z components of the pos. tuple. Default: ``None`` - mesh_type : :class:`str`, optional - 'structured' / 'unstructured' + pos : (d, n), :class:`numpy.ndarray` + the position tuple with d dimensions and n points. Returns ------- :class:`numpy.ndarray` the random modes """ - if mesh_type == "unstructured": - pos = _reshape_pos(x, y, z, dtype=np.double) - - summed_modes = summate_incompr_unstruct( - self._cov_sample, self._z_1, self._z_2, pos - ) - else: - x, y, z = _set_dtype(x, y, z, dtype=np.double) - summed_modes = summate_incompr_struct( - self._cov_sample, self._z_1, self._z_2, x, y, z - ) - - nugget = self._set_nugget(summed_modes.shape) + pos = np.array(pos, dtype=np.double) + summed_modes = summate_incompr( + self._cov_sample, self._z_1, self._z_2, pos + ) + nugget = self.get_nugget(summed_modes.shape) e1 = self._create_unit_vector(summed_modes.shape) @@ -441,68 +455,3 @@ def _create_unit_vector(self, broadcast_shape, axis=0): e1 = np.zeros(shape) e1[axis] = 1.0 return e1 - - -def _reshape_pos(x, y=None, z=None, dtype=np.double): - """ - Reshape the 1d x, y, z positions to a 2d position array. - - Parameters - ---------- - x : :class:`float`, :class:`numpy.ndarray` - the x components of the position tuple, the shape has to be - (len(x), 1, 1) for 3d and accordingly shorter for lower - dimensions - y : :class:`float`, :class:`numpy.ndarray`, optional - the y components of the pos. 
tuple - z : :class:`float`, :class:`numpy.ndarray`, optional - the z components of the pos. tuple - dtype : :class:`numpy.dtype`, optional - the numpy dtype to which the elements should be converted - - Returns - ------- - :class:`numpy.ndarray` - the positions in one convinient data structure - """ - if y is None and z is None: - pos = np.array(x.reshape(1, len(x)), dtype=dtype) - elif z is None: - pos = np.array(np.vstack((x, y)), dtype=dtype) - else: - pos = np.array(np.vstack((x, y, z)), dtype=dtype) - return pos - - -def _set_dtype(x, y=None, z=None, dtype=np.double): - """ - Convert the dtypes of the input arrays to given dtype. - - Parameters - ---------- - x : :class:`float`, :class:`numpy.ndarray` - The array to be converted. - y : :class:`float`, :class:`numpy.ndarray`, optional - The array to be converted. - z : :class:`float`, :class:`numpy.ndarray`, optional - The array to be converted. - dtype : :class:`numpy.dtype`, optional - The numpy dtype to which the elements should be converted. - - Returns - ------- - :class:`numpy.ndarray` - The input lists/ arrays as numpy arrays with given dtype. 
- """ - x = x.astype(dtype, copy=False) - if y is not None: - y = y.astype(dtype, copy=False) - if z is not None: - z = z.astype(dtype, copy=False) - return x, y, z - - -if __name__ == "__main__": # pragma: no cover - import doctest - - doctest.testmod() diff --git a/gstools/field/plot.py b/gstools/field/plot.py index 23bcf3b88..77e604ae6 100644 --- a/gstools/field/plot.py +++ b/gstools/field/plot.py @@ -10,14 +10,19 @@ plot_field plot_vec_field """ -# pylint: disable=C0103 +# pylint: disable=C0103, W0613, E1101 import numpy as np from scipy import interpolate as inter -import matplotlib.pyplot as plt -from matplotlib.widgets import Slider, RadioButtons -from mpl_toolkits.mplot3d import Axes3D -from gstools.tools import pos2xyz -from gstools.covmodel.plot import _get_fig_ax +from scipy.spatial import ConvexHull +from gstools.tools.misc import get_fig_ax +from gstools.tools.geometric import rotation_planes + +try: + import matplotlib.pyplot as plt + from matplotlib.widgets import Slider, RadioButtons +except ImportError as exc: + raise ImportError("Plotting: Matplotlib not installed.") from exc + __all__ = ["plot_field", "plot_vec_field"] @@ -25,7 +30,9 @@ # plotting routines ####################################################### -def plot_field(fld, field="field", fig=None, ax=None): # pragma: no cover +def plot_field( + fld, field="field", fig=None, ax=None, **kwargs +): # pragma: no cover """ Plot a spatial field. @@ -41,173 +48,234 @@ def plot_field(fld, field="field", fig=None, ax=None): # pragma: no cover ax : :class:`Axes` or :any:`None`, optional Axes to plot on. If `None`, a new one will be added to the figure. Default: `None` + **kwargs + Forwarded to the plotting routine. 
""" - plot_field = getattr(fld, field) - assert not (fld.pos is None or plot_field is None) - if fld.model.dim == 1: - ax = _plot_1d(fld.pos, plot_field, fig, ax) - elif fld.model.dim == 2: - ax = _plot_2d(fld.pos, plot_field, fld.mesh_type, fig, ax) - else: - ax = _plot_3d(fld.pos, plot_field, fld.mesh_type, fig, ax) - return ax + plt_fld = getattr(fld, field) + assert not (fld.pos is None or plt_fld is None) + if fld.dim == 1: + return plot_1d(fld.pos, plt_fld, fig, ax, **kwargs) + return plot_nd( + fld.pos, plt_fld, fld.mesh_type, fig, ax, fld.model.latlon, **kwargs + ) + +def plot_1d(pos, field, fig=None, ax=None, ax_names=None): # pragma: no cover + """ + Plot a 1D field. -def _plot_1d(pos, field, fig=None, ax=None): # pragma: no cover - """Plot a 1d field.""" - fig, ax = _get_fig_ax(fig, ax) - title = "Field 1D: " + str(field.shape) - x, __, __ = pos2xyz(pos, max_dim=1) + Parameters + ---------- + pos : :class:`list` + the position tuple, containing either the point coordinates (x, y, ...) + or the axes descriptions (for mesh_type='structured') + field : :class:`numpy.ndarray` + Field values. + fig : :class:`Figure` or :any:`None`, optional + Figure to plot the axes on. If `None`, a new one will be created. + Default: `None` + ax : :class:`Axes` or :any:`None`, optional + Axes to plot on. If `None`, a new one will be added to the figure. + Default: `None` + ax_names : :class:`list` of :class:`str`, optional + Axes names. The default is ["$x$", "field"]. + + Returns + ------- + ax : :class:`Axes` + Axis containing the plot. 
+ """ + fig, ax = get_fig_ax(fig, ax) + title = f"Field 1D: {field.shape}" + x = pos[0] x = x.flatten() arg = np.argsort(x) + ax_names = _ax_names(1, ax_names=ax_names) ax.plot(x[arg], field.ravel()[arg]) - ax.set_xlabel("X") - ax.set_ylabel("field") + ax.set_xlabel(ax_names[0]) + ax.set_ylabel(ax_names[1]) ax.set_title(title) fig.show() return ax -def _plot_2d(pos, field, mesh_type, fig=None, ax=None): # pragma: no cover - """Plot a 2d field.""" - fig, ax = _get_fig_ax(fig, ax) - title = "Field 2D " + mesh_type + ": " + str(field.shape) - x, y, __ = pos2xyz(pos, max_dim=2) - if mesh_type == "unstructured": - cont = ax.tricontourf(x, y, field.ravel(), levels=256) - else: - try: - cont = ax.contourf(x, y, field.T, levels=256) - except TypeError: - cont = ax.contourf(x, y, field.T, 256) - ax.set_xlabel("X") - ax.set_ylabel("Y") - ax.set_title(title) - fig.colorbar(cont) - fig.show() - return ax +def plot_nd( + pos, + field, + mesh_type, + fig=None, + ax=None, + latlon=False, + resolution=128, + ax_names=None, + aspect="quad", + show_colorbar=True, + convex_hull=False, + contour_plot=True, + **kwargs, +): # pragma: no cover + """ + Plot field in arbitrary dimensions. + Parameters + ---------- + pos : :class:`list` + the position tuple, containing either the point coordinates (x, y, ...) + or the axes descriptions (for mesh_type='structured') + field : :class:`numpy.ndarray` + Field values. + fig : :class:`Figure` or :any:`None`, optional + Figure to plot the axes on. If `None`, a new one will be created. + Default: `None` + ax : :class:`Axes` or :any:`None`, optional + Axes to plot on. If `None`, a new one will be added to the figure. + Default: `None` + latlon : :class:`bool`, optional + Whether the data is representing 2D fields on earths surface described + by latitude and longitude. When using this, the estimator will + use great-circle distance for variogram estimation. 
+        Note: the two position axes are then interpreted as longitude and
+        latitude and the plot axes will be labeled "lon" and "lat".
+        Default: False
+    resolution : :class:`int`, optional
+        Resolution of the imshow plot. The default is 128.
+    ax_names : :class:`list` of :class:`str`, optional
+        Axes names. The default is ["$x$", "$y$", "$z$"] (up to 3D)
+        or ["$x_{i}$", ...] for higher dimensions.
+    aspect : :class:`str` or :any:`None` or :class:`float`, optional
+        Aspect of the plot. Can be "auto", "equal", "quad", None or a number
+        describing the aspect ratio.
+        The default is "quad".
+    show_colorbar : :class:`bool`, optional
+        Whether to show the colorbar. The default is True.
+    convex_hull : :class:`bool`, optional
+        Whether to show the convex hull in 2D with unstructured data.
+        The default is False.
+    contour_plot : :class:`bool`, optional
+        Whether to use a contour-plot in 2D. The default is True.
-def _plot_3d(pos, field, mesh_type, fig=None, ax=None):  # pragma: no cover
-    """Plot 3D field."""
-    dir1, dir2 = np.mgrid[0:1:51j, 0:1:51j]
-    levels = np.linspace(field.min(), field.max(), 256, endpoint=True)
-
-    x_min = pos[0].min()
-    x_max = pos[0].max()
-    y_min = pos[1].min()
-    y_max = pos[1].max()
-    z_min = pos[2].min()
-    z_max = pos[2].max()
-    x_range = x_max - x_min
-    y_range = y_max - y_min
-    z_range = z_max - z_min
-    x_step = x_range / 50.0
-    y_step = y_range / 50.0
-    z_step = z_range / 50.0
-    ax_info = {
-        "x": [x_min, x_max, x_range, x_step],
-        "y": [y_min, y_max, y_range, y_step],
-        "z": [z_min, z_max, z_range, z_step],
-    }
-    fig, ax = _get_fig_ax(fig, ax, Axes3D.name)
-    title = "Field 3D " + mesh_type + ": " + str(field.shape)
-    fig.subplots_adjust(left=0.2, right=0.8, bottom=0.25)
-    sax = plt.axes([0.15, 0.1, 0.65, 0.03])
-    z_height = Slider(
-        sax,
-        "z value",
-        z_min,
-        z_max,
-        valinit=z_min + z_range / 2.0,
-        valstep=z_step,
-    )
-    rax = plt.axes([0.05, 0.5, 0.1, 0.15])
-    radio = RadioButtons(rax, ("x slice", "y slice", "z slice"), active=2)
-    z_dir_tmp = "z"
-
# create container - container_class = type( - "info", (object,), {"z_height": z_height, "z_dir_tmp": z_dir_tmp} - ) - container = container_class() - - def get_plane(z_val_in, z_dir): - """Get the plane.""" - if z_dir == "z": - x_io = dir1 * x_range + x_min - y_io = dir2 * y_range + y_min - z_io = np.full_like(x_io, z_val_in) - elif z_dir == "y": - x_io = dir1 * x_range + x_min - z_io = dir2 * z_range + z_min - y_io = np.full_like(x_io, z_val_in) - else: - y_io = dir1 * y_range + y_min - z_io = dir2 * z_range + z_min - x_io = np.full_like(y_io, z_val_in) - - if mesh_type == "structured": - # contourf plots image like for griddata, therefore transpose - plane = inter.interpn( - pos, field, np.array((x_io, y_io, z_io)).T, bounds_error=False - ).T - else: - plane = inter.griddata( - pos, field, (x_io, y_io, z_io), method="linear" - ) - if z_dir == "z": - z_io = plane - elif z_dir == "y": - y_io = plane - else: - x_io = plane - return x_io, y_io, z_io - - def update(__): - """Widget update.""" - z_dir_in = radio.value_selected[0] - if z_dir_in != container.z_dir_tmp: - sax.clear() - container.z_height = Slider( - sax, - z_dir_in + " value", - ax_info[z_dir_in][0], - ax_info[z_dir_in][1], - valinit=ax_info[z_dir_in][0] + ax_info[z_dir_in][2] / 2.0, - valstep=ax_info[z_dir_in][3], - ) - container.z_height.on_changed(update) - container.z_dir_tmp = z_dir_in - z_val = container.z_height.val - ax.clear() - xx, yy, zz = get_plane(z_val, z_dir_in) - cont = ax.contourf( - xx, - yy, - zz, - vmin=field.min(), - vmax=field.max(), - levels=levels, - zdir=z_dir_in, - offset=z_val, + Returns + ------- + ax : :class:`Axes` + Axis containing the plot. 
+ """ + dim = len(pos) + assert dim > 1 + assert not latlon or dim == 2 + if dim == 2 and contour_plot: + return _plot_2d( + pos, field, mesh_type, fig, ax, latlon, ax_names, **kwargs + ) + pos = pos[::-1] if latlon else pos + field = field.T if (latlon and mesh_type != "unstructured") else field + ax_names = _ax_names(dim, latlon, ax_names) + # init planes + planes = rotation_planes(dim) + plane_names = [f" {ax_names[p[0]]} - {ax_names[p[1]]}" for p in planes] + ax_ends = [[p.min(), p.max()] for p in pos] + ax_rngs = [end[1] - end[0] for end in ax_ends] + ax_steps = [rng / resolution for rng in ax_rngs] + ax_extents = [ax_ends[p[0]] + ax_ends[p[1]] for p in planes] + # create figure + reformat = fig is None and ax is None + fig, ax = get_fig_ax(fig, ax) + ax.set_title(f"Field {dim}D {mesh_type} {field.shape}") + if reformat: # only format fig if it was created here + fig.set_size_inches(8, 5.5 + 0.5 * (dim - 2)) + # init additional axis, radio-buttons and sliders + s_frac = 0.5 * (dim - 2) / (6 + 0.5 * (dim - 2)) + s_size = s_frac / max(dim - 2, 1) + left, bottom = (0.25, s_frac + 0.13) if dim > 2 else (None, None) + fig.subplots_adjust(left=left, bottom=bottom) + slider = [] + for i in range(dim - 2, 0, -1): + slider_ax = fig.add_axes([0.3, i * s_size, 0.435, s_size * 0.6]) + slider.append(Slider(slider_ax, "", 0, 1, facecolor="grey")) + slider[-1].vline.set_color("k") + # create radio buttons + if dim > 2: + rax = fig.add_axes( + [0.05, 0.85 - 2 * s_frac, 0.15, 2 * s_frac], frame_on=0, alpha=0 ) - cont.cmap.set_under("k", alpha=0.0) - cont.cmap.set_bad("k", alpha=0.0) - ax.set_xlabel("X") - ax.set_ylabel("Y") - ax.set_zlabel("Z") - ax.set_xlim([x_min, x_max]) - ax.set_ylim([y_min, y_max]) - ax.set_zlim([z_min, z_max]) - ax.set_title(title) + rax.set_title(" Plane", loc="left") + radio = RadioButtons(rax, plane_names, activecolor="grey") + # make radio buttons circular + rpos = rax.get_position().get_points() + fh, fw = fig.get_figheight(), fig.get_figwidth() + 
rscale = (rpos[:, 1].ptp() / rpos[:, 0].ptp()) * (fh / fw) + for circ in radio.circles: + circ.set_radius(0.06) + circ.height /= rscale + elif mesh_type == "unstructured" and convex_hull: + # show convex hull in 2D + hull = ConvexHull(pos.T) + for simplex in hull.simplices: + ax.plot(pos[0, simplex], pos[1, simplex], "k") + # init imshow and colorbar axis + grid = np.mgrid[0 : 1 : resolution * 1j, 0 : 1 : resolution * 1j] + f_ini, vmin, vmax = np.full_like(grid[0], np.nan), field.min(), field.max() + im = ax.imshow( + f_ini.T, interpolation="bicubic", origin="lower", vmin=vmin, vmax=vmax + ) + + # actions + def inter_plane(cuts, axes): + """Interpolate plane.""" + plane_ax = [] + for i, (rng, end, cut) in enumerate(zip(ax_rngs, ax_ends, cuts)): + if i in axes: + plane_ax.append(grid[axes.index(i)] * rng + end[0]) + else: + plane_ax.append(np.full_like(grid[0], cut, dtype=float)) + # needs to be a tuple + plane_ax = tuple(plane_ax) + if mesh_type != "unstructured": + return inter.interpn(pos, field, plane_ax, bounds_error=False) + return inter.griddata(pos.T, field, plane_ax, method="nearest") + + def update_field(*args): + """Sliders update.""" + p = plane_names.index(radio.value_selected) if dim > 2 else 0 + # dummy cut values for selected plane-axes (setting to 0) + cuts = [s.val for s in slider] + cuts.insert(planes[p][0], 0) + cuts.insert(planes[p][1], 0) + im.set_array(inter_plane(cuts, planes[p]).T) fig.canvas.draw_idle() - return cont - container.z_height.on_changed(update) - radio.on_clicked(update) - cont = update(0) - cax = plt.axes([0.85, 0.2, 0.03, 0.6]) - fig.colorbar(cont, cax=cax, ax=ax) + def update_plane(label): + """Radio button update.""" + p = plane_names.index(label) + cut_select = [i for i in range(dim) if i not in planes[p]] + # reset sliders + for i, s in zip(cut_select, slider): + s.label.set_text(ax_names[i]) + s.valmin, s.valmax = ax_ends[i] + s.valinit = ax_ends[i][0] + ax_rngs[i] / 2.0 + s.valstep = ax_steps[i] + 
s.ax.set_xlim(*ax_ends[i]) + # update representation + s.poly.xy[:2] = (s.valmin, 0), (s.valmin, 1) + s.vline.set_data(2 * [s.valinit], [-0.1, 1.1]) + s.reset() + im.set_extent(ax_extents[p]) + if aspect == "quad": + asp = ax_rngs[planes[p][0]] / ax_rngs[planes[p][1]] + if aspect is not None: + ax.set_aspect(asp if aspect == "quad" else aspect) + ax.set_xlabel(ax_names[planes[p][0]]) + ax.set_ylabel(ax_names[planes[p][1]]) + update_field() + + # initial plot on xy plane + update_plane(plane_names[0]) + # bind actions + if dim > 2: + radio.on_clicked(update_plane) + for s in slider: + s.on_changed(update_field) + if show_colorbar: + fig.colorbar(im, ax=ax) fig.show() return ax @@ -229,25 +297,26 @@ def plot_vec_field(fld, field="field", fig=None, ax=None): # pragma: no cover Axes to plot on. If `None`, a new one will be added to the figure. Default: `None` """ - if fld.mesh_type != "structured": + if fld.mesh_type == "unstructured": raise RuntimeError( - "Only structured vector fields are supported" - + " for plotting. Please create one on a structured grid." + "Only structured vector fields are supported " + "for plotting. Please create one on a structured grid." 
) - plot_field = getattr(fld, field) - assert not (fld.pos is None or plot_field is None) + plt_fld = getattr(fld, field) + assert not (fld.pos is None or plt_fld is None) - norm = np.sqrt(plot_field[0, :].T ** 2 + plot_field[1, :].T ** 2) + norm = np.sqrt(plt_fld[0, :].T ** 2 + plt_fld[1, :].T ** 2) - fig, ax = _get_fig_ax(fig, ax) - title = "Field 2D " + fld.mesh_type + ": " + str(plot_field.shape) - x, y, __ = pos2xyz(fld.pos, max_dim=2) + fig, ax = get_fig_ax(fig, ax) + title = f"Field 2D {fld.mesh_type}: {plt_fld.shape}" + x = fld.pos[0] + y = fld.pos[1] sp = plt.streamplot( x, y, - plot_field[0, :].T, - plot_field[1, :].T, + plt_fld[0, :].T, + plt_fld[1, :].T, color=norm, linewidth=norm / 2, ) @@ -257,3 +326,47 @@ def plot_vec_field(fld, field="field", fig=None, ax=None): # pragma: no cover fig.colorbar(sp.lines) fig.show() return ax + + +def _ax_names(dim, latlon=False, ax_names=None): + if ax_names is not None: + assert len(ax_names) >= dim + return ax_names[:dim] + if dim == 2 and latlon: + return ["lon", "lat"] + if dim <= 3: + return ["$x$", "$y$", "$z$"][:dim] + (dim == 1) * ["field"] + return [f"$x_{{{i}}}$" for i in range(dim)] + + +def _plot_2d( + pos, + field, + mesh_type, + fig=None, + ax=None, + latlon=False, + ax_names=None, + levels=64, + antialias=True, +): # pragma: no cover + """Plot a 2d field with a contour plot.""" + fig, ax = get_fig_ax(fig, ax) + title = f"Field 2D {mesh_type}: {field.shape}" + ax_names = _ax_names(2, latlon, ax_names=ax_names) + x, y = pos[::-1] if latlon else pos + if mesh_type == "unstructured": + cont = ax.tricontourf(x, y, field.ravel(), levels=levels) + if antialias: + ax.tricontour(x, y, field.ravel(), levels=levels, zorder=-10) + else: + plt_fld = field if latlon else field.T + cont = ax.contourf(x, y, plt_fld, levels=levels) + if antialias: + ax.contour(x, y, plt_fld, levels=levels, zorder=-10) + ax.set_xlabel(ax_names[0]) + ax.set_ylabel(ax_names[1]) + ax.set_title(title) + fig.colorbar(cont) + fig.show() + 
return ax diff --git a/gstools/field/srf.py b/gstools/field/srf.py index 74b6ea4c6..de0951241 100644 --- a/gstools/field/srf.py +++ b/gstools/field/srf.py @@ -9,15 +9,11 @@ .. autosummary:: SRF """ -# pylint: disable=C0103 - +# pylint: disable=C0103, W0221, E1102 import numpy as np from gstools.field.generator import RandMeth, IncomprRandMeth -from gstools.field.tools import reshape_field_from_unstruct_to_struct from gstools.field.base import Field from gstools.field.upscaling import var_coarse_graining, var_no_scaling -from gstools.field.condition import ordinary, simple -from gstools.krige.tools import set_condition __all__ = ["SRF"] @@ -27,11 +23,13 @@ "VectorField": IncomprRandMeth, "VelocityField": IncomprRandMeth, } +"""dict: Standard generators for spatial random fields.""" + UPSCALING = { "coarse_graining": var_coarse_graining, "no_scaling": var_no_scaling, } -CONDITION = {"ordinary": ordinary, "simple": simple} +"""dict: Upscaling routines for spatial random fields.""" class SRF(Field): @@ -41,8 +39,16 @@ class SRF(Field): ---------- model : :any:`CovModel` Covariance Model of the spatial random field. - mean : :class:`float`, optional - mean value of the SRF + mean : :class:`float` or :any:`callable`, optional + Mean of the SRF (in normal form). Could also be a callable. + The default is 0.0. + normalizer : :any:`None` or :any:`Normalizer`, optional + Normalizer to be applied to the SRF to transform the field values. + The default is None. + trend : :any:`None` or :class:`float` or :any:`callable`, optional + Trend of the SRF (in transformed form). + If no normalizer is applied, this behaves equal to 'mean'. + The default is None. upscaling : :class:`str`, optional Method to be used for upscaling the variance at each point depending on the related element volume. 
@@ -78,31 +84,20 @@ def __init__( self, model, mean=0.0, + normalizer=None, + trend=None, upscaling="no_scaling", generator="RandMeth", - **generator_kwargs + **generator_kwargs, ): - super().__init__(model, mean) + super().__init__(model, mean=mean, normalizer=normalizer, trend=trend) # initialize private attributes self._generator = None self._upscaling = None self._upscaling_func = None - # condition related - self._cond_pos = None - self._cond_val = None - self._krige_type = None # initialize attributes - self.raw_field = None - self.krige_field = None - self.err_field = None - self.krige_var = None - self.set_generator(generator, **generator_kwargs) self.upscaling = upscaling - if self._value_type is None: - raise ValueError( - "Unknown field value type, " - + "specify 'scalar' or 'vector' before calling SRF." - ) + self.set_generator(generator, **generator_kwargs) def __call__( self, pos, seed=np.nan, point_volumes=0.0, mesh_type="unstructured" @@ -132,105 +127,19 @@ def __call__( field : :class:`numpy.ndarray` the SRF """ - self.mesh_type = mesh_type # update the model/seed in the generator if any changes were made self.generator.update(self.model, seed) - # internal conversation - x, y, z, self.pos, mt_gen, mt_changed, axis_lens = self._pre_pos( - pos, mesh_type - ) + # get isometrized positions and the resulting field-shape + iso_pos, shape = self.pre_pos(pos, mesh_type) # generate the field - self.raw_field = self.generator.__call__(x, y, z, mt_gen) - # reshape field if we got an unstructured mesh - if mt_changed: - self.raw_field = reshape_field_from_unstruct_to_struct( - self.model.dim, self.raw_field, axis_lens - ) - # apply given conditions to the field - if self.condition: - ( - cond_field, - krige_field, - err_field, - krigevar, - info, - ) = self.cond_func(self) - # store everything in the class - self.field = cond_field - self.krige_field = krige_field - self.err_field = err_field - self.krige_var = krigevar - if "mean" in info: # ordinary krging 
estimates mean - self.mean = info["mean"] - else: - self.field = self.raw_field + self.mean + field = np.reshape(self.generator(iso_pos), shape) # upscaled variance if not np.isscalar(point_volumes) or not np.isclose(point_volumes, 0): scaled_var = self.upscaling_func(self.model, point_volumes) - self.field -= self.mean - self.field *= np.sqrt(scaled_var / self.model.sill) - self.field += self.mean - return self.field - - def set_condition( - self, cond_pos=None, cond_val=None, krige_type="ordinary" - ): - """Condition a given spatial random field with measurements. - - Parameters - ---------- - cond_pos : :class:`list` - the position tuple of the conditions - cond_val : :class:`numpy.ndarray` - the values of the conditions - krige_type : :class:`str`, optional - Used kriging type for conditioning. - Either 'ordinary' or 'simple'. - Default: 'ordinary' - - Notes - ----- - When using "ordinary" as ``krige_type``, the ``mean`` attribute of the - spatial random field will be overwritten with the estimated mean. 
- """ - if cond_pos is not None: - self._cond_pos, self._cond_val = set_condition( - cond_pos, cond_val, self.model.dim - ) - else: - self._cond_pos = self._cond_val = None - self._krige_type = krige_type - if krige_type not in CONDITION: - raise ValueError( - "gstools.SRF: Unknown kriging method: " + krige_type - ) - - def del_condition(self): - """Delete Conditions.""" - self._cond_pos = None - self._cond_val = None - self._krige_type = None - - @property - def cond_pos(self): - """:class:`list`: The position tuple of the conditions.""" - return self._cond_pos - - @property - def cond_val(self): - """:class:`list`: The values of the conditions.""" - return self._cond_val - - @property - def condition(self): - """:any:`bool`: State if conditions ar given.""" - return self._cond_pos is not None - - def cond_func(self, *args, **kwargs): - """Conditioning method applied to the field.""" - if self.condition: - return CONDITION[self._krige_type](*args, **kwargs) - return None + if np.size(scaled_var) > 1: + scaled_var = np.reshape(scaled_var, shape) + field *= np.sqrt(scaled_var / self.model.sill) + return self.post_field(field) def upscaling_func(self, *args, **kwargs): """Upscaling method applied to the field variance.""" @@ -250,9 +159,13 @@ def set_generator(self, generator, **generator_kwargs): if generator in GENERATOR: gen = GENERATOR[generator] self._generator = gen(self.model, **generator_kwargs) - self._value_type = self._generator.value_type + self.value_type = self._generator.value_type else: - raise ValueError("gstools.SRF: Unknown generator: " + generator) + raise ValueError(f"gstools.SRF: Unknown generator: {generator}") + for val in [self.mean, self.trend]: + if not callable(val) and val is not None: + if np.size(val) > 1 and self.value_type == "scalar": + raise ValueError(f"Mean/Trend: Wrong size ({val})") @property def generator(self): @@ -277,18 +190,13 @@ def upscaling(self, upscaling): self._upscaling = upscaling self._upscaling_func = 
UPSCALING[upscaling] else: - raise ValueError( - "gstools.SRF: Unknown upscaling method: " + upscaling - ) + raise ValueError(f"SRF: Unknown upscaling method: {upscaling}") def __repr__(self): """Return String representation.""" - return "SRF(model={0}, mean={1}, generator={2}".format( - self.model, self.mean, self.generator + return "{0}(model={1}{2}, generator={3})".format( + self.name, + self.model.name, + self._fmt_mean_norm_trend(), + self.generator.name, ) - - -if __name__ == "__main__": # pragma: no cover - import doctest - - doctest.testmod() diff --git a/gstools/field/summator.pyx b/gstools/field/summator.pyx index 490c7e634..80b5cf4c2 100644 --- a/gstools/field/summator.pyx +++ b/gstools/field/summator.pyx @@ -1,7 +1,7 @@ #cython: language_level=3, boundscheck=False, wraparound=False, cdivision=True # -*- coding: utf-8 -*- """ -This is the variogram estimater, implemented in cython. +This is the randomization method summator, implemented in cython. """ import numpy as np @@ -16,7 +16,7 @@ DTYPE = np.double ctypedef np.double_t DTYPE_t -def summate_unstruct( +def summate( const double[:,:] cov_samples, const double[:] z_1, const double[:] z_2, @@ -41,97 +41,6 @@ def summate_unstruct( return np.asarray(summed_modes) -def summate_struct( - const double[:,:] cov_samples, - const double[:] z_1, - const double[:] z_2, - const double[:] x, - const double[:] y=None, - const double[:] z=None, -): - if y == None and z == None: - return summate_struct_1d(cov_samples, z_1, z_2, x) - elif z == None: - return summate_struct_2d(cov_samples, z_1, z_2, x, y) - else: - return summate_struct_3d(cov_samples, z_1, z_2, x, y, z) - -def summate_struct_1d( - const double[:,:] cov_samples, - const double[:] z_1, - const double[:] z_2, - const double[:] x, - ): - - cdef int i, j, X_len, N - cdef double phase - - X_len = x.shape[0] - N = cov_samples.shape[1] - - cdef double[:] summed_modes = np.zeros(X_len, dtype=DTYPE) - - for i in prange(X_len, nogil=True): - for j in range(N): - 
phase = cov_samples[0,j] * x[i] - summed_modes[i] += z_1[j] * cos(phase) + z_2[j] * sin(phase) - - return np.asarray(summed_modes) - -def summate_struct_2d( - const double[:,:] cov_samples, - const double[:] z_1, - const double[:] z_2, - const double[:] x, - const double[:] y, - ): - cdef int i, j, k, X_len, Y_len, N - cdef double phase - - X_len = x.shape[0] - Y_len = y.shape[0] - N = cov_samples.shape[1] - - cdef double[:,:] summed_modes = np.zeros((X_len, Y_len), dtype=DTYPE) - - for i in prange(X_len, nogil=True): - for j in range(Y_len): - for k in range(N): - phase = cov_samples[0,k] * x[i] + cov_samples[1,k] * y[j] - summed_modes[i,j] += z_1[k] * cos(phase) + z_2[k] * sin(phase) - - return np.asarray(summed_modes) - -def summate_struct_3d( - const double[:,:] cov_samples, - const double[:] z_1, - const double[:] z_2, - const double[:] x, - const double[:] y, - const double[:] z, - ): - cdef int i, j, k, l, X_len, Y_len, Z_len, N - cdef double phase - - X_len = x.shape[0] - Y_len = y.shape[0] - Z_len = z.shape[0] - N = cov_samples.shape[1] - - cdef double[:,:,:] summed_modes = np.zeros((X_len, Y_len, Z_len), dtype=DTYPE) - - for i in prange(X_len, nogil=True): - for j in range(Y_len): - for k in range(Z_len): - for l in range(N): - phase = ( - cov_samples[0,l] * x[i] + - cov_samples[1,l] * y[j] + - cov_samples[2,l] * z[k] - ) - summed_modes[i,j,k] += z_1[l] * cos(phase) + z_2[l] * sin(phase) - - return np.asarray(summed_modes) cdef (double) abs_square(const double[:] vec) nogil: cdef int i @@ -142,7 +51,8 @@ cdef (double) abs_square(const double[:] vec) nogil: return r -def summate_incompr_unstruct( + +def summate_incompr( const double[:,:] cov_samples, const double[:] z_1, const double[:] z_2, @@ -174,89 +84,3 @@ def summate_incompr_unstruct( summed_modes[d,i] += proj[d] * (z_1[j] * cos(phase) + z_2[j] * sin(phase)) return np.asarray(summed_modes) - -def summate_incompr_struct( - const double[:,:] cov_samples, - const double[:] z_1, - const double[:] z_2, - 
const double[:] x, - const double[:] y=None, - const double[:] z=None, -): - if z == None: - return summate_incompr_struct_2d(cov_samples, z_1, z_2, x, y) - else: - return summate_incompr_struct_3d(cov_samples, z_1, z_2, x, y, z) - -def summate_incompr_struct_2d( - const double[:,:] cov_samples, - const double[:] z_1, - const double[:] z_2, - const double[:] x, - const double[:] y, - ): - cdef int i, j, k, d, X_len, Y_len, N - cdef double phase - cdef int dim = 2 - cdef double k_2 - - cdef double[:] e1 = np.zeros(dim, dtype=DTYPE) - e1[0] = 1. - cdef double[:] proj = np.empty(dim, dtype=DTYPE) - - X_len = x.shape[0] - Y_len = y.shape[0] - N = cov_samples.shape[1] - - cdef double[:,:,:] summed_modes = np.zeros((dim, X_len, Y_len), dtype=DTYPE) - - for i in range(X_len): - for j in range(Y_len): - for k in range(N): - k_2 = abs_square(cov_samples[:,k]) - phase = cov_samples[0,k] * x[i] + cov_samples[1,k] * y[j] - for d in range(dim): - proj[d] = e1[d] - cov_samples[d,k] * cov_samples[0,k] / k_2 - summed_modes[d,i,j] += proj[d] * (z_1[k] * cos(phase) + z_2[k] * sin(phase)) - - return np.asarray(summed_modes) - -def summate_incompr_struct_3d( - const double[:,:] cov_samples, - const double[:] z_1, - const double[:] z_2, - const double[:] x, - const double[:] y, - const double[:] z, - ): - cdef int i, j, k, l, d, X_len, Y_len, Z_len, N - cdef double phase - cdef int dim = 3 - cdef double k_2 - - cdef double[:] e1 = np.zeros(dim, dtype=DTYPE) - e1[0] = 1. 
- cdef double[:] proj = np.empty(dim, dtype=DTYPE) - - X_len = x.shape[0] - Y_len = y.shape[0] - Z_len = z.shape[0] - N = cov_samples.shape[1] - - cdef double[:,:,:,:] summed_modes = np.zeros((dim, X_len, Y_len, Z_len), dtype=DTYPE) - - for i in range(X_len): - for j in range(Y_len): - for k in range(Z_len): - for l in range(N): - k_2 = abs_square(cov_samples[:,l]) - phase = ( - cov_samples[0,l] * x[i] + - cov_samples[1,l] * y[j] + - cov_samples[2,l] * z[k] - ) - for d in range(dim): - proj[d] = e1[d] - cov_samples[d,l] * cov_samples[0,l] / k_2 - summed_modes[d,i,j,k] += proj[d] * (z_1[l] * cos(phase) + z_2[l] * sin(phase)) - - return np.asarray(summed_modes) diff --git a/gstools/field/tools.py b/gstools/field/tools.py index e9f289a23..fba20f94f 100644 --- a/gstools/field/tools.py +++ b/gstools/field/tools.py @@ -1,262 +1,265 @@ # -*- coding: utf-8 -*- """ -GStools subpackage providing tools for the spatial random field. +GStools subpackage providing tools for Fields. .. currentmodule:: gstools.field.tools The following classes and functions are provided .. 
autosummary:: - reshape_input - reshape_input_axis_from_unstruct - reshape_input_axis_from_struct - check_mesh - make_isotropic - make_anisotropic - unrotate_mesh - rotate_mesh - reshape_axis_from_struct_to_unstruct - reshape_field_from_unstruct_to_struct + fmt_mean_norm_trend + to_vtk_helper + generate_on_mesh """ -# pylint: disable=C0103 - +# pylint: disable=W0212, C0415 import numpy as np -from gstools.tools.geometric import r3d_x, r3d_y, r3d_z - -__all__ = [ - "reshape_input", - "reshape_input_axis_from_unstruct", - "reshape_input_axis_from_struct", - "check_mesh", - "make_isotropic", - "make_anisotropic", - "unrotate_mesh", - "rotate_mesh", - "reshape_axis_from_struct_to_unstruct", - "reshape_field_from_unstruct_to_struct", -] - - -# Geometric functions ######################################################### - - -def reshape_input(x, y=None, z=None, mesh_type="unstructured"): - """Reshape given axes, depending on the mesh type.""" - if mesh_type == "unstructured": - x, y, z = reshape_input_axis_from_unstruct(x, y, z) - elif mesh_type == "structured": - x, y, z = reshape_input_axis_from_struct(x, y, z) - return x, y, z - - -def reshape_input_axis_from_unstruct(x, y=None, z=None): - """Reshape given axes for vectorisation on unstructured grid.""" - x = np.atleast_1d(x) - y = np.atleast_1d(y) - z = np.atleast_1d(z) - x = np.reshape(x, (len(x), 1)) - y = np.reshape(y, (len(y), 1)) - z = np.reshape(z, (len(z), 1)) - return (x, y, z) - - -def reshape_input_axis_from_struct(x, y=None, z=None): - """Reshape given axes for vectorisation on unstructured grid.""" - x = np.atleast_1d(x) - y = np.atleast_1d(y) - z = np.atleast_1d(z) - x = np.reshape(x, (len(x), 1, 1, 1)) - y = np.reshape(y, (1, len(y), 1, 1)) - z = np.reshape(z, (1, 1, len(z), 1)) - return (x, y, z) - - -# SRF helpers ################################################################# - - -def check_mesh(dim, x, y, z, mesh_type): - """Do a basic check of the shapes of the input arrays.""" - if dim >= 2: - 
if y is None: - raise ValueError( - "The y-component is missing for " "{0} dimensions".format(dim) - ) - if dim == 3: - if z is None: - raise ValueError( - "The z-component is missing for " "{0} dimensions".format(dim) +import meshio + +from gstools.normalizer import Normalizer +from gstools.tools.export import to_vtk, vtk_export +from gstools.tools.misc import list_format + + +__all__ = ["fmt_mean_norm_trend", "to_vtk_helper", "generate_on_mesh"] + + +def _fmt_func_val(f_cls, func_val): # pragma: no cover + if func_val is None: + return str(None) + if callable(func_val): + return "" # or format(func_val.__name__) + if np.size(func_val) > 1: + return list_format(func_val, prec=f_cls.model._prec) + return "{0:.{p}}".format(float(func_val), p=f_cls.model._prec) + + +def _fmt_normalizer(f_cls): # pragma: no cover + norm = f_cls.normalizer + return str(None) if norm.__class__ is Normalizer else norm.name + + +def fmt_mean_norm_trend(f_cls): # pragma: no cover + """Format string repr. for mean, normalizer and trend of a field.""" + args = [ + "mean=" + _fmt_func_val(f_cls, f_cls.mean), + "normalizer=" + _fmt_normalizer(f_cls), + "trend=" + _fmt_func_val(f_cls, f_cls.trend), + ] + return "".join([", " + arg for arg in args if not arg.endswith("None")]) + + +def to_vtk_helper( + f_cls, filename=None, field_select="field", fieldname="field" +): # pragma: no cover + """Create a VTK/PyVista grid of the field or save it as a VTK file. + + This is an internal helper that will handle saving or creating objects + + Parameters + ---------- + f_cls : :any:`Field` + Field class in use. + filename : :class:`str` + Filename of the file to be saved, including the path. Note that an + ending (.vtr or .vtu) will be added to the name. If ``None`` is + passed, a PyVista dataset of the appropriate type will be returned. + field_select : :class:`str`, optional + Field that should be stored. Can be: + "field", "raw_field", "krige_field", "err_field" or "krige_var". 
+ Default: "field" + fieldname : :class:`str`, optional + Name of the field in the VTK file. Default: "field" + """ + if f_cls.value_type == "vector": + if hasattr(f_cls, field_select): + field = getattr(f_cls, field_select) + else: + field = None + if not (f_cls.pos is None or field is None or f_cls.mesh_type is None): + suf = ["_X", "_Y", "_Z"] + fields = {} + for i in range(f_cls.model.dim): + fields[fieldname + suf[i]] = field[i] + if filename is None: + return to_vtk(f_cls.pos, fields, f_cls.mesh_type) + return vtk_export(filename, f_cls.pos, fields, f_cls.mesh_type) + raise ValueError(f"Field.to_vtk: '{field_select}' not available.") + if f_cls.value_type == "scalar": + if hasattr(f_cls, field_select): + field = getattr(f_cls, field_select) + else: + field = None + if not (f_cls.pos is None or field is None or f_cls.mesh_type is None): + if filename is None: + return to_vtk(f_cls.pos, {fieldname: field}, f_cls.mesh_type) + return vtk_export( + filename, f_cls.pos, {fieldname: field}, f_cls.mesh_type ) - if mesh_type == "unstructured": - if dim >= 2: - try: - if len(x) != len(y): - raise ValueError( - "len(x) = {0} != len(y) = {1} " - "for unstructured grids".format(len(x), len(y)) - ) - except TypeError: - pass - if dim == 3: - try: - if len(x) != len(z): - raise ValueError( - "len(x) = {0} != len(z) = {1} " - "for unstructured grids".format(len(x), len(z)) - ) - except TypeError: - pass - elif mesh_type == "structured": - pass - else: - raise ValueError("Unknown mesh type {0}".format(mesh_type)) + raise ValueError(f"Field.to_vtk: '{field_select}' not available.") + raise ValueError(f"Unknown field value type: {f_cls.value_type}") -def make_isotropic(dim, anis, y, z): - """Stretch given axes in order to implement anisotropy.""" - if dim == 1: - return y, z - if dim == 2: - return y / anis[0], z - if dim == 3: - return y / anis[0], z / anis[1] - return None - - -def make_anisotropic(dim, anis, y, z): - """Re-stretch given axes.""" - if dim == 1: - return y, z 
- if dim == 2: - return y * anis[0], z - if dim == 3: - return y * anis[0], z * anis[1] - return None - - -def unrotate_mesh(dim, angles, x, y, z): - """Rotate axes in order to implement rotation. - - for 3d: yaw, pitch, and roll angles are alpha, beta, and gamma, - of intrinsic rotation rotation whose Tait-Bryan angles are - alpha, beta, gamma about axes x, y, z. +def generate_on_mesh( + f_cls, mesh, points="centroids", direction="all", name="field", **kwargs +): + """Generate a field on a given meshio, ogs5py or pyvista mesh. + + Parameters + ---------- + f_cls : :any:`Field` + The field class in use. + mesh : meshio.Mesh or ogs5py.MSH or PyVista mesh + The given meshio, ogs5py, or PyVista mesh + points : :class:`str`, optional + The points to evaluate the field at. + Either the "centroids" of the mesh cells + (calculated as mean of the cell vertices) or the "points" + of the given mesh. + Default: "centroids" + direction : :class:`str` or :class:`list`, optional + Here you can state which direction should be choosen for + lower dimension. For example, if you got a 2D mesh in xz direction, + you have to pass "xz". By default, all directions are used. + One can also pass a list of indices. + Default: "all" + name : :class:`str` or :class:`list` of :class:`str`, optional + Name(s) to store the field(s) in the given mesh as point_data or + cell_data. If to few names are given, digits will be appended. + Default: "field" + **kwargs + Keyword arguments forwareded to `Field.__call__`. + + Notes + ----- + This will store the field in the given mesh under the given name, + if a meshio or PyVista mesh was given. 
+ + See: https://github.com/nschloe/meshio + + See: https://github.com/GeoStat-Framework/ogs5py + + See: https://github.com/pyvista/pyvista """ - if dim == 1: - return x, y, z - if dim == 2: - # extract 2d rotation matrix - rot_mat = r3d_z(-angles[0])[0:2, 0:2] - pos_tuple = np.vstack((x, y)) - pos_tuple = np.vsplit(np.dot(rot_mat, pos_tuple), 2) - x = pos_tuple[0].reshape(np.shape(x)) - y = pos_tuple[1].reshape(np.shape(y)) - return x, y, z - if dim == 3: - alpha = -angles[0] - beta = -angles[1] - gamma = -angles[2] - rot_mat = np.dot(np.dot(r3d_z(alpha), r3d_y(beta)), r3d_x(gamma)) - pos_tuple = np.vstack((x, y, z)) - pos_tuple = np.vsplit(np.dot(rot_mat, pos_tuple), 3) - x = pos_tuple[0].reshape(np.shape(x)) - y = pos_tuple[1].reshape(np.shape(y)) - z = pos_tuple[2].reshape(np.shape(z)) - return x, y, z - return None - + has_pyvista = False + has_ogs5py = False -def rotate_mesh(dim, angles, x, y, z): - """Rotate axes. + try: + import pyvista as pv - for 3d: yaw, pitch, and roll angles are alpha, beta, and gamma, - of intrinsic rotation rotation whose Tait-Bryan angles are - alpha, beta, gamma about axes x, y, z. 
- """ - if dim == 1: - return x, y, z - if dim == 2: - # extract 2d rotation matrix - rot_mat = r3d_z(angles[0])[0:2, 0:2] - pos_tuple = np.vstack((x, y)) - pos_tuple = np.vsplit(np.dot(rot_mat, pos_tuple), 2) - x = pos_tuple[0].reshape(np.shape(x)) - y = pos_tuple[1].reshape(np.shape(y)) - return x, y, z - if dim == 3: - alpha = angles[0] - beta = angles[1] - gamma = angles[2] - rot_mat = np.dot(np.dot(r3d_x(gamma), r3d_y(beta)), r3d_z(alpha)) - pos_tuple = np.vstack((x, y, z)) - pos_tuple = np.vsplit(np.dot(rot_mat, pos_tuple), 3) - x = pos_tuple[0].reshape(np.shape(x)) - y = pos_tuple[1].reshape(np.shape(y)) - z = pos_tuple[2].reshape(np.shape(z)) - return x, y, z - return None + has_pyvista = True + except ImportError: + pass + try: + import ogs5py as ogs + has_ogs5py = True + except ImportError: + pass -def reshape_axis_from_struct_to_unstruct( - dim, x, y=None, z=None, indexing="ij" -): - """Reshape given axes from struct to unstruct for rotation.""" - if dim == 1: - return x, y, z, (len(x),) - if dim == 2: - x_u, y_u = np.meshgrid(x, y, indexing=indexing) - len_unstruct = len(x) * len(y) - x_u = np.reshape(x_u, len_unstruct) - y_u = np.reshape(y_u, len_unstruct) - return x_u, y_u, z, (len(x), len(y)) - if dim == 3: - x_u, y_u, z_u = np.meshgrid(x, y, z, indexing=indexing) - len_unstruct = len(x) * len(y) * len(z) - x_u = np.reshape(x_u, len_unstruct) - y_u = np.reshape(y_u, len_unstruct) - z_u = np.reshape(z_u, len_unstruct) - return x_u, y_u, z_u, (len(x), len(y), len(z)) - return None + if isinstance(direction, str) and direction == "all": + select = list(range(f_cls.dim)) + elif isinstance(direction, str): + select = _get_select(direction)[: f_cls.dim] + else: + select = direction[: f_cls.dim] + if len(select) < f_cls.dim: + raise ValueError( + f"Field.mesh: need at least {f_cls.dim} direction(s), " + f"got '{direction}'" + ) + # convert pyvista mesh + if has_pyvista and pv.is_pyvista_dataset(mesh): + if points == "centroids": + pnts = 
mesh.cell_centers().points.T[select] + else: + pnts = mesh.points.T[select] + out = f_cls.unstructured(pos=pnts, **kwargs) + # Deal with the output + fields = [out] if isinstance(out, np.ndarray) else out + if f_cls.value_type == "vector": + fields = [f.T for f in fields] + for f_name, field in zip(_names(name, len(fields)), fields): + mesh[f_name] = field + # convert ogs5py mesh + elif has_ogs5py and isinstance(mesh, ogs.MSH): + if points == "centroids": + pnts = mesh.centroids_flat.T[select] + else: + pnts = mesh.NODES.T[select] + out = f_cls.unstructured(pos=pnts, **kwargs) + # convert meshio mesh + elif isinstance(mesh, meshio.Mesh): + if points == "centroids": + # define unique order of cells + offset = [] + length = [] + mesh_dim = mesh.points.shape[1] + if mesh_dim < f_cls.dim: + raise ValueError("Field.mesh: mesh dimension too low!") + pnts = np.empty((0, mesh_dim), dtype=np.double) + for cell in mesh.cells: + pnt = np.mean(mesh.points[cell[1]], axis=1) + offset.append(pnts.shape[0]) + length.append(pnt.shape[0]) + pnts = np.vstack((pnts, pnt)) + # generate pos for __call__ + pnts = pnts.T[select] + out = f_cls.unstructured(pos=pnts, **kwargs) + fields = [out] if isinstance(out, np.ndarray) else out + if f_cls.value_type == "vector": + fields = [f.T for f in fields] + f_lists = [] + for field in fields: + f_list = [] + for off, leng in zip(offset, length): + f_list.append(field[off : off + leng]) + f_lists.append(f_list) + for f_name, f_list in zip(_names(name, len(f_lists)), f_lists): + mesh.cell_data[f_name] = f_list + else: + out = f_cls.unstructured(pos=mesh.points.T[select], **kwargs) + fields = [out] if isinstance(out, np.ndarray) else out + if f_cls.value_type == "vector": + fields = [f.T for f in fields] + for f_name, field in zip(_names(name, len(fields)), fields): + mesh.point_data[f_name] = field + else: + raise ValueError("Field.mesh: Unknown mesh format!") + return out -def reshape_field_from_unstruct_to_struct(dim, field, axis_lens): - 
"""Reshape the rotated field back to struct.""" - if dim == 1: - return field - if dim == 2: - field = np.reshape(field, axis_lens) - return field - if dim == 3: - field = np.reshape(field, axis_lens) - return field - return None +def _names(name, cnt): + name = [name] if isinstance(name, str) else list(name)[:cnt] + if len(name) < cnt: + name += [f"{name[-1]}{i + 1}" for i in range(cnt - len(name))] + return name def _get_select(direction): select = [] - if not (0 < len(direction) < 4): + if not 0 < len(direction) < 4: raise ValueError( - "Field.mesh: need 1 to 3 direction(s), got '{}'".format(direction) + f"Field.mesh: need 1 to 3 direction(s), got '{direction}'" ) for axis in direction: if axis == "x": if 0 in select: raise ValueError( - "Field.mesh: got duplicate directions {}".format(direction) + f"Field.mesh: got duplicate directions {direction}" ) select.append(0) elif axis == "y": if 1 in select: raise ValueError( - "Field.mesh: got duplicate directions {}".format(direction) + f"Field.mesh: got duplicate directions {direction}" ) select.append(1) elif axis == "z": if 2 in select: raise ValueError( - "Field.mesh: got duplicate directions {}".format(direction) + f"Field.mesh: got duplicate directions {direction}" ) select.append(2) else: - raise ValueError( - "Field.mesh: got unknown direction {}".format(axis) - ) + raise ValueError(f"Field.mesh: got unknown direction {axis}") return select diff --git a/gstools/field/upscaling.py b/gstools/field/upscaling.py index d52df39b6..6232b36d4 100644 --- a/gstools/field/upscaling.py +++ b/gstools/field/upscaling.py @@ -10,7 +10,7 @@ var_coarse_graining var_no_scaling """ - +# pylint: disable=W0613 import warnings import numpy as np diff --git a/gstools/krige/__init__.py b/gstools/krige/__init__.py index af319b1c9..76670d77a 100644 --- a/gstools/krige/__init__.py +++ b/gstools/krige/__init__.py @@ -10,14 +10,14 @@ .. 
autosummary:: :toctree: generated + Krige Simple Ordinary Universal ExtDrift Detrended - ----- """ +from gstools.krige.base import Krige from gstools.krige.methods import ( Simple, Ordinary, @@ -26,4 +26,4 @@ Detrended, ) -__all__ = ["Simple", "Ordinary", "Universal", "ExtDrift", "Detrended"] +__all__ = ["Krige", "Simple", "Ordinary", "Universal", "ExtDrift", "Detrended"] diff --git a/gstools/krige/base.py b/gstools/krige/base.py index 366d02cd3..80cc21b49 100755 --- a/gstools/krige/base.py +++ b/gstools/krige/base.py @@ -9,28 +9,37 @@ .. autosummary:: Krige """ -# pylint: disable=C0103 +# pylint: disable=C0103, W0221, E1102, R0201 import collections import numpy as np -# from scipy.linalg import inv from scipy.spatial.distance import cdist -from gstools.field.tools import reshape_field_from_unstruct_to_struct +import scipy.linalg as spl from gstools.field.base import Field -from gstools.krige.krigesum import krigesum -from gstools.krige.tools import ( - set_condition, - get_drift_functions, - no_trend, - eval_func, +from gstools.krige.krigesum import ( + calc_field_krige_and_variance, + calc_field_krige, ) +from gstools.krige.tools import set_condition, get_drift_functions +from gstools.tools.misc import eval_func +from gstools.tools.geometric import rotated_main_axes +from gstools.variogram import vario_estimate __all__ = ["Krige"] +P_INV = {"pinv": spl.pinv, "pinv2": spl.pinv2, "pinvh": spl.pinvh} +"""dict: Standard pseudo-inverse routines""" + + class Krige(Field): """ - A base class for kriging. + A Swiss Army knife for kriging. + + A Kriging class enabling the basic kriging routines: + Simple-, Ordinary-, Univseral-, External Drift- + and detrended/regression-Kriging as well as + Kriging the Mean [Wackernagel2003]_. 
Parameters ---------- @@ -40,10 +49,6 @@ class Krige(Field): tuple, containing the given condition positions (x, [y, z]) cond_val : :class:`numpy.ndarray` the values of the conditions - mean : :class:`float`, optional - mean value of the kriging field - ext_drift : :class:`numpy.ndarray` or :any:`None`, optional - the external drift values at the given cond. positions (only for EDK) drift_functions : :class:`list` of :any:`callable`, :class:`str` or :class:`int` Either a list of callable functions, an integer representing the polynomial order of the drift or one of the following strings: @@ -51,12 +56,73 @@ class Krige(Field): * "linear" : regional linear drift (equals order=1) * "quadratic" : regional quadratic drift (equals order=2) - trend_function : :any:`callable`, optional - A callable trend function. Should have the signiture: f(x, [y, z]) + ext_drift : :class:`numpy.ndarray` or :any:`None`, optional + the external drift values at the given cond. positions. + mean : :class:`float`, optional + mean value used to shift normalized conditioning data. + Could also be a callable. The default is None. + normalizer : :any:`None` or :any:`Normalizer`, optional + Normalizer to be applied to the input data to gain normality. + The default is None. + trend : :any:`None` or :class:`float` or :any:`callable`, optional + A callable trend function. Should have the signiture: f(x, [y, z, ...]) This is used for detrended kriging, where the trended is subtracted from the conditions before kriging is applied. This can be used for regression kriging, where the trend function is determined by an external regression algorithm. + If no normalizer is applied, this behaves equal to 'mean'. + The default is None. + unbiased : :class:`bool`, optional + Whether the kriging weights should sum up to 1, so the estimator + is unbiased. If unbiased is `False` and no drifts are given, + this results in simple kriging. 
+ Default: True + exact : :class:`bool`, optional + Whether the interpolator should reproduce the exact input values. + If `False`, `cond_err` is interpreted as measurement error + at the conditioning points and the result will be more smooth. + Default: False + cond_err : :class:`str`, :class :class:`float` or :class:`list`, optional + The measurement error at the conditioning points. + Either "nugget" to apply the model-nugget, a single value applied to + all points or an array with individual values for each point. + The "exact=True" variant only works with "cond_err='nugget'". + Default: "nugget" + pseudo_inv : :class:`bool`, optional + Whether the kriging system is solved with the pseudo inverted + kriging matrix. If `True`, this leads to more numerical stability + and redundant points are averaged. But it can take more time. + Default: True + pseudo_inv_type : :class:`str` or :any:`callable`, optional + Here you can select the algorithm to compute the pseudo-inverse matrix: + + * `"pinv"`: use `pinv` from `scipy` which uses `lstsq` + * `"pinv2"`: use `pinv2` from `scipy` which uses `SVD` + * `"pinvh"`: use `pinvh` from `scipy` which uses eigen-values + + If you want to use another routine to invert the kriging matrix, + you can pass a callable which takes a matrix and returns the inverse. + Default: `"pinv"` + fit_normalizer : :class:`bool`, optional + Wheater to fit the data-normalizer to the given conditioning data. + Default: False + fit_variogram : :class:`bool`, optional + Wheater to fit the given variogram model to the data. + This is done by using isotropy settings of the given model, + assuming the sill to be the data variance and with the + standard bins provided by the :any:`standard_bins` routine. + Default: False + + Notes + ----- + If you have changed any properties in the class, you can update the kriging + setup by calling :any:`Krige.set_condition` without any arguments. + + References + ---------- + .. 
[Wackernagel2003] Wackernagel, H., + "Multivariate geostatistics", + Springer, Berlin, Heidelberg (2003) """ def __init__( @@ -64,31 +130,55 @@ def __init__( model, cond_pos, cond_val, - mean=0.0, - ext_drift=None, drift_functions=None, - trend_function=None, + ext_drift=None, + mean=None, + normalizer=None, + trend=None, + unbiased=True, + exact=False, + cond_err="nugget", + pseudo_inv=True, + pseudo_inv_type="pinv", + fit_normalizer=False, + fit_variogram=False, ): - super().__init__(model, mean) + super().__init__(model, mean=mean, normalizer=normalizer, trend=trend) + self.mean_field = None self.krige_var = None + self._unbiased = bool(unbiased) + self._exact = bool(exact) + self._pseudo_inv = bool(pseudo_inv) + self._pseudo_inv_type = None + self.pseudo_inv_type = pseudo_inv_type # initialize private attributes - self._unbiased = True - self._value_type = "scalar" self._cond_pos = None self._cond_val = None + self._cond_err = None self._krige_mat = None self._krige_pos = None self._cond_trend = None - self._trend_function = None - self.trend_function = trend_function self._cond_ext_drift = np.array([]) - self._drift_functions = [] - if drift_functions is not None: - self.set_drift_functions(drift_functions) - self.set_condition(cond_pos, cond_val, ext_drift) + self._drift_functions = None + self.set_drift_functions(drift_functions) + self.set_condition( + cond_pos, + cond_val, + ext_drift, + cond_err, + fit_normalizer, + fit_variogram, + ) def __call__( - self, pos, mesh_type="unstructured", ext_drift=None, chunk_size=None + self, + pos, + mesh_type="unstructured", + ext_drift=None, + chunk_size=None, + only_mean=False, + return_var=True, + post_process=True, ): """ Generate the kriging field. @@ -109,65 +199,156 @@ def __call__( Chunk size to cut down the size of the kriging system to prevent memory errors. Default: None + only_mean : :class:`bool`, optional + Whether to only calculate the mean of the kriging field. 
+ Default: `False` + return_var : :class:`bool`, optional + Whether to return the variance along with the field. + Default: `True` + post_process : :class:`bool`, optional + Whether to apply mean, normalizer and trend to the field. + Default: `True` Returns ------- field : :class:`numpy.ndarray` - the kriged field - krige_var : :class:`numpy.ndarray` + the kriged field or mean_field + krige_var : :class:`numpy.ndarray`, optional the kriging error variance + (if return_var is True and only_mean is False) """ - self.mesh_type = mesh_type - # internal conversation - x, y, z, self.pos, __, mt_changed, axis_lens = self._pre_pos( - pos, mesh_type, make_unstruct=True - ) - point_no = len(x) - # set chunk size - chunk_size = point_no if chunk_size is None else int(chunk_size) - chunk_no = int(np.ceil(point_no / chunk_size)) - field = np.empty_like(x) - krige_var = np.empty_like(x) - ext_drift = self._pre_ext_drift(point_no, ext_drift) - # iterate of chunks - for i in range(chunk_no): - # get chunk slice for actual chunk - chunk_slice = (i * chunk_size, min(point_no, (i + 1) * chunk_size)) - c_slice = slice(*chunk_slice) - # get RHS of the kriging system - k_vec = self._get_krige_vecs((x, y, z), chunk_slice, ext_drift) - # generate the raw kriging field and error variance - field[c_slice], krige_var[c_slice] = krigesum( - self._krige_mat, k_vec, self._krige_cond - ) + return_var &= not only_mean # don't return variance when calc. mean + iso_pos, shape = self.pre_pos(pos, mesh_type) + pnt_cnt = len(iso_pos[0]) + + field = np.empty(pnt_cnt, dtype=np.double) + krige_var = np.empty(pnt_cnt, dtype=np.double) if return_var else None + # set constant mean if present and wanted + if only_mean and self.drift_no == 0: + field[...] 
= self.get_mean(post_process=False) + # execute the kriging routine + else: + # set chunk size + chunk_size = pnt_cnt if chunk_size is None else int(chunk_size) + chunk_no = int(np.ceil(pnt_cnt / chunk_size)) + ext_drift = self._pre_ext_drift(pnt_cnt, ext_drift) + # iterate chunks + for i in range(chunk_no): + # get chunk slice for actual chunk + chunk_slice = ( + i * chunk_size, + min(pnt_cnt, (i + 1) * chunk_size), + ) + c_slice = slice(*chunk_slice) + # get RHS of the kriging system + k_vec = self._get_krige_vecs( + iso_pos, chunk_slice, ext_drift, only_mean + ) + # generate the raw kriging field and error variance + self._summate(field, krige_var, c_slice, k_vec, return_var) # reshape field if we got a structured mesh - if mt_changed: - field = reshape_field_from_unstruct_to_struct( - self.model.dim, field, axis_lens + field = np.reshape(field, shape) + if only_mean: # care about 'kriging the mean' + return self.post_field(field, "mean_field", process=post_process) + # save field to class + field = self.post_field(field, "field", process=post_process) + if return_var: # care about the estimated error variance + krige_var = np.reshape( + np.maximum(self.model.sill - krige_var, 0), shape + ) + krige_var = self.post_field(krige_var, "krige_var", process=False) + return field, krige_var + # if we only calculate the field, overwrite the error variance + self.krige_var = None + return field + + def _summate(self, field, krige_var, c_slice, k_vec, return_var): + if return_var: # estimate error variance + field[c_slice], krige_var[c_slice] = calc_field_krige_and_variance( + self._krige_mat, k_vec, self._krige_cond ) - krige_var = reshape_field_from_unstruct_to_struct( - self.model.dim, krige_var, axis_lens + else: # solely calculate the interpolated field + field[c_slice] = calc_field_krige( + self._krige_mat, k_vec, self._krige_cond ) - self._post_field(field, krige_var) - return self.field, self.krige_var - def _get_krige_mat(self): # pragma: no cover + def 
_inv(self, mat): + # return pseudo-inverted matrix if wanted (numerically more stable) + if self.pseudo_inv: + # if the given type is a callable, call it + if callable(self.pseudo_inv_type): + return self.pseudo_inv_type(mat) + # use the selected method to compute the pseudo-inverse matrix + return P_INV[self.pseudo_inv_type](mat) + # if no pseudo-inverse is wanted, calculate the real inverse + return spl.inv(mat) + + def _get_krige_mat(self): """Calculate the inverse matrix of the kriging equation.""" - return None + res = np.empty((self.krige_size, self.krige_size), dtype=np.double) + # fill the kriging matrix with the covariance + res[: self.cond_no, : self.cond_no] = self.model.covariance( + self._get_dists(self._krige_pos) + ) + # apply the measurement error (nugget by default) + res[np.diag_indices(self.cond_no)] += self.cond_err + # set unbias condition (weights have to sum up to 1) + if self.unbiased: + res[self.cond_no, : self.cond_no] = 1 + res[: self.cond_no, self.cond_no] = 1 + # set functional drift terms + for i, f in enumerate(self.drift_functions): + drift_tmp = f(*self.cond_pos) + res[-self.drift_no + i, : self.cond_no] = drift_tmp + res[: self.cond_no, -self.drift_no + i] = drift_tmp + # set external drift terms + if self.ext_drift_no > 0: + ext_size = self.krige_size - self.ext_drift_no + res[ext_size:, : self.cond_no] = self.cond_ext_drift + res[: self.cond_no, ext_size:] = self.cond_ext_drift.T + # set lower right part of the matrix to 0 + res[self.cond_no :, self.cond_no :] = 0 + return self._inv(res) def _get_krige_vecs( - self, pos, chunk_slice=(0, None), ext_drift=None - ): # pragma: no cover + self, pos, chunk_slice=(0, None), ext_drift=None, only_mean=False + ): """Calculate the RHS of the kriging equation.""" - return None - - def _pre_ext_drift(self, point_no, ext_drift=None, set_cond=False): + # determine the chunk size + chunk_size = len(pos[0]) if chunk_slice[1] is None else chunk_slice[1] + chunk_size -= chunk_slice[0] + res = 
np.empty((self.krige_size, chunk_size), dtype=np.double) + if only_mean: + # set points to limit of the covariance to only get the mean + res[: self.cond_no, :] = 0 + else: + # get correct covarinace functions (depending on exact values) + cf = self.model.cov_nugget if self.exact else self.model.covariance + res[: self.cond_no, :] = cf( + self._get_dists(self._krige_pos, pos, chunk_slice) + ) + # apply the unbiased condition + if self.unbiased: + res[self.cond_no, :] = 1 + # drift function need the anisotropic and rotated positions + if self.int_drift_no > 0: + chunk_pos = self.model.anisometrize(pos)[:, slice(*chunk_slice)] + # apply functional drift + for i, f in enumerate(self.drift_functions): + res[-self.drift_no + i, :] = f(*chunk_pos) + # apply external drift + if self.ext_drift_no > 0: + ext_size = self.krige_size - self.ext_drift_no + res[ext_size:, :] = ext_drift[:, slice(*chunk_slice)] + return res + + def _pre_ext_drift(self, pnt_cnt, ext_drift=None, set_cond=False): """ Preprocessor for external drifts. Parameters ---------- - point_no : :class:`numpy.ndarray` + pnt_cnt : :class:`numpy.ndarray` Number of points of the mesh. ext_drift : :class:`numpy.ndarray` or :any:`None`, optional the external drift values at the given positions (only for EDK) @@ -183,42 +364,24 @@ def _pre_ext_drift(self, point_no, ext_drift=None, set_cond=False): the drift values at the given positions """ if ext_drift is not None: + ext_drift = np.array(ext_drift, dtype=np.double, ndmin=2) + if ext_drift.size == 0: # treat empty array as no ext_drift + return np.array([]) if set_cond: - ext_drift = np.array(ext_drift, dtype=np.double, ndmin=2) - if len(ext_drift.shape) > 2 or ext_drift.shape[1] != point_no: - raise ValueError("Krige: wrong number of cond. drifts.") + if len(ext_drift.shape) > 2 or ext_drift.shape[1] != pnt_cnt: + raise ValueError("Krige: wrong number of ext. 
drifts.") return ext_drift - ext_drift = np.array(ext_drift, dtype=np.double, ndmin=2) ext_shape = np.shape(ext_drift) - shape = (self.drift_no, point_no) - if self.drift_no > 1 and ext_shape[0] != self.drift_no: + shape = (self.ext_drift_no, pnt_cnt) + if self.drift_no > 1 and ext_shape[0] != self.ext_drift_no: raise ValueError("Krige: wrong number of external drifts.") if np.prod(ext_shape) != np.prod(shape): raise ValueError("Krige: wrong number of ext. drift values.") return np.array(ext_drift, dtype=np.double).reshape(shape) - elif not set_cond and self._cond_ext_drift.size > 0: + if not set_cond and self._cond_ext_drift.size > 0: raise ValueError("Krige: wrong number of ext. drift values.") return np.array([]) - def _post_field(self, field, krige_var): - """ - Postprocessing and saving of kriging field and error variance. - - Parameters - ---------- - field : :class:`numpy.ndarray` - Raw kriging field. - krige_var : :class:`numpy.ndarray` - Raw kriging error variance. - """ - if self.trend_function is no_trend: - self.field = field - else: - self.field = field + eval_func( - self.trend_function, self.pos, self.mesh_type - ) - self.krige_var = krige_var - def _get_dists(self, pos1, pos2=None, pos2_slice=(0, None)): """ Calculate pairwise distances. @@ -237,38 +400,134 @@ def _get_dists(self, pos1, pos2=None, pos2_slice=(0, None)): :class:`numpy.ndarray` Matrix containing the pairwise distances. """ - pos1_stack = np.column_stack(pos1[: self.model.dim]) if pos2 is None: - return cdist(pos1_stack, pos1_stack) - p2s = slice(*pos2_slice) - pos2_stack = np.column_stack(pos2[: self.model.dim])[p2s, ...] - return cdist(pos1_stack, pos2_stack) + return cdist(pos1.T, pos1.T) + return cdist(pos1.T, pos2.T[slice(*pos2_slice), ...]) + + def get_mean(self, post_process=True): + """Calculate the estimated mean of the detrended field. 
- def get_mean(self): - """Calculate the estimated mean.""" - return self._mean + Parameters + ---------- + post_process : :class:`bool`, optional + Whether to apply field-mean and normalizer. + Default: `True` - def set_condition(self, cond_pos, cond_val, ext_drift=None): + Returns + ------- + mean : :class:`float` or :any:`None` + Mean of the Kriging System. + + Notes + ----- + Only not ``None`` if the Kriging System has a constant mean. + This means, no drift is given and the given field-mean is constant. + The result is neglecting a potential given trend. + """ + # if there are drift-terms, no constant mean can be calculated -> None + # if mean should not be post-processed, it exists when no drift given + if not self.has_const_mean and (post_process or self.drift_no > 0): + return None + res = 0.0 # for simple kriging return the given mean + # correctly setting given mean + mean = 0.0 if self.mean is None else self.mean + # for ordinary kriging return the estimated mean + if self.unbiased: + # set the right side of the kriging system to the limit of cov. + mean_est = np.concatenate((np.full_like(self.cond_val, 0.0), [1])) + # execute the kriging routine with einsum + res = np.einsum( + "i,ij,j", self._krige_cond, self._krige_mat, mean_est + ) + return self.normalizer.denormalize(res + mean) if post_process else res + + def set_condition( + self, + cond_pos=None, + cond_val=None, + ext_drift=None, + cond_err=None, + fit_normalizer=False, + fit_variogram=False, + ): """Set the conditions for kriging. + This method could also be used to update the kriging setup, when + properties were changed. Then you can call it without arguments. + Parameters ---------- - cond_pos : :class:`list` - the position tuple of the conditions (x, [y, z]) - cond_val : :class:`numpy.ndarray` - the values of the conditions + cond_pos : :class:`list`, optional + the position tuple of the conditions (x, [y, z]). Default: current. 
+ cond_val : :class:`numpy.ndarray`, optional + the values of the conditions. Default: current. ext_drift : :class:`numpy.ndarray` or :any:`None`, optional the external drift values at the given conditions (only for EDK) For multiple external drifts, the first dimension - should be the index of the drift term. + should be the index of the drift term. When passing `None`, the + existing external drift will be used. + cond_err : :class:`str`, :class:`float`, :class:`list`, optional + The measurement error at the conditioning points. + Either "nugget" to apply the model-nugget, a single value applied + to all points or an array with individual values for each point. + The measurement error has to be <= nugget. + The "exact=True" variant only works with "cond_err='nugget'". + Default: "nugget" + fit_normalizer : :class:`bool`, optional + Whether to fit the data-normalizer to the given conditioning data. + Default: False + fit_variogram : :class:`bool`, optional + Whether to fit the given variogram model to the data. + This is done by using isotropy settings of the given model, + assuming the sill to be the data variance and with the + standard bins provided by the :any:`standard_bins` routine. 
+ Default: False """ + # only use existing external drift, if no new positions are given + ext_drift = ( + self._cond_ext_drift + if (ext_drift is None and cond_pos is None) + else ext_drift + ) + # use existing values or set default + cond_pos = self._cond_pos if cond_pos is None else cond_pos + cond_val = self._cond_val if cond_val is None else cond_val + cond_err = self._cond_err if cond_err is None else cond_err + cond_err = "nugget" if cond_err is None else cond_err # default + if cond_pos is None or cond_val is None: + raise ValueError("Krige.set_condition: missing cond_pos/cond_val.") + # correctly format cond_pos and cond_val self._cond_pos, self._cond_val = set_condition( - cond_pos, cond_val, self.model.dim + cond_pos, cond_val, self.dim ) + if fit_normalizer: # fit normalizer to detrended data + self.normalizer.fit(self.cond_val - self.cond_trend) + if fit_variogram: # fitting model to empirical variogram of data + # normalize field + field = self.normalizer.normalize(self.cond_val - self.cond_trend) + field -= self.cond_mean + sill = np.var(field) + if self.model.is_isotropic: + emp_vario = vario_estimate( + self.cond_pos, field, latlon=self.model.latlon + ) + else: + axes = rotated_main_axes(self.model.dim, self.model.angles) + emp_vario = vario_estimate( + self.cond_pos, field, direction=axes + ) + # set the sill to the field variance + self.model.fit_variogram(*emp_vario, sill=sill) + # set the measurement errors + self.cond_err = cond_err + # set the external drift values and the conditioning points self._cond_ext_drift = self._pre_ext_drift( self.cond_no, ext_drift, set_cond=True ) - self.update() + # upate the internal kriging settings + self._krige_pos = self.model.isometrize(self.cond_pos) + # krige pos are the unrotated and isotropic condition positions + self._krige_mat = self._get_krige_mat() def set_drift_functions(self, drift_functions=None): """ @@ -292,7 +551,7 @@ def set_drift_functions(self, drift_functions=None): self._drift_functions 
= [] elif isinstance(drift_functions, (str, int)): self._drift_functions = get_drift_functions( - self.model.dim, drift_functions + self.dim, drift_functions ) else: if isinstance(drift_functions, collections.abc.Iterator): @@ -304,31 +563,18 @@ def set_drift_functions(self, drift_functions=None): drift_functions = [drift_functions] for f in drift_functions: if not callable(f): - raise ValueError("Universal: Drift functions not callable") + raise ValueError("Krige: Drift functions not callable") self._drift_functions = drift_functions - def update(self): - """Update the kriging settings.""" - x, y, z, __, __, __, __ = self._pre_pos(self.cond_pos) - # krige pos are the unrotated and isotropic condition positions - self._krige_pos = (x, y, z)[: self.model.dim] - self._krige_mat = self._get_krige_mat() - if self.trend_function is no_trend: - self._cond_trend = 0.0 - else: - self._cond_trend = self.trend_function(*self.cond_pos) - self._mean = self.get_mean() - @property def _krige_cond(self): """:class:`numpy.ndarray`: The prepared kriging conditions.""" pad_size = self.drift_no + int(self.unbiased) - return np.pad( - self.cond_val - self.cond_trend, - (0, pad_size), - mode="constant", - constant_values=0, - ) + # detrend data and normalize + val = self.normalizer.normalize(self.cond_val - self.cond_trend) + # set to zero mean + val -= self.cond_mean + return np.pad(val, (0, pad_size), mode="constant", constant_values=0) @property def cond_pos(self): @@ -340,6 +586,33 @@ def cond_val(self): """:class:`list`: The values of the conditions.""" return self._cond_val + @property + def cond_err(self): + """:class:`list`: The measurement errors at the condition points.""" + if isinstance(self._cond_err, str) and self._cond_err == "nugget": + return self.model.nugget + return self._cond_err + + @cond_err.setter + def cond_err(self, value): + if isinstance(value, str) and value == "nugget": + self._cond_err = value + else: + if self.exact: + raise ValueError( + 
"krige.cond_err: measurement errors can't be given, " + "when interpolator should be exact." + ) + value = np.array(value, dtype=np.double).reshape(-1) + if value.size == 1: + self._cond_err = value.item() + else: + if value.size != self.cond_no: + raise ValueError( + "krige.cond_err: wrong number of measurement errors." + ) + self._cond_err = value + @property def cond_no(self): """:class:`int`: The number of the conditions.""" @@ -350,41 +623,82 @@ def cond_ext_drift(self): """:class:`numpy.ndarray`: The ext. drift at the conditions.""" return self._cond_ext_drift + @property + def cond_mean(self): + """:class:`numpy.ndarray`: Trend at the conditions.""" + return eval_func(self.mean, self.cond_pos, self.dim, broadcast=True) + + @property + def cond_trend(self): + """:class:`numpy.ndarray`: Trend at the conditions.""" + return eval_func(self.trend, self.cond_pos, self.dim, broadcast=True) + + @property + def unbiased(self): + """:class:`bool`: Whether the kriging is unbiased or not.""" + return self._unbiased + + @property + def exact(self): + """:class:`bool`: Whether the interpolator is exact.""" + return self._exact + + @property + def pseudo_inv(self): + """:class:`bool`: Whether pseudo inverse matrix is used.""" + return self._pseudo_inv + + @property + def pseudo_inv_type(self): + """:class:`str`: Method selector for pseudo inverse calculation.""" + return self._pseudo_inv_type + + @pseudo_inv_type.setter + def pseudo_inv_type(self, val): + if val not in P_INV and not callable(val): + raise ValueError(f"Krige: pseudo_inv_type not in {sorted(P_INV)}") + self._pseudo_inv_type = val + @property def drift_functions(self): """:class:`list` of :any:`callable`: The drift functions.""" return self._drift_functions @property - def drift_no(self): - """:class:`int`: Number of drift values per point.""" - return len(self.drift_functions) + self.cond_ext_drift.shape[0] + def has_const_mean(self): + """:class:`bool`: Whether the field has a constant mean or not.""" + 
return self.drift_no == 0 and not callable(self.mean) @property - def cond_trend(self): - """:class:`numpy.ndarray`: Trend at the conditions.""" - return self._cond_trend + def krige_size(self): + """:class:`int`: Size of the kriging system.""" + return self.cond_no + self.drift_no + int(self.unbiased) @property - def trend_function(self): - """:any:`callable`: The trend function.""" - return self._trend_function - - @trend_function.setter - def trend_function(self, trend_function): - if trend_function is None: - trend_function = no_trend - if not callable(trend_function): - raise ValueError("Detrended kriging: trend function not callable.") - self._trend_function = trend_function + def drift_no(self): + """:class:`int`: Number of drift values per point.""" + return self.int_drift_no + self.ext_drift_no @property - def unbiased(self): - """:class:`bool`: Whether the kriging is unbiased or not.""" - return self._unbiased - + def int_drift_no(self): + """:class:`int`: Number of internal drift values per point.""" + return len(self.drift_functions) -if __name__ == "__main__": # pragma: no cover - import doctest + @property + def ext_drift_no(self): + """:class:`int`: Number of external drift values per point.""" + return self.cond_ext_drift.shape[0] - doctest.testmod() + @property + def name(self): + """:class:`str`: The name of the kriging class.""" + return self.__class__.__name__ + + def __repr__(self): + """Return String representation.""" + return "{0}(model={1}, cond_no={2}{3})".format( + self.name, + self.model.name, + self.cond_no, + self._fmt_mean_norm_trend(), + ) diff --git a/gstools/krige/krigesum.pyx b/gstools/krige/krigesum.pyx index 05a7d152a..d1a5055ff 100644 --- a/gstools/krige/krigesum.pyx +++ b/gstools/krige/krigesum.pyx @@ -11,7 +11,7 @@ from cython.parallel import prange cimport numpy as np -def krigesum( +def calc_field_krige_and_variance( const double[:,:] krig_mat, const double[:,:] krig_vecs, const double[:] cond @@ -27,7 +27,7 @@ def 
krigesum( cdef int i, j, k # error = krig_vecs * krig_mat * krig_vecs - # field = krig_facs * krig_vecs + # field = cond * krig_mat * krig_vecs for k in prange(res_i, nogil=True): for i in range(mat_i): krig_fac = 0.0 @@ -37,3 +37,28 @@ def krigesum( field[k] += cond[i] * krig_fac return np.asarray(field), np.asarray(error) + + +def calc_field_krige( + const double[:,:] krig_mat, + const double[:,:] krig_vecs, + const double[:] cond +): + + cdef int mat_i = krig_mat.shape[0] + cdef int res_i = krig_vecs.shape[1] + + cdef double[:] field = np.zeros(res_i) + cdef double krig_fac + + cdef int i, j, k + + # field = cond * krig_mat * krig_vecs + for k in prange(res_i, nogil=True): + for i in range(mat_i): + krig_fac = 0.0 + for j in range(mat_i): + krig_fac += krig_mat[i,j] * krig_vecs[j,k] + field[k] += cond[i] * krig_fac + + return np.asarray(field) diff --git a/gstools/krige/methods.py b/gstools/krige/methods.py index bad1b059d..734d799d4 100644 --- a/gstools/krige/methods.py +++ b/gstools/krige/methods.py @@ -14,12 +14,7 @@ Detrended """ # pylint: disable=C0103 -import numpy as np -from scipy.linalg import inv -from gstools.field.tools import make_anisotropic, rotate_mesh -from gstools.tools.geometric import pos2xyz, xyz2pos from gstools.krige.base import Krige -from gstools.krige.tools import eval_func, no_trend __all__ = ["Simple", "Ordinary", "Universal", "ExtDrift", "Detrended"] @@ -39,64 +34,86 @@ class Simple(Krige): cond_val : :class:`numpy.ndarray` the values of the conditions mean : :class:`float`, optional - mean value of the kriging field - trend_function : :any:`callable`, optional - A callable trend function. Should have the signiture: f(x, [y, z]) + mean value used to shift normalized conditioning data. + Could also be a callable. The default is None. + normalizer : :any:`None` or :any:`Normalizer`, optional + Normalizer to be applied to the input data to gain normality. + The default is None. 
+ trend : :any:`None` or :class:`float` or :any:`callable`, optional + A callable trend function. Should have the signiture: f(x, [y, z, ...]) This is used for detrended kriging, where the trended is subtracted from the conditions before kriging is applied. This can be used for regression kriging, where the trend function is determined by an external regression algorithm. + If no normalizer is applied, this behaves equal to 'mean'. + The default is None. + exact : :class:`bool`, optional + Whether the interpolator should reproduce the exact input values. + If `False`, `cond_err` is interpreted as measurement error + at the conditioning points and the result will be more smooth. + Default: False + cond_err : :class:`str`, :class :class:`float` or :class:`list`, optional + The measurement error at the conditioning points. + Either "nugget" to apply the model-nugget, a single value applied to + all points or an array with individual values for each point. + The measurement error has to be <= nugget. + The "exact=True" variant only works with "cond_err='nugget'". + Default: "nugget" + pseudo_inv : :class:`bool`, optional + Whether the kriging system is solved with the pseudo inverted + kriging matrix. If `True`, this leads to more numerical stability + and redundant points are averaged. But it can take more time. + Default: True + pseudo_inv_type : :class:`str` or :any:`callable`, optional + Here you can select the algorithm to compute the pseudo-inverse matrix: + + * `"pinv"`: use `pinv` from `scipy` which uses `lstsq` + * `"pinv2"`: use `pinv2` from `scipy` which uses `SVD` + * `"pinvh"`: use `pinvh` from `scipy` which uses eigen-values + + If you want to use another routine to invert the kriging matrix, + you can pass a callable which takes a matrix and returns the inverse. + Default: `"pinv"` + fit_normalizer : :class:`bool`, optional + Wheater to fit the data-normalizer to the given conditioning data. 
+ Default: False + fit_variogram : :class:`bool`, optional + Wheater to fit the given variogram model to the data. + This is done by using isotropy settings of the given model, + assuming the sill to be the data variance and with the + standard bins provided by the :any:`standard_bins` routine. + Default: False """ def __init__( - self, model, cond_pos, cond_val, mean=0.0, trend_function=None + self, + model, + cond_pos, + cond_val, + mean=0.0, + normalizer=None, + trend=None, + exact=False, + cond_err="nugget", + pseudo_inv=True, + pseudo_inv_type="pinv", + fit_normalizer=False, + fit_variogram=False, ): super().__init__( - model, cond_pos, cond_val, mean=mean, trend_function=trend_function - ) - self._unbiased = False - - def _get_krige_mat(self): - """Calculate the inverse matrix of the kriging equation.""" - return inv(self.model.cov_nugget(self._get_dists(self._krige_pos))) - - def _get_krige_vecs(self, pos, chunk_slice=(0, None), ext_drift=None): - """Calculate the RHS of the kriging equation.""" - return self.model.cov_nugget( - self._get_dists(self._krige_pos, pos, chunk_slice) - ) - - def _post_field(self, field, krige_var): - """ - Postprocessing and saving of kriging field and error variance. - - Parameters - ---------- - field : :class:`numpy.ndarray` - Raw kriging field. - krige_var : :class:`numpy.ndarray` - Raw kriging error variance. 
- """ - if self.trend_function is no_trend: - self.field = field + self.mean - else: - self.field = ( - field - + self.mean - + eval_func(self.trend_function, self.pos, self.mesh_type) - ) - # add the given mean - self.krige_var = self.model.sill - krige_var - - @property - def _krige_cond(self): - """:class:`numpy.ndarray`: The prepared kriging conditions.""" - return self.cond_val - self.mean - self.cond_trend - - def __repr__(self): - """Return String representation.""" - return "Simple(model={0}, cond_pos={1}, cond_val={2}, mean={3})".format( - self.model, self.cond_pos, self.cond_val, self.mean + model, + cond_pos, + cond_val, + mean=mean, + normalizer=normalizer, + trend=trend, + unbiased=False, + exact=exact, + cond_err=cond_err, + pseudo_inv=pseudo_inv, + pseudo_inv_type=pseudo_inv_type, + fit_normalizer=fit_normalizer, + fit_variogram=fit_variogram, ) @@ -114,56 +131,81 @@ class Ordinary(Krige): tuple, containing the given condition positions (x, [y, z]) cond_val : :class:`numpy.ndarray` the values of the conditions - trend_function : :any:`callable`, optional - A callable trend function. Should have the signiture: f(x, [y, z]) + normalizer : :any:`None` or :any:`Normalizer`, optional + Normalizer to be applied to the input data to gain normality. + The default is None. + trend : :any:`None` or :class:`float` or :any:`callable`, optional + A callable trend function. Should have the signiture: f(x, [y, z, ...]) This is used for detrended kriging, where the trended is subtracted from the conditions before kriging is applied. This can be used for regression kriging, where the trend function is determined by an external regression algorithm. + If no normalizer is applied, this behaves equal to 'mean'. + The default is None. + exact : :class:`bool`, optional + Whether the interpolator should reproduce the exact input values. + If `False`, `cond_err` is interpreted as measurement error + at the conditioning points and the result will be more smooth. 
+ Default: False + cond_err : :class:`str`, :class :class:`float` or :class:`list`, optional + The measurement error at the conditioning points. + Either "nugget" to apply the model-nugget, a single value applied to + all points or an array with individual values for each point. + The measurement error has to be <= nugget. + The "exact=True" variant only works with "cond_err='nugget'". + Default: "nugget" + pseudo_inv : :class:`bool`, optional + Whether the kriging system is solved with the pseudo inverted + kriging matrix. If `True`, this leads to more numerical stability + and redundant points are averaged. But it can take more time. + Default: True + pseudo_inv_type : :class:`str` or :any:`callable`, optional + Here you can select the algorithm to compute the pseudo-inverse matrix: + + * `"pinv"`: use `pinv` from `scipy` which uses `lstsq` + * `"pinv2"`: use `pinv2` from `scipy` which uses `SVD` + * `"pinvh"`: use `pinvh` from `scipy` which uses eigen-values + + If you want to use another routine to invert the kriging matrix, + you can pass a callable which takes a matrix and returns the inverse. + Default: `"pinv"` + fit_normalizer : :class:`bool`, optional + Wheater to fit the data-normalizer to the given conditioning data. + Default: False + fit_variogram : :class:`bool`, optional + Wheater to fit the given variogram model to the data. + This is done by using isotropy settings of the given model, + assuming the sill to be the data variance and with the + standard bins provided by the :any:`standard_bins` routine. 
+ Default: False """ - def __init__(self, model, cond_pos, cond_val, trend_function=None): + def __init__( + self, + model, + cond_pos, + cond_val, + normalizer=None, + trend=None, + exact=False, + cond_err="nugget", + pseudo_inv=True, + pseudo_inv_type="pinv", + fit_normalizer=False, + fit_variogram=False, + ): super().__init__( - model, cond_pos, cond_val, trend_function=trend_function - ) - - def _get_krige_mat(self): - """Calculate the inverse matrix of the kriging equation.""" - size = self.cond_no + int(self.unbiased) - res = np.empty((size, size), dtype=np.double) - res[: self.cond_no, : self.cond_no] = self.model.vario_nugget( - self._get_dists(self._krige_pos) - ) - if self.unbiased: - res[self.cond_no, :] = 1 - res[:, self.cond_no] = 1 - res[self.cond_no, self.cond_no] = 0 - return inv(res) - - def _get_krige_vecs(self, pos, chunk_slice=(0, None), ext_drift=None): - """Calculate the RHS of the kriging equation.""" - chunk_size = len(pos[0]) if chunk_slice[1] is None else chunk_slice[1] - chunk_size -= chunk_slice[0] - size = self.cond_no + int(self.unbiased) - res = np.empty((size, chunk_size), dtype=np.double) - res[: self.cond_no, :] = self.model.vario_nugget( - self._get_dists(self._krige_pos, pos, chunk_slice) - ) - if self.unbiased: - res[self.cond_no, :] = 1 - return res - - def get_mean(self): - """Calculate the estimated mean.""" - mean_est = np.concatenate( - (np.full_like(self.cond_val, self.model.sill), [1]) - ) - return np.einsum("i,ij,j", self._krige_cond, self._krige_mat, mean_est) - - def __repr__(self): - """Return String representation.""" - return "Ordinary(model={0}, cond_pos={1}, cond_val={2}".format( - self.model, self.cond_pos, self.cond_val + model, + cond_pos, + cond_val, + trend=trend, + normalizer=normalizer, + exact=exact, + cond_err=cond_err, + pseudo_inv=pseudo_inv, + pseudo_inv_type=pseudo_inv_type, + fit_normalizer=fit_normalizer, + fit_variogram=fit_variogram, ) @@ -194,73 +236,83 @@ class Universal(Krige): * "linear" : 
regional linear drift (equals order=1) * "quadratic" : regional quadratic drift (equals order=2) - trend_function : :any:`callable`, optional - A callable trend function. Should have the signiture: f(x, [y, z]) + normalizer : :any:`None` or :any:`Normalizer`, optional + Normalizer to be applied to the input data to gain normality. + The default is None. + trend : :any:`None` or :class:`float` or :any:`callable`, optional + A callable trend function. Should have the signiture: f(x, [y, z, ...]) This is used for detrended kriging, where the trended is subtracted from the conditions before kriging is applied. This can be used for regression kriging, where the trend function is determined by an external regression algorithm. + If no normalizer is applied, this behaves equal to 'mean'. + The default is None. + exact : :class:`bool`, optional + Whether the interpolator should reproduce the exact input values. + If `False`, `cond_err` is interpreted as measurement error + at the conditioning points and the result will be more smooth. + Default: False + cond_err : :class:`str`, :class :class:`float` or :class:`list`, optional + The measurement error at the conditioning points. + Either "nugget" to apply the model-nugget, a single value applied to + all points or an array with individual values for each point. + The measurement error has to be <= nugget. + The "exact=True" variant only works with "cond_err='nugget'". + Default: "nugget" + pseudo_inv : :class:`bool`, optional + Whether the kriging system is solved with the pseudo inverted + kriging matrix. If `True`, this leads to more numerical stability + and redundant points are averaged. But it can take more time. 
+ Default: True + pseudo_inv_type : :class:`str` or :any:`callable`, optional + Here you can select the algorithm to compute the pseudo-inverse matrix: + + * `"pinv"`: use `pinv` from `scipy` which uses `lstsq` + * `"pinv2"`: use `pinv2` from `scipy` which uses `SVD` + * `"pinvh"`: use `pinvh` from `scipy` which uses eigen-values + + If you want to use another routine to invert the kriging matrix, + you can pass a callable which takes a matrix and returns the inverse. + Default: `"pinv"` + fit_normalizer : :class:`bool`, optional + Wheater to fit the data-normalizer to the given conditioning data. + Default: False + fit_variogram : :class:`bool`, optional + Wheater to fit the given variogram model to the data. + This is done by using isotropy settings of the given model, + assuming the sill to be the data variance and with the + standard bins provided by the :any:`standard_bins` routine. + Default: False """ def __init__( - self, model, cond_pos, cond_val, drift_functions, trend_function=None + self, + model, + cond_pos, + cond_val, + drift_functions, + normalizer=None, + trend=None, + exact=False, + cond_err="nugget", + pseudo_inv=True, + pseudo_inv_type="pinv", + fit_normalizer=False, + fit_variogram=False, ): super().__init__( model, cond_pos, cond_val, drift_functions=drift_functions, - trend_function=trend_function, - ) - - def _get_krige_mat(self): - """Calculate the inverse matrix of the kriging equation.""" - size = self.cond_no + int(self.unbiased) + self.drift_no - res = np.empty((size, size), dtype=np.double) - res[: self.cond_no, : self.cond_no] = self.model.vario_nugget( - self._get_dists(self._krige_pos) - ) - if self.unbiased: - res[self.cond_no, : self.cond_no] = 1 - res[: self.cond_no, self.cond_no] = 1 - for i, f in enumerate(self.drift_functions): - drift_tmp = f(*self.cond_pos) - res[-self.drift_no + i, : self.cond_no] = drift_tmp - res[: self.cond_no, -self.drift_no + i] = drift_tmp - res[self.cond_no :, self.cond_no :] = 0 - return inv(res) - 
- def _get_krige_vecs(self, pos, chunk_slice=(0, None), ext_drift=None): - """Calculate the RHS of the kriging equation.""" - chunk_size = len(pos[0]) if chunk_slice[1] is None else chunk_slice[1] - chunk_size -= chunk_slice[0] - size = self.cond_no + int(self.unbiased) + self.drift_no - res = np.empty((size, chunk_size), dtype=np.double) - res[: self.cond_no, :] = self.model.vario_nugget( - self._get_dists(self._krige_pos, pos, chunk_slice) - ) - if self.unbiased: - res[self.cond_no, :] = 1 - # trend function need the anisotropic and rotated positions - if not self.model.is_isotropic: - x, y, z = pos2xyz(pos, max_dim=self.model.dim) - y, z = make_anisotropic(self.model.dim, self.model.anis, y, z) - if self.model.do_rotation: - x, y, z = rotate_mesh( - self.model.dim, self.model.angles, x, y, z - ) - pos = xyz2pos(x, y, z, max_dim=self.model.dim) - chunk_pos = list(pos[: self.model.dim]) - for i in range(self.model.dim): - chunk_pos[i] = chunk_pos[i][slice(*chunk_slice)] - for i, f in enumerate(self.drift_functions): - res[-self.drift_no + i, :] = f(*chunk_pos) - return res - - def __repr__(self): - """Return String representation.""" - return "Universal(model={0}, cond_pos={1}, cond_val={2})".format( - self.model, self.cond_pos, self.cond_val + normalizer=normalizer, + trend=trend, + exact=exact, + cond_err=cond_err, + pseudo_inv=pseudo_inv, + pseudo_inv_type=pseudo_inv_type, + fit_normalizer=fit_normalizer, + fit_variogram=fit_variogram, ) @@ -286,62 +338,87 @@ class ExtDrift(Krige): the values of the conditions ext_drift : :class:`numpy.ndarray` the external drift values at the given condition positions. - trend_function : :any:`callable`, optional - A callable trend function. Should have the signiture: f(x, [y, z]) + normalizer : :any:`None` or :any:`Normalizer`, optional + Normalizer to be applied to the input data to gain normality. + The default is None. + trend : :any:`None` or :class:`float` or :any:`callable`, optional + A callable trend function. 
Should have the signiture: f(x, [y, z, ...]) This is used for detrended kriging, where the trended is subtracted from the conditions before kriging is applied. This can be used for regression kriging, where the trend function is determined by an external regression algorithm. + If no normalizer is applied, this behaves equal to 'mean'. + The default is None. + exact : :class:`bool`, optional + Whether the interpolator should reproduce the exact input values. + If `False`, `cond_err` is interpreted as measurement error + at the conditioning points and the result will be more smooth. + Default: False + cond_err : :class:`str`, :class :class:`float` or :class:`list`, optional + The measurement error at the conditioning points. + Either "nugget" to apply the model-nugget, a single value applied to + all points or an array with individual values for each point. + The measurement error has to be <= nugget. + The "exact=True" variant only works with "cond_err='nugget'". + Default: "nugget" + pseudo_inv : :class:`bool`, optional + Whether the kriging system is solved with the pseudo inverted + kriging matrix. If `True`, this leads to more numerical stability + and redundant points are averaged. But it can take more time. + Default: True + pseudo_inv_type : :class:`str` or :any:`callable`, optional + Here you can select the algorithm to compute the pseudo-inverse matrix: + + * `"pinv"`: use `pinv` from `scipy` which uses `lstsq` + * `"pinv2"`: use `pinv2` from `scipy` which uses `SVD` + * `"pinvh"`: use `pinvh` from `scipy` which uses eigen-values + + If you want to use another routine to invert the kriging matrix, + you can pass a callable which takes a matrix and returns the inverse. + Default: `"pinv"` + fit_normalizer : :class:`bool`, optional + Wheater to fit the data-normalizer to the given conditioning data. + Default: False + fit_variogram : :class:`bool`, optional + Wheater to fit the given variogram model to the data. 
+ This is done by using isotropy settings of the given model, + assuming the sill to be the data variance and with the + standard bins provided by the :any:`standard_bins` routine. + Default: False """ def __init__( - self, model, cond_pos, cond_val, ext_drift, trend_function=None + self, + model, + cond_pos, + cond_val, + ext_drift, + normalizer=None, + trend=None, + exact=False, + cond_err="nugget", + pseudo_inv=True, + pseudo_inv_type="pinv", + fit_normalizer=False, + fit_variogram=False, ): super().__init__( model, cond_pos, cond_val, ext_drift=ext_drift, - trend_function=trend_function, - ) - - def _get_krige_mat(self): - """Calculate the inverse matrix of the kriging equation.""" - size = self.cond_no + int(self.unbiased) + self.drift_no - res = np.empty((size, size), dtype=np.double) - res[: self.cond_no, : self.cond_no] = self.model.vario_nugget( - self._get_dists(self._krige_pos) - ) - if self.unbiased: - res[self.cond_no, : self.cond_no] = 1 - res[: self.cond_no, self.cond_no] = 1 - res[-self.drift_no :, : self.cond_no] = self.cond_ext_drift - res[: self.cond_no, -self.drift_no :] = self.cond_ext_drift.T - res[self.cond_no :, self.cond_no :] = 0 - return inv(res) - - def _get_krige_vecs(self, pos, chunk_slice=(0, None), ext_drift=None): - """Calculate the RHS of the kriging equation.""" - chunk_size = len(pos[0]) if chunk_slice[1] is None else chunk_slice[1] - chunk_size -= chunk_slice[0] - size = self.cond_no + int(self.unbiased) + self.drift_no - res = np.empty((size, chunk_size), dtype=np.double) - res[: self.cond_no, :] = self.model.vario_nugget( - self._get_dists(self._krige_pos, pos, chunk_slice) - ) - if self.unbiased: - res[self.cond_no, :] = 1 - res[-self.drift_no :, :] = ext_drift[:, slice(*chunk_slice)] - return res - - def __repr__(self): - """Return String representation.""" - return "ExtDrift(model={0}, cond_pos={1}, cond_val={2})".format( - self.model, self.cond_pos, self.cond_val + normalizer=normalizer, + trend=trend, + exact=exact, + 
cond_err=cond_err, + pseudo_inv=pseudo_inv, + pseudo_inv_type=pseudo_inv_type, + fit_normalizer=fit_normalizer, + fit_variogram=fit_variogram, ) -class Detrended(Simple): +class Detrended(Krige): """ Detrended simple kriging. @@ -352,8 +429,10 @@ class Detrended(Simple): This can be used for regression kriging, where the trend function is determined by an external regression algorithm. - This is just a shortcut for simple kriging with a given trend function - and zero mean. A trend can be given with EVERY provided kriging routine. + This is just a shortcut for simple kriging with a given trend function, + zero mean and no normalizer. + + A trend can be given with EVERY provided kriging routine. Parameters ---------- @@ -365,21 +444,62 @@ class Detrended(Simple): the values of the conditions trend_function : :any:`callable` The callable trend function. Should have the signiture: f(x, [y, z]) + exact : :class:`bool`, optional + Whether the interpolator should reproduce the exact input values. + If `False`, `cond_err` is interpreted as measurement error + at the conditioning points and the result will be more smooth. + Default: False + cond_err : :class:`str`, :class :class:`float` or :class:`list`, optional + The measurement error at the conditioning points. + Either "nugget" to apply the model-nugget, a single value applied to + all points or an array with individual values for each point. + The measurement error has to be <= nugget. + The "exact=True" variant only works with "cond_err='nugget'". + Default: "nugget" + pseudo_inv : :class:`bool`, optional + Whether the kriging system is solved with the pseudo inverted + kriging matrix. If `True`, this leads to more numerical stability + and redundant points are averaged. But it can take more time. 
+ Default: True + pseudo_inv_type : :class:`str` or :any:`callable`, optional + Here you can select the algorithm to compute the pseudo-inverse matrix: + + * `"pinv"`: use `pinv` from `scipy` which uses `lstsq` + * `"pinv2"`: use `pinv2` from `scipy` which uses `SVD` + * `"pinvh"`: use `pinvh` from `scipy` which uses eigen-values + + If you want to use another routine to invert the kriging matrix, + you can pass a callable which takes a matrix and returns the inverse. + Default: `"pinv"` + fit_variogram : :class:`bool`, optional + Wheater to fit the given variogram model to the data. + This is done by using isotropy settings of the given model, + assuming the sill to be the data variance and with the + standard bins provided by the :any:`standard_bins` routine. + Default: False """ - def __init__(self, model, cond_pos, cond_val, trend_function): + def __init__( + self, + model, + cond_pos, + cond_val, + trend, + exact=False, + cond_err="nugget", + pseudo_inv=True, + pseudo_inv_type="pinv", + fit_variogram=False, + ): super().__init__( - model, cond_pos, cond_val, trend_function=trend_function - ) - - def __repr__(self): - """Return String representation.""" - return "Detrended(model={0} cond_pos={1}, cond_val={2})".format( - self.model, self.cond_pos, self.cond_val + model, + cond_pos, + cond_val, + trend=trend, + unbiased=False, + exact=exact, + cond_err=cond_err, + pseudo_inv=pseudo_inv, + pseudo_inv_type=pseudo_inv_type, + fit_variogram=fit_variogram, ) - - -if __name__ == "__main__": # pragma: no cover - import doctest - - doctest.testmod() diff --git a/gstools/krige/tools.py b/gstools/krige/tools.py index 121e312bd..5d220f034 100644 --- a/gstools/krige/tools.py +++ b/gstools/krige/tools.py @@ -9,70 +9,16 @@ .. 
autosummary:: set_condition get_drift_functions - no_trend - eval_func """ # pylint: disable=C0103 from itertools import combinations_with_replacement import numpy as np -from gstools.tools.geometric import pos2xyz, xyz2pos -from gstools.field.tools import ( - reshape_axis_from_struct_to_unstruct, - reshape_field_from_unstruct_to_struct, -) -__all__ = ["no_trend", "eval_func", "set_condition", "get_drift_functions"] +__all__ = ["set_condition", "get_drift_functions"] -def no_trend(*args, **kwargs): - """ - Zero trend dummy function. - - Parameters - ---------- - *args : any - Ignored arguments. - **kwargs : any - Ignored keyword arguments. - - Returns - ------- - float - A zero trend given as single float. - - """ - return 0.0 - - -def eval_func(func, pos, mesh_type="structured"): - """ - Evaluate a function on a mesh. - - Parameters - ---------- - func : :any:`callable` - The function to be called. Should have the signiture f(x, [y, z]) - pos : :class:`list` - the position tuple, containing main direction and transversal - directions (x, [y, z]) - mesh_type : :class:`str`, optional - 'structured' / 'unstructured' - Returns - ------- - :class:`numpy.ndarray` - Function values at the given points. - """ - x, y, z, dim = pos2xyz(pos, calc_dim=True) - if mesh_type == "structured": - x, y, z, axis_lens = reshape_axis_from_struct_to_unstruct(dim, x, y, z) - res = func(*[x, y, z][:dim]) - if mesh_type == "structured": - res = reshape_field_from_unstruct_to_struct(dim, res, axis_lens) - return res - - -def set_condition(cond_pos, cond_val, max_dim=3): +def set_condition(cond_pos, cond_val, dim): """ Set the conditions for kriging. @@ -82,8 +28,8 @@ def set_condition(cond_pos, cond_val, max_dim=3): the position tuple of the conditions (x, [y, z]) cond_val : :class:`numpy.ndarray` the values of the conditions - max_dim : :class:`int`, optional - Cut of information above the given dimension. 
Default: 3 + dim : :class:`int`, optional + Spatial dimension Raises ------ @@ -98,18 +44,12 @@ def set_condition(cond_pos, cond_val, max_dim=3): the error checked cond_val """ # convert the input for right shapes and dimension checks - c_x, c_y, c_z = pos2xyz(cond_pos, dtype=np.double, max_dim=max_dim) - cond_pos = xyz2pos(c_x, c_y, c_z) - if len(cond_pos) != max_dim: - raise ValueError( - "Please check your 'cond_pos' parameters. " - + "The dimension does not match with the given one." - ) cond_val = np.array(cond_val, dtype=np.double).reshape(-1) - if not all([len(cond_pos[i]) == len(cond_val) for i in range(max_dim)]): + cond_pos = np.array(cond_pos, dtype=np.double).reshape(dim, -1) + if len(cond_pos[0]) != len(cond_val): raise ValueError( "Please check your 'cond_pos' and 'cond_val' parameters. " - + "The shapes do not match." + "The shapes do not match." ) return cond_pos, cond_val diff --git a/gstools/normalizer/__init__.py b/gstools/normalizer/__init__.py new file mode 100644 index 000000000..af3a5f243 --- /dev/null +++ b/gstools/normalizer/__init__.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +""" +GStools subpackage providing normalization routines. + +.. currentmodule:: gstools.normalizer + +Base-Normalizer +^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated + + Normalizer + +Field-Normalizer +^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated + + LogNormal + BoxCox + BoxCoxShift + YeoJohnson + Modulus + Manly + +Convenience Routines +^^^^^^^^^^^^^^^^^^^^ + +.. 
autosummary:: + :toctree: generated + + apply_mean_norm_trend + remove_trend_norm_mean +""" + +from gstools.normalizer.base import Normalizer +from gstools.normalizer.methods import ( + LogNormal, + BoxCox, + BoxCoxShift, + YeoJohnson, + Modulus, + Manly, +) +from gstools.normalizer.tools import ( + apply_mean_norm_trend, + remove_trend_norm_mean, +) + +__all__ = [ + "Normalizer", + "LogNormal", + "BoxCox", + "BoxCoxShift", + "YeoJohnson", + "Modulus", + "Manly", + "apply_mean_norm_trend", + "remove_trend_norm_mean", +] diff --git a/gstools/normalizer/base.py b/gstools/normalizer/base.py new file mode 100644 index 000000000..da6e0a9cb --- /dev/null +++ b/gstools/normalizer/base.py @@ -0,0 +1,259 @@ +# -*- coding: utf-8 -*- +""" +GStools subpackage providing the base class for normalizers. + +.. currentmodule:: gstools.normalizer.base + +The following classes are provided + +.. autosummary:: + Normalizer +""" +# pylint: disable=R0201 +import warnings +import numpy as np +import scipy.misc as spm +import scipy.optimize as spo + + +class Normalizer: + """Normalizer class. + + Parameters + ---------- + data : array_like, optional + Input data to fit the transformation to in order to gain normality. + The default is None. + **parameter + Specified parameters given by name. If not given, default parameters + will be used. 
+ """ + + default_parameter = {} + """:class:`dict`: Default parameters of the Normalizer.""" + normalize_range = (-np.inf, np.inf) + """:class:`tuple`: Valid range for input data.""" + denormalize_range = (-np.inf, np.inf) + """:class:`tuple`: Valid range for output/normal data.""" + _dx = 1e-6 # dx for numerical derivative + + def __init__(self, data=None, **parameter): + # only use parameter, that have a provided default value + for key, value in self.default_parameter.items(): + setattr(self, key, parameter.get(key, value)) + # fit parameters if data is given + if data is not None: + self.fit(data) + # optimization results + self._opti = None + # precision for printing + self._prec = 3 + + def _denormalize(self, data): + return data + + def _normalize(self, data): + return data + + def _derivative(self, data): + return spm.derivative(self._normalize, data, dx=self._dx) + + def _loglikelihood(self, data): + add = -0.5 * np.size(data) * (np.log(2 * np.pi) + 1) + return self._kernel_loglikelihood(data) + add + + def _kernel_loglikelihood(self, data): + res = -0.5 * np.size(data) * np.log(np.var(self._normalize(data))) + return res + np.sum(np.log(np.maximum(1e-16, self._derivative(data)))) + + def _check_input(self, data, data_range=None, return_output_template=True): + is_data = np.array(np.logical_not(np.isnan(data))) + if return_output_template: + out = np.full_like(data, np.nan, dtype=np.double) + data = np.array(data, dtype=np.double)[is_data] + if data_range is not None and np.min(np.abs(data_range)) < np.inf: + dat_in = np.logical_and(data > data_range[0], data < data_range[1]) + if not np.all(dat_in): + warnings.warn( + "{0}: data (min: {1}, max: {2}) out of range: {3}. 
" + "Affected values will be treated as NaN.".format( + self.name, np.min(data), np.max(data), data_range + ) + ) + is_data[is_data] &= dat_in + data = data[dat_in] + if return_output_template: + return data, is_data, out + return data + + def denormalize(self, data): + """Transform to input distribution. + + Parameters + ---------- + data : array_like + Input data (normal distributed). + + Returns + ------- + :class:`numpy.ndarray` + Denormalized data. + """ + data, is_data, out = self._check_input(data, self.denormalize_range) + out[is_data] = self._denormalize(data) + return out + + def normalize(self, data): + """Transform to normal distribution. + + Parameters + ---------- + data : array_like + Input data (not normal distributed). + + Returns + ------- + :class:`numpy.ndarray` + Normalized data. + """ + data, is_data, out = self._check_input(data, self.normalize_range) + out[is_data] = self._normalize(data) + return out + + def derivative(self, data): + """Factor for normal PDF to gain target PDF. + + Parameters + ---------- + data : array_like + Input data (not normal distributed). + + Returns + ------- + :class:`numpy.ndarray` + Derivative of the normalization transformation function. + """ + data, is_data, out = self._check_input(data, self.normalize_range) + out[is_data] = self._derivative(data) + return out + + def likelihood(self, data): + """Likelihood for given data with current parameters. + + Parameters + ---------- + data : array_like + Input data to fit the transformation to in order to gain normality. + + Returns + ------- + :class:`float` + Likelihood of the given data. + """ + return np.exp(self.loglikelihood(data)) + + def loglikelihood(self, data): + """Log-Likelihood for given data with current parameters. + + Parameters + ---------- + data : array_like + Input data to fit the transformation to in order to gain normality. + + Returns + ------- + :class:`float` + Log-Likelihood of the given data. 
+        """
+        data = self._check_input(data, self.normalize_range, False)
+        return self._loglikelihood(data)
+
+    def kernel_loglikelihood(self, data):
+        """Kernel Log-Likelihood for given data with current parameters.
+
+        Parameters
+        ----------
+        data : array_like
+            Input data to fit the transformation to in order to gain normality.
+
+        Returns
+        -------
+        :class:`float`
+            Kernel Log-Likelihood of the given data.
+
+        Notes
+        -----
+        This loglikelihood function is neglecting additive constants,
+        that are not needed for optimization.
+        """
+        data = self._check_input(data, self.normalize_range, False)
+        return self._kernel_loglikelihood(data)
+
+    def fit(self, data, skip=None, **kwargs):
+        """Fitting the transformation to data by maximizing Log-Likelihood.
+
+        Parameters
+        ----------
+        data : array_like
+            Input data to fit the transformation to in order to gain normality.
+        skip : :class:`list` of :class:`str` or :any:`None`, optional
+            Names of parameters to be skipped in fitting.
+            The default is None.
+        **kwargs
+            Keyword arguments passed to :any:`scipy.optimize.minimize_scalar`
+            when only one parameter present or :any:`scipy.optimize.minimize`.
+
+        Returns
+        -------
+        :class:`dict`
+            Optimal parameters given by names.
+        """
+        skip = [] if skip is None else skip
+        all_names = sorted(self.default_parameter)
+        para_names = [name for name in all_names if name not in skip]
+
+        def _neg_kllf(par, dat):
+            for name, val in zip(para_names, np.atleast_1d(par)):
+                setattr(self, name, val)
+            return -self.kernel_loglikelihood(dat)
+
+        if len(para_names) == 0:  # transformations without para. (no opti.)
+            warnings.warn(f"{self.name}.fit: no parameters!")
+            return {}
+        if len(para_names) == 1:  # one-para. transformations (simple opti.)
+ # default bracket like in scipy's boxcox (if not given) + kwargs.setdefault("bracket", (-2, 2)) + out = spo.minimize_scalar(_neg_kllf, args=(data,), **kwargs) + else: # general case + # init guess from current parameters (if x0 not given) + kwargs.setdefault("x0", [getattr(self, p) for p in para_names]) + out = spo.minimize(_neg_kllf, args=(data,), **kwargs) + # save optimization results + self._opti = out + for name, val in zip(para_names, np.atleast_1d(out.x)): + setattr(self, name, val) + return {name: getattr(self, name) for name in all_names} + + def __eq__(self, other): + """Compare Normalizers.""" + # check for correct base class + if type(self) is not type(other): + return False + # if base class is same, this is safe + for val in self.default_parameter: + if not np.isclose(getattr(self, val), getattr(other, val)): + return False + return True + + @property + def name(self): + """:class:`str`: The name of the normalizer class.""" + return self.__class__.__name__ + + def __repr__(self): + """Return String representation.""" + para_strs = [ + "{0}={1:.{2}}".format(p, float(getattr(self, p)), self._prec) + for p in sorted(self.default_parameter) + ] + return f"{self.name}({', '.join(para_strs)})" diff --git a/gstools/normalizer/methods.py b/gstools/normalizer/methods.py new file mode 100644 index 000000000..9c73aefe6 --- /dev/null +++ b/gstools/normalizer/methods.py @@ -0,0 +1,362 @@ +# -*- coding: utf-8 -*- +""" +GStools subpackage providing different normalizer transformations. + +.. currentmodule:: gstools.normalizer.methods + +The following classes are provided + +.. autosummary:: + LogNormal + BoxCox + BoxCoxShift + YeoJohnson + Modulus + Manly +""" +# pylint: disable=E1101 +import numpy as np +from gstools.normalizer.base import Normalizer + + +class LogNormal(Normalizer): + r"""Log-normal fields. + + Notes + ----- + This parameter-free transformation is given by: + + .. 
math:: + y=\log(x) + """ + + normalize_range = (0.0, np.inf) + """Valid range for input data.""" + + def _denormalize(self, data): + return np.exp(data) + + def _normalize(self, data): + return np.log(data) + + def _derivative(self, data): + return np.power(data, -1) + + +class BoxCox(Normalizer): + r"""Box-Cox (1964) transformed fields. + + Parameters + ---------- + data : array_like, optional + Input data to fit the transformation in order to gain normality. + The default is None. + lmbda : :class:`float`, optional + Shape parameter. Default: 1 + + Notes + ----- + This transformation is given by [Box1964]_: + + .. math:: + y=\begin{cases} + \frac{x^{\lambda} - 1}{\lambda} & \lambda\neq 0 \\ + \log(x) & \lambda = 0 + \end{cases} + + References + ---------- + .. [Box1964] G.E.P. Box and D.R. Cox, + "An Analysis of Transformations", + Journal of the Royal Statistical Society B, 26, 211-252, (1964) + """ + + default_parameter = {"lmbda": 1} + """:class:`dict`: Default parameter of the BoxCox-Normalizer.""" + normalize_range = (0.0, np.inf) + """:class:`tuple`: Valid range for input data.""" + + @property + def denormalize_range(self): + """:class:`tuple`: Valid range for output data depending on lmbda. + + `(-1/lmbda, inf)` or `(-inf, -1/lmbda)` + """ + if np.isclose(self.lmbda, 0): + return (-np.inf, np.inf) + if self.lmbda < 0: + return (-np.inf, -np.divide(1, self.lmbda)) + return (-np.divide(1, self.lmbda), np.inf) + + def _denormalize(self, data): + if np.isclose(self.lmbda, 0): + return np.exp(data) + return (1 + np.multiply(data, self.lmbda)) ** (1 / self.lmbda) + + def _normalize(self, data): + if np.isclose(self.lmbda, 0): + return np.log(data) + return (np.power(data, self.lmbda) - 1) / self.lmbda + + def _derivative(self, data): + return np.power(data, self.lmbda - 1) + + +class BoxCoxShift(Normalizer): + r"""Box-Cox (1964) transformed fields including shifting. 
+ + Parameters + ---------- + data : array_like, optional + Input data to fit the transformation in order to gain normality. + The default is None. + lmbda : :class:`float`, optional + Shape parameter. Default: 1 + shift : :class:`float`, optional + Shift parameter. Default: 0 + + Notes + ----- + This transformation is given by [Box1964]_: + + .. math:: + y=\begin{cases} + \frac{(x+s)^{\lambda} - 1}{\lambda} & \lambda\neq 0 \\ + \log(x+s) & \lambda = 0 + \end{cases} + + Fitting the shift parameter is rather hard. You should consider skipping + "shift" during fitting: + + >>> data = range(5) + >>> norm = BoxCoxShift(shift=0.5) + >>> norm.fit(data, skip=["shift"]) + {'shift': 0.5, 'lmbda': 0.6747515267420799} + + References + ---------- + .. [Box1964] G.E.P. Box and D.R. Cox, + "An Analysis of Transformations", + Journal of the Royal Statistical Society B, 26, 211-252, (1964) + """ + + default_parameter = {"shift": 0, "lmbda": 1} + """:class:`dict`: Default parameters of the BoxCoxShift-Normalizer.""" + + @property + def normalize_range(self): + """:class:`tuple`: Valid range for input data depending on shift. + + `(-shift, inf)` + """ + return (-self.shift, np.inf) + + @property + def denormalize_range(self): + """:class:`tuple`: Valid range for output data depending on lmbda. 
+ + `(-1/lmbda, inf)` or `(-inf, -1/lmbda)` + """ + if np.isclose(self.lmbda, 0): + return (-np.inf, np.inf) + if self.lmbda < 0: + return (-np.inf, -np.divide(1, self.lmbda)) + return (-np.divide(1, self.lmbda), np.inf) + + def _denormalize(self, data): + if np.isclose(self.lmbda, 0): + return np.exp(data) - self.shift + return (1 + np.multiply(data, self.lmbda)) ** ( + 1 / self.lmbda + ) - self.shift + + def _normalize(self, data): + if np.isclose(self.lmbda, 0): + return np.log(np.add(data, self.shift)) + return (np.add(data, self.shift) ** self.lmbda - 1) / self.lmbda + + def _derivative(self, data): + return np.power(np.add(data, self.shift), self.lmbda - 1) + + +class YeoJohnson(Normalizer): + r"""Yeo-Johnson (2000) transformed fields. + + Parameters + ---------- + data : array_like, optional + Input data to fit the transformation in order to gain normality. + The default is None. + lmbda : :class:`float`, optional + Shape parameter. Default: 1 + + Notes + ----- + This transformation is given by [Yeo2000]_: + + .. math:: + y=\begin{cases} + \frac{(x+1)^{\lambda} - 1}{\lambda} + & x\geq 0,\, \lambda\neq 0 \\ + \log(x+1) + & x\geq 0,\, \lambda = 0 \\ + -\frac{(|x|+1)^{2-\lambda} - 1}{2-\lambda} + & x<0,\, \lambda\neq 2 \\ + -\log(|x|+1) + & x<0,\, \lambda = 2 + \end{cases} + + + References + ---------- + .. [Yeo2000] I.K. Yeo and R.A. Johnson, + "A new family of power transformations to improve normality or + symmetry." Biometrika, 87(4), pp.954-959, (2000). 
+ """ + + default_parameter = {"lmbda": 1} + """:class:`dict`: Default parameter of the YeoJohnson-Normalizer.""" + + def _denormalize(self, data): + data = np.asanyarray(data) + res = np.zeros_like(data, dtype=np.double) + pos = data >= 0 + # when data >= 0 + if np.isclose(self.lmbda, 0): + res[pos] = np.expm1(data[pos]) + else: # self.lmbda != 0 + res[pos] = np.power(data[pos] * self.lmbda + 1, 1 / self.lmbda) - 1 + # when data < 0 + if np.isclose(self.lmbda, 2): + res[~pos] = -np.expm1(-data[~pos]) + else: # self.lmbda != 2 + res[~pos] = 1 - np.power( + -(2 - self.lmbda) * data[~pos] + 1, 1 / (2 - self.lmbda) + ) + return res + + def _normalize(self, data): + data = np.asanyarray(data) + res = np.zeros_like(data, dtype=np.double) + pos = data >= 0 + # when data >= 0 + if np.isclose(self.lmbda, 0): + res[pos] = np.log1p(data[pos]) + else: # self.lmbda != 0 + res[pos] = (np.power(data[pos] + 1, self.lmbda) - 1) / self.lmbda + # when data < 0 + if np.isclose(self.lmbda, 2): + res[~pos] = -np.log1p(-data[~pos]) + else: # self.lmbda != 2 + res[~pos] = -(np.power(-data[~pos] + 1, 2 - self.lmbda) - 1) / ( + 2 - self.lmbda + ) + return res + + def _derivative(self, data): + return (np.abs(data) + 1) ** (np.sign(data) * (self.lmbda - 1)) + + +class Modulus(Normalizer): + r"""Modulus or John-Draper (1980) transformed fields. + + Parameters + ---------- + data : array_like, optional + Input data to fit the transformation in order to gain normality. + The default is None. + lmbda : :class:`float`, optional + Shape parameter. Default: 1 + + Notes + ----- + This transformation is given by [John1980]_: + + .. math:: + y=\begin{cases} + \mathrm{sgn}(x)\frac{(|x|+1)^{\lambda} - 1}{\lambda} & \lambda\neq 0 \\ + \mathrm{sgn}(x)\log(|x|+1) & \lambda = 0 + \end{cases} + + References + ---------- + .. [John1980] J. A. John, and N. R. Draper, + "An Alternative Family of Transformations." 
Journal + of the Royal Statistical Society C, 29.2, 190-197, (1980) + """ + + default_parameter = {"lmbda": 1} + """:class:`dict`: Default parameter of the Modulus-Normalizer.""" + + def _denormalize(self, data): + if np.isclose(self.lmbda, 0): + return np.sign(data) * np.expm1(np.abs(data)) + return np.sign(data) * ( + (1 + self.lmbda * np.abs(data)) ** (1 / self.lmbda) - 1 + ) + + def _normalize(self, data): + if np.isclose(self.lmbda, 0): + return np.sign(data) * np.log1p(np.abs(data)) + return ( + np.sign(data) * ((np.abs(data) + 1) ** self.lmbda - 1) / self.lmbda + ) + + def _derivative(self, data): + return np.power(np.abs(data) + 1, self.lmbda - 1) + + +class Manly(Normalizer): + r"""Manly (1971) transformed fields. + + Parameters + ---------- + data : array_like, optional + Input data to fit the transformation in order to gain normality. + The default is None. + lmbda : :class:`float`, optional + Shape parameter. Default: 1 + + Notes + ----- + This transformation is given by [Manly1976]_: + + .. math:: + y=\begin{cases} + \frac{\exp(\lambda x) - 1}{\lambda} & \lambda\neq 0 \\ + x & \lambda = 0 + \end{cases} + + References + ---------- + .. [Manly1976] B. F. J. Manly, "Exponential data transformations.", + Journal of the Royal Statistical Society D, 25.1, 37-42 (1976). + """ + + default_parameter = {"lmbda": 1} + """:class:`dict`: Default parameter of the Manly-Normalizer.""" + + @property + def denormalize_range(self): + """:class:`tuple`: Valid range for output data depending on lmbda. 
+ + `(-1/lmbda, inf)` or `(-inf, -1/lmbda)` + """ + if np.isclose(self.lmbda, 0): + return (-np.inf, np.inf) + if self.lmbda < 0: + return (-np.inf, np.divide(1, self.lmbda)) + return (-np.divide(1, self.lmbda), np.inf) + + def _denormalize(self, data): + if np.isclose(self.lmbda, 0): + return data + return np.log1p(np.multiply(data, self.lmbda)) / self.lmbda + + def _normalize(self, data): + if np.isclose(self.lmbda, 0): + return data + return np.expm1(np.multiply(data, self.lmbda)) / self.lmbda + + def _derivative(self, data): + return np.exp(np.multiply(data, self.lmbda)) diff --git a/gstools/normalizer/tools.py b/gstools/normalizer/tools.py new file mode 100644 index 000000000..f71b758f0 --- /dev/null +++ b/gstools/normalizer/tools.py @@ -0,0 +1,186 @@ +# -*- coding: utf-8 -*- +""" +GStools subpackage providing tools for Normalizers. + +.. currentmodule:: gstools.normalizer.tools + +The following classes and functions are provided + +.. autosummary:: + apply_mean_norm_trend + remove_trend_norm_mean +""" +import numpy as np + +from gstools.normalizer.base import Normalizer +from gstools.tools.misc import eval_func +from gstools.tools.geometric import ( + format_struct_pos_shape, + format_unstruct_pos_shape, +) + +__all__ = ["apply_mean_norm_trend", "remove_trend_norm_mean"] + + +def _check_normalizer(normalizer): + if isinstance(normalizer, type) and issubclass(normalizer, Normalizer): + normalizer = normalizer() + elif normalizer is None: + normalizer = Normalizer() + elif not isinstance(normalizer, Normalizer): + raise ValueError("Check: 'normalizer' not of type 'Normalizer'.") + return normalizer + + +def apply_mean_norm_trend( + pos, + field, + mean=None, + normalizer=None, + trend=None, + mesh_type="unstructured", + value_type="scalar", + check_shape=True, + stacked=False, +): + """ + Apply mean, de-normalization and trend to given field. 
+ + Parameters + ---------- + pos : :any:`iterable` + Position tuple, containing main direction and transversal directions. + field : :class:`numpy.ndarray` or :class:`list` of :class:`numpy.ndarray` + The spatially distributed data. + You can pass a list of fields, that will be used simultaneously. + Then you need to set ``stacked=True``. + mean : :any:`None` or :class:`float` or :any:`callable`, optional + Mean of the field if wanted. Could also be a callable. + The default is None. + normalizer : :any:`None` or :any:`Normalizer`, optional + Normalizer to be applied to the field. + The default is None. + trend : :any:`None` or :class:`float` or :any:`callable`, optional + Trend of the denormalized fields. If no normalizer is applied, + this behaves equal to 'mean'. + The default is None. + mesh_type : :class:`str`, optional + 'structured' / 'unstructured' + Default: 'unstructured' + value_type : :class:`str`, optional + Value type of the field. Either "scalar" or "vector". + The default is "scalar". + check_shape : :class:`bool`, optional + Wheather to check pos and field shapes. The default is True. + stacked : :class:`bool`, optional + Wheather the field is stacked or not. The default is False. + + Returns + ------- + field : :class:`numpy.ndarray` + The transformed field. 
+ """ + normalizer = _check_normalizer(normalizer) + if check_shape: + if mesh_type != "unstructured": + pos, shape, dim = format_struct_pos_shape( + pos, field.shape, check_stacked_shape=stacked + ) + else: + pos, shape, dim = format_unstruct_pos_shape( + pos, field.shape, check_stacked_shape=stacked + ) + field = np.array(field, dtype=np.double).reshape(shape) + else: + dim = len(pos) + if not stacked: + field = [field] + field_cnt = len(field) + for i in range(field_cnt): + field[i] += eval_func(mean, pos, dim, mesh_type, value_type, True) + field = normalizer.denormalize(field) + for i in range(field_cnt): + field[i] += eval_func(trend, pos, dim, mesh_type, value_type, True) + return field if stacked else field[0] + + +def remove_trend_norm_mean( + pos, + field, + mean=None, + normalizer=None, + trend=None, + mesh_type="unstructured", + value_type="scalar", + check_shape=True, + stacked=False, + fit_normalizer=False, +): + """ + Remove trend, de-normalization and mean from given field. + + Parameters + ---------- + pos : :any:`iterable` + Position tuple, containing main direction and transversal directions. + field : :class:`numpy.ndarray` or :class:`list` of :class:`numpy.ndarray` + The spatially distributed data. + You can pass a list of fields, that will be used simultaneously. + Then you need to set ``stacked=True``. + mean : :any:`None` or :class:`float` or :any:`callable`, optional + Mean of the field if wanted. Could also be a callable. + The default is None. + normalizer : :any:`None` or :any:`Normalizer`, optional + Normalizer to be applied to the field. + The default is None. + trend : :any:`None` or :class:`float` or :any:`callable`, optional + Trend of the denormalized fields. If no normalizer is applied, + this behaves equal to 'mean'. + The default is None. + mesh_type : :class:`str`, optional + 'structured' / 'unstructured' + Default: 'unstructured' + value_type : :class:`str`, optional + Value type of the field. Either "scalar" or "vector". 
+ The default is "scalar". + check_shape : :class:`bool`, optional + Wheather to check pos and field shapes. The default is True. + stacked : :class:`bool`, optional + Wheather the field is stacked or not. The default is False. + fit_normalizer : :class:`bool`, optional + Wheater to fit the data-normalizer to the given (detrended) field. + Default: False + + Returns + ------- + field : :class:`numpy.ndarray` + The cleaned field. + normalizer : :any:`Normalizer`, optional + The fitted normalizer for the given data. + Only provided if `fit_normalizer` is True. + """ + normalizer = _check_normalizer(normalizer) + if check_shape: + if mesh_type != "unstructured": + pos, shape, dim = format_struct_pos_shape( + pos, field.shape, check_stacked_shape=stacked + ) + else: + pos, shape, dim = format_unstruct_pos_shape( + pos, field.shape, check_stacked_shape=stacked + ) + field = np.array(field, dtype=np.double).reshape(shape) + else: + dim = len(pos) + if not stacked: + field = [field] + field_cnt = len(field) + for i in range(field_cnt): + field[i] -= eval_func(trend, pos, dim, mesh_type, value_type, True) + if fit_normalizer: + normalizer.fit(field) + field = normalizer.normalize(field) + for i in range(field_cnt): + field[i] -= eval_func(mean, pos, dim, mesh_type, value_type, True) + out = field if stacked else field[0] + return (out, normalizer) if fit_normalizer else out diff --git a/gstools/random/rng.py b/gstools/random/rng.py index e2915f6f1..5b740f7c2 100644 --- a/gstools/random/rng.py +++ b/gstools/random/rng.py @@ -9,8 +9,7 @@ .. autosummary:: RNG """ -# pylint: disable=no-member - +# pylint: disable=E1101 import numpy as np import numpy.random as rand import emcee as mc @@ -61,10 +60,10 @@ def sample_ln_pdf( nwalkers : :class:`int`, optional The number of walkers in the mcmc sampler. Used for the emcee.EnsembleSampler class. - Default: 100 + Default: 50 burn_in : :class:`int`, optional Number of burn-in runs in the mcmc algorithm. 
- Default: 100 + Default: 20 oversampling_factor : :class:`int`, optional To guess the sample number needed for proper results, we use a factor for oversampling. The intern used sample-size is @@ -154,9 +153,11 @@ def sample_sphere(self, dim, size=None): x[, y[, z]] coordinates on the sphere with shape (dim, size) """ if size is None: # pragma: no cover - coord = np.empty(dim, dtype=np.double) + coord = np.empty((dim, 1), dtype=np.double) else: - coord = np.empty((dim, size), dtype=np.double) + coord = np.empty( # saver conversion of size to resulting shape + (dim,) + tuple(np.atleast_1d(size)), dtype=np.double + ) if dim == 1: coord[0] = self.random.choice([-1, 1], size=size) elif dim == 2: @@ -169,15 +170,33 @@ def sample_sphere(self, dim, size=None): coord[0] = np.sqrt(1.0 - ang2 ** 2) * np.cos(ang1) coord[1] = np.sqrt(1.0 - ang2 ** 2) * np.sin(ang1) coord[2] = ang2 - return coord + else: # pragma: no cover + # http://corysimon.github.io/articles/uniformdistn-on-sphere/ + coord = self.random.normal(size=coord.shape) + while True: # loop until all norms are non-zero + norm = np.linalg.norm(coord, axis=0) + # check for zero norms + zero_norms = np.isclose(norm, 0) + # exit the loop if all norms are non-zero + if not np.any(zero_norms): + break + # transpose, since the next transpose reverses axis order + zero_samples = zero_norms.T.nonzero() + # need to transpose to have dim-axis last + new_shape = coord.T[zero_samples].shape + # resample the zero norm samples + coord.T[zero_samples] = self.random.normal(size=new_shape) + # project onto sphere + coord = coord / norm + return np.reshape(coord, dim) if size is None else coord @property def random(self): - """:any:`numpy.random.mtrand.RandomState`: Randomstate. + """:any:`numpy.random.RandomState`: Randomstate. Get a stream to the numpy Random number generator. You can use this, to call any provided distribution - from :any:`numpy.random.mtrand.RandomState`. + from :any:`numpy.random.RandomState`. 
""" return rand.RandomState(self._master_rng()) @@ -194,16 +213,6 @@ def seed(self): def seed(self, new_seed=None): self._master_rng = MasterRNG(new_seed) - def __str__(self): - """Return String representation.""" - return self.__repr__() - def __repr__(self): """Return String representation.""" - return "RNG(seed={})".format(self.seed) - - -if __name__ == "__main__": # pragma: no cover - import doctest - - doctest.testmod() + return f"RNG(seed={self.seed})" diff --git a/gstools/random/tools.py b/gstools/random/tools.py index c4799ee41..30f41cae2 100644 --- a/gstools/random/tools.py +++ b/gstools/random/tools.py @@ -46,13 +46,9 @@ def seed(self): """ return self._seed - def __str__(self): - """Return String representation.""" - return self.__repr__() - def __repr__(self): """Return String representation.""" - return "RNG(seed={})".format(self.seed) + return f"MasterRNG(seed={self.seed})" def dist_gen(pdf_in=None, cdf_in=None, ppf_in=None, **kwargs): @@ -92,14 +88,14 @@ def dist_gen(pdf_in=None, cdf_in=None, ppf_in=None, **kwargs): if pdf_in is not None and cdf_in is not None: return DistPdfCdf(pdf_in, cdf_in, **kwargs) raise ValueError("Either pdf or cdf must be given") - else: - if pdf_in is not None and cdf_in is None: - return DistPdfPpf(pdf_in, ppf_in, **kwargs) - if pdf_in is None and cdf_in is not None: - return DistCdfPpf(cdf_in, ppf_in, **kwargs) - if pdf_in is not None and cdf_in is not None: - return DistPdfCdfPpf(pdf_in, cdf_in, ppf_in, **kwargs) - raise ValueError("pdf or cdf must be given along with the ppf") + + if pdf_in is not None and cdf_in is None: + return DistPdfPpf(pdf_in, ppf_in, **kwargs) + if pdf_in is None and cdf_in is not None: + return DistCdfPpf(cdf_in, ppf_in, **kwargs) + if pdf_in is not None and cdf_in is not None: + return DistPdfCdfPpf(pdf_in, cdf_in, ppf_in, **kwargs) + raise ValueError("pdf or cdf must be given along with the ppf") class DistPdf(rv_continuous): @@ -186,9 +182,3 @@ def _cdf(self, x, *args): def _ppf(self, q, 
*args): return self.ppf_in(q) - - -if __name__ == "__main__": # pragma: no cover - import doctest - - doctest.testmod() diff --git a/gstools/tools/__init__.py b/gstools/tools/__init__.py index 12de7d004..e03529c3b 100644 --- a/gstools/tools/__init__.py +++ b/gstools/tools/__init__.py @@ -19,6 +19,7 @@ ^^^^^^^^^^^^^^^^^ .. autosummary:: + confidence_scaling inc_gamma exp_int inc_beta @@ -30,11 +31,27 @@ ^^^^^^^^^ .. autosummary:: - xyz2pos - pos2xyz - r3d_x - r3d_y - r3d_z + rotated_main_axes + set_angles + set_anis + no_of_angles + rotation_planes + givens_rotation + matrix_rotate + matrix_derotate + matrix_isotropify + matrix_anisotropify + matrix_isometrize + matrix_anisometrize + ang2dir + generate_grid + generate_st_grid + +Misc +^^^^ + +.. autosummary:: + EARTH_RADIUS ---- """ @@ -49,6 +66,7 @@ ) from gstools.tools.special import ( + confidence_scaling, inc_gamma, exp_int, inc_beta, @@ -57,7 +75,28 @@ tpl_gau_spec_dens, ) -from gstools.tools.geometric import r3d_x, r3d_y, r3d_z, xyz2pos, pos2xyz +from gstools.tools.geometric import ( + set_angles, + set_anis, + no_of_angles, + rotation_planes, + givens_rotation, + matrix_rotate, + matrix_derotate, + matrix_isotropify, + matrix_anisotropify, + matrix_isometrize, + matrix_anisometrize, + rotated_main_axes, + ang2dir, + generate_grid, + generate_st_grid, +) + + +EARTH_RADIUS = 6371.0 +"""float: earth radius for WGS84 ellipsoid in km""" + __all__ = [ "vtk_export", @@ -66,15 +105,27 @@ "to_vtk", "to_vtk_structured", "to_vtk_unstructured", + "confidence_scaling", "inc_gamma", "exp_int", "inc_beta", "tplstable_cor", "tpl_exp_spec_dens", "tpl_gau_spec_dens", - "xyz2pos", - "pos2xyz", - "r3d_x", - "r3d_y", - "r3d_z", + "set_angles", + "set_anis", + "no_of_angles", + "rotation_planes", + "givens_rotation", + "matrix_rotate", + "matrix_derotate", + "matrix_isotropify", + "matrix_anisotropify", + "matrix_isometrize", + "matrix_anisometrize", + "rotated_main_axes", + "ang2dir", + "generate_grid", + "generate_st_grid", + 
"EARTH_RADIUS", ] diff --git a/gstools/tools/export.py b/gstools/tools/export.py index 9174830b9..e3e2c9b55 100644 --- a/gstools/tools/export.py +++ b/gstools/tools/export.py @@ -15,11 +15,9 @@ to_vtk_unstructured """ # pylint: disable=C0103, E1101 - import numpy as np from pyevtk.hl import gridToVTK, pointsToVTK -from gstools.tools.geometric import pos2xyz try: import pyvista as pv @@ -40,15 +38,17 @@ def _vtk_structured_helper(pos, fields): - """An internal helper to extract what is needed for the vtk rectilinear grid - """ + """Extract field info for vtk rectilinear grid.""" if not isinstance(fields, dict): fields = {"field": fields} - x, y, z = pos2xyz(pos) - if y is None: - y = np.array([0]) - if z is None: - z = np.array([0]) + if len(pos) > 3: + raise ValueError( + "gstools.vtk_export_structured: " + "vtk export only possible for dim=1,2,3" + ) + x = pos[0] + y = pos[1] if len(pos) > 1 else np.array([0]) + z = pos[2] if len(pos) > 2 else np.array([0]) # need fortran order in VTK for field in fields: fields[field] = fields[field].reshape(-1, order="F") @@ -80,12 +80,10 @@ def to_vtk_structured(pos, fields): # pragma: no cover live on the point data of this PyVista dataset. 
""" x, y, z, fields = _vtk_structured_helper(pos=pos, fields=fields) - try: - import pyvista as pv - + if pv is not None: grid = pv.RectilinearGrid(x, y, z) grid.point_arrays.update(fields) - except ImportError: + else: raise ImportError("Please install PyVista to create VTK datasets.") return grid @@ -113,11 +111,14 @@ def vtk_export_structured(filename, pos, fields): # pragma: no cover def _vtk_unstructured_helper(pos, fields): if not isinstance(fields, dict): fields = {"field": fields} - x, y, z = pos2xyz(pos) - if y is None: - y = np.zeros_like(x) - if z is None: - z = np.zeros_like(x) + if len(pos) > 3: + raise ValueError( + "gstools.vtk_export_structured: " + "vtk export only possible for dim=1,2,3" + ) + x = pos[0] + y = pos[1] if len(pos) > 1 else np.zeros_like(x) + z = pos[2] if len(pos) > 2 else np.zeros_like(x) for field in fields: fields[field] = fields[field].reshape(-1) if ( @@ -153,12 +154,10 @@ def to_vtk_unstructured(pos, fields): # pragma: no cover a point cloud with no topology. """ x, y, z, fields = _vtk_unstructured_helper(pos=pos, fields=fields) - try: - import pyvista as pv - + if pv is not None: grid = pv.PolyData(np.c_[x, y, z]).cast_to_unstructured_grid() grid.point_arrays.update(fields) - except ImportError: + else: raise ImportError("Please install PyVista to create VTK datasets.") return grid @@ -206,7 +205,7 @@ def to_vtk(pos, fields, mesh_type="unstructured"): # pragma: no cover :class:`pyvista.RectilinearGrid` and unstructured meshes will return an :class:`pyvista.UnstructuredGrid` object. """ - if mesh_type == "structured": + if mesh_type != "unstructured": grid = to_vtk_structured(pos=pos, fields=fields) else: grid = to_vtk_unstructured(pos=pos, fields=fields) @@ -233,9 +232,6 @@ def vtk_export( mesh_type : :class:`str`, optional 'structured' / 'unstructured'. 
Default: structured """ - if mesh_type == "structured": + if mesh_type != "unstructured": return vtk_export_structured(filename=filename, pos=pos, fields=fields) - else: - return vtk_export_unstructured( - filename=filename, pos=pos, fields=fields - ) + return vtk_export_unstructured(filename=filename, pos=pos, fields=fields) diff --git a/gstools/tools/geometric.py b/gstools/tools/geometric.py index 6edcd7586..204df5aea 100644 --- a/gstools/tools/geometric.py +++ b/gstools/tools/geometric.py @@ -7,157 +7,697 @@ The following functions are provided .. autosummary:: - r3d_x - r3d_y - r3d_z - pos2xyz - xyz2pos + set_angles + set_anis + no_of_angles + rotation_planes + givens_rotation + matrix_rotate + matrix_derotate + matrix_isotropify + matrix_anisotropify + matrix_isometrize + matrix_anisometrize + rotated_main_axes + generate_grid + generate_st_grid + format_struct_pos_dim + format_struct_pos_shape + format_unstruct_pos_shape + ang2dir + latlon2pos + pos2latlon + chordal_to_great_circle """ # pylint: disable=C0103 - import numpy as np -__all__ = ["r3d_x", "r3d_y", "r3d_z", "pos2xyz", "xyz2pos"] +__all__ = [ + "set_angles", + "set_anis", + "no_of_angles", + "rotation_planes", + "givens_rotation", + "matrix_rotate", + "matrix_derotate", + "matrix_isotropify", + "matrix_anisotropify", + "matrix_isometrize", + "matrix_anisometrize", + "rotated_main_axes", + "generate_grid", + "generate_st_grid", + "format_struct_pos_dim", + "format_struct_pos_shape", + "format_unstruct_pos_shape", + "ang2dir", + "latlon2pos", + "pos2latlon", + "chordal_to_great_circle", +] # Geometric functions ######################################################### -def r3d_x(theta): - """Rotation matrix about x axis. +def set_angles(dim, angles): + """Set the angles for the given dimension. 
+ + Parameters + ---------- + dim : :class:`int` + spatial dimension + angles : :class:`float` or :class:`list` + the angles of the SRF + + Returns + ------- + angles : :class:`float` + the angles fitting to the dimension + + Notes + ----- + If too few angles are given, they are filled up with `0`. + """ + out_angles = np.array(angles, dtype=np.double) + out_angles = np.atleast_1d(out_angles)[: no_of_angles(dim)] + # fill up the rotation angle array with zeros + out_angles = np.pad( + out_angles, + (0, no_of_angles(dim) - len(out_angles)), + "constant", + constant_values=0.0, + ) + return out_angles + + +def set_anis(dim, anis): + """Set the anisotropy ratios for the given dimension. Parameters ---------- - theta : :class:`float` - Rotation angle + dim : :class:`int` + spatial dimension + anis : :class:`list` of :class:`float` + the anisotropy of length scales along the transversal directions + + Returns + ------- + anis : :class:`list` of :class:`float` + the anisotropy of length scales fitting the dimensions + + Notes + ----- + If too few anisotropy ratios are given, they are filled up with `1`. + """ + out_anis = np.array(anis, dtype=np.double) + out_anis = np.atleast_1d(out_anis)[: dim - 1] + if len(out_anis) < dim - 1: + # fill up the anisotropies with ones, such that len()==dim-1 + out_anis = np.pad( + out_anis, + (dim - len(out_anis) - 1, 0), + "constant", + constant_values=1.0, + ) + return out_anis + + +def no_of_angles(dim): + """Calculate number of rotation angles depending on the dimension. + + Parameters + ---------- + dim : :class:`int` + spatial dimension + + Returns + ------- + :class:`int` + Number of angles. + """ + return (dim * (dim - 1)) // 2 + + +def rotation_planes(dim): + """Get all 2D sub-planes for rotation. + + Parameters + ---------- + dim : :class:`int` + spatial dimension + + Returns + ------- + :class:`list` of :class:`tuple` of :class:`int` + All 2D sub-planes for rotation. 
+ """ + return [(i, j) for j in range(1, dim) for i in range(j)] + + +def givens_rotation(dim, plane, angle): + """Givens rotation matrix in arbitrary dimensions. + + Parameters + ---------- + dim : :class:`int` + spatial dimension + plane : :class:`list` of :class:`int` + the plane to rotate in, given by the indices of the two defining axes. + For example the xy plane is defined by `(0,1)` + angle : :class:`float` or :class:`list` + the rotation angle in the given plane + + Returns + ------- + :class:`numpy.ndarray` + Rotation matrix. + """ + result = np.eye(dim, dtype=np.double) + result[plane[0], plane[0]] = np.cos(angle) + result[plane[1], plane[1]] = np.cos(angle) + result[plane[0], plane[1]] = -np.sin(angle) + result[plane[1], plane[0]] = np.sin(angle) + return result + + +def matrix_rotate(dim, angles): + """Create a matrix to rotate points to the target coordinate-system. + + Parameters + ---------- + dim : :class:`int` + spatial dimension + angles : :class:`float` or :class:`list` + the rotation angles of the target coordinate-system + + Returns + ------- + :class:`numpy.ndarray` + Rotation matrix. + """ + angles = set_angles(dim, angles) + planes = rotation_planes(dim) + result = np.eye(dim, dtype=np.double) + for i, (angle, plane) in enumerate(zip(angles, planes)): + # angles have alternating signs to match tait-bryan + result = np.matmul( + givens_rotation(dim, plane, (-1) ** i * angle), result + ) + return result + + +def matrix_derotate(dim, angles): + """Create a matrix to derotate points to the initial coordinate-system. + + Parameters + ---------- + dim : :class:`int` + spatial dimension + angles : :class:`float` or :class:`list` + the rotation angles of the target coordinate-system Returns ------- :class:`numpy.ndarray` Rotation matrix. 
""" - sin = np.sin(theta) - cos = np.cos(theta) - return np.array(((1.0, +0.0, +0.0), (0.0, cos, -sin), (0.0, sin, cos))) + # derotating by taking negative angles + angles = -set_angles(dim, angles) + planes = rotation_planes(dim) + result = np.eye(dim, dtype=np.double) + for i, (angle, plane) in enumerate(zip(angles, planes)): + # angles have alternating signs to match tait bryan + result = np.matmul( + result, givens_rotation(dim, plane, (-1) ** i * angle) + ) + return result -def r3d_y(theta): - """Rotation matrix about y axis. +def matrix_isotropify(dim, anis): + """Create a stretching matrix to make things isotrope. Parameters ---------- - theta : :class:`float` - Rotation angle + dim : :class:`int` + spatial dimension + anis : :class:`list` of :class:`float` + the anisotropy of length scales along the transversal directions Returns ------- :class:`numpy.ndarray` - Rotation matrix. + Stretching matrix. """ - sin = np.sin(theta) - cos = np.cos(theta) - return np.array(((+cos, 0.0, sin), (+0.0, 1.0, +0.0), (-sin, 0.0, cos))) + anis = set_anis(dim, anis) + return np.diag(np.concatenate(([1.0], 1.0 / anis))) -def r3d_z(theta): - """Rotation matrix about z axis. +def matrix_anisotropify(dim, anis): + """Create a stretching matrix to make things anisotrope. Parameters ---------- - theta : :class:`float` - Rotation angle + dim : :class:`int` + spatial dimension + anis : :class:`list` of :class:`float` + the anisotropy of length scales along the transversal directions Returns ------- :class:`numpy.ndarray` - Rotation matrix. + Stretching matrix. + """ + anis = set_anis(dim, anis) + return np.diag(np.concatenate(([1.0], anis))) + + +def matrix_isometrize(dim, angles, anis): + """Create a matrix to derotate points and make them isotrope. 
+ + Parameters + ---------- + dim : :class:`int` + spatial dimension + angles : :class:`float` or :class:`list` + the rotation angles of the target coordinate-system + anis : :class:`list` of :class:`float` + the anisotropy of length scales along the transversal directions + + Returns + ------- + :class:`numpy.ndarray` + Transformation matrix. + """ + return np.matmul( + matrix_isotropify(dim, anis), matrix_derotate(dim, angles) + ) + + +def matrix_anisometrize(dim, angles, anis): + """Create a matrix to rotate points and make them anisotrope. + + Parameters + ---------- + dim : :class:`int` + spatial dimension + angles : :class:`float` or :class:`list` + the rotation angles of the target coordinate-system + anis : :class:`list` of :class:`float` + the anisotropy of length scales along the transversal directions + + Returns + ------- + :class:`numpy.ndarray` + Transformation matrix. """ - sin = np.sin(theta) - cos = np.cos(theta) - return np.array(((cos, -sin, 0.0), (sin, +cos, 0.0), (+0.0, +0.0, 1.0))) + return np.matmul( + matrix_rotate(dim, angles), matrix_anisotropify(dim, anis) + ) + + +def rotated_main_axes(dim, angles): + """Create list of the main axis defined by the given system rotations. + + Parameters + ---------- + dim : :class:`int` + spatial dimension + angles : :class:`float` or :class:`list` + the rotation angles of the target coordinate-system + + Returns + ------- + :class:`numpy.ndarray` + Main axes of the target coordinate-system. + """ + return matrix_rotate(dim, angles).T + + +# grid routines ############################################################### + + +def generate_grid(pos): + """ + Generate grid from a structured position tuple. + + Parameters + ---------- + pos : :class:`tuple` of :class:`numpy.ndarray` + The structured position tuple. + + Returns + ------- + :class:`numpy.ndarray` + Unstructured position tuple. 
+ """ + return np.array(np.meshgrid(*pos, indexing="ij"), dtype=np.double).reshape( + (len(pos), -1) + ) + + +def generate_st_grid(pos, time, mesh_type="unstructured"): + """ + Generate spatio-temporal grid from a position tuple and time array. + + Parameters + ---------- + pos : :class:`tuple` of :class:`numpy.ndarray` + The (un-)structured position tuple. + time : :any:`iterable` + The time array. + mesh_type : :class:`str`, optional + 'structured' / 'unstructured' + Default: `"unstructured"` + + Returns + ------- + :class:`numpy.ndarray` + Unstructured spatio-temporal point tuple. + + Notes + ----- + Time dimension will be the last one. + """ + time = np.array(time, dtype=np.double).reshape(-1) + if mesh_type != "unstructured": + pos = generate_grid(pos) + else: + pos = np.array(pos, dtype=np.double, ndmin=2) + out = [np.repeat(p.reshape(-1), np.size(time)) for p in pos] + out.append(np.tile(time, np.size(pos[0]))) + return np.array(out, dtype=np.double) # conversion ################################################################## -def pos2xyz(pos, dtype=None, calc_dim=False, max_dim=3): - """Convert postional arguments to x, y, z. +def format_struct_pos_dim(pos, dim): + """ + Format a structured position tuple with given dimension. + + Parameters + ---------- + pos : :any:`iterable` + Position tuple, containing main direction and transversal directions. + dim : :class:`int` + Spatial dimension. + + Raises + ------ + ValueError + When position tuple doesn't match the given dimension. + + Returns + ------- + pos : :class:`tuple` of :class:`numpy.ndarray` + The formatted structured position tuple. + shape : :class:`tuple` + Shape of the resulting field. 
+ """ + if dim == 1: + pos = (np.array(pos, dtype=np.double).reshape(-1),) + elif len(pos) != dim: + raise ValueError("Formatting: position tuple doesn't match dimension.") + else: + pos = tuple(np.array(p_i, dtype=np.double).reshape(-1) for p_i in pos) + shape = tuple(len(p_i) for p_i in pos) + return pos, shape + + +def format_struct_pos_shape(pos, shape, check_stacked_shape=False): + """ + Format a structured position tuple with given shape. + + Shape could be stacked, when multiple fields are given. Parameters ---------- pos : :any:`iterable` - the position tuple, containing main direction and transversal - directions + Position tuple, containing main direction and transversal directions. + shape : :class:`tuple` + Shape of the input field. + check_stacked_shape : :class:`bool`, optional + Whether to check if given shape comes from stacked fields. + Default: False. + + Raises + ------ + ValueError + When position tuple doesn't match the given dimension. + + Returns + ------- + pos : :class:`tuple` of :class:`numpy.ndarray` + The formatted structured position tuple. + shape : :class:`tuple` + Shape of the resulting field. + dim : :class:`int` + Spatial dimension. 
+ """ + # some help from the given shape + shape_size = np.prod(shape) + stacked_shape_size = np.prod(shape[1:]) + wrong_shape = False + # now we try to be smart + try: + # if this works we have either: + # - a 1D array + # - nD array where all axes have same length (corner case) + check_pos = np.array(pos, dtype=np.double, ndmin=2) + except ValueError: + # if it doesn't work, we have a tuple of differently sized axes (easy) + dim = len(pos) + pos, pos_shape = format_struct_pos_dim(pos, dim) + # determine if we have a stacked field if wanted + if check_stacked_shape and stacked_shape_size == np.prod(pos_shape): + shape = (shape[0],) + pos_shape + # check if we have a single field with matching size + elif shape_size == np.prod(pos_shape): + shape = (1,) + pos_shape if check_stacked_shape else pos_shape + # if nothing works, we raise an error + else: + wrong_shape = True + else: + struct_size = np.prod([p_i.size for p_i in check_pos]) + # case: 1D unstacked + if check_pos.size == shape_size: + dim = 1 + pos, pos_shape = format_struct_pos_dim(check_pos, dim) + shape = (1,) + pos_shape if check_stacked_shape else pos_shape + # case: 1D and stacked + elif check_pos.size == stacked_shape_size: + dim = 1 + pos, pos_shape = format_struct_pos_dim(check_pos, dim) + cnt = shape[0] + shape = (cnt,) + pos_shape + wrong_shape = not check_stacked_shape + # case: nD unstacked + elif struct_size == shape_size: + dim = len(check_pos) + pos, pos_shape = format_struct_pos_dim(pos, dim) + shape = (1,) + pos_shape if check_stacked_shape else pos_shape + # case: nD and stacked + elif struct_size == stacked_shape_size: + dim = len(check_pos) + pos, pos_shape = format_struct_pos_dim(pos, dim) + cnt = shape[0] + shape = (cnt,) + pos_shape + wrong_shape = not check_stacked_shape + # if nothing works, we raise an error + else: + wrong_shape = True + + # if shape was wrong at one point we raise an error + if wrong_shape: + raise ValueError("Formatting: position tuple doesn't match 
dimension.") + + return pos, shape, dim + + +def format_unstruct_pos_shape(pos, shape, check_stacked_shape=False): + """ + Format an unstructured position tuple with given shape. + + Shape could be stacked, when multiple fields were given. + + Parameters + ---------- + pos : :any:`iterable` + Position tuple, containing point coordinates. + shape : :class:`tuple` + Shape of the input field. + check_stacked_shape : :class:`bool`, optional + Whether to check if given shape comes from stacked fields. + Default: False. + + Raises + ------ + ValueError + When position tuple doesn't match the given dimension. + + Returns + ------- + pos : :class:`tuple` of :class:`numpy.ndarray` + The formatted structured position tuple. + shape : :class:`tuple` + Shape of the resulting field. + dim : :class:`int` + Spatial dimension. + """ + # some help from the given shape + shape_size = np.prod(shape) + stacked_shape_size = np.prod(shape[1:]) + wrong_shape = False + # now we try to be smart + pre_len = len(np.atleast_1d(pos)) + # care about 1D: pos can be given as 1D array here -> convert to 2D array + pos = np.array(pos, dtype=np.double, ndmin=2) + post_len = len(pos) + # first array dimension should be spatial dimension (1D is special case) + dim = post_len if pre_len == post_len else 1 + pnt_cnt = pos[0].size + # case: 1D unstacked + if dim == 1 and pos.size == shape_size: + shape = (1, pos.size) if check_stacked_shape else (pos.size,) + # case: 1D and stacked + elif dim == 1 and pos.size == stacked_shape_size: + shape = (shape[0], pos.size) + wrong_shape = not check_stacked_shape + # case: nD unstacked + elif pnt_cnt == shape_size: + shape = (1, pnt_cnt) if check_stacked_shape else pnt_cnt + # case: nD and stacked + elif pnt_cnt == stacked_shape_size: + shape = (shape[0], pnt_cnt) + wrong_shape = not check_stacked_shape + # if nothing works, we raise an error + else: + wrong_shape = True + + # if shape was wrong at one point we raise an error + if wrong_shape: + raise 
ValueError("Formatting: position tuple doesn't match dimension.") + + pos = pos.reshape((dim, -1)) + + return pos, shape, dim + + +def ang2dir(angles, dtype=np.double, dim=None): + """Convert n-D spherical coordinates to Euclidean direction vectors. + + Parameters + ---------- + angles : :class:`list` of :class:`numpy.ndarray` + spherical coordinates given as angles. dtype : data-type, optional The desired data-type for the array. If not given, then the type will be determined as the minimum type required to hold the objects in the sequence. Default: None - calc_dim : :class:`bool`, optional - State if the dimension should be returned. Default: False - max_dim : :class:`int`, optional - Cut of information above the given dimension. Default: 3 + dim : :class:`int`, optional + Cut of information above the given dimension. + Otherwise, dimension is determined by number of angles + Default: None Returns ------- - x : :class:`numpy.ndarray` - first components of position vectors - y : :class:`numpy.ndarray` or None - analog to x - z : :class:`numpy.ndarray` or None - analog to x - dim : :class:`int`, optional - dimension (only if calc_dim is True) + :class:`numpy.ndarray` + the array of direction vectors + """ + pre_dim = np.asanyarray(angles).ndim + angles = np.array(angles, ndmin=2, dtype=dtype) + if len(angles.shape) > 2: + raise ValueError(f"Can't interpret angles array {angles}") + dim = angles.shape[1] + 1 if dim is None else dim + if dim == 2 and angles.shape[0] == 1 and pre_dim < 2: + # fix for 2D where only one angle per direction is given + angles = angles.T # can't be interpreted if dim=None is given + if dim != angles.shape[1] + 1 or dim == 1: + raise ValueError(f"Wrong dim. 
({dim}) for angles {angles}") + vec = np.empty((angles.shape[0], dim), dtype=dtype) + vec[:, 0] = np.prod(np.sin(angles), axis=1) + for i in range(1, dim): + vec[:, i] = np.prod(np.sin(angles[:, i:]), axis=1) # empty prod = 1 + vec[:, i] *= np.cos(angles[:, (i - 1)]) + if dim in [2, 3]: + vec[:, [0, 1]] = vec[:, [1, 0]] # to match convention in 2D and 3D + return vec - Notes - ----- - If len(pos) > 3, everything after pos[2] will be ignored. - """ - if max_dim == 1: # sanity check - pos = np.array(pos, ndmin=2) - x = np.array(pos[0], dtype=dtype).reshape(-1) - dim = 1 - y = z = None - if len(pos) > 1 and max_dim > 1: - dim = 2 - y = np.array(pos[1], dtype=dtype).reshape(-1) - if len(pos) > 2 and max_dim > 2: - dim = 3 - z = np.array(pos[2], dtype=dtype).reshape(-1) - if calc_dim: - return x, y, z, dim - return x, y, z - - -def xyz2pos(x, y=None, z=None, dtype=None, max_dim=3): - """Convert x, y, z to postional arguments. - - Parameters - ---------- - x : :class:`numpy.ndarray` - grid axis in x-direction if structured, or first components of - position vectors if unstructured - y : :class:`numpy.ndarray`, optional - analog to x - z : :class:`numpy.ndarray`, optional - analog to x + +def latlon2pos(latlon, radius=1.0, dtype=np.double): + """Convert lat-lon geo coordinates to 3D position tuple. + + Parameters + ---------- + latlon : :class:`list` of :class:`numpy.ndarray` + latitude and longitude given in degrees. + radius : :class:`float`, optional + Earth radius. Default: `1.0` dtype : data-type, optional The desired data-type for the array. If not given, then the type will be determined as the minimum type required to hold the objects in the sequence. Default: None - max_dim : :class:`int`, optional - Cut of information above the given dimension. 
Default: 3 Returns ------- - pos : :class:`tuple` of :class:`numpy.ndarray` - the position tuple - """ - if y is None and z is not None: - raise ValueError("gstools.tools.xyz2pos: if z is given, y is needed!") - pos = [] - pos.append(np.array(x, dtype=dtype).reshape(-1)) - if y is not None and max_dim > 1: - pos.append(np.array(y, dtype=dtype).reshape(-1)) - if z is not None and max_dim > 2: - pos.append(np.array(z, dtype=dtype).reshape(-1)) - return tuple(pos) + :class:`numpy.ndarray` + the 3D position array + """ + latlon = np.array(latlon, dtype=dtype).reshape((2, -1)) + lat, lon = np.deg2rad(latlon) + return np.array( + ( + radius * np.cos(lat) * np.cos(lon), + radius * np.cos(lat) * np.sin(lon), + radius * np.sin(lat) * np.ones_like(lon), + ), + dtype=dtype, + ) + + +def pos2latlon(pos, radius=1.0, dtype=np.double): + """Convert 3D position tuple from sphere to lat-lon geo coordinates. + + Parameters + ---------- + pos : :class:`list` of :class:`numpy.ndarray` + The position tuple containing points on a unit-sphere. + radius : :class:`float`, optional + Earth radius. Default: `1.0` + dtype : data-type, optional + The desired data-type for the array. + If not given, then the type will be determined as the minimum type + required to hold the objects in the sequence. Default: None + + Returns + ------- + :class:`numpy.ndarray` + the 3D position array + """ + pos = np.array(pos, dtype=dtype).reshape((3, -1)) + # prevent numerical errors in arcsin + lat = np.arcsin(np.maximum(np.minimum(pos[2] / radius, 1.0), -1.0)) + lon = np.arctan2(pos[1], pos[0]) + return np.rad2deg((lat, lon), dtype=dtype) + + +def chordal_to_great_circle(dist): + """ + Calculate great circle distance corresponding to given chordal distance. + + Parameters + ---------- + dist : array_like + Chordal distance of two points on the unit-sphere. + + Returns + ------- + :class:`numpy.ndarray` + Great circle distance corresponding to given chordal distance. 
+ + Notes + ----- + If given values are not in [0, 1], they will be truncated. + """ + return 2 * np.arcsin(np.maximum(np.minimum(np.divide(dist, 2), 1), 0)) diff --git a/gstools/tools/misc.py b/gstools/tools/misc.py new file mode 100755 index 000000000..53e3c9cd6 --- /dev/null +++ b/gstools/tools/misc.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- +""" +GStools subpackage providing miscellaneous tools. + +.. currentmodule:: gstools.tools.misc + +The following functions are provided + +.. autosummary:: + get_fig_ax + list_format + eval_func +""" +# pylint: disable=C0103, C0415 +import numpy as np +from gstools.tools.geometric import format_struct_pos_dim, generate_grid + + +__all__ = ["get_fig_ax", "list_format", "eval_func"] + + +def get_fig_ax(fig=None, ax=None, ax_name="rectilinear"): # pragma: no cover + """ + Get correct matplotlib figure and axes. + + Parameters + ---------- + fig : figure or :any:`None` + desired figure. + ax : axis or :any:`None` + desired axis. + ax_name : :class:`str`, optional + Axis type name. The default is "rectilinear". + + Returns + ------- + fig : figure + desired figure. + ax : axis + desired axis. + """ + try: + from matplotlib import pyplot as plt + except ImportError as exc: + raise ImportError("Plotting: Matplotlib not installed.") from exc + + if fig is None and ax is None: + fig = plt.figure() + ax = fig.add_subplot(111, projection=ax_name) + elif ax is None: + ax = fig.add_subplot(111, projection=ax_name) + elif fig is None: + fig = ax.get_figure() + assert ax.name == ax_name + else: + assert ax.name == ax_name + assert ax.get_figure() == fig + return fig, ax + + +def list_format(lst, prec): # pragma: no cover + """Format a list of floats.""" + return f"[{', '.join(f'{float(x):.{prec}}' for x in lst)}]" + + +def eval_func( + func_val, + pos, + dim, + mesh_type="unstructured", + value_type="scalar", + broadcast=False, +): + """ + Evaluate a function on a mesh. 
+ + Parameters + ---------- + func_val : :any:`callable` or :class:`float` or :any:`None` + Function to be called or single value to be filled. + Should have the signiture f(x, [y, z, ...]) in case of callable. + In case of a float, the field will be filled with a single value and + in case of None, this value will be set to 0. + pos : :class:`list` + The position tuple, containing main direction and transversal + directions (x, [y, z, ...]). + dim : :class:`int` + The spatial dimension. + mesh_type : :class:`str`, optional + 'structured' / 'unstructured' + Default: 'unstructured' + value_type : :class:`str`, optional + Value type of the field. Either "scalar" or "vector". + The default is "scalar". + broadcast : :class:`bool`, optional + Whether to return a single value, if a single value was given. + Default: False + + Returns + ------- + :class:`numpy.ndarray` + Function values at the given points. + """ + # care about scalar inputs + func_val = 0 if func_val is None else func_val + if broadcast and not callable(func_val) and np.size(func_val) == 1: + return np.array(func_val, dtype=np.double).item() + if not callable(func_val): + func_val = _func_from_single_val(func_val, dim, value_type=value_type) + # care about mesh and function call + if mesh_type != "unstructured": + pos, shape = format_struct_pos_dim(pos, dim) + pos = generate_grid(pos) + else: + pos = np.array(pos, dtype=np.double).reshape(dim, -1) + shape = np.shape(pos[0]) + # prepend dimension if we have a vector field + if value_type == "vector": + shape = (dim,) + shape + return np.reshape(func_val(*pos), shape) + + +def _func_from_single_val(value, dim=None, value_type="scalar"): + # care about broadcasting vector values for each dim + v_d = dim if value_type == "vector" else 1 # value dim + if v_d is None: # pragma: no cover + raise ValueError("_func_from_single_val: dim needed for vector value.") + value = np.array(value, dtype=np.double).ravel()[:v_d] + # fill up vector valued output to 
dimension with last value + value = np.pad( + value, (0, v_d - len(value)), "constant", constant_values=value[-1] + ) + + def _f(*pos): + # zip uses shortest len of iterables given (correct for scalar value) + return np.concatenate( + [ + np.full_like(p, val, dtype=np.double) + for p, val in zip(pos, value) + ] + ) + + return _f diff --git a/gstools/tools/special.py b/gstools/tools/special.py index 272417177..63940c08f 100644 --- a/gstools/tools/special.py +++ b/gstools/tools/special.py @@ -15,11 +15,11 @@ tpl_gau_spec_dens """ # pylint: disable=C0103, E1101 - import numpy as np from scipy import special as sps __all__ = [ + "confidence_scaling", "inc_gamma", "exp_int", "inc_beta", @@ -32,8 +32,25 @@ # special functions ########################################################### +def confidence_scaling(per=0.95): + """ + Scaling of standard deviation to get the desired confidence interval. + + Parameters + ---------- + per : :class:`float`, optional + Confidence level. The default is 0.95. + + Returns + ------- + :class:`float` + Scale to multiply the standard deviation with. + """ + return np.sqrt(2) * sps.erfinv(per) + + def inc_gamma(s, x): - r"""The (upper) incomplete gamma function. + r"""Calculate the (upper) incomplete gamma function. Given by: :math:`\Gamma(s,x) = \int_x^{\infty} t^{s-1}\,e^{-t}\,{\rm d}t` @@ -54,7 +71,7 @@ def inc_gamma(s, x): def exp_int(s, x): - r"""The exponential integral :math:`E_s(x)`. + r"""Calculate the exponential integral :math:`E_s(x)`. Given by: :math:`E_s(x) = \int_1^\infty \frac{e^{-xt}}{t^s}\,\mathrm dt` @@ -90,7 +107,7 @@ def exp_int(s, x): def inc_beta(a, b, x): - r"""The incomplete Beta function. + r"""Calculate the incomplete Beta function. Given by: :math:`B(a,b;\,x) = \int_0^x t^{a-1}\,(1-t)^{b-1}\,dt` @@ -107,7 +124,7 @@ def inc_beta(a, b, x): def tplstable_cor(r, len_scale, hurst, alpha): - r"""The correlation function of the TPLStable model. + r"""Calculate the correlation function of the TPLStable model. 
Given by the following correlation function: diff --git a/gstools/transform/__init__.py b/gstools/transform/__init__.py index 1937e6a73..0c52fd6b2 100644 --- a/gstools/transform/__init__.py +++ b/gstools/transform/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """ -GStools subpackage providing transformations. +GStools subpackage providing transformations to post-process normal fields. .. currentmodule:: gstools.transform diff --git a/gstools/transform/field.py b/gstools/transform/field.py index 89e88704c..bc0e201ee 100644 --- a/gstools/transform/field.py +++ b/gstools/transform/field.py @@ -17,10 +17,8 @@ normal_to_arcsin normal_to_uquad """ -# pylint: disable=C0103, E1101 - +# pylint: disable=C0103 from warnings import warn - import numpy as np from scipy.special import erf, erfinv @@ -108,15 +106,14 @@ def discrete(fld, values, thresholds="arithmetic"): if len(values) != len(thresholds) + 1: raise ValueError( "discrete transformation: " - + "len(values) != len(thresholds) + 1" + "len(values) != len(thresholds) + 1" ) values = np.array(values) thresholds = np.array(thresholds) # check thresholds if not np.all(thresholds[:-1] < thresholds[1:]): raise ValueError( - "discrete transformation: " - + "thresholds need to be ascending." + "discrete transformation: thresholds need to be ascending." ) # use a separate result so the intermediate results are not affected result = np.empty_like(fld.field) @@ -135,7 +132,7 @@ def discrete(fld, values, thresholds="arithmetic"): def boxcox(fld, lmbda=1, shift=0): """ - Box-Cox transformation. + (Inverse) Box-Cox transformation to denormalize data. After this transformation, the again Box-Cox transformed field is normal distributed. diff --git a/gstools/variogram/__init__.py b/gstools/variogram/__init__.py index c5735c52d..fe2c148fc 100644 --- a/gstools/variogram/__init__.py +++ b/gstools/variogram/__init__.py @@ -8,15 +8,34 @@ ^^^^^^^^^^^^^^^^^^^^ .. 
autosummary:: - vario_estimate_unstructured - vario_estimate_structured + :toctree: generated + + vario_estimate + vario_estimate_axis + +Binning +^^^^^^^ + +.. autosummary:: + :toctree: generated + + standard_bins ---- """ from gstools.variogram.variogram import ( + vario_estimate, + vario_estimate_axis, vario_estimate_structured, vario_estimate_unstructured, ) +from gstools.variogram.binning import standard_bins -__all__ = ["vario_estimate_unstructured", "vario_estimate_structured"] +__all__ = [ + "vario_estimate", + "vario_estimate_axis", + "vario_estimate_unstructured", + "vario_estimate_structured", + "standard_bins", +] diff --git a/gstools/variogram/binning.py b/gstools/variogram/binning.py new file mode 100644 index 000000000..c7be0eca0 --- /dev/null +++ b/gstools/variogram/binning.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +""" +GStools subpackage providing binning routines. + +.. currentmodule:: gstools.variogram.binning + +The following functions are provided + +.. autosummary:: + standard_bins +""" +import numpy as np + +from gstools.tools.geometric import ( + generate_grid, + format_struct_pos_dim, + latlon2pos, + chordal_to_great_circle, +) + +__all__ = ["standard_bins"] + + +def _sturges(pnt_cnt): + return int(np.ceil(2 * np.log2(pnt_cnt) + 1)) + + +def standard_bins( + pos=None, + dim=2, + latlon=False, + mesh_type="unstructured", + bin_no=None, + max_dist=None, +): + r""" + Get standard binning. + + Parameters + ---------- + pos : :class:`list`, optional + the position tuple, containing either the point coordinates (x, y, ...) + or the axes descriptions (for mesh_type='structured') + dim : :class:`int`, optional + Field dimension. + latlon : :class:`bool`, optional + Whether the data is representing 2D fields on earths surface described + by latitude and longitude. When using this, the estimator will + use great-circle distance for variogram estimation. 
+ Note, that only an isotropic variogram can be estimated and a + ValueError will be raised, if a direction was specified. + Bin edges need to be given in radians in this case. + Default: False + mesh_type : :class:`str`, optional + 'structured' / 'unstructured', indicates whether the pos tuple + describes the axis or the point coordinates. + Default: `'unstructured'` + bin_no: :class:`int`, optional + number of bins to create. If None is given, will be determined by + Sturges' rule from the number of points. + Default: None + max_dist: :class:`float`, optional + Cut of length for the bins. If None is given, it will be set to one + third of the box-diameter from the given points. + Default: None + + Returns + ------- + :class:`numpy.ndarray` + The generated bin edges. + + Notes + ----- + Internally uses double precision and also returns doubles. + """ + dim = 2 if latlon else int(dim) + if bin_no is None or max_dist is None: + if pos is None: + raise ValueError("standard_bins: no pos tuple given.") + if mesh_type != "unstructured": + pos = generate_grid(format_struct_pos_dim(pos, dim)[0]) + else: + pos = np.array(pos, dtype=np.double).reshape(dim, -1) + pos = latlon2pos(pos) if latlon else pos + pnt_cnt = len(pos[0]) + box = [] + for axis in pos: + box.append([np.min(axis), np.max(axis)]) + box = np.array(box) + diam = np.linalg.norm(box[:, 0] - box[:, 1]) + # convert diameter to great-circle distance if using latlon + diam = chordal_to_great_circle(diam) if latlon else diam + bin_no = _sturges(pnt_cnt) if bin_no is None else int(bin_no) + max_dist = diam / 3 if max_dist is None else float(max_dist) + return np.linspace(0, max_dist, num=bin_no + 1, dtype=np.double) diff --git a/gstools/variogram/estimator.pyx b/gstools/variogram/estimator.pyx index 41414f6c4..bd814cd7c 100644 --- a/gstools/variogram/estimator.pyx +++ b/gstools/variogram/estimator.pyx @@ -10,7 +10,7 @@ import numpy as np cimport cython from cython.parallel import prange, parallel from libcpp.vector 
cimport vector -from libc.math cimport fabs, sqrt +from libc.math cimport fabs, sqrt, isnan, acos, pow, sin, cos, atan2, M_PI cimport numpy as np @@ -18,34 +18,83 @@ DTYPE = np.double ctypedef np.double_t DTYPE_t -cdef inline double _distance_1d( - const double[:] x, - const double[:] y, - const double[:] z, +cdef inline double dist_euclid( + const int dim, + const double[:,:] pos, const int i, const int j ) nogil: - return sqrt((x[i] - x[j]) * (x[i] - x[j])) + cdef int d + cdef double dist_squared = 0.0 + for d in range(dim): + dist_squared += ((pos[d,i] - pos[d,j]) * (pos[d,i] - pos[d,j])) + return sqrt(dist_squared) -cdef inline double _distance_2d( - const double[:] x, - const double[:] y, - const double[:] z, + +cdef inline double dist_haversine( + const int dim, + const double[:,:] pos, const int i, const int j ) nogil: - return sqrt((x[i] - x[j]) * (x[i] - x[j]) + (y[i] - y[j]) * (y[i] - y[j])) + # pos holds lat-lon in deg + cdef double deg_2_rad = M_PI / 180.0 + cdef double diff_lat = (pos[0, j] - pos[0, i]) * deg_2_rad + cdef double diff_lon = (pos[1, j] - pos[1, i]) * deg_2_rad + cdef double arg = ( + pow(sin(diff_lat/2.0), 2) + + cos(pos[0, i]*deg_2_rad) * + cos(pos[0, j]*deg_2_rad) * + pow(sin(diff_lon/2.0), 2) + ) + return 2.0 * atan2(sqrt(arg), sqrt(1.0-arg)) + + +ctypedef double (*_dist_func)( + const int, + const double[:,:], + const int, + const int +) nogil -cdef inline double _distance_3d( - const double[:] x, - const double[:] y, - const double[:] z, + +cdef inline bint dir_test( + const int dim, + const double[:,:] pos, + const double dist, + const double[:,:] direction, + const double angles_tol, + const double bandwidth, const int i, - const int j + const int j, + const int d ) nogil: - return sqrt((x[i] - x[j]) * (x[i] - x[j]) + - (y[i] - y[j]) * (y[i] - y[j]) + - (z[i] - z[j]) * (z[i] - z[j])) + cdef double s_prod = 0.0 # scalar product + cdef double b_dist = 0.0 # band-distance + cdef double tmp # temporary variable + cdef int k + cdef 
bint in_band = True + cdef bint in_angle = True + + # scalar-product calculation for bandwidth projection and angle calculation + for k in range(dim): + s_prod += (pos[k,i] - pos[k,j]) * direction[d,k] + + # calculate band-distance by projection of point-pair-vec to direction line + if bandwidth > 0.0: + for k in range(dim): + tmp = (pos[k,i] - pos[k,j]) - s_prod * direction[d,k] + b_dist += tmp * tmp + in_band = sqrt(b_dist) < bandwidth + + # allow repeating points (dist = 0) + if dist > 0.0: + # use smallest angle by taking absolute value for arccos angle formula + tmp = fabs(s_prod) / dist + if tmp < 1.0: # else same direction (prevent numerical errors) + in_angle = acos(tmp) < angles_tol + + return in_band and in_angle cdef inline double estimator_matheron(const double f_diff) nogil: @@ -63,22 +112,20 @@ cdef inline void normalization_matheron( cdef int i for i in range(variogram.size()): # avoid division by zero - if counts[i] == 0: - counts[i] = 1 - variogram[i] /= (2. * counts[i]) + variogram[i] /= (2. * max(counts[i], 1)) cdef inline void normalization_cressie( vector[double]& variogram, vector[long]& counts ): cdef int i + cdef long cnt for i in range(variogram.size()): # avoid division by zero - if counts[i] == 0: - counts[i] = 1 + cnt = max(counts[i], 1) variogram[i] = ( - 0.5 * (1./counts[i] * variogram[i])**4 / - (0.457 + 0.494 / counts[i] + 0.045 / counts[i]**2) + 0.5 * (1./cnt * variogram[i])**4 / + (0.457 + 0.494 / cnt + 0.045 / cnt**2) ) ctypedef void (*_normalization_func)( @@ -86,6 +133,36 @@ ctypedef void (*_normalization_func)( vector[long]& ) +cdef inline void normalization_matheron_vec( + double[:,:]& variogram, + long[:,:]& counts +): + cdef int d, i + for d in range(variogram.shape[0]): + for i in range(variogram.shape[1]): + # avoid division by zero + variogram[d, i] /= (2. 
* max(counts[d, i], 1)) + +cdef inline void normalization_cressie_vec( + double[:,:]& variogram, + long[:,:]& counts +): + cdef int d, i + cdef long cnt + for d in range(variogram.shape[0]): + for i in range(variogram.shape[1]): + # avoid division by zero + cnt = max(counts[d, i], 1) + variogram[d, i] = ( + 0.5 * (1./cnt * variogram[d, i])**4 / + (0.457 + 0.494 / cnt + 0.045 / cnt**2) + ) + +ctypedef void (*_normalization_func_vec)( + double[:,:]&, + long[:,:]& +) + cdef _estimator_func choose_estimator_func(str estimator_type): cdef _estimator_func estimator_func if estimator_type == 'm': @@ -102,45 +179,99 @@ cdef _normalization_func choose_estimator_normalization(str estimator_type): normalization_func = normalization_cressie return normalization_func -ctypedef double (*_dist_func)( - const double[:], - const double[:], - const double[:], - const int, - const int -) nogil +cdef _normalization_func_vec choose_estimator_normalization_vec(str estimator_type): + cdef _normalization_func_vec normalization_func_vec + if estimator_type == 'm': + normalization_func_vec = normalization_matheron_vec + elif estimator_type == 'c': + normalization_func_vec = normalization_cressie_vec + return normalization_func_vec -def unstructured( - const double[:] f, +def directional( + const int dim, + const double[:,:] f, const double[:] bin_edges, - const double[:] x, - const double[:] y=None, - const double[:] z=None, + const double[:,:] pos, + const double[:,:] direction, # should be normed + const double angles_tol=M_PI/8.0, + const double bandwidth=-1.0, # negative values to turn of bandwidth search + const bint separate_dirs=False, # whether the direction bands don't overlap str estimator_type='m' ): - if x.shape[0] != f.shape[0]: - raise ValueError('len(x) = {0} != len(f) = {1} '. - format(x.shape[0], f.shape[0])) + if pos.shape[1] != f.shape[1]: + raise ValueError('len(pos) = {0} != len(f) = {1} '. 
+ format(pos.shape[1], f.shape[1])) + if bin_edges.shape[0] < 2: raise ValueError('len(bin_edges) too small') + if angles_tol <= 0: + raise ValueError('tolerance for angle search masks must be > 0') + + cdef _estimator_func estimator_func = choose_estimator_func(estimator_type) + cdef _normalization_func_vec normalization_func_vec = ( + choose_estimator_normalization_vec(estimator_type) + ) + + cdef int d_max = direction.shape[0] + cdef int i_max = bin_edges.shape[0] - 1 + cdef int j_max = pos.shape[1] - 1 + cdef int k_max = pos.shape[1] + cdef int f_max = f.shape[0] + + cdef double[:,:] variogram = np.zeros((d_max, len(bin_edges)-1)) + cdef long[:,:] counts = np.zeros((d_max, len(bin_edges)-1), dtype=long) + cdef vector[double] pos1 = vector[double](dim, 0.0) + cdef vector[double] pos2 = vector[double](dim, 0.0) + cdef int i, j, k, m, d + cdef DTYPE_t dist + + for i in prange(i_max, nogil=True): + for j in range(j_max): + for k in range(j+1, k_max): + dist = dist_euclid(dim, pos, j, k) + if dist < bin_edges[i] or dist >= bin_edges[i+1]: + continue # skip if not in current bin + for d in range(d_max): + if not dir_test(dim, pos, dist, direction, angles_tol, bandwidth, k, j, d): + continue # skip if not in current direction + for m in range(f_max): + # skip no data values + if not (isnan(f[m,k]) or isnan(f[m,j])): + counts[d, i] += 1 + variogram[d, i] += estimator_func(f[m,k] - f[m,j]) + # once we found a fitting direction + # break the search if directions are separated + if separate_dirs: + break + + normalization_func_vec(variogram, counts) + return np.asarray(variogram), np.asarray(counts) + +def unstructured( + const int dim, + const double[:,:] f, + const double[:] bin_edges, + const double[:,:] pos, + str estimator_type='m', + str distance_type='e' +): cdef _dist_func distance - # 3d - if z is not None: - if z.shape[0] != f.shape[0]: - raise ValueError('len(z) = {0} != len(f) = {1} '. 
- format(z.shape[0], f.shape[0])) - distance = _distance_3d - # 2d - elif y is not None: - if y.shape[0] != f.shape[0]: - raise ValueError('len(y) = {0} != len(f) = {1} '. - format(y.shape[0], f.shape[0])) - distance = _distance_2d - # 1d + + if distance_type == 'e': + distance = dist_euclid else: - distance = _distance_1d + distance = dist_haversine + if dim != 2: + raise ValueError('Haversine: dim = {0} != 2'.format(dim)) + + if pos.shape[1] != f.shape[1]: + raise ValueError('len(pos) = {0} != len(f) = {1} '. + format(pos.shape[1], f.shape[1])) + + if bin_edges.shape[0] < 2: + raise ValueError('len(bin_edges) too small') cdef _estimator_func estimator_func = choose_estimator_func(estimator_type) cdef _normalization_func normalization_func = ( @@ -148,26 +279,34 @@ def unstructured( ) cdef int i_max = bin_edges.shape[0] - 1 - cdef int j_max = x.shape[0] - 1 - cdef int k_max = x.shape[0] + cdef int j_max = pos.shape[1] - 1 + cdef int k_max = pos.shape[1] + cdef int f_max = f.shape[0] cdef vector[double] variogram = vector[double](len(bin_edges)-1, 0.0) cdef vector[long] counts = vector[long](len(bin_edges)-1, 0) - cdef int i, j, k + cdef vector[double] pos1 = vector[double](dim, 0.0) + cdef vector[double] pos2 = vector[double](dim, 0.0) + cdef int i, j, k, m cdef DTYPE_t dist + for i in prange(i_max, nogil=True): for j in range(j_max): for k in range(j+1, k_max): - dist = distance(x, y, z, k, j) - if dist >= bin_edges[i] and dist < bin_edges[i+1]: - counts[i] += 1 - variogram[i] += estimator_func(f[k] - f[j]) + dist = distance(dim, pos, j, k) + if dist < bin_edges[i] or dist >= bin_edges[i+1]: + continue # skip if not in current bin + for m in range(f_max): + # skip no data values + if not (isnan(f[m,k]) or isnan(f[m,j])): + counts[i] += 1 + variogram[i] += estimator_func(f[m,k] - f[m,j]) normalization_func(variogram, counts) - return np.asarray(variogram) + return np.asarray(variogram), np.asarray(counts) -def structured(const double[:,:,:] f, str 
estimator_type='m'): +def structured(const double[:,:] f, str estimator_type='m'): cdef _estimator_func estimator_func = choose_estimator_func(estimator_type) cdef _normalization_func normalization_func = ( choose_estimator_normalization(estimator_type) @@ -175,27 +314,26 @@ def structured(const double[:,:,:] f, str estimator_type='m'): cdef int i_max = f.shape[0] - 1 cdef int j_max = f.shape[1] - cdef int k_max = f.shape[2] - cdef int l_max = i_max + 1 + cdef int k_max = i_max + 1 - cdef vector[double] variogram = vector[double](l_max, 0.0) - cdef vector[long] counts = vector[long](l_max, 0) - cdef int i, j, k, l + cdef vector[double] variogram = vector[double](k_max, 0.0) + cdef vector[long] counts = vector[long](k_max, 0) + cdef int i, j, k with nogil, parallel(): for i in range(i_max): for j in range(j_max): - for k in range(k_max): - for l in prange(1, l_max-i): - counts[l] += 1 - variogram[l] += estimator_func(f[i,j,k] - f[i+l,j,k]) + for k in prange(1, k_max-i): + counts[k] += 1 + variogram[k] += estimator_func(f[i,j] - f[i+k,j]) normalization_func(variogram, counts) return np.asarray(variogram) + def ma_structured( - const double[:,:,:] f, - const bint[:,:,:] mask, + const double[:,:] f, + const bint[:,:] mask, str estimator_type='m' ): cdef _estimator_func estimator_func = choose_estimator_func(estimator_type) @@ -205,21 +343,19 @@ def ma_structured( cdef int i_max = f.shape[0] - 1 cdef int j_max = f.shape[1] - cdef int k_max = f.shape[2] - cdef int l_max = i_max + 1 + cdef int k_max = i_max + 1 - cdef vector[double] variogram = vector[double](l_max, 0.0) - cdef vector[long] counts = vector[long](l_max, 0) - cdef int i, j, k, l + cdef vector[double] variogram = vector[double](k_max, 0.0) + cdef vector[long] counts = vector[long](k_max, 0) + cdef int i, j, k with nogil, parallel(): for i in range(i_max): for j in range(j_max): - for k in range(k_max): - for l in prange(1, l_max-i): - if not mask[i,j,k] and not mask[i+l,j,k]: - counts[l] += 1 - variogram[l] 
+= estimator_func(f[i,j,k] - f[i+l,j,k]) + for k in prange(1, k_max-i): + if not mask[i,j] and not mask[i+k,j]: + counts[k] += 1 + variogram[k] += estimator_func(f[i,j] - f[i+k,j]) normalization_func(variogram, counts) return np.asarray(variogram) diff --git a/gstools/variogram/variogram.py b/gstools/variogram/variogram.py index ceb615bde..f979e61c5 100644 --- a/gstools/variogram/variogram.py +++ b/gstools/variogram/variogram.py @@ -7,17 +7,36 @@ The following functions are provided .. autosummary:: - vario_estimate_unstructured - vario_estimate_structured + vario_estimate + vario_estimate_axis """ -# pylint: disable=C0103 - import numpy as np -from gstools.tools.geometric import pos2xyz -from gstools.variogram.estimator import unstructured, structured, ma_structured - -__all__ = ["vario_estimate_unstructured", "vario_estimate_structured"] +from gstools.tools.geometric import ( + generate_grid, + format_struct_pos_shape, + format_unstruct_pos_shape, + ang2dir, +) +from gstools.variogram.estimator import ( + unstructured, + structured, + ma_structured, + directional, +) +from gstools.variogram.binning import standard_bins +from gstools.normalizer.tools import remove_trend_norm_mean + +__all__ = [ + "vario_estimate", + "vario_estimate_axis", + "vario_estimate_unstructured", + "vario_estimate_structured", +] + + +AXIS = ["x", "y", "z"] +AXIS_DIR = {"x": 0, "y": 1, "z": 2} def _set_estimator(estimator): @@ -27,22 +46,45 @@ def _set_estimator(estimator): elif estimator.lower() == "cressie": cython_estimator = "c" else: - raise ValueError( - "Unknown variogram estimator function " + str(estimator) - ) + raise ValueError(f"Unknown variogram estimator function: {estimator}") return cython_estimator -def vario_estimate_unstructured( +def _separate_dirs_test(direction, angles_tol): + """Check if given directions are separated.""" + if direction is None or direction.shape[0] < 2: + return True + separate_dirs = True + for i in range(direction.shape[0] - 1): + for j in range(i 
+ 1, direction.shape[0]): + s_prod = np.minimum(np.abs(np.dot(direction[i], direction[j])), 1) + separate_dirs &= np.arccos(s_prod) >= 2 * angles_tol + return separate_dirs + + +def vario_estimate( pos, field, - bin_edges, + bin_edges=None, sampling_size=None, sampling_seed=None, estimator="matheron", + latlon=False, + direction=None, + angles=None, + angles_tol=np.pi / 8, + bandwidth=None, + no_data=np.nan, + mask=np.ma.nomask, + mesh_type="unstructured", + return_counts=False, + mean=None, + normalizer=None, + trend=None, + fit_normalizer=False, ): r""" - Estimates the variogram on a unstructured grid. + Estimates the empirical variogram. The algorithm calculates following equation: @@ -62,21 +104,33 @@ def vario_estimate_unstructured( with :math:`r_k \leq \| \mathbf x_i - \mathbf x_i' \| < r_{k+1}` being the bins. - The Cressie estimator is more robust to outliers. + The Cressie estimator is more robust to outliers [Webster2007]_. - Notes - ----- - Internally uses double precision and also returns doubles. + By provding `direction` vector[s] or angles, a directional variogram + can be calculated. If multiple directions are given, a set of variograms + will be returned. + Directional bining is controled by a given angle tolerance (`angles_tol`) + and an optional `bandwidth`, that truncates the width of the search band + around the given direction[s]. + + To reduce the calcuation time, `sampling_size` could be passed to sample + down the number of field points. Parameters ---------- pos : :class:`list` - the position tuple, containing main direction and transversal - directions - field : :class:`numpy.ndarray` - the spatially distributed data - bin_edges : :class:`numpy.ndarray` - the bins on which the variogram will be calculated + the position tuple, containing either the point coordinates (x, y, ...) 
+ or the axes descriptions (for mesh_type='structured') + field : :class:`numpy.ndarray` or :class:`list` of :class:`numpy.ndarray` + The spatially distributed data. + Can also be of type :class:`numpy.ma.MaskedArray` to use masked values. + You can pass a list of fields, that will be used simultaneously. + This could be helpful, when there are multiple realizations at the + same points, with the same statistical properties. + bin_edges : :class:`numpy.ndarray`, optional + the bins on which the variogram will be calculated. + If :any:`None` are given, standard bins provided by the + :any:`standard_bins` routine will be used. Default: :any:`None` sampling_size : :class:`int` or :any:`None`, optional for large input data, this method can take a long time to compute the variogram, therefore this argument specifies @@ -92,43 +146,234 @@ * "cressie": an estimator more robust to outliers Default: "matheron" + latlon : :class:`bool`, optional + Whether the data is representing 2D fields on earth's surface described + by latitude and longitude. When using this, the estimator will + use great-circle distance for variogram estimation. + Note, that only an isotropic variogram can be estimated and a + ValueError will be raised, if a direction was specified. + Bin edges need to be given in radians in this case. + Default: False + direction : :class:`list` of :class:`numpy.ndarray`, optional + directions to evaluate a directional variogram. + Angular tolerance is given by `angles_tol`. + bandwidth to cut off how wide the search for point pairs should be + is given by `bandwidth`. + You can provide multiple directions at once to get one variogram + for each direction. + For a single direction you can also use the `angles` parameter, + to provide the direction by its spherical coordinates.
+ Default: :any:`None` + angles : :class:`numpy.ndarray`, optional + the angles of the main axis to calculate the variogram for in radians + angle definitions from ISO standard 80000-2:2009 + for 1d this parameter will have no effect at all + for 2d supply one angle which is + azimuth :math:`\varphi` (ccw from +x in xy plane) + for 3d supply two angles which are + azimuth :math:`\varphi` (ccw from +x in xy plane) + and inclination :math:`\theta` (cw from +z). + Can be used instead of direction. + Default: :any:`None` + angles_tol : :class:`float`, optional + the tolerance around the variogram angle to count a point as being + within this direction from another point (the angular tolerance around + the directional vector given by angles) + Default: `np.pi/8` = 22.5° + bandwidth : :class:`float`, optional + bandwidth to cut off the angular tolerance for directional variograms. + If None is given, only the `angles_tol` parameter will control the + point selection. + Default: :any:`None` + no_data : :class:`float`, optional + Value to identify missing data in the given field. + Default: `numpy.nan` + mask : :class:`numpy.ndarray` of :class:`bool`, optional + Mask to deselect data in the given field. + Default: :any:`numpy.ma.nomask` + mesh_type : :class:`str`, optional + 'structured' / 'unstructured', indicates whether the pos tuple + describes the axis or the point coordinates. + Default: `'unstructured'` + return_counts: :class:`bool`, optional + if set to true, this function will also return the number of data + points found at each lag distance as a third return value + Default: False + mean : :class:`float`, optional + mean value used to shift normalized input data. + Can also be a callable. The default is None. + normalizer : :any:`None` or :any:`Normalizer`, optional + Normalizer to be applied to the input data to gain normality. + The default is None. + trend : :any:`None` or :class:`float` or :any:`callable`, optional + A callable trend function.
Should have the signature: f(x, [y, z, ...]) + If no normalizer is applied, this behaves equal to 'mean'. + The default is None. + fit_normalizer : :class:`bool`, optional + Whether to fit the data-normalizer to the given (detrended) field. + Default: False Returns ------- - :class:`tuple` of :class:`numpy.ndarray` - the estimated variogram and the bin centers - """ - # TODO check_mesh - field = np.array(field, ndmin=1, dtype=np.double) - bin_edges = np.array(bin_edges, ndmin=1, dtype=np.double) - x, y, z, dim = pos2xyz(pos, calc_dim=True, dtype=np.double) - bin_centres = (bin_edges[:-1] + bin_edges[1:]) / 2.0 + bin_center : (n), :class:`numpy.ndarray` + The bin centers. + gamma : (n) or (d, n), :class:`numpy.ndarray` + The estimated variogram values at bin centers. + Is stacked if multiple `directions` (d>1) are given. + counts : (n) or (d, n), :class:`numpy.ndarray`, optional + The number of point pairs found for each bin. + Is stacked if multiple `directions` (d>1) are given. + Only provided if `return_counts` is True. + normalizer : :any:`Normalizer`, optional + The fitted normalizer for the given data. + Only provided if `fit_normalizer` is True. - if sampling_size is not None and sampling_size < len(field): + Notes + ----- + Internally uses double precision and also returns doubles. + + References + ---------- + .. [Webster2007] Webster, R. and Oliver, M. A. + "Geostatistics for environmental scientists.", + John Wiley & Sons. 
(2007) + """ + if bin_edges is not None: + bin_edges = np.array(bin_edges, ndmin=1, dtype=np.double) + bin_centres = (bin_edges[:-1] + bin_edges[1:]) / 2.0 + # allow multiple fields at same positions (ndmin=2: first axis -> field ID) + # need to convert to ma.array, since list of ma.array is not recognised + field = np.ma.array(field, ndmin=2, dtype=np.double) + masked = np.ma.is_masked(field) or np.any(mask) + # catch special case if everything is masked + if masked and np.all(mask): + bin_centres = np.empty(0) if bin_edges is None else bin_centres + estimates = np.zeros_like(bin_centres) + if return_counts: + return bin_centres, estimates, np.zeros_like(estimates, dtype=int) + return bin_centres, estimates + if not masked: + field = field.filled() + # check mesh shape + if mesh_type != "unstructured": + pos, __, dim = format_struct_pos_shape( + pos, field.shape, check_stacked_shape=True + ) + pos = generate_grid(pos) + else: + pos, __, dim = format_unstruct_pos_shape( + pos, field.shape, check_stacked_shape=True + ) + if latlon and dim != 2: + raise ValueError("Variogram: given field needs to be 2D for lat-lon.") + # prepare the field + pnt_cnt = len(pos[0]) + field = field.reshape((-1, pnt_cnt)) + # apply mask if wanted + if masked: + # if fields have different masks, take the minimal common mask + # given mask will be applied in addition + # selected region is the inverted masked (unmasked values) + if np.size(mask) > 1: # not only np.ma.nomask + select = np.invert( + np.logical_or( + np.reshape(mask, pnt_cnt), np.all(field.mask, axis=0) + ) + ) + else: + select = np.invert(np.all(field.mask, axis=0)) + pos = pos[:, select] + field.fill_value = np.nan # use no-data val. 
for remaining masked vals + field = field[:, select].filled() # convert to ndarray + select = mask = None # free space + # set no_data values + if not np.isnan(no_data): + field[np.isclose(field, float(no_data))] = np.nan + # set directions + dir_no = 0 + if direction is not None and dim > 1: + direction = np.array(direction, ndmin=2, dtype=np.double) + if len(direction.shape) > 2: + raise ValueError(f"Can't interpret directions: {direction}") + if direction.shape[1] != dim: + raise ValueError(f"Can't interpret directions: {direction}") + dir_no = direction.shape[0] + # convert given angles to direction vector + if angles is not None and direction is None and dim > 1: + direction = ang2dir(angles=angles, dtype=np.double, dim=dim) + dir_no = direction.shape[0] + # prepare directional variogram + if dir_no > 0: + if latlon: + raise ValueError("Directional variogram not allowed for lat-lon.") + norms = np.linalg.norm(direction, axis=1) + if np.any(np.isclose(norms, 0)): + raise ValueError(f"Zero length directions: {direction}") + # only unit-vectors for directions + direction = np.divide(direction, norms[:, np.newaxis]) + # negative bandwidth to turn it off + bandwidth = float(bandwidth) if bandwidth is not None else -1.0 + angles_tol = float(angles_tol) + # prepare sampled variogram + if sampling_size is not None and sampling_size < pnt_cnt: sampled_idx = np.random.RandomState(sampling_seed).choice( - np.arange(len(field)), sampling_size, replace=False + np.arange(pnt_cnt), sampling_size, replace=False ) - field = field[sampled_idx] - x = x[sampled_idx] - if dim > 1: - y = y[sampled_idx] - if dim > 2: - z = z[sampled_idx] - - cython_estimator = _set_estimator(estimator) - - return ( - bin_centres, - unstructured( - field, bin_edges, x, y, z, estimator_type=cython_estimator - ), + field = field[:, sampled_idx] + pos = pos[:, sampled_idx] + # create binning if not given + if bin_edges is None: + bin_edges = standard_bins(pos, dim, latlon) + bin_centres = (bin_edges[:-1] 
+ bin_edges[1:]) / 2.0 + # normalize field + norm_field_out = remove_trend_norm_mean( + *(pos, field, mean, normalizer, trend), + check_shape=False, + stacked=True, + fit_normalizer=fit_normalizer, ) + field = norm_field_out[0] if fit_normalizer else norm_field_out + norm_out = (norm_field_out[1],) if fit_normalizer else () + # select variogram estimator + cython_estimator = _set_estimator(estimator) + # run + if dir_no == 0: + # "h"aversine or "e"uclidean distance type + distance_type = "h" if latlon else "e" + estimates, counts = unstructured( + dim, + field, + bin_edges, + pos, + estimator_type=cython_estimator, + distance_type=distance_type, + ) + else: + estimates, counts = directional( + dim, + field, + bin_edges, + pos, + direction, + angles_tol, + bandwidth, + separate_dirs=_separate_dirs_test(direction, angles_tol), + estimator_type=cython_estimator, + ) + if dir_no == 1: + estimates, counts = estimates[0], counts[0] + est_out = (estimates, counts) + return (bin_centres,) + est_out[: 2 if return_counts else 1] + norm_out -def vario_estimate_structured(field, direction="x", estimator="matheron"): - r"""Estimates the variogram on a regular grid. +def vario_estimate_axis( + field, direction="x", estimator="matheron", no_data=np.nan +): + r"""Estimates the variogram along array axis. The indices of the given direction are used for the bins. + Uniform spacings along the given axis are assumed. + The algorithm calculates following equation: .. math:: @@ -147,68 +392,75 @@ def vario_estimate_structured(field, direction="x", estimator="matheron"): with :math:`r_k \leq \| \mathbf x_i - \mathbf x_i' \| < r_{k+1}` being the bins. - The Cressie estimator is more robust to outliers. - - Warnings - -------- - It is assumed that the field is defined on an equidistant Cartesian grid. - - Notes - ----- - Internally uses double precision and also returns doubles. + The Cressie estimator is more robust to outliers [Webster2007]_. 
Parameters ---------- - field : :class:`numpy.ndarray` - the spatially distributed data - direction : :class:`str` + field : :class:`numpy.ndarray` or :class:`numpy.ma.MaskedArray` + the spatially distributed data (can be masked) + direction : :class:`str` or :class:`int` the axis over which the variogram will be estimated (x, y, z) + or (0, 1, 2, ...) estimator : :class:`str`, optional the estimator function, possible choices: - * "mathoron": the standard method of moments of Matheron + * "matheron": the standard method of moments of Matheron * "cressie": an estimator more robust to outliers Default: "matheron" + no_data : :class:`float`, optional + Value to identify missing data in the given field. + Default: `numpy.nan` + Returns ------- :class:`numpy.ndarray` the estimated variogram along the given direction. + + Warnings + -------- + It is assumed that the field is defined on an equidistant Cartesian grid. + + Notes + ----- + Internally uses double precision and also returns doubles. + + References + ---------- + .. [Webster2007] Webster, R. and Oliver, M. A. + "Geostatistics for environmental scientists.", + John Wiley & Sons. 
(2007) """ - try: - mask = np.array(field.mask, dtype=np.int32) + missing_mask = ( + np.isnan(field) if np.isnan(no_data) else np.isclose(field, no_data) + ) + missing = np.any(missing_mask) + masked = np.ma.is_masked(field) or missing + if masked: field = np.ma.array(field, ndmin=1, dtype=np.double) - masked = True - except AttributeError: - mask = None - field = np.array(field, ndmin=1, dtype=np.double) - masked = False - - if direction == "x": - axis_to_swap = 0 - elif direction == "y": - axis_to_swap = 1 - elif direction == "z": - axis_to_swap = 2 + if missing: + field.mask = np.logical_or(field.mask, missing_mask) + mask = np.array(np.ma.getmaskarray(field), dtype=np.int32) else: - raise ValueError("Unknown direction {0}".format(direction)) + field = np.array(field, ndmin=1, dtype=np.double) + missing_mask = None # free space + axis_to_swap = AXIS_DIR[direction] if direction in AXIS else int(direction) + # desired axis first, convert to 2D array afterwards field = field.swapaxes(0, axis_to_swap) + field = field.reshape((field.shape[0], -1)) if masked: mask = mask.swapaxes(0, axis_to_swap) + mask = mask.reshape((mask.shape[0], -1)) cython_estimator = _set_estimator(estimator) - # fill up the field with empty dimensions up to a number of 3 - for i in range(3 - len(field.shape)): - field = field[..., np.newaxis] if masked: - for i in range(3 - len(mask.shape)): - mask = mask[..., np.newaxis] + return ma_structured(field, mask, cython_estimator) + return structured(field, cython_estimator) - if mask is None: - gamma = structured(field, cython_estimator) - else: - gamma = ma_structured(field, mask, cython_estimator) - return gamma + +# for backward compatibility +vario_estimate_unstructured = vario_estimate +vario_estimate_structured = vario_estimate_axis diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..fd18dc4ed --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,65 @@ +[build-system] +requires = [ + "setuptools>=42", + "wheel", + 
"setuptools_scm[toml]>=3.5", + "numpy>=1.14.5,<2.0", + "Cython>=0.28.3,<3.0", +] +build-backend = "setuptools.build_meta" + +[tool.setuptools_scm] +write_to = "gstools/_version.py" +write_to_template = "__version__ = '{version}'" +local_scheme = "no-local-version" +fallback_version = "0.0.0.dev0" + +[tool.black] +line-length = 79 +target-version = [ + "py36", + "py37", + "py38", +] + +[tool.coverage] + [tool.coverage.run] + source = ["gstools"] + omit = [ + "*docs*", + "*examples*", + "*tests*", + "*/gstools/covmodel/plot.py", + "*/gstools/field/plot.py", + ] + + [tool.coverage.report] + exclude_lines = [ + "pragma: no cover", + "def __repr__", + "def __str__", + ] + +[tool.pylint] + [tool.pylint.master] + extension-pkg-whitelist = [ + "numpy", + "scipy", + ] + ignore = "_version.py" + + [tool.pylint.message_control] + disable = [ + "R0801", + ] + + [tool.pylint.reports] + output-format = "colorized" + + [tool.pylint.design] + max-args = 20 + max-locals = 50 + max-branches = 30 + max-statements = 80 + max-attributes = 25 + max-public-methods = 75 diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 56c772167..000000000 --- a/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -numpy>=1.14.5 -scipy>=1.1.0 -hankel>=1.0.2 -emcee>=3.0.0 -pyevtk>=1.1.1 diff --git a/requirements_setup.txt b/requirements_setup.txt deleted file mode 100755 index 7329b9c1c..000000000 --- a/requirements_setup.txt +++ /dev/null @@ -1,4 +0,0 @@ -setuptools>=41.0.1 -setuptools_scm>=3.5.0 -cython>=0.28.3 -numpy>=1.14.5 \ No newline at end of file diff --git a/requirements_test.txt b/requirements_test.txt deleted file mode 100755 index be10813ec..000000000 --- a/requirements_test.txt +++ /dev/null @@ -1,2 +0,0 @@ -pytest-cov>=2.8.0 -pytest>=5.3.0 diff --git a/setup.cfg b/setup.cfg index f48fdadb8..53e4bbf0b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,3 +1,72 @@ [metadata] -description-file = README.md +name = gstools +description = GSTools: A geostatistical toolbox. 
+long_description = file: README.md +long_description_content_type = text/markdown +url = https://github.com/GeoStat-Framework/GSTools +author = Sebastian Müller, Lennart Schüler +author_email = info@geostat-framework.org +maintainer = Sebastian Müller, Lennart Schüler +maintainer_email = info@geostat-framework.org +license = LGPL-3.0 license_file = LICENSE +platforms = any +classifiers = + Development Status :: 5 - Production/Stable + Intended Audience :: Developers + Intended Audience :: End Users/Desktop + Intended Audience :: Science/Research + License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3) + Natural Language :: English + Operating System :: Unix + Programming Language :: Python + Programming Language :: Python :: 3 + Programming Language :: Python :: 3 :: Only + Programming Language :: Python :: 3.6 + Programming Language :: Python :: 3.7 + Programming Language :: Python :: 3.8 + Programming Language :: Python :: 3.9 + Topic :: Scientific/Engineering + Topic :: Utilities +project_urls = + Documentation = https://gstools.readthedocs.io + Source = https://github.com/GeoStat-Framework/GSTools + Tracker = https://github.com/GeoStat-Framework/GSTools/issues + Changelog = https://github.com/GeoStat-Framework/GSTools/blob/develop/CHANGELOG.md + Conda-Forge = https://anaconda.org/conda-forge/gstools + +[options] +packages = find: +install_requires = + emcee>=3.0.0,<4 + hankel>=1.0.2,<2 + meshio>=4.0.3,<5.0 + numpy>=1.14.5,<2 + pyevtk>=1.1.1,<2 + scipy>=1.1.0,<2 +python_requires = >=3.6 +zip_safe = False + +[options.packages.find] +exclude = + tests* + docs* + +[options.extras_require] +doc = + m2r2 + matplotlib>=3 + meshzoo + numpydoc>=1.1 + pykrige>=1.5 + pyvista + sphinx>=3 + sphinx-gallery>=0.8 + sphinx-rtd-theme>=0.5 +plotting = + matplotlib + pyvista +test = + coverage[toml]>=5.2.1 + pytest>=6.0 + pytest-cov>=2.11.0 diff --git a/setup.py b/setup.py index eadc302c2..06149506a 100644 --- a/setup.py +++ 
b/setup.py @@ -10,7 +10,7 @@ from distutils.ccompiler import new_compiler from distutils.sysconfig import customize_compiler -from setuptools import setup, find_packages, Distribution, Extension +from setuptools import setup, Extension from Cython.Build import cythonize import numpy as np @@ -19,9 +19,9 @@ # openmp finder ############################################################### -# This code is adapted for a large part from the scikit-learn openmp helpers, +# This code is adapted for a large part from the scikit-learn openmp_helpers.py # which can be found at: -# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/_build_utils/openmp_helpers.py +# https://github.com/scikit-learn/scikit-learn/blob/0.24.0/sklearn/_build_utils CCODE = """ @@ -49,17 +49,6 @@ def get_openmp_flag(compiler): if sys.platform == "darwin" and ("icc" in compiler or "icl" in compiler): return ["-openmp"] if sys.platform == "darwin" and "openmp" in os.getenv("CPPFLAGS", ""): - # -fopenmp can't be passed as compile flag when using Apple-clang. - # OpenMP support has to be enabled during preprocessing. 
- # - # For example, our macOS wheel build jobs use the following environment - # variables to build with Apple-clang and the brew installed "libomp": - # - # export CPPFLAGS="$CPPFLAGS -Xpreprocessor -fopenmp" - # export CFLAGS="$CFLAGS -I/usr/local/opt/libomp/include" - # export CXXFLAGS="$CXXFLAGS -I/usr/local/opt/libomp/include" - # export LDFLAGS="$LDFLAGS -L/usr/local/opt/libomp/lib -lomp" - # export DYLD_LIBRARY_PATH=/usr/local/opt/libomp/lib return [] # Default flag for GCC and clang: return ["-fopenmp"] @@ -120,7 +109,9 @@ def check_openmp_support(): # openmp ###################################################################### -USE_OPENMP = bool("--openmp" in sys.argv) +# you can set GSTOOLS_BUILD_PARALLEL=0 or GSTOOLS_BUILD_PARALLEL=1 +GS_PARALLEL = os.getenv("GSTOOLS_BUILD_PARALLEL") +USE_OPENMP = bool(int(GS_PARALLEL)) if GS_PARALLEL else False if USE_OPENMP: # just check if wanted @@ -135,18 +126,6 @@ def check_openmp_support(): FLAGS = [] -# add the "--openmp" to the global options -# enables calles like: -# python3 setup.py --openmp build_ext --inplace -# pip install --global-option="--openmp" gstools -class MPDistribution(Distribution): - """Distribution with --openmp as global option.""" - - global_options = Distribution.global_options + [ - ("openmp", None, "Flag to use openmp in the build") - ] - - # cython extensions ########################################################### @@ -181,82 +160,12 @@ class MPDistribution(Distribution): ) EXT_MODULES = cythonize(CY_MODULES) # annotate=True -# This is an important part. By setting this compiler directive, cython will -# embed signature information in docstrings. Sphinx then knows how to extract -# and use those signatures. 
-# python setup.py build_ext --inplace --> then sphinx build +# embed signatures for sphinx for ext_m in EXT_MODULES: ext_m.cython_directives = {"embedsignature": True} -# setup ####################################################################### -with open(os.path.join(HERE, "README.md"), encoding="utf-8") as f: - README = f.read() -with open(os.path.join(HERE, "requirements.txt"), encoding="utf-8") as f: - REQ = f.read().splitlines() -with open(os.path.join(HERE, "requirements_setup.txt"), encoding="utf-8") as f: - REQ_SETUP = f.read().splitlines() -with open(os.path.join(HERE, "requirements_test.txt"), encoding="utf-8") as f: - REQ_TEST = f.read().splitlines() -with open( - os.path.join(HERE, "docs", "requirements_doc.txt"), encoding="utf-8" -) as f: - REQ_DOC = f.read().splitlines() - -REQ_DEV = REQ_SETUP + REQ_TEST + REQ_DOC +# setup ####################################################################### -DOCLINE = __doc__.split("\n")[0] -CLASSIFIERS = [ - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Developers", - "Intended Audience :: End Users/Desktop", - "Intended Audience :: Science/Research", - "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)", - "Natural Language :: English", - "Operating System :: Unix", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3 :: Only", - "Topic :: Scientific/Engineering", - "Topic :: Utilities", -] -setup( - name="gstools", - description=DOCLINE, - long_description=README, - long_description_content_type="text/markdown", - maintainer="Lennart Schueler, Sebastian Mueller", - maintainer_email="info@geostat-framework.org", - author="Lennart Schueler, Sebastian Mueller", - author_email="info@geostat-framework.org", - 
url="https://github.com/GeoStat-Framework/GSTools", - license="LGPLv3", - classifiers=CLASSIFIERS, - platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], - include_package_data=True, - python_requires=">=3.5", - use_scm_version={ - "relative_to": __file__, - "write_to": "gstools/_version.py", - "write_to_template": "__version__ = '{version}'", - "local_scheme": "no-local-version", - "fallback_version": "0.0.0.dev0", - }, - setup_requires=REQ_SETUP, - install_requires=REQ, - extras_require={ - "plotting": ["pyvista", "matplotlib"], - "doc": REQ_DOC, - "test": REQ_TEST, - "dev": REQ_DEV, - }, - packages=find_packages(exclude=["tests*", "docs*"]), - ext_modules=EXT_MODULES, - include_dirs=[np.get_include()], - distclass=MPDistribution, -) +setup(ext_modules=EXT_MODULES, include_dirs=[np.get_include()]) diff --git a/tests/test_condition.py b/tests/test_condition.py index e7c588791..587469d13 100644 --- a/tests/test_condition.py +++ b/tests/test_condition.py @@ -5,20 +5,14 @@ import numpy as np import unittest -from gstools import ( - Gaussian, - Exponential, - # Spherical, - SRF, -) +import gstools as gs class TestCondition(unittest.TestCase): def setUp(self): self.cov_models = [ - Gaussian, - Exponential, - # Spherical, + gs.Gaussian, + gs.Exponential, ] self.dims = range(1, 4) self.data = np.array( @@ -44,10 +38,12 @@ def test_simple(self): model = Model( dim=1, var=0.5, len_scale=2, anis=[0.1, 1], angles=[0.5, 0, 0] ) - srf = SRF(model, self.mean, seed=19970221) - srf.set_condition(self.cond_pos[0], self.cond_val, "simple") - field_1 = srf.unstructured(self.pos[0]) - field_2 = srf.structured(self.pos[0]) + krige = gs.krige.Simple( + model, self.cond_pos[0], self.cond_val, self.mean + ) + crf = gs.CondSRF(krige, seed=19970221) + field_1 = crf.unstructured(self.pos[0]) + field_2 = crf.structured(self.pos[0]) for i, val in enumerate(self.cond_val): self.assertAlmostEqual(val, field_1[i], places=2) self.assertAlmostEqual(val, field_2[(i,)], places=2) 
@@ -60,10 +56,12 @@ def test_simple(self): anis=[0.1, 1], angles=[0.5, 0, 0], ) - srf = SRF(model, self.mean, seed=19970221) - srf.set_condition(self.cond_pos[:dim], self.cond_val, "simple") - field_1 = srf.unstructured(self.pos[:dim]) - field_2 = srf.structured(self.pos[:dim]) + krige = gs.krige.Simple( + model, self.cond_pos[:dim], self.cond_val, self.mean + ) + crf = gs.CondSRF(krige, seed=19970221) + field_1 = crf.unstructured(self.pos[:dim]) + field_2 = crf.structured(self.pos[:dim]) for i, val in enumerate(self.cond_val): self.assertAlmostEqual(val, field_1[i], places=2) self.assertAlmostEqual(val, field_2[dim * (i,)], places=2) @@ -73,10 +71,10 @@ def test_ordinary(self): model = Model( dim=1, var=0.5, len_scale=2, anis=[0.1, 1], angles=[0.5, 0, 0] ) - srf = SRF(model, seed=19970221) - srf.set_condition(self.cond_pos[0], self.cond_val, "ordinary") - field_1 = srf.unstructured(self.pos[0]) - field_2 = srf.structured(self.pos[0]) + krige = gs.krige.Ordinary(model, self.cond_pos[0], self.cond_val) + crf = gs.CondSRF(krige, seed=19970221) + field_1 = crf.unstructured(self.pos[0]) + field_2 = crf.structured(self.pos[0]) for i, val in enumerate(self.cond_val): self.assertAlmostEqual(val, field_1[i], places=2) self.assertAlmostEqual(val, field_2[(i,)], places=2) @@ -89,16 +87,69 @@ def test_ordinary(self): anis=[0.1, 1], angles=[0.5, 0, 0], ) - srf = SRF(model, seed=19970221) - srf.set_condition( - self.cond_pos[:dim], self.cond_val, "ordinary" + krige = gs.krige.Ordinary( + model, self.cond_pos[:dim], self.cond_val ) - field_1 = srf.unstructured(self.pos[:dim]) - field_2 = srf.structured(self.pos[:dim]) + crf = gs.CondSRF(krige, seed=19970221) + field_1 = crf.unstructured(self.pos[:dim]) + field_2 = crf.structured(self.pos[:dim]) for i, val in enumerate(self.cond_val): self.assertAlmostEqual(val, field_1[i], places=2) self.assertAlmostEqual(val, field_2[dim * (i,)], places=2) + def test_raise_error(self): + self.assertRaises(ValueError, gs.CondSRF, gs.Gaussian()) 
+ krige = gs.krige.Ordinary(gs.Stable(), self.cond_pos, self.cond_val) + self.assertRaises(ValueError, gs.CondSRF, krige, generator="unknown") + + def test_nugget(self): + model = gs.Gaussian( + nugget=0.01, + var=0.5, + len_scale=2, + anis=[0.1, 1], + angles=[0.5, 0, 0], + ) + krige = gs.krige.Ordinary( + model, self.cond_pos, self.cond_val, exact=True + ) + crf = gs.CondSRF(krige, seed=19970221) + field_1 = crf.unstructured(self.pos) + field_2 = crf.structured(self.pos) + for i, val in enumerate(self.cond_val): + self.assertAlmostEqual(val, field_1[i], places=2) + self.assertAlmostEqual(val, field_2[3 * (i,)], places=2) + + def test_setter(self): + krige1 = gs.krige.Krige(gs.Exponential(), self.cond_pos, self.cond_val) + krige2 = gs.krige.Krige( + gs.Gaussian(var=2), + self.cond_pos, + self.cond_val, + mean=-1, + trend=-2, + normalizer=gs.normalizer.YeoJohnson(), + ) + crf1 = gs.CondSRF(krige1) + crf2 = gs.CondSRF(krige2, seed=19970221) + # update settings + crf1.model = gs.Gaussian(var=2) + crf1.mean = -1 + crf1.trend = -2 + # also checking correctly setting uninitialized normalizer + crf1.normalizer = gs.normalizer.YeoJohnson + # check if setting went right + self.assertTrue(crf1.model == crf2.model) + self.assertTrue(crf1.normalizer == crf2.normalizer) + self.assertAlmostEqual(crf1.mean, crf2.mean) + self.assertAlmostEqual(crf1.trend, crf2.trend) + # reset kriging + crf1.krige.set_condition() + # compare fields + field1 = crf1(self.pos, seed=19970221) + field2 = crf2(self.pos) + self.assertTrue(np.all(np.isclose(field1, field2))) + if __name__ == "__main__": unittest.main() diff --git a/tests/test_covmodel.py b/tests/test_covmodel.py index d331773d2..15123af20 100644 --- a/tests/test_covmodel.py +++ b/tests/test_covmodel.py @@ -4,47 +4,89 @@ """ import numpy as np import unittest +from gstools.covmodel.tools import ( + AttributeWarning, + check_bounds, + check_arg_in_bounds, +) from gstools import ( CovModel, Gaussian, Exponential, - Rational, Stable, + 
Rational, + Cubic, Matern, Linear, Circular, Spherical, + HyperSpherical, + SuperSpherical, + JBessel, TPLGaussian, TPLExponential, TPLStable, + TPLSimple, ) +class Gau_var(CovModel): + def variogram(self, r): + h = np.abs(r) / self.len_rescaled + return self.var * (1.0 - np.exp(-(h ** 2))) + self.nugget + + +class Gau_cov(CovModel): + def covariance(self, r): + h = np.abs(r) / self.len_rescaled + return self.var * np.exp(-(h ** 2)) + + +class Gau_cor(CovModel): + def correlation(self, r): + h = np.abs(r) / self.len_rescaled + return np.exp(-(h ** 2)) + + +class Gau_fix(CovModel): + def cor(self, h): + return np.exp(-(h ** 2)) + + def fix_dim(self): + return 2 + + +class Mod_add(CovModel): + def cor(self, h): + return 1.0 + + def default_opt_arg(self): + return {"alpha": 1} + + class TestCovModel(unittest.TestCase): def setUp(self): - self.cov_models = [ + self.std_cov_models = [ Gaussian, Exponential, - Rational, Stable, + Rational, + Cubic, Matern, Linear, Circular, Spherical, + HyperSpherical, + SuperSpherical, + JBessel, + TPLSimple, + ] + self.tpl_cov_models = [ TPLGaussian, TPLExponential, TPLStable, ] - self.std_cov_models = [ - Gaussian, - Exponential, - Rational, - Stable, - Matern, - Linear, - Circular, - Spherical, - ] + self.cov_models = self.std_cov_models + self.tpl_cov_models self.dims = range(1, 4) self.lens = [[10, 5, 2]] self.anis = [[0.5, 0.2]] @@ -61,7 +103,7 @@ def test_creation(self): class User(CovModel): def cor(self, h): - return np.exp(-h ** 2) + return np.exp(-(h ** 2)) user = User(len_scale=2) self.assertAlmostEqual(user.correlation(1), np.exp(-0.25)) @@ -91,11 +133,27 @@ def cor(self, h): model.var * model.correlation(1), ) self.assertAlmostEqual( - model.vario_spatial(([1], [2], [3]))[0], + model.vario_spatial(([1], [2], [3])[:dim])[0], model.var + model.nugget - - model.cov_spatial(([1], [2], [3]))[0], + - model.cov_spatial(([1], [2], [3])[:dim])[0], ) + self.assertAlmostEqual( + model.cor_spatial(([1], [2], [3])[:dim])[0], + 
model.cov_spatial(([1], [2], [3])[:dim])[0] + / model.var, + ) + for d in range(dim): + self.assertAlmostEqual( + model.vario_axis(1, axis=d), + model.var + + model.nugget + - model.cov_axis(1, axis=d), + ) + self.assertAlmostEqual( + model.cor_axis(1, axis=d), + model.cov_axis(1, axis=d) / model.var, + ) self.assertAlmostEqual( model.cov_nugget(0), model.sill ) @@ -108,7 +166,7 @@ def cor(self, h): model.vario_nugget(1), model.variogram(1) ) # check if callable - model.vario_spatial((1, 2, 3)) + model.vario_spatial((1, 2, 3)[:dim]) model.spectral_density([0, 1]) model.spectrum([0, 1]) model.spectral_rad_pdf([0, 1]) @@ -120,6 +178,34 @@ def cor(self, h): if model.has_ppf: model.spectral_rad_ppf([0.0, 0.99]) model.pykrige_kwargs + # check arg bound setting + model.set_arg_bounds( + var=[2, np.inf], nugget=[1, 2] + ) + self.assertAlmostEqual(model.var, 3) + self.assertAlmostEqual(model.nugget, 1.5) + + def test_tpl_models(self): + for Model in self.tpl_cov_models: + for dim in self.dims: + model = Model(dim=dim, len_scale=9, len_low=1, rescale=2) + self.assertAlmostEqual(model.len_up_rescaled, 5) + model.len_low = 0.0 + self.assertAlmostEqual(model.cor(2), model.correlation(9)) + # also check resetting of var when sill is given lower + model.fit_variogram( + self.gamma_x, self.gamma_y, sill=1.1, nugget=False + ) + self.assertAlmostEqual(model.var, 1.1, delta=1e-5) + # check var_raw handling + model = Model(var_raw=1, len_low=0, integral_scale=10) + var_save = model.var + model.var_raw = 1.1 + self.assertAlmostEqual(model.var, var_save * 1.1) + self.assertAlmostEqual(model.integral_scale, 10) + # integral scale is not setable when len_low is not 0 + with self.assertRaises(ValueError): + Model(var_raw=1, len_low=5, integral_scale=10) def test_fitting(self): for Model in self.std_cov_models: @@ -127,6 +213,193 @@ def test_fitting(self): model = Model(dim=dim) model.fit_variogram(self.gamma_x, self.gamma_y, nugget=False) self.assertAlmostEqual(model.nugget, 0.0) + model 
= Model(dim=dim) + # also check resetting of var when sill is given lower + model.fit_variogram(self.gamma_x, self.gamma_y, sill=0.9) + self.assertAlmostEqual(model.nugget + model.var, 0.9) + model = Model(dim=dim) + # more detailed checks + model.fit_variogram( + self.gamma_x, self.gamma_y, sill=2, nugget=False + ) + self.assertAlmostEqual(model.var, 2.0) + model = Model(dim=dim) + model.fit_variogram( + self.gamma_x, self.gamma_y, sill=2, nugget=1 + ) + self.assertAlmostEqual(model.var, 1) + model = Model(dim=dim) + ret = model.fit_variogram( + self.gamma_x, + self.gamma_y, + loss="linear", + return_r2=True, + weights="inv", + init_guess="current", + ) + self.assertEqual(len(ret), 3) + + # treatment of sill/var/nugget by fitting + model = Stable() + model.fit_variogram( + self.gamma_x, self.gamma_y, nugget=False, var=False, sill=2 + ) + self.assertAlmostEqual(model.var, 1) + self.assertAlmostEqual(model.nugget, 1) + model.fit_variogram(self.gamma_x, self.gamma_y, var=2, sill=3) + self.assertAlmostEqual(model.var, 2) + self.assertAlmostEqual(model.nugget, 1) + model.var = 3 + model.fit_variogram( + self.gamma_x, self.gamma_y, nugget=False, var=False, sill=2 + ) + self.assertAlmostEqual(model.var, 2) + self.assertAlmostEqual(model.nugget, 0) + model.fit_variogram(self.gamma_x, self.gamma_y, weights="inv") + len_save = model.len_scale + model.fit_variogram( + self.gamma_x, self.gamma_y, weights=lambda x: 1 / (1 + x) + ) + self.assertAlmostEqual(model.len_scale, len_save) + # check ValueErrors + with self.assertRaises(ValueError): + model.fit_variogram(self.gamma_x, self.gamma_y, sill=2, var=3) + with self.assertRaises(ValueError): + model.fit_variogram(self.gamma_x, self.gamma_y, sill=2, nugget=3) + with self.assertRaises(ValueError): + model.fit_variogram(self.gamma_x, self.gamma_y, method="wrong") + with self.assertRaises(ValueError): + model.fit_variogram(self.gamma_x, self.gamma_y, wrong=False) + model.var_bounds = [0, 1] + model.nugget_bounds = [0, 1] + with 
self.assertRaises(ValueError): + model.fit_variogram(self.gamma_x, self.gamma_y, sill=3) + # init guess + with self.assertRaises(ValueError): + model.fit_variogram(self.gamma_x, self.gamma_y, init_guess="wrong") + model.var_bounds = [0, np.inf] + model.fit_variogram( + self.gamma_x, np.array(self.gamma_y) + 1, sill=2, alpha=False + ) + self.assertAlmostEqual(model.var + model.nugget, 2) + + def test_covmodel_class(self): + model_std = Gaussian(rescale=3, var=1.1, nugget=1.2, len_scale=1.3) + model_var = Gau_var(rescale=3, var=1.1, nugget=1.2, len_scale=1.3) + model_cov = Gau_cov(rescale=3, var=1.1, nugget=1.2, len_scale=1.3) + model_cor = Gau_cor(rescale=3, var=1.1, nugget=1.2, len_scale=1.3) + var = model_std.variogram(2.5) + cov = model_std.covariance(2.5) + corr = model_std.correlation(2.5) + cor = model_std.cor(2.5) + + self.assertFalse(check_bounds(bounds=[0])) + self.assertFalse(check_bounds(bounds=[1, -1])) + self.assertFalse(check_bounds(bounds=[0, 1, 2, 3])) + self.assertFalse(check_bounds(bounds=[0, 1, "kk"])) + self.assertRaises(ValueError, model_std.set_arg_bounds, wrong_arg=[1]) + self.assertRaises( + ValueError, model_std.set_arg_bounds, wrong_arg=[-1, 1] + ) + + # checking some properties + model_par = Stable() + self.assertFalse(model_par.do_rotation) + self.assertEqual(len(model_par.arg), len(model_par.arg_list)) + self.assertEqual(len(model_par.iso_arg), len(model_par.iso_arg_list)) + self.assertEqual(len(model_par.arg), len(model_par.iso_arg) + 2) + self.assertEqual(len(model_par.len_scale_vec), model_par.dim) + self.assertFalse(Gaussian() == Stable()) + model_par.hankel_kw = {"N": 300} + self.assertEqual(model_par.hankel_kw["N"], 300) + + # arg in bounds check + model_std.set_arg_bounds(var=[0.5, 1.5]) + with self.assertRaises(ValueError): + model_std.var = 0.4 + with self.assertRaises(ValueError): + model_std.var = 1.6 + model_std.set_arg_bounds(var=[0.5, 1.5, "oo"]) + with self.assertRaises(ValueError): + model_std.var = 0.5 + with 
self.assertRaises(ValueError): + model_std.var = 1.5 + with self.assertRaises(ValueError): + model_std.var_bounds = [1, -1] + with self.assertRaises(ValueError): + model_std.len_scale_bounds = [1, -1] + with self.assertRaises(ValueError): + model_std.nugget_bounds = [1, -1] + with self.assertRaises(ValueError): + model_std.anis_bounds = [1, -1] + # reset the standard model + model_std = Gaussian(rescale=3, var=1.1, nugget=1.2, len_scale=1.3) + # std value from bounds with neg. inf and finit bound + model_add = Mod_add() + model_add.set_arg_bounds(alpha=[-np.inf, 0]) + self.assertAlmostEqual(model_add.alpha, -1) + # special treatment of anis check + model_std.set_arg_bounds(anis=[2, 4, "oo"]) + self.assertTrue(np.all(np.isclose(model_std.anis, 3))) + # dim specific checks + with self.assertWarns(AttributeWarning): + Gau_fix(dim=1) + self.assertRaises(ValueError, Gaussian, dim=0) + self.assertRaises(ValueError, Gau_fix, latlon=True) + # check inputs + self.assertRaises(ValueError, model_std.percentile_scale, per=-1.0) + self.assertRaises(ValueError, Gaussian, anis=-1.0) + self.assertRaises(ValueError, Gaussian, len_scale=[1, -1]) + self.assertRaises(ValueError, check_arg_in_bounds, model_std, "wrong") + self.assertWarns(AttributeWarning, Gaussian, wrong_arg=1.0) + with self.assertWarns(AttributeWarning): + self.assertRaises(ValueError, Gaussian, len_rescaled=1.0) + + # check correct subclassing + with self.assertRaises(TypeError): + + class Gau_err(CovModel): + pass + + self.assertAlmostEqual(var, model_var.variogram(2.5)) + self.assertAlmostEqual(var, model_cov.variogram(2.5)) + self.assertAlmostEqual(var, model_cor.variogram(2.5)) + self.assertAlmostEqual(cov, model_var.covariance(2.5)) + self.assertAlmostEqual(cov, model_cov.covariance(2.5)) + self.assertAlmostEqual(cov, model_cor.covariance(2.5)) + self.assertAlmostEqual(corr, model_var.correlation(2.5)) + self.assertAlmostEqual(corr, model_cov.correlation(2.5)) + self.assertAlmostEqual(corr, 
model_cor.correlation(2.5)) + self.assertAlmostEqual(cor, model_var.cor(2.5)) + self.assertAlmostEqual(cor, model_cov.cor(2.5)) + self.assertAlmostEqual(cor, model_cor.cor(2.5)) + + def test_rescale(self): + model1 = Exponential() + model2 = Exponential(rescale=2.1) + model3 = Exponential(rescale=2.1, len_scale=2.1) + + self.assertAlmostEqual( + model1.integral_scale, 2.1 * model2.integral_scale + ) + self.assertAlmostEqual(model1.integral_scale, model3.integral_scale) + + def test_special_models(self): + # matern converges to gaussian + model1 = Matern() + model1.set_arg_bounds(nu=[0, 101]) + model1.nu = 100 + model2 = Gaussian(rescale=0.5) + self.assertAlmostEqual(model1.variogram(1), model2.variogram(1)) + self.assertAlmostEqual(model1.spectrum(1), model2.spectrum(1), 2) + # stable model gets unstable for alpha < 0.3 + with self.assertWarns(AttributeWarning): + Stable(alpha=0.2) + with self.assertWarns(AttributeWarning): + TPLStable(alpha=0.2) + # corner case for JBessel model + with self.assertWarns(AttributeWarning): + JBessel(dim=3, nu=0.5) if __name__ == "__main__": diff --git a/tests/test_incomprrandmeth.py b/tests/test_incomprrandmeth.py index 2ef39d5cd..6760fd72e 100644 --- a/tests/test_incomprrandmeth.py +++ b/tests/test_incomprrandmeth.py @@ -6,13 +6,13 @@ import copy import unittest import numpy as np -from gstools import Gaussian +import gstools as gs from gstools.field.generator import IncomprRandMeth class TestIncomprRandMeth(unittest.TestCase): def setUp(self): - self.cov_model_2d = Gaussian(dim=2, var=1.5, len_scale=2.5) + self.cov_model_2d = gs.Gaussian(dim=2, var=1.5, len_scale=2.5) self.cov_model_3d = copy.deepcopy(self.cov_model_2d) self.cov_model_3d.dim = 3 self.seed = 19031977 @@ -31,52 +31,32 @@ def setUp(self): ) def test_unstruct_2d(self): - modes = self.rm_2d(self.x_tuple, self.y_tuple) + modes = self.rm_2d((self.x_tuple, self.y_tuple)) self.assertAlmostEqual(modes[0, 0], 0.50751115) self.assertAlmostEqual(modes[0, 1], 1.03291018) 
self.assertAlmostEqual(modes[1, 1], -0.22003005) def test_unstruct_3d(self): - modes = self.rm_3d(self.x_tuple, self.y_tuple, self.z_tuple) - self.assertAlmostEqual(modes[0, 0], 1.49469700) - self.assertAlmostEqual(modes[0, 1], 1.38687858) - self.assertAlmostEqual(modes[1, 0], -0.27245271) - - def test_struct_2d(self): - modes = self.rm_2d(self.x_grid, self.y_grid, mesh_type="structured") - self.assertAlmostEqual(modes[0, 0, 0], 0.50751115) - self.assertAlmostEqual(modes[0, 1, 0], 0.69751927) - self.assertAlmostEqual(modes[1, 1, 1], -0.19747468) - - def test_struct_3d(self): - modes = self.rm_3d( - self.x_grid, self.y_grid, self.z_grid, mesh_type="structured" - ) - self.assertAlmostEqual(modes[0, 0, 0, 0], 1.49469700) - self.assertAlmostEqual(modes[1, 0, 1, 1], 0.12813365) - self.assertAlmostEqual(modes[1, 1, 0, 1], 0.01443056) - self.assertAlmostEqual(modes[1, 1, 1, 1], -0.12304040) - - def test_struct_unstruct(self): - x_grid = np.arange(0.0, 2.0, 1.0) - y_grid = np.arange(0.0, 2.0, 1.0) - x_tuple = np.array((0.0, 0.0, 1.0, 1.0)) - y_tuple = np.array((0.0, 1.0, 0.0, 1.0)) - unstr_modes = self.rm_2d(x_tuple, y_tuple, mesh_type="unstructured") - str_modes = self.rm_2d(x_grid, y_grid, mesh_type="structured") - for d in range(2): - k = 0 - for i in range(len(x_grid)): - for j in range(len(y_grid)): - self.assertAlmostEqual( - str_modes[d, i, j], unstr_modes[d, k] - ) - k += 1 + modes = self.rm_3d((self.x_tuple, self.y_tuple, self.z_tuple)) + self.assertAlmostEqual(modes[0, 0], 0.7924546333550331) + self.assertAlmostEqual(modes[0, 1], 1.660747056686244) + self.assertAlmostEqual(modes[1, 0], -0.28049855754819514) def test_assertions(self): - cov_model_1d = Gaussian(dim=1, var=1.5, len_scale=2.5) + cov_model_1d = gs.Gaussian(dim=1, var=1.5, len_scale=2.5) self.assertRaises(ValueError, IncomprRandMeth, cov_model_1d) + def test_vector_mean(self): + srf = gs.SRF( + self.cov_model_2d, + mean=(0.5, 0), + generator="VectorField", + seed=198412031, + ) + 
srf.structured((self.x_grid, self.y_grid)) + self.assertAlmostEqual(np.mean(srf.field[0]), 1.3025621393180298) + self.assertAlmostEqual(np.mean(srf.field[1]), -0.04729596839446052) + if __name__ == "__main__": unittest.main() diff --git a/tests/test_krige.py b/tests/test_krige.py index 3afee2b0a..af7984b41 100644 --- a/tests/test_krige.py +++ b/tests/test_krige.py @@ -5,16 +5,20 @@ import numpy as np import unittest -from gstools import Gaussian, Exponential, Spherical, krige, SRF +import gstools as gs def trend(*xyz): return xyz[0] +def mean_func(*xyz): + return 2 * xyz[0] + + class TestKrige(unittest.TestCase): def setUp(self): - self.cov_models = [Gaussian, Exponential, Spherical] + self.cov_models = [gs.Gaussian, gs.Exponential, gs.Spherical] self.dims = range(1, 4) self.data = np.array( [ @@ -25,12 +29,17 @@ def setUp(self): [4.7, 3.8, 2.5, 1.74], ] ) + # redundant data for pseudo-inverse + self.p_data = np.zeros((3, 3)) + self.p_vals = np.array([1.0, 2.0, 6.0]) + self.p_meth = [1, 2, 3] # method selector # indices for the date in the grid self.data_idx = tuple(np.array(self.data[:, :3] * 10, dtype=int).T) # x, y, z componentes for the conditon position self.cond_pos = (self.data[:, 0], self.data[:, 1], self.data[:, 2]) # condition values self.cond_val = self.data[:, 3] + self.cond_err = np.array([0.01, 0.0, 0.1, 0.05, 0]) # the arithmetic mean of the conditions self.mean = np.mean(self.cond_val) # the grid @@ -53,7 +62,7 @@ def test_simple(self): anis=[0.9, 0.8], angles=[2, 1, 0.5], ) - simple = krige.Simple( + simple = gs.krige.Simple( model, self.cond_pos[:dim], self.cond_val, self.mean ) field_1, __ = simple.unstructured(self.grids[dim - 1]) @@ -78,8 +87,11 @@ def test_ordinary(self): anis=[0.9, 0.8], angles=[2, 1, 0.5], ) - ordinary = krige.Ordinary( - model, self.cond_pos[:dim], self.cond_val, trend_func + ordinary = gs.krige.Ordinary( + model, + self.cond_pos[:dim], + self.cond_val, + trend=trend_func, ) field_1, __ = 
ordinary.unstructured(self.grids[dim - 1]) field_1 = field_1.reshape(self.grid_shape[:dim]) @@ -104,7 +116,7 @@ def test_universal(self): anis=[0.9, 0.8], angles=[2, 1, 0.5], ) - universal = krige.Universal( + universal = gs.krige.Universal( model, self.cond_pos[:dim], self.cond_val, drift ) field_1, __ = universal.unstructured(self.grids[dim - 1]) @@ -129,7 +141,7 @@ def test_detrended(self): anis=[0.5, 0.2], angles=[0.4, 0.2, 0.1], ) - detrended = krige.Detrended( + detrended = gs.krige.Detrended( model, self.cond_pos[:dim], self.cond_val, trend ) field_1, __ = detrended.unstructured(self.grids[dim - 1]) @@ -149,14 +161,14 @@ def test_extdrift(self): cond_drift = [] for i, grid in enumerate(self.grids): dim = i + 1 - model = Exponential( + model = gs.Exponential( dim=dim, var=2, len_scale=10, anis=[0.9, 0.8], angles=[2, 1, 0.5], ) - srf = SRF(model) + srf = gs.SRF(model) field = srf(grid) ext_drift.append(field) field = field.reshape(self.grid_shape[:dim]) @@ -171,7 +183,7 @@ def test_extdrift(self): anis=[0.5, 0.2], angles=[0.4, 0.2, 0.1], ) - extdrift = krige.ExtDrift( + extdrift = gs.krige.ExtDrift( model, self.cond_pos[:dim], self.cond_val, @@ -193,6 +205,87 @@ def test_extdrift(self): field_2[self.data_idx[:dim]][i], val, places=2 ) + def test_pseudo(self): + for Model in self.cov_models: + for dim in self.dims: + model = Model( + dim=dim, + var=2, + len_scale=10, + anis=[0.5, 0.2], + angles=[0.4, 0.2, 0.1], + ) + for meth in self.p_meth: + krig = gs.krige.Krige( + model, self.p_data[:dim], self.p_vals, unbiased=False + ) + field, __ = krig([0, 0, 0][:dim]) + # with the pseudo-inverse, the estimated value + # should be the mean of the 3 redundant input values + self.assertAlmostEqual( + field[0], np.mean(self.p_vals), places=2 + ) + + def test_error(self): + for Model in self.cov_models: + for dim in self.dims: + model = Model( + dim=dim, + var=5, + len_scale=10, + nugget=0.1, + anis=[0.9, 0.8], + angles=[2, 1, 0.5], + ) + ordinary = gs.krige.Ordinary( + 
model, + self.cond_pos[:dim], + self.cond_val, + exact=False, + cond_err=self.cond_err, + ) + field, err = ordinary(self.cond_pos[:dim]) + # when the given measurement error is 0, the kriging-var + # should equal the nugget of the model + self.assertAlmostEqual(err[1], model.nugget, places=2) + self.assertAlmostEqual(err[4], model.nugget, places=2) + + def test_raise(self): + # no cond_pos/cond_val given + self.assertRaises(ValueError, gs.krige.Krige, gs.Stable(), None, None) + + def test_krige_mean(self): + # check for constant mean (simple kriging) + krige = gs.krige.Simple(gs.Gaussian(), self.cond_pos, self.cond_val) + mean_f = krige.structured(self.pos, only_mean=True) + self.assertTrue(np.all(np.isclose(mean_f, 0))) + krige = gs.krige.Simple( + gs.Gaussian(), + self.cond_pos, + self.cond_val, + mean=mean_func, + normalizer=gs.normalizer.YeoJohnson, + trend=trend, + ) + # check applying mean, norm, trend + mean_f1 = krige.structured(self.pos, only_mean=True) + mean_f2 = gs.normalizer.tools.apply_mean_norm_trend( + self.pos, + np.zeros(tuple(map(len, self.pos))), + mean=mean_func, + normalizer=gs.normalizer.YeoJohnson, + trend=trend, + mesh_type="structured", + ) + self.assertTrue(np.all(np.isclose(mean_f1, mean_f2))) + krige = gs.krige.Simple(gs.Gaussian(), self.cond_pos, self.cond_val) + mean_f = krige.structured(self.pos, only_mean=True) + self.assertTrue(np.all(np.isclose(mean_f, 0))) + # check for constant mean (ordinary kriging) + krige = gs.krige.Ordinary(gs.Gaussian(), self.cond_pos, self.cond_val) + mean_f = krige.structured(self.pos, only_mean=True) + self.assertTrue(np.all(np.isclose(mean_f, krige.get_mean()))) + if __name__ == "__main__": unittest.main() diff --git a/tests/test_latlon.py b/tests/test_latlon.py new file mode 100644 index 000000000..0dc93e12d --- /dev/null +++ b/tests/test_latlon.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +""" +This is the unittest for latlon related routines. 
+""" + +import numpy as np +import unittest +import gstools as gs + + +def _rel_err(a, b): + return np.abs(a / ((a + b) / 2) - 1) + + +class ErrMod(gs.CovModel): + def cor(self, h): + return np.exp(-(h ** 2)) + + def fix_dim(self): + return 2 + + +class TestCondition(unittest.TestCase): + def setUp(self): + self.cmod = gs.Gaussian( + latlon=True, var=2, len_scale=777, rescale=gs.EARTH_RADIUS + ) + self.lat = self.lon = range(-80, 81) + + self.data = np.array( + [ + [52.9336, 8.237, 15.7], + [48.6159, 13.0506, 13.9], + [52.4853, 7.9126, 15.1], + [50.7446, 9.345, 17.0], + [52.9437, 12.8518, 21.9], + [53.8633, 8.1275, 11.9], + [47.8342, 10.8667, 11.4], + [51.0881, 12.9326, 17.2], + [48.406, 11.3117, 12.9], + [49.7273, 8.1164, 17.2], + [49.4691, 11.8546, 13.4], + [48.0197, 12.2925, 13.9], + [50.4237, 7.4202, 18.1], + [53.0316, 13.9908, 21.3], + [53.8412, 13.6846, 21.3], + [54.6792, 13.4343, 17.4], + [49.9694, 9.9114, 18.6], + [51.3745, 11.292, 20.2], + [47.8774, 11.3643, 12.7], + [50.5908, 12.7139, 15.8], + ] + ) + + def test_conv(self): + p_ll = gs.tools.geometric.latlon2pos((self.lat, self.lon), 2.56) + ll_p = gs.tools.geometric.pos2latlon(p_ll, 2.56) + for i, v in enumerate(self.lat): + self.assertAlmostEqual(v, ll_p[0, i]) + self.assertAlmostEqual(v, ll_p[1, i]) + self.assertAlmostEqual( + 8, self.cmod.anisometrize(self.cmod.isometrize((8, 6)))[0, 0] + ) + self.assertAlmostEqual( + 6, self.cmod.anisometrize(self.cmod.isometrize((8, 6)))[1, 0] + ) + self.assertAlmostEqual( + 1, self.cmod.isometrize(self.cmod.anisometrize((1, 0, 0)))[0, 0] + ) + + def test_cov_model(self): + self.assertAlmostEqual( + self.cmod.vario_yadrenko(1.234), + self.cmod.sill - self.cmod.cov_yadrenko(1.234), + ) + self.assertAlmostEqual( + self.cmod.cov_yadrenko(1.234), + self.cmod.var * self.cmod.cor_yadrenko(1.234), + ) + # test if correctly handling tries to set anisotropy + self.cmod.anis = [1, 2] + self.cmod.angles = [1, 2, 3] + self.assertAlmostEqual(self.cmod.anis[0], 1) + 
self.assertAlmostEqual(self.cmod.anis[1], 1) + self.assertAlmostEqual(self.cmod.angles[0], 0) + self.assertAlmostEqual(self.cmod.angles[1], 0) + self.assertAlmostEqual(self.cmod.angles[2], 0) + + def test_vario_est(self): + srf = gs.SRF(self.cmod, seed=12345) + field = srf.structured((self.lat, self.lon)) + + bin_edges = [0.01 * i for i in range(30)] + bin_center, emp_vario = gs.vario_estimate( + *((self.lat, self.lon), field, bin_edges), + latlon=True, + mesh_type="structured", + sampling_size=2000, + sampling_seed=12345, + ) + mod = gs.Gaussian(latlon=True, rescale=gs.EARTH_RADIUS) + mod.fit_variogram(bin_center, emp_vario, nugget=False) + # allow 10 percent relative error + self.assertLess(_rel_err(mod.var, self.cmod.var), 0.1) + self.assertLess(_rel_err(mod.len_scale, self.cmod.len_scale), 0.1) + + def test_krige(self): + bin_max = np.deg2rad(8) + bin_edges = np.linspace(0, bin_max, 5) + emp_vario = gs.vario_estimate( + (self.data[:, 0], self.data[:, 1]), + self.data[:, 2], + bin_edges, + latlon=True, + ) + mod = gs.Spherical(latlon=True, rescale=gs.EARTH_RADIUS) + mod.fit_variogram(*emp_vario, nugget=False) + kri = gs.krige.Ordinary( + mod, + (self.data[:, 0], self.data[:, 1]), + self.data[:, 2], + ) + field, var = kri((self.data[:, 0], self.data[:, 1])) + for i, dat in enumerate(self.data[:, 2]): + self.assertAlmostEqual(field[i], dat) + + def test_cond_srf(self): + bin_max = np.deg2rad(8) + bin_edges = np.linspace(0, bin_max, 5) + emp_vario = gs.vario_estimate( + (self.data[:, 0], self.data[:, 1]), + self.data[:, 2], + bin_edges, + latlon=True, + ) + mod = gs.Spherical(latlon=True, rescale=gs.EARTH_RADIUS) + mod.fit_variogram(*emp_vario, nugget=False) + krige = gs.krige.Ordinary( + mod, (self.data[:, 0], self.data[:, 1]), self.data[:, 2] + ) + crf = gs.CondSRF(krige) + field = crf((self.data[:, 0], self.data[:, 1])) + for i, dat in enumerate(self.data[:, 2]): + self.assertAlmostEqual(field[i], dat, 3) + + def error_test(self): + # try fitting directional 
variogram + mod = gs.Gaussian(latlon=True) + with self.assertRaises(ValueError): + mod.fit_variogram([0, 1], [[0, 1], [0, 1], [0, 1]]) + # try to use fixed dim=2 with latlon + with self.assertRaises(ValueError): + ErrMod(latlon=True) + # try to estimate latlon vario on wrong dim + with self.assertRaises(ValueError): + gs.vario_estimate([[1], [1], [1]], [1], [0, 1], latlon=True) + # try to estimate directional vario with latlon + with self.assertRaises(ValueError): + gs.vario_estimate([[1], [1]], [1], [0, 1], latlon=True, angles=1) + # try to create a vector field with latlon + with self.assertRaises(ValueError): + srf = gs.SRF(mod, generator="VectorField", mode_no=2) + srf([1, 2]) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_normalize.py b/tests/test_normalize.py new file mode 100644 index 000000000..db845a6f5 --- /dev/null +++ b/tests/test_normalize.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- +""" +This is the unittest of the Normalizer class. +""" + +import unittest +import numpy as np +import gstools as gs + + +def _rel_err(a, b): + return np.abs(a / ((a + b) / 2) - 1) + + +class TestNormalizer(unittest.TestCase): + def setUp(self): + self.seed = 20210111 + self.rng = gs.random.RNG(self.seed) + self.mean = 11.1 + self.std = 2.25 + self.smp = self.rng.random.normal(self.mean, self.std, 1000) + self.lmb = 1.5 + + def test_fitting(self): + # boxcox with given data to init + bc_samples = gs.normalizer.BoxCox(lmbda=self.lmb).denormalize(self.smp) + bc_norm = gs.normalizer.BoxCox(data=bc_samples) + self.assertLess(_rel_err(self.lmb, bc_norm.lmbda), 1e-2) + self.assertAlmostEqual( + bc_norm.likelihood(bc_samples), + np.exp(bc_norm.loglikelihood(bc_samples)), + ) + # yeo-johnson with calling fit + yj_norm = gs.normalizer.YeoJohnson(lmbda=self.lmb) + yj_samples = yj_norm.denormalize(self.smp) + yj_norm.fit(yj_samples) + self.assertLess(_rel_err(self.lmb, yj_norm.lmbda), 1e-2) + self.assertAlmostEqual( + yj_norm.likelihood(yj_samples), + 
np.exp(yj_norm.loglikelihood(yj_samples)), + ) + # modulus with calling fit + mo_norm = gs.normalizer.Modulus(lmbda=self.lmb) + mo_samples = mo_norm.denormalize(self.smp) + mo_norm.fit(mo_samples) + self.assertLess(_rel_err(self.lmb, mo_norm.lmbda), 1e-2) + self.assertAlmostEqual( + mo_norm.likelihood(mo_samples), + np.exp(mo_norm.loglikelihood(mo_samples)), + ) + # manly with calling fit + ma_norm = gs.normalizer.Manly(lmbda=self.lmb) + ma_samples = ma_norm.denormalize(self.smp) + ma_norm.fit(ma_samples) + self.assertLess(_rel_err(self.lmb, ma_norm.lmbda), 1e-2) + # self.assertAlmostEqual( + # ma_norm.likelihood(ma_samples), + # np.exp(ma_norm.loglikelihood(ma_samples)), + # ) # this is comparing infs + + def test_boxcox(self): + # without shift + bc = gs.normalizer.BoxCox(lmbda=0) + self.assertTrue( + np.all( + np.isclose(self.smp, bc.normalize(bc.denormalize(self.smp))) + ) + ) + bc.lmbda = self.lmb + self.assertTrue( + np.all( + np.isclose(self.smp, bc.normalize(bc.denormalize(self.smp))) + ) + ) + # with shift + bc = gs.normalizer.BoxCoxShift(lmbda=0, shift=1.1) + self.assertTrue( + np.all( + np.isclose(self.smp, bc.normalize(bc.denormalize(self.smp))) + ) + ) + bc.lmbda = self.lmb + self.assertTrue( + np.all( + np.isclose(self.smp, bc.normalize(bc.denormalize(self.smp))) + ) + ) + + def test_yeojohnson(self): + yj = gs.normalizer.YeoJohnson(lmbda=0) + self.assertTrue( + np.all( + np.isclose( + self.smp - self.mean, + yj.normalize(yj.denormalize(self.smp - self.mean)), + ) + ) + ) + yj.lmbda = 2 + self.assertTrue( + np.all( + np.isclose( + self.smp - self.mean, + yj.normalize(yj.denormalize(self.smp - self.mean)), + ) + ) + ) + # with shift + yj.lmbda = self.lmb + self.assertTrue( + np.all( + np.isclose( + self.smp - self.mean, + yj.normalize(yj.denormalize(self.smp - self.mean)), + ) + ) + ) + + def test_modulus(self): + mo = gs.normalizer.Modulus(lmbda=0) + self.assertTrue( + np.all( + np.isclose(self.smp, mo.normalize(mo.denormalize(self.smp))) + ) + ) + 
mo.lmbda = self.lmb + self.assertTrue( + np.all( + np.isclose(self.smp, mo.normalize(mo.denormalize(self.smp))) + ) + ) + + def test_manly(self): + ma = gs.normalizer.Manly(lmbda=0) + self.assertTrue( + np.all( + np.isclose(self.smp, ma.normalize(ma.denormalize(self.smp))) + ) + ) + ma.lmbda = self.lmb + self.assertTrue( + np.all( + np.isclose(self.smp, ma.normalize(ma.denormalize(self.smp))) + ) + ) + + def test_parameterless(self): + no = gs.normalizer.LogNormal() + self.assertTrue( + np.all( + np.isclose(self.smp, no.normalize(no.denormalize(self.smp))) + ) + ) + no = gs.normalizer.Normalizer() + self.assertTrue( + np.all( + np.isclose(self.smp, no.normalize(no.denormalize(self.smp))) + ) + ) + + def test_compare(self): + norm1 = gs.normalizer.BoxCox() + norm2 = gs.normalizer.BoxCox(lmbda=0.5) + norm3 = gs.normalizer.YeoJohnson() + norm4 = "this is not a normalizer" + # check campare + self.assertTrue(norm1 == norm1) + self.assertTrue(norm1 != norm2) + self.assertTrue(norm1 != norm3) + self.assertTrue(norm1 != norm4) + + def test_check(self): + self.assertRaises(ValueError, gs.field.Field, gs.Cubic(), normalizer=5) + + def test_auto_fit(self): + x = y = range(60) + pos = gs.generate_grid([x, y]) + model = gs.Gaussian(dim=2, var=1, len_scale=10) + srf = gs.SRF( + model, seed=20170519, normalizer=gs.normalizer.LogNormal() + ) + srf(pos) + ids = np.arange(srf.field.size) + samples = np.random.RandomState(20210201).choice( + ids, size=60, replace=False + ) + # sample conditioning points from generated field + cond_pos = pos[:, samples] + cond_val = srf.field[samples] + krige = gs.krige.Ordinary( + model=gs.Stable(dim=2), + cond_pos=cond_pos, + cond_val=cond_val, + normalizer=gs.normalizer.BoxCox(), + fit_normalizer=True, + fit_variogram=True, + ) + # test fitting during kriging + self.assertTrue(np.abs(krige.normalizer.lmbda - 0.0) < 1e-1) + self.assertAlmostEqual(krige.model.len_scale, 10.2677, places=4) + self.assertAlmostEqual( + krige.model.sill, + 
krige.normalizer.normalize(cond_val).var(), + places=4, + ) + # test fitting during vario estimate + bin_center, gamma, normalizer = gs.vario_estimate( + cond_pos, + cond_val, + normalizer=gs.normalizer.BoxCox, + fit_normalizer=True, + ) + model = gs.Stable(dim=2) + model.fit_variogram(bin_center, gamma) + self.assertAlmostEqual(model.var, 0.6426670183, places=4) + self.assertAlmostEqual(model.len_scale, 9.635193952, places=4) + self.assertAlmostEqual(model.nugget, 0.001617908408, places=4) + self.assertAlmostEqual(model.alpha, 2.0, places=4) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_randmeth.py b/tests/test_randmeth.py index 85b2e6914..54dd0aa39 100644 --- a/tests/test_randmeth.py +++ b/tests/test_randmeth.py @@ -30,64 +30,42 @@ def setUp(self): self.rm_3d = RandMeth(self.cov_model_3d, 100, self.seed) def test_unstruct_1d(self): - modes = self.rm_1d(self.x_tuple) + modes = self.rm_1d((self.x_tuple,)) self.assertAlmostEqual(modes[0], 3.19799030) self.assertAlmostEqual(modes[1], 2.44848295) def test_unstruct_2d(self): - modes = self.rm_2d(self.x_tuple, self.y_tuple) + modes = self.rm_2d((self.x_tuple, self.y_tuple)) self.assertAlmostEqual(modes[0], 1.67318010) self.assertAlmostEqual(modes[1], 2.12310269) def test_unstruct_3d(self): - modes = self.rm_3d(self.x_tuple, self.y_tuple, self.z_tuple) - self.assertAlmostEqual(modes[0], 0.55488481) - self.assertAlmostEqual(modes[1], 1.18506639) - - def test_struct_1d(self): - modes = self.rm_1d(self.x_grid, mesh_type="structured") - self.assertAlmostEqual(modes[0], 3.19799030) - self.assertAlmostEqual(modes[1], 2.34788923) - - def test_struct_2d(self): - modes = self.rm_2d(self.x_grid, self.y_grid, mesh_type="structured") - self.assertAlmostEqual(modes[0, 0], 1.67318010) - self.assertAlmostEqual(modes[1, 0], 1.54740003) - self.assertAlmostEqual(modes[0, 1], 2.02106551) - self.assertAlmostEqual(modes[1, 1], 1.86883255) - - def test_struct_3d(self): - modes = self.rm_3d( - self.x_grid, 
self.y_grid, self.z_grid, mesh_type="structured" - ) - self.assertAlmostEqual(modes[0, 0, 0], 0.55488481) - self.assertAlmostEqual(modes[0, 1, 0], 0.41858766) - self.assertAlmostEqual(modes[1, 1, 0], 0.95133855) - self.assertAlmostEqual(modes[0, 1, 1], 0.65475042) - self.assertAlmostEqual(modes[1, 1, 1], 1.40915120) + modes = self.rm_3d((self.x_tuple, self.y_tuple, self.z_tuple)) + self.assertAlmostEqual(modes[0], 1.3240234883187239) + self.assertAlmostEqual(modes[1], 1.6367244277732766) def test_reset(self): - modes = self.rm_2d(self.x_tuple, self.y_tuple) + modes = self.rm_2d((self.x_tuple, self.y_tuple)) self.assertAlmostEqual(modes[0], 1.67318010) self.assertAlmostEqual(modes[1], 2.12310269) self.rm_2d.seed = self.rm_2d.seed - modes = self.rm_2d(self.x_tuple, self.y_tuple) + modes = self.rm_2d((self.x_tuple, self.y_tuple)) self.assertAlmostEqual(modes[0], 1.67318010) self.assertAlmostEqual(modes[1], 2.12310269) self.rm_2d.seed = 74893621 - modes = self.rm_2d(self.x_tuple, self.y_tuple) + modes = self.rm_2d((self.x_tuple, self.y_tuple)) self.assertAlmostEqual(modes[0], -1.94278053) self.assertAlmostEqual(modes[1], -1.12401651) self.rm_1d.model = self.cov_model_3d - modes = self.rm_1d(self.x_tuple, self.y_tuple, self.z_tuple) - self.assertAlmostEqual(modes[0], 0.55488481) - self.assertAlmostEqual(modes[1], 1.18506639) + modes = self.rm_1d((self.x_tuple, self.y_tuple, self.z_tuple)) + self.assertAlmostEqual(modes[0], 1.3240234883187239) + self.assertAlmostEqual(modes[1], 1.6367244277732766) self.rm_2d.mode_no = 800 - modes = self.rm_2d(self.x_tuple, self.y_tuple) + modes = self.rm_2d((self.x_tuple, self.y_tuple)) self.assertAlmostEqual(modes[0], -3.20809251) self.assertAlmostEqual(modes[1], -2.62032778) diff --git a/tests/test_srf.py b/tests/test_srf.py index 082a7ec38..7cca60bf4 100644 --- a/tests/test_srf.py +++ b/tests/test_srf.py @@ -6,13 +6,22 @@ import unittest import numpy as np -from gstools import SRF, Gaussian +import gstools as gs from gstools import 
transform as tf +import meshio + +HAS_PYVISTA = False +try: + import pyvista as pv + + HAS_PYVISTA = True +except ImportError: + pass class TestSRF(unittest.TestCase): def setUp(self): - self.cov_model = Gaussian(dim=2, var=1.5, len_scale=4.0) + self.cov_model = gs.Gaussian(dim=2, var=1.5, len_scale=4.0) self.mean = 0.3 self.mode_no = 100 @@ -32,7 +41,7 @@ def setUp(self): def test_shape_1d(self): self.cov_model.dim = 1 - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) field_str = srf([self.x_grid], seed=self.seed, mesh_type="structured") field_unstr = srf( [self.x_tuple], seed=self.seed, mesh_type="unstructured" @@ -42,7 +51,7 @@ def test_shape_1d(self): def test_shape_2d(self): self.cov_model.dim = 2 - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) field_str = srf( (self.x_grid, self.y_grid), seed=self.seed, mesh_type="structured" ) @@ -56,7 +65,7 @@ def test_shape_2d(self): def test_shape_3d(self): self.cov_model.dim = 3 - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) field_str = srf( (self.x_grid, self.y_grid, self.z_grid), seed=self.seed, @@ -75,12 +84,12 @@ def test_shape_3d(self): def test_anisotropy_2d(self): self.cov_model.dim = 2 - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) field_iso = srf( (self.x_grid, self.y_grid), seed=self.seed, mesh_type="structured" ) self.cov_model.anis = 0.5 - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) field_aniso = srf( (self.x_grid, self.y_grid), seed=self.seed, mesh_type="structured" ) @@ -90,14 +99,14 @@ def test_anisotropy_2d(self): def test_anisotropy_3d(self): self.cov_model.dim = 3 - srf = 
SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) field_iso = srf( (self.x_grid, self.y_grid, self.z_grid), seed=self.seed, mesh_type="structured", ) self.cov_model.anis = (0.5, 4.0) - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) field_aniso = srf( (self.x_grid, self.y_grid, self.z_grid), seed=self.seed, @@ -119,13 +128,13 @@ def test_rotation_unstruct_2d(self): y_u = np.reshape(y_u, x_len * y_len) self.cov_model.anis = 0.25 - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) field = srf((x_u, y_u), seed=self.seed, mesh_type="unstructured") field_str = np.reshape(field, (y_len, x_len)) self.cov_model.angles = -np.pi / 2.0 - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) field_rot = srf((x_u, y_u), seed=self.seed, mesh_type="unstructured") field_rot_str = np.reshape(field_rot, (y_len, x_len)) @@ -135,7 +144,7 @@ def test_rotation_unstruct_2d(self): def test_rotation_struct_2d(self): self.cov_model.dim = 2 self.cov_model.anis = 0.25 - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) field = srf( (self.x_grid_c, self.y_grid_c), seed=self.seed, @@ -143,7 +152,7 @@ def test_rotation_struct_2d(self): ) self.cov_model.angles = -np.pi / 2.0 - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) field_rot = srf( (self.x_grid_c, self.y_grid_c), seed=self.seed, @@ -154,7 +163,7 @@ def test_rotation_struct_2d(self): self.assertAlmostEqual(field[1, 2], field_rot[2, 6]) def test_rotation_unstruct_3d(self): - self.cov_model = Gaussian( + self.cov_model = gs.Gaussian( dim=3, var=1.5, 
len_scale=4.0, anis=(0.25, 0.5) ) x_len = len(self.x_grid_c) @@ -167,12 +176,12 @@ def test_rotation_unstruct_3d(self): y_u = np.reshape(y_u, x_len * y_len * z_len) z_u = np.reshape(z_u, x_len * y_len * z_len) - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) field = srf((x_u, y_u, z_u), seed=self.seed, mesh_type="unstructured") field_str = np.reshape(field, (y_len, x_len, z_len)) self.cov_model.angles = (-np.pi / 2.0, -np.pi / 2.0) - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) field_rot = srf( (x_u, y_u, z_u), seed=self.seed, mesh_type="unstructured" ) @@ -185,7 +194,7 @@ def test_rotation_unstruct_3d(self): def test_rotation_struct_3d(self): self.cov_model.dim = 3 self.cov_model.anis = 0.25 - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) field = srf( (self.x_grid_c, self.y_grid_c, self.z_grid_c), seed=self.seed, @@ -193,7 +202,7 @@ def test_rotation_struct_3d(self): ) self.cov_model.angles = -np.pi / 2.0 - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) field_rot = srf( (self.x_grid_c, self.y_grid_c, self.z_grid_c), seed=self.seed, @@ -204,7 +213,7 @@ def test_rotation_struct_3d(self): self.assertAlmostEqual(field[0, 0, 1], field_rot[0, 7, 1]) self.cov_model.angles = (0, -np.pi / 2.0) - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) field_rot = srf( (self.x_grid_c, self.y_grid_c, self.z_grid_c), seed=self.seed, @@ -216,7 +225,7 @@ def test_rotation_struct_3d(self): self.assertAlmostEqual(field[1, 1, 0], field_rot[7, 1, 1]) def test_calls(self): - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, 
mean=self.mean, mode_no=self.mode_no) field = srf((self.x_tuple, self.y_tuple), seed=self.seed) field2 = srf.unstructured((self.x_tuple, self.y_tuple), seed=self.seed) self.assertAlmostEqual(field[0], srf.field[0]) @@ -230,9 +239,26 @@ def test_calls(self): self.assertAlmostEqual(field[0, 0], srf.field[0, 0]) self.assertAlmostEqual(field[0, 0], field2[0, 0]) + @unittest.skipIf(not HAS_PYVISTA, "PyVista is not installed") + def test_mesh_pyvista(self): + """Test the `.mesh` call with various PyVista meshes.""" + # Create model + self.cov_model.dim = 3 + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + # Get the field the normal way for comparison + field = srf((self.x_tuple, self.y_tuple, self.z_tuple), seed=self.seed) + # Create mesh space with PyVista + pv_mesh = pv.PolyData(np.c_[self.x_tuple, self.y_tuple, self.z_tuple]) + # Run the helper + _ = srf.mesh(pv_mesh, seed=self.seed, points="centroids") + self.assertTrue(np.allclose(field, pv_mesh["field"])) + # points="centroids" + _ = srf.mesh(pv_mesh, seed=self.seed, points="points") + self.assertTrue(np.allclose(field, pv_mesh["field"])) + def test_transform(self): self.cov_model.dim = 2 - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) srf((self.x_grid, self.y_grid), seed=self.seed, mesh_type="structured") tf.normal_force_moments(srf) # force ergodicity of the given field self.assertAlmostEqual(srf.field.mean(), srf.mean) @@ -271,8 +297,8 @@ def test_transform(self): np.testing.assert_array_equal(np.unique(srf.field), values) def test_incomprrandmeth(self): - self.cov_model = Gaussian(dim=2, var=0.5, len_scale=1.0) - srf = SRF( + self.cov_model = gs.Gaussian(dim=2, var=0.5, len_scale=1.0) + srf = gs.SRF( self.cov_model, mean=self.mean, mode_no=self.mode_no, @@ -291,26 +317,56 @@ def test_incomprrandmeth(self): # TODO put these checks into test_cov_model def test_assertions(self): # self.cov_model.dim = 0 - # 
self.assertRaises(ValueError, SRF, self.cov_model, self.mean, self.mode_no) + # self.assertRaises(ValueError, gs.SRF, self.cov_model, self.mean, self.mode_no) # self.cov_model.dim = 4 - # self.assertRaises(ValueError, SRF, self.cov_model, self.mean, self.mode_no) + # self.assertRaises(ValueError, gs.SRF, self.cov_model, self.mean, self.mode_no) self.cov_model.dim = 3 self.cov_model.anis = (0.25, 0.5) - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) self.assertRaises(ValueError, srf, [self.x_tuple]) self.assertRaises(ValueError, srf, [self.x_grid, self.y_grid]) - srf = SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) + srf = gs.SRF(self.cov_model, mean=self.mean, mode_no=self.mode_no) self.assertRaises(ValueError, srf, [self.x_tuple, self.y_tuple]) self.assertRaises( ValueError, srf, [self.x_grid, self.y_grid, self.z_grid] ) - self.assertRaises( - ValueError, - srf, - [self.x_tuple, self.y_tuple, self.z_tuple], - self.seed, - mesh_type="hyper_mesh", + # everything not "unstructured" is treated as "structured" + # self.assertRaises( + # ValueError, + # srf, + # [self.x_tuple, self.y_tuple, self.z_tuple], + # self.seed, + # mesh_type="hyper_mesh", + # ) + + def test_meshio(self): + points = np.array( + [ + [0.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + [0.0, 0.0, 1.0], + [1.0, 0.0, 0.0], + ] ) + cells = [("tetra", np.array([[0, 1, 2, 3]]))] + mesh = meshio.Mesh(points, cells) + model = gs.Gaussian(dim=3, len_scale=0.1) + srf = gs.SRF(model) + srf.mesh(mesh, points="points") + self.assertEqual(len(srf.field), 4) + srf.mesh(mesh, points="centroids") + self.assertEqual(len(srf.field), 1) + + def test_grid_generation(self): + pos1 = [self.x_grid, self.y_grid, self.z_grid] + pos2 = gs.generate_grid(pos1) + time = np.arange(10) + grid1 = gs.generate_grid(pos1 + [time]) + grid2 = gs.generate_st_grid(pos1, time, mesh_type="structured") + grid3 = gs.generate_st_grid(pos2, time, 
mesh_type="unstructured") + self.assertTrue(np.all(np.isclose(grid1, grid2))) + self.assertTrue(np.all(np.isclose(grid1, grid3))) + self.assertTrue(np.all(np.isclose(grid2, grid3))) if __name__ == "__main__": diff --git a/tests/test_variogram_structured.py b/tests/test_variogram_structured.py index e7ff06721..988dba3d6 100644 --- a/tests/test_variogram_structured.py +++ b/tests/test_variogram_structured.py @@ -5,7 +5,7 @@ import unittest import numpy as np -from gstools import variogram +import gstools as gs class TestVariogramstructured(unittest.TestCase): @@ -17,17 +17,12 @@ def test_doubles(self): (41.2, 40.2, 39.7, 39.2, 40.1, 38.3, 39.1, 40.0, 41.1, 40.3), dtype=np.double, ) - gamma = variogram.vario_estimate_structured(z) + gamma = gs.vario_estimate_axis(z) self.assertAlmostEqual(gamma[1], 0.4917, places=4) def test_ints(self): z = np.array((10, 20, 30, 40), dtype=int) - gamma = variogram.vario_estimate_structured(z) - self.assertAlmostEqual(gamma[1], 50.0, places=4) - - def test_np_int(self): - z = np.array((10, 20, 30, 40), dtype=np.int) - gamma = variogram.vario_estimate_structured(z) + gamma = gs.vario_estimate_axis(z) self.assertAlmostEqual(gamma[1], 50.0, places=4) def test_mixed(self): @@ -35,22 +30,22 @@ def test_mixed(self): (41.2, 40.2, 39.7, 39.2, 40.1, 38.3, 39.1, 40.0, 41.1, 40.3), dtype=np.double, ) - gamma = variogram.vario_estimate_structured(z) + gamma = gs.vario_estimate_axis(z) self.assertAlmostEqual(gamma[1], 0.4917, places=4) z = np.array((10, 20, 30, 40), dtype=int) - gamma = variogram.vario_estimate_structured(z) + gamma = gs.vario_estimate_axis(z) self.assertAlmostEqual(gamma[1], 50.0, places=4) def test_list(self): z = [41.2, 40.2, 39.7, 39.2, 40.1, 38.3, 39.1, 40.0, 41.1, 40.3] - gamma = variogram.vario_estimate_structured(z) + gamma = gs.vario_estimate_axis(z) self.assertAlmostEqual(gamma[1], 0.4917, places=4) def test_cressie_1d(self): z = [41.2, 40.2, 39.7, 39.2, 40.1, 38.3, 39.1, 40.0, 41.1, 40.3] - gamma = 
variogram.vario_estimate_structured(z, estimator="cressie") + gamma = gs.vario_estimate_axis(z, estimator="cressie") self.assertAlmostEqual(gamma[1], 1.546 / 2.0, places=3) def test_1d(self): @@ -59,7 +54,7 @@ def test_1d(self): (41.2, 40.2, 39.7, 39.2, 40.1, 38.3, 39.1, 40.0, 41.1, 40.3), dtype=np.double, ) - gamma = variogram.vario_estimate_structured(z) + gamma = gs.vario_estimate_axis(z) self.assertAlmostEqual(gamma[0], 0.0000, places=4) self.assertAlmostEqual(gamma[1], 0.4917, places=4) self.assertAlmostEqual(gamma[2], 0.7625, places=4) @@ -71,12 +66,12 @@ def test_masked_1d(self): dtype=np.double, ) z_ma = np.ma.masked_array(z, mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) - gamma = variogram.vario_estimate_structured(z_ma) + gamma = gs.vario_estimate_axis(z_ma) self.assertAlmostEqual(gamma[0], 0.0000, places=4) self.assertAlmostEqual(gamma[1], 0.4917, places=4) self.assertAlmostEqual(gamma[2], 0.7625, places=4) z_ma = np.ma.masked_array(z, mask=[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]) - gamma = variogram.vario_estimate_structured(z_ma) + gamma = gs.vario_estimate_axis(z_ma) self.assertAlmostEqual(gamma[0], 0.0000, places=4) self.assertAlmostEqual(gamma[1], 0.4906, places=4) self.assertAlmostEqual(gamma[2], 0.7107, places=4) @@ -87,8 +82,8 @@ def test_masked_2d(self): mask = np.zeros_like(field) field_ma = np.ma.masked_array(field, mask=mask) - gamma_x = variogram.vario_estimate_structured(field_ma, direction="x") - gamma_y = variogram.vario_estimate_structured(field_ma, direction="y") + gamma_x = gs.vario_estimate_axis(field_ma, direction="x") + gamma_y = gs.vario_estimate_axis(field_ma, direction="y") var = 1.0 / 12.0 self.assertAlmostEqual(gamma_x[0], 0.0, places=2) @@ -101,8 +96,8 @@ def test_masked_2d(self): mask = np.zeros_like(field) mask[0, 0] = 1 field = np.ma.masked_array(field, mask=mask) - gamma_x = variogram.vario_estimate_structured(field_ma, direction="x") - gamma_y = variogram.vario_estimate_structured(field_ma, direction="y") + gamma_x = 
gs.vario_estimate_axis(field_ma, direction="x") + gamma_y = gs.vario_estimate_axis(field_ma, direction="y") self.assertAlmostEqual(gamma_x[0], 0.0, places=2) self.assertAlmostEqual(gamma_y[0], 0.0, places=2) @@ -112,9 +107,9 @@ def test_masked_3d(self): mask = np.zeros_like(field) field_ma = np.ma.masked_array(field, mask=mask) - gamma_x = variogram.vario_estimate_structured(field_ma, direction="x") - gamma_y = variogram.vario_estimate_structured(field_ma, direction="y") - gamma_z = variogram.vario_estimate_structured(field_ma, direction="z") + gamma_x = gs.vario_estimate_axis(field_ma, direction="x") + gamma_y = gs.vario_estimate_axis(field_ma, direction="y") + gamma_z = gs.vario_estimate_axis(field_ma, direction="z") var = 1.0 / 12.0 self.assertAlmostEqual(gamma_x[0], 0.0, places=2) @@ -130,9 +125,9 @@ def test_masked_3d(self): mask = np.zeros_like(field) mask[0, 0, 0] = 1 field = np.ma.masked_array(field, mask=mask) - gamma_x = variogram.vario_estimate_structured(field_ma, direction="x") - gamma_y = variogram.vario_estimate_structured(field_ma, direction="y") - gamma_z = variogram.vario_estimate_structured(field_ma, direction="z") + gamma_x = gs.vario_estimate_axis(field_ma, direction="x") + gamma_y = gs.vario_estimate_axis(field_ma, direction="y") + gamma_z = gs.vario_estimate_axis(field_ma, direction="z") self.assertAlmostEqual(gamma_x[0], 0.0, places=2) self.assertAlmostEqual(gamma_y[0], 0.0, places=2) self.assertAlmostEqual(gamma_z[0], 0.0, places=2) @@ -144,8 +139,8 @@ def test_uncorrelated_2d(self): rng = np.random.RandomState(1479373475) field = rng.rand(len(x), len(y)) - gamma_x = variogram.vario_estimate_structured(field, direction="x") - gamma_y = variogram.vario_estimate_structured(field, direction="y") + gamma_x = gs.vario_estimate_axis(field, direction="x") + gamma_y = gs.vario_estimate_axis(field, direction="y") var = 1.0 / 12.0 self.assertAlmostEqual(gamma_x[0], 0.0, places=2) @@ -162,10 +157,10 @@ def test_uncorrelated_cressie_2d(self): rng = 
np.random.RandomState(1479373475) field = rng.rand(len(x), len(y)) - gamma_x = variogram.vario_estimate_structured( + gamma_x = gs.vario_estimate_axis( field, direction="x", estimator="cressie" ) - gamma_y = variogram.vario_estimate_structured( + gamma_y = gs.vario_estimate_axis( field, direction="y", estimator="cressie" ) @@ -183,9 +178,9 @@ def test_uncorrelated_3d(self): rng = np.random.RandomState(1479373475) field = rng.rand(len(x), len(y), len(z)) - gamma = variogram.vario_estimate_structured(field, "x") - gamma = variogram.vario_estimate_structured(field, "y") - gamma = variogram.vario_estimate_structured(field, "z") + gamma = gs.vario_estimate_axis(field, "x") + gamma = gs.vario_estimate_axis(field, "y") + gamma = gs.vario_estimate_axis(field, "z") var = 1.0 / 12.0 self.assertAlmostEqual(gamma[0], 0.0, places=2) @@ -203,11 +198,11 @@ def test_directions_2d(self): # random values repeated along x-axis field_y = np.tile(y_rand, (len(x), 1)) - gamma_x_x = variogram.vario_estimate_structured(field_x, direction="x") - gamma_x_y = variogram.vario_estimate_structured(field_x, direction="y") + # gamma_x_x = gs.vario_estimate_axis(field_x, direction="x") + gamma_x_y = gs.vario_estimate_axis(field_x, direction="y") - gamma_y_x = variogram.vario_estimate_structured(field_y, direction="x") - gamma_y_y = variogram.vario_estimate_structured(field_y, direction="y") + gamma_y_x = gs.vario_estimate_axis(field_y, direction="x") + # gamma_y_y = gs.vario_estimate_axis(field_y, direction="y") self.assertAlmostEqual(gamma_x_y[1], 0.0) self.assertAlmostEqual(gamma_x_y[len(gamma_x_y) // 2], 0.0) @@ -229,17 +224,17 @@ def test_directions_3d(self): field_y = np.tile(y_rand.reshape((1, len(y), 1)), (len(x), 1, len(z))) field_z = np.tile(z_rand.reshape((1, 1, len(z))), (len(x), len(y), 1)) - gamma_x_x = variogram.vario_estimate_structured(field_x, direction="x") - gamma_x_y = variogram.vario_estimate_structured(field_x, direction="y") - gamma_x_z = 
variogram.vario_estimate_structured(field_x, direction="z") + # gamma_x_x = gs.vario_estimate_axis(field_x, direction="x") + gamma_x_y = gs.vario_estimate_axis(field_x, direction="y") + gamma_x_z = gs.vario_estimate_axis(field_x, direction="z") - gamma_y_x = variogram.vario_estimate_structured(field_y, direction="x") - gamma_y_y = variogram.vario_estimate_structured(field_y, direction="y") - gamma_y_z = variogram.vario_estimate_structured(field_y, direction="z") + gamma_y_x = gs.vario_estimate_axis(field_y, direction="x") + # gamma_y_y = gs.vario_estimate_axis(field_y, direction="y") + gamma_y_z = gs.vario_estimate_axis(field_y, direction="z") - gamma_z_x = variogram.vario_estimate_structured(field_z, direction="x") - gamma_z_y = variogram.vario_estimate_structured(field_z, direction="y") - gamma_z_z = variogram.vario_estimate_structured(field_z, direction="z") + gamma_z_x = gs.vario_estimate_axis(field_z, direction="x") + gamma_z_y = gs.vario_estimate_axis(field_z, direction="y") + # gamma_z_z = gs.vario_estimate_axis(field_z, direction="z") self.assertAlmostEqual(gamma_x_y[1], 0.0) self.assertAlmostEqual(gamma_x_y[len(gamma_x_y) // 2], 0.0) @@ -262,11 +257,20 @@ def test_directions_3d(self): def test_exceptions(self): x = np.linspace(0.0, 10.0, 20) - rng = np.random.RandomState(1479373475) - x_rand = rng.rand(len(x)) - self.assertRaises( - ValueError, variogram.vario_estimate_structured, x, "a" - ) + # rng = np.random.RandomState(1479373475) + # x_rand = rng.rand(len(x)) + self.assertRaises(ValueError, gs.vario_estimate_axis, x, "a") + + def test_missing(self): + x = np.linspace(0.0, 10.0, 10) + x_nan = x.copy() + x_nan[0] = np.nan + x_mask = np.isnan(x_nan) + x = np.ma.array(x, mask=x_mask) + v1 = gs.vario_estimate_axis(x_nan) + v2 = gs.vario_estimate_axis(x) + for i in range(len(v1)): + self.assertAlmostEqual(v1[i], v2[i]) if __name__ == "__main__": diff --git a/tests/test_variogram_unstructured.py b/tests/test_variogram_unstructured.py index 
c5a59a85a..88660c50d 100644 --- a/tests/test_variogram_unstructured.py +++ b/tests/test_variogram_unstructured.py @@ -5,12 +5,16 @@ import unittest import numpy as np -from gstools import vario_estimate_unstructured +import gstools as gs class TestVariogramUnstructured(unittest.TestCase): def setUp(self): - pass + model = gs.Exponential(dim=3, len_scale=[12, 6, 3]) + x = y = z = range(10) + self.pos = (x, y, z) + srf = gs.SRF(model, seed=123456) + self.field = srf((x, y, z), mesh_type="structured") def test_doubles(self): x = np.arange(1, 11, 1, dtype=np.double) @@ -19,21 +23,14 @@ def test_doubles(self): dtype=np.double, ) bins = np.arange(1, 11, 1, dtype=np.double) - bin_centres, gamma = vario_estimate_unstructured([x], z, bins) + bin_centres, gamma = gs.vario_estimate([x], z, bins) self.assertAlmostEqual(gamma[0], 0.4917, places=4) def test_ints(self): x = np.arange(1, 5, 1, dtype=int) z = np.array((10, 20, 30, 40), dtype=int) bins = np.arange(1, 11, 1, dtype=int) - bin_centres, gamma = vario_estimate_unstructured([x], z, bins) - self.assertAlmostEqual(gamma[0], 50.0, places=4) - - def test_np_int(self): - x = np.arange(1, 5, 1, dtype=np.int) - z = np.array((10, 20, 30, 40), dtype=np.int) - bins = np.arange(1, 11, 1, dtype=np.int) - bin_centres, gamma = vario_estimate_unstructured([x], z, bins) + bin_centres, gamma = gs.vario_estimate([x], z, bins) self.assertAlmostEqual(gamma[0], 50.0, places=4) def test_mixed(self): @@ -43,26 +40,26 @@ def test_mixed(self): dtype=np.double, ) bins = np.arange(1, 11, 1, dtype=int) - bin_centres, gamma = vario_estimate_unstructured([x], z, bins) + bin_centres, gamma = gs.vario_estimate([x], z, bins) self.assertAlmostEqual(gamma[0], 0.4917, places=4) x = np.arange(1, 5, 1, dtype=np.double) z = np.array((10, 20, 30, 40), dtype=int) bins = np.arange(1, 11, 1, dtype=int) - bin_centres, gamma = vario_estimate_unstructured([x], z, bins) + bin_centres, gamma = gs.vario_estimate([x], z, bins) self.assertAlmostEqual(gamma[0], 50.0, 
places=4) x = np.arange(1, 5, 1, dtype=np.double) z = np.array((10, 20, 30, 40), dtype=int) bins = np.arange(1, 11, 1, dtype=np.double) - bin_centres, gamma = vario_estimate_unstructured([x], z, bins) + bin_centres, gamma = gs.vario_estimate([x], z, bins) self.assertAlmostEqual(gamma[0], 50.0, places=4) def test_list(self): x = np.arange(1, 11, 1, dtype=np.double) z = [41.2, 40.2, 39.7, 39.2, 40.1, 38.3, 39.1, 40.0, 41.1, 40.3] bins = np.arange(1, 11, 1, dtype=np.double) - bin_centres, gamma = vario_estimate_unstructured([x], z, bins) + bin_centres, gamma = gs.vario_estimate([x], z, bins) self.assertAlmostEqual(gamma[1], 0.7625, places=4) def test_1d(self): @@ -73,7 +70,7 @@ def test_1d(self): dtype=np.double, ) bins = np.arange(1, 11, 1, dtype=np.double) - bin_centres, gamma = vario_estimate_unstructured([x], z, bins) + bin_centres, gamma = gs.vario_estimate([x], z, bins) self.assertAlmostEqual(gamma[0], 0.4917, places=4) self.assertAlmostEqual(gamma[1], 0.7625, places=4) @@ -89,7 +86,7 @@ def test_uncorrelated_2d(self): bins = np.arange(0, 100, 10) - bin_centres, gamma = vario_estimate_unstructured((x, y), field, bins) + bin_centres, gamma = gs.vario_estimate((x, y), field, bins) var = 1.0 / 12.0 self.assertAlmostEqual(gamma[0], var, places=2) @@ -110,9 +107,7 @@ def test_uncorrelated_3d(self): bins = np.arange(0, 100, 10) - bin_centres, gamma = vario_estimate_unstructured( - (x, y, z), field, bins - ) + bin_centres, gamma = gs.vario_estimate((x, y, z), field, bins) var = 1.0 / 12.0 self.assertAlmostEqual(gamma[0], var, places=2) @@ -127,7 +122,7 @@ def test_sampling_1d(self): bins = np.arange(0, 100, 10) - bin_centres, gamma = vario_estimate_unstructured( + bin_centres, gamma = gs.vario_estimate( [x], field, bins, sampling_size=5000, sampling_seed=1479373475 ) @@ -148,7 +143,7 @@ def test_sampling_2d(self): bins = np.arange(0, 100, 10) - bin_centres, gamma = vario_estimate_unstructured( + bin_centres, gamma = gs.vario_estimate( (x, y), field, bins, 
sampling_size=2000, sampling_seed=1479373475 ) @@ -171,7 +166,7 @@ def test_sampling_3d(self): bins = np.arange(0, 100, 10) - bin_centres, gamma = vario_estimate_unstructured( + bin_centres, gamma = gs.vario_estimate( (x, y, z), field, bins, @@ -195,26 +190,217 @@ def test_assertions(self): field = np.arange(0, 10) field_e = np.arange(0, 9) + self.assertRaises(ValueError, gs.vario_estimate, [x_e], field, bins) + self.assertRaises(ValueError, gs.vario_estimate, (x, y_e), field, bins) self.assertRaises( - ValueError, vario_estimate_unstructured, [x_e], field, bins + ValueError, gs.vario_estimate, (x, y_e, z), field, bins ) self.assertRaises( - ValueError, vario_estimate_unstructured, (x, y_e), field, bins + ValueError, gs.vario_estimate, (x, y, z_e), field, bins ) self.assertRaises( - ValueError, vario_estimate_unstructured, (x, y_e, z), field, bins + ValueError, gs.vario_estimate, (x_e, y, z), field, bins ) self.assertRaises( - ValueError, vario_estimate_unstructured, (x, y, z_e), field, bins + ValueError, gs.vario_estimate, (x, y, z), field_e, bins ) + self.assertRaises(ValueError, gs.vario_estimate, [x], field_e, bins) self.assertRaises( - ValueError, vario_estimate_unstructured, (x_e, y, z), field, bins + ValueError, gs.vario_estimate, [x], field, bins, estimator="bla" ) - self.assertRaises( - ValueError, vario_estimate_unstructured, (x, y, z), field_e, bins + + def test_multi_field(self): + x = np.random.RandomState(19970221).rand(100) * 100.0 + model = gs.Exponential(dim=1, var=2, len_scale=10) + srf = gs.SRF(model) + field1 = srf(x, seed=19970221) + field2 = srf(x, seed=20011012) + bins = np.arange(20) * 2 + bin_center, gamma1 = gs.vario_estimate(x, field1, bins) + bin_center, gamma2 = gs.vario_estimate(x, field2, bins) + bin_center, gamma = gs.vario_estimate(x, [field1, field2], bins) + gamma_mean = 0.5 * (gamma1 + gamma2) + for i in range(len(gamma)): + self.assertAlmostEqual(gamma[i], gamma_mean[i], places=2) + + def test_no_data(self): + x1 = 
np.random.RandomState(19970221).rand(100) * 100.0 + field1 = np.random.RandomState(20011012).rand(100) * 100.0 + field1[:10] = np.nan + x2 = x1[10:] + field2 = field1[10:] + bins = np.arange(20) * 2 + bin_center, gamma1 = gs.vario_estimate(x1, field1, bins) + bin_center, gamma2 = gs.vario_estimate(x2, field2, bins) + for i in range(len(gamma1)): + self.assertAlmostEqual(gamma1[i], gamma2[i], places=2) + + def test_direction_axis(self): + field = np.ma.array(self.field) + field.mask = np.abs(field) < 0.1 + bins = range(10) + __, vario_u = gs.vario_estimate( + *(self.pos, field, bins), + direction=((1, 0, 0), (0, 1, 0), (0, 0, 1)), # x-, y- and z-axis + bandwidth=0.25, # bandwidth small enough to only match lines + mesh_type="structured", + ) + vario_s_x = gs.vario_estimate_axis(field, "x") + vario_s_y = gs.vario_estimate_axis(field, "y") + vario_s_z = gs.vario_estimate_axis(field, "z") + for i in range(len(bins) - 1): + self.assertAlmostEqual(vario_u[0][i], vario_s_x[i]) + self.assertAlmostEqual(vario_u[1][i], vario_s_y[i]) + self.assertAlmostEqual(vario_u[2][i], vario_s_z[i]) + + def test_direction_angle(self): + bins = range(0, 10, 2) + __, v2, c2 = gs.vario_estimate( + *(self.pos[:2], self.field[0], bins), + angles=np.pi / 4, # 45 deg + mesh_type="structured", + return_counts=True, + ) + __, v1, c1 = gs.vario_estimate( + *(self.pos[:2], self.field[0], bins), + direction=(1, 1), # 45 deg + mesh_type="structured", + return_counts=True, + ) + for i in range(len(bins) - 1): + self.assertAlmostEqual(v1[i], v2[i]) + self.assertEqual(c1[i], c2[i]) + + def test_direction_assertion(self): + pos = [[1, 2, 3], [1, 2, 3]] + bns = [1, 2] + fld = np.ma.array([1, 2, 3]) + self.assertRaises( # degenerated direction + ValueError, gs.vario_estimate, pos, fld, bns, direction=[0, 0] + ) + self.assertRaises( # wrong shape of direction + ValueError, gs.vario_estimate, pos, fld, bns, direction=[[[3, 1]]] + ) + self.assertRaises( # wrong dimension of direction + ValueError, 
gs.vario_estimate, pos, fld, bns, direction=[[3, 1, 2]] + ) + self.assertRaises( # wrong shape of angles + ValueError, gs.vario_estimate, pos, fld, bns, angles=[[[1]]] + ) + self.assertRaises( # wrong dimension of angles + ValueError, gs.vario_estimate, pos, fld, bns, angles=[[1, 1]] + ) + self.assertRaises( # direction on latlon + ValueError, + gs.vario_estimate, + pos, + fld, + bns, + direction=[1, 0], + latlon=True, + ) + + def test_mask_no_data(self): + pos = [[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]] + bns = [0, 4] + fld1 = np.ma.array([1, 2, 3, 4, 5]) + fld2 = np.ma.array([np.nan, 2, 3, 4, 5]) + fld3 = np.ma.array([1, 2, 3, 4, 5]) + mask = [False, False, True, False, False] + fld1.mask = [True, False, False, False, False] + fld2.mask = mask + __, v1, c1 = gs.vario_estimate( + *(pos, fld1, bns), + mask=mask, + return_counts=True, + ) + __, v2, c2 = gs.vario_estimate(*(pos, fld2, bns), return_counts=True) + __, v3, c3 = gs.vario_estimate( + *(pos, fld3, bns), + no_data=1, + mask=mask, + return_counts=True, + ) + __, v4, c4 = gs.vario_estimate( + *(pos, fld3, bns), + mask=True, + return_counts=True, + ) + __, v5 = gs.vario_estimate(*(pos, fld3, bns), mask=True) + + self.assertAlmostEqual(v1[0], v2[0]) + self.assertAlmostEqual(v1[0], v3[0]) + self.assertEqual(c1[0], c2[0]) + self.assertEqual(c1[0], c3[0]) + self.assertAlmostEqual(v4[0], 0.0) + self.assertEqual(c4[0], 0) + self.assertAlmostEqual(v5[0], 0.0) + + def test_fit_directional(self): + model = gs.Stable(dim=3) + bins = [0, 3, 6, 9, 12] + model.len_scale_bounds = [0, 20] + bin_center, emp_vario, counts = gs.vario_estimate( + *(self.pos, self.field, bins), + direction=model.main_axes(), + mesh_type="structured", + return_counts=True, + ) + # check if this succeeds + model.fit_variogram(bin_center, emp_vario, sill=1, return_r2=True) + self.assertTrue(1 > model.anis[0] > model.anis[1]) + model.fit_variogram(bin_center, emp_vario, sill=1, anis=[0.5, 0.25]) + self.assertTrue(15 > model.len_scale) + 
model.fit_variogram(bin_center, emp_vario, sill=1, weights=counts) + len_save = model.len_scale + model.fit_variogram(bin_center, emp_vario, sill=1, weights=counts[0]) + self.assertAlmostEqual(len_save, model.len_scale) + # catch wrong dim for dir.-vario + with self.assertRaises(ValueError): + model.fit_variogram(bin_center, emp_vario[:2]) + + def test_auto_binning(self): + # structured mesh + bin_center, emp_vario = gs.vario_estimate( + self.pos, + self.field, + mesh_type="structured", + ) + self.assertEqual(len(bin_center), 21) + self.assertTrue(np.all(bin_center[1:] > bin_center[:-1])) + self.assertTrue(np.all(bin_center > 0)) + # unstructured mesh + bin_center, emp_vario = gs.vario_estimate( + self.pos, + self.field[:, 0, 0], + ) + self.assertEqual(len(bin_center), 8) + self.assertTrue(np.all(bin_center[1:] > bin_center[:-1])) + self.assertTrue(np.all(bin_center > 0)) + # latlon coords + bin_center, emp_vario = gs.vario_estimate( + self.pos[:2], + self.field[..., 0], + mesh_type="structured", + latlon=True, ) + self.assertEqual(len(bin_center), 15) + self.assertTrue(np.all(bin_center[1:] > bin_center[:-1])) + self.assertTrue(np.all(bin_center > 0)) + + def test_standard_bins(self): + # structured mesh + bins = gs.standard_bins(self.pos, dim=3, mesh_type="structured") + self.assertEqual(len(bins), 22) + self.assertTrue(np.all(bins[1:] > bins[:-1])) + self.assertTrue(np.all(bins[1:] > 0)) + # no pos given + self.assertRaises(ValueError, gs.standard_bins) + + def test_raise(self): + # 1d field given for latlon estimation -> needs 2d self.assertRaises( - ValueError, vario_estimate_unstructured, [x], field_e, bins + ValueError, gs.vario_estimate, [[1, 2]], [1, 2], latlon=True )