From 51bf4b8ef0d708905e872d25ab51a47421234f6f Mon Sep 17 00:00:00 2001 From: joncrall Date: Sat, 13 Apr 2024 14:03:22 -0400 Subject: [PATCH 01/15] Fix cfgstr --- CHANGELOG.md | 2 +- requirements/runtime.txt | 13 ++++++++----- vtool_ibeis/__init__.py | 2 +- vtool_ibeis/geometry.py | 6 ------ vtool_ibeis/matching.py | 6 ++++-- 5 files changed, 14 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 06eaa38..b0cc8dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ We are currently working on porting this changelog to the specifications in This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -### [Version 2.2.3] - Released 202x-xx-xx +### [Version 2.3.0] - Released 202x-xx-xx ### Fixed: * Removed codecov from test requirements diff --git a/requirements/runtime.txt b/requirements/runtime.txt index 5b73cf5..a48a169 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -18,14 +18,16 @@ Pillow>=9.0.0 ; python_version < '3.8' and python_version >= '3.7' # Pyth Pillow>=8.3.2 ; python_version < '3.7' and python_version >= '3.6' # Python 3.6 # xdev availpkg numpy --refresh -numpy>=1.23.2 ; python_version >= '3.11' # Python 3.11 +numpy>=1.26.0 ; python_version < '4.0' and python_version >= '3.12' # Python 3.12+ +numpy>=1.23.2 ; python_version < '3.12' and python_version >= '3.11' # Python 3.11 numpy>=1.21.6 ; python_version < '3.11' and python_version >= '3.10' # Python 3.10 numpy>=1.19.3 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 numpy>=1.19.2 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 numpy>=1.19.2 ; python_version < '3.8' and python_version >= '3.7' # Python 3.7 numpy>=1.19.2 ; python_version < '3.7' and python_version >= '3.6' # Python 3.6 -scipy>=1.9.2 ; python_version < '4.0' and python_version >= '3.11' # Python 3.11+ +scipy>=1.11.2 ; python_version < '4.0' and python_version >= '3.12' # Python 3.12+ +scipy>=1.9.2 ; python_version < '3.12' and python_version >= '3.11' # Python 3.11 scipy>=1.8.0 ; python_version < '3.11' and python_version >= '3.10' # Python 3.10 scipy>=1.8.0 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 scipy>=1.8.0 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 @@ -40,7 +42,8 @@ scikit-image>=0.17.2 ; python_version < '3.9' and python_version >= '3.8' scikit-image>=0.17.2 ; python_version < '3.8' and python_version >= '3.7' # Python 3.7 scikit-image>=0.17.2 ; python_version < '3.7' and python_version >= '3.6' # Python 3.6 -scikit-learn>=1.1.3 ; python_version < '4.0' and python_version >= '3.11' # Python 3.11+ +scikit-learn>=1.3.1 ; python_version < '4.0' and python_version >= '3.12' # Python 3.12+ +scikit-learn>=1.1.3 ; python_version < '3.12' and python_version >= '3.11' # Python 3.11 scikit-learn>=1.0.2 ; python_version < '3.11' and python_version >= '3.10' # Python 3.10 scikit-learn>=1.0.2 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 scikit-learn>=1.0.2 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 @@ -53,8 +56,8 @@ statsmodels>=0.13.1 ; python_version < '3.10' and python_version >= '3.9' statsmodels>=0.13.1 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 statsmodels>=0.12.2 ; python_version < '3.8' and python_version >= '3.7' # Python 3.7 -ubelt >= 1.2.3 -utool >= 2.1.7 +ubelt >= 1.3.4 +utool >= 2.2.0 delorean >= 1.0.0 # python ~/local/tools/supported_python_versions_pip.py delorean diff --git a/vtool_ibeis/__init__.py b/vtool_ibeis/__init__.py 
index 76f7e64..d2f584b 100755 --- a/vtool_ibeis/__init__.py +++ b/vtool_ibeis/__init__.py @@ -5,7 +5,7 @@ mkinit vtool_ibeis -i """ # flake8: noqa -__version__ = '2.2.3' +__version__ = '2.3.0' __author__ = 'Jon Crall, Avi Weinstock, Chuck Stewart, Hendrik Weideman, Jason Parham, Zackary Rutfield' __author_email__ = 'erotemic@gmail.com' __url__ = 'https://github.com/Erotemic/vtool_ibeis' diff --git a/vtool_ibeis/geometry.py b/vtool_ibeis/geometry.py index b7a7bdf..a6470dc 100755 --- a/vtool_ibeis/geometry.py +++ b/vtool_ibeis/geometry.py @@ -23,9 +23,6 @@ def verts_from_bbox(bbox, close=False): Returns: list: verts - CommandLine: - python -m vtool_ibeis.geometry --test-verts_from_bbox - Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.geometry import * # NOQA @@ -66,9 +63,6 @@ def draw_border(img_in, color=(0, 128, 255), thickness=2, out=None): thickness (int): out (None): - CommandLine: - python -m vtool_ibeis.geometry --test-draw_border --show - Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.geometry import * # NOQA diff --git a/vtool_ibeis/matching.py b/vtool_ibeis/matching.py index 943d4b5..38fe5cc 100644 --- a/vtool_ibeis/matching.py +++ b/vtool_ibeis/matching.py @@ -102,7 +102,7 @@ def demodata_match(cfgdict={}, apply=True, use_cache=True, recompute=False): cfgstr = ub.hash_data(cfgdict) + hashid cacher = ub.Cacher( 'test_match_v5', - cfgstr=cfgstr, + depends=cfgstr, appname='vtool_ibeis', enabled=use_cache ) @@ -453,8 +453,10 @@ def assign(match, cfgdict={}, verbose=None): K, Knorm, symmetric, checks, weight_key = params annot1 = match.annot1 annot2 = match.annot2 + if verbose is None: + verbose = match.verbose - if match.verbose: + if verbose: print('[match] assign') print('[match] params = ' + ub.repr2(params)) From d135da77fa84dd7d22ea2cc8ed4ad0092cfcc24a Mon Sep 17 00:00:00 2001 From: joncrall Date: Sat, 13 Apr 2024 14:10:39 -0400 Subject: [PATCH 02/15] Rename test helper so it does not start with test --- CHANGELOG.md | 6 +++++- tests/test_pyflann.py | 30 +++++++++++++++--------------- vtool_ibeis/blend.py | 2 +- vtool_ibeis/features.py | 4 ++-- 4 files changed, 23 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0cc8dd..f5d2c5b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,8 +7,12 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ### [Version 2.3.0] - Released 202x-xx-xx -### Fixed: +### Changed +* Remove usage of lena + +### Fixed * Removed codecov from test requirements +* Fixed deprecated use of cfgstr in ubelt Cacher ### [Version 2.2.1] - Released 2023-01-29 diff --git a/tests/test_pyflann.py b/tests/test_pyflann.py index 2ad40b5..704b190 100755 --- a/tests/test_pyflann.py +++ b/tests/test_pyflann.py @@ -102,7 +102,7 @@ class FLANN: """ -def testdata_points(nPts=53, nDims=11, dtype=np.float64): +def demodata_points(nPts=53, nDims=11, dtype=np.float64): pts = np.array(randint(0, 255, (nPts, nDims)), dtype=dtype) return pts @@ -132,7 +132,7 @@ def test_pyflann_hkmeans(): branch_size = 5 num_branches = 7 print('HKmeans') - pts = testdata_points(nPts=1009) + pts = demodata_points(nPts=1009) hkmean_centroids = flann.hierarchical_kmeans(pts, branch_size, num_branches, max_iterations=1000, dtype=None) # print(utool.truncate_str(str(hkmean_centroids))) @@ -161,7 +161,7 @@ def test_pyflann_kmeans(): pytest.skip() flann = FLANN_CLS() num_clusters = 7 - pts = testdata_points(nPts=1009) + pts = demodata_points(nPts=1009) kmeans_centroids = flann.kmeans(pts, num_clusters, max_iterations=None, dtype=None) 
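# Aside: patch 01 above swaps ``cfgstr=...`` for ``depends=...`` when building
# the ``ub.Cacher`` in ``demodata_match``, since ``cfgstr`` is deprecated in
# newer ubelt releases. A minimal self-contained sketch of the replacement
# pattern (the config dict and cached payload are made-up placeholders):
import ubelt as ub
_cfgdict = {'K': 7, 'symmetric': True}
_cacher = ub.Cacher('demo_cache', depends=ub.hash_data(_cfgdict),
                    appname='vtool_ibeis', enabled=True)
_data = _cacher.tryload()          # returns None on a cache miss
if _data is None:
    _data = {'result': 'expensive-to-compute placeholder'}
    _cacher.save(_data)            # hash of `depends` keys the cache entry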
# print(utool.truncate_str(str(kmeans_centroids))) @@ -176,9 +176,9 @@ def test_pyflann_add_point(): """ # Test parameters num_neighbors = 3 - pts = testdata_points(nPts=1009) - qpts = testdata_points(nPts=7) - newpts = testdata_points(nPts=1013) + pts = demodata_points(nPts=1009) + qpts = demodata_points(nPts=7) + newpts = demodata_points(nPts=1013) # build index print('Build Index') @@ -204,9 +204,9 @@ def test_pyflann_add_point(): assert np.all(indices2 < pts.shape[0] + newpts.shape[0]), 'but not more than the points being added' # Test parameters num_neighbors = 3 - pts = testdata_points(nPts=1009) - qpts = testdata_points(nPts=7) - newpts = testdata_points(nPts=1013) + pts = demodata_points(nPts=1009) + qpts = demodata_points(nPts=7) + newpts = demodata_points(nPts=1013) # build index print('Build Index') @@ -237,8 +237,8 @@ def test_pyflann_searches(): pytest.skip() try: num_neighbors = 3 - pts = testdata_points(nPts=5743, nDims=2) - qpts = testdata_points(nPts=7, nDims=2) + pts = demodata_points(nPts=5743, nDims=2) + qpts = demodata_points(nPts=7, nDims=2) import vtool_ibeis as vt # sample a radius radius = vt.L2(pts[0:1], qpts[0:1])[0] * 2 + 1 @@ -287,8 +287,8 @@ def test_pyflann_tune(): if FLANN_CLS is None: import pytest pytest.skip() - pts = testdata_points(nPts=1009) - qpts = testdata_points(nPts=7) + pts = demodata_points(nPts=1009) + qpts = demodata_points(nPts=7) num_neighbors = 3 #num_data = len(data) # untuned query @@ -329,8 +329,8 @@ def test_pyflann_io(): num_neighbors = 3 nPts = 1009 nQPts = 31 - qpts = testdata_points(nPts=nQPts) - pts = testdata_points(nPts=nPts) + qpts = demodata_points(nPts=nQPts) + pts = demodata_points(nPts=nPts) if FLANN_CLS is None: import pytest pytest.skip() diff --git a/vtool_ibeis/blend.py b/vtool_ibeis/blend.py index aa8a140..09d7c5a 100644 --- a/vtool_ibeis/blend.py +++ b/vtool_ibeis/blend.py @@ -5,7 +5,7 @@ def testdata_blend(scale=128): import vtool_ibeis as vt - img_fpath = ut.grab_test_imgpath('lena.png') + img_fpath = ut.grab_test_imgpath('astro') img1 = vt.imread(img_fpath) rng = np.random.RandomState(0) img2 = vt.perlin_noise(img1.shape[0:2], scale=scale, rng=rng)[None, :].T diff --git a/vtool_ibeis/features.py b/vtool_ibeis/features.py index e6067c0..d811035 100755 --- a/vtool_ibeis/features.py +++ b/vtool_ibeis/features.py @@ -41,7 +41,7 @@ def extract_features(img_or_fpath, feat_type='hesaff+sift', **kwargs): >>> from vtool_ibeis.features import * # NOQA >>> import vtool_ibeis as vt >>> # build test data - >>> img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='lena.png')) + >>> img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='astro')) >>> imgBGR = vt.imread(img_fpath) >>> feat_type = ub.argval('--feat_type', default='hesaff+sift') >>> import pyhesaff @@ -109,7 +109,7 @@ def detect_opencv_keypoints(): import vtool_ibeis as vt import numpy as np # NOQA - #img_fpath = ut.grab_test_imgpath(ub.argval('--fname', default='lena.png')) + #img_fpath = ut.grab_test_imgpath(ub.argval('--fname', default='astro')) img_fpath = ut.grab_test_imgpath(ub.argval('--fname', default='zebra.png')) imgBGR = vt.imread(img_fpath) imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY) From 382925f580229bbd223722d6d1ab0b0e3635f656 Mon Sep 17 00:00:00 2001 From: joncrall Date: Sat, 13 Apr 2024 20:51:13 -0400 Subject: [PATCH 03/15] Update requirements --- requirements/graphics.txt | 6 ++++-- requirements/headless.txt | 7 ++++--- requirements/tests.txt | 13 +++++++++---- tests/test_pyflann.py | 6 ++---- 4 files changed, 19 
insertions(+), 13 deletions(-) diff --git a/requirements/graphics.txt b/requirements/graphics.txt index 57af9e0..b918a95 100644 --- a/requirements/graphics.txt +++ b/requirements/graphics.txt @@ -1,5 +1,7 @@ -# python ~/local/tools/supported_python_versions_pip.py opencv-python -opencv-python>=4.5.4.58 ; python_version >= '3.10' # Python 3.10+ +# xdev availpkg opencv-python-headless +# --prefer-binary +opencv-python>=4.5.5.64 ; python_version < '4.0' and python_version >= '3.11' # Python 3.11+ +opencv-python>=4.5.4.58 ; python_version < '3.11' and python_version >= '3.10' # Python 3.10 opencv-python>=3.4.15.55 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 opencv-python>=3.4.15.55 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 opencv-python>=3.4.15.55 ; python_version < '3.8' and python_version >= '3.7' # Python 3.7 diff --git a/requirements/headless.txt b/requirements/headless.txt index 809e889..5aeaa00 100644 --- a/requirements/headless.txt +++ b/requirements/headless.txt @@ -1,6 +1,7 @@ -# python ~/local/tools/supported_python_versions_pip.py opencv-python-headless -# xdev availpkg opencv-python-headless --refresh -opencv-python-headless>=4.5.5.62 ; python_version >= '3.10' # Python 3.10+ +# xdev availpkg opencv-python-headless +# --prefer-binary +opencv-python-headless>=4.5.5.64 ; python_version < '4.0' and python_version >= '3.11' # Python 3.11+ +opencv-python-headless>=4.5.4.58 ; python_version < '3.11' and python_version >= '3.10' # Python 3.10 opencv-python-headless>=3.4.15.55 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 opencv-python-headless>=3.4.15.55 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 opencv-python-headless>=3.4.15.55 ; python_version < '3.8' and python_version >= '3.7' # Python 3.7 diff --git a/requirements/tests.txt b/requirements/tests.txt index f091e08..fed7817 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -1,17 +1,22 @@ -xdoctest>=0.14.0 +xdoctest >= 1.1.3 # Pin maximum pytest versions for older python versions # TODO: determine what the actual minimum and maximum acceptable versions of # pytest (that are also compatible with xdoctest) are for each legacy python # major.minor version. 
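# Aside: each requirement row above couples a minimum version to a PEP 508
# environment marker, so a single file can serve every supported CPython.
# Under CPython 3.10, for example, pip keeps only the row whose marker holds:
#   numpy>=1.21.6  ; python_version < '3.11' and python_version >= '3.10'
# and skips the rest; the `xdev availpkg <package>` helper referenced in the
# comments above regenerates these tables.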
# See ~/local/tools/supported_python_versions_pip.py for helper script -pytest>=6.2.5 ; python_version >= '3.10.0' # Python 3.10+ -pytest>=4.6.0 ; python_version < '3.10.0' and python_version >= '3.7.0' # Python 3.7-3.9 -pytest>=4.6.0 ; python_version < '3.7.0' and python_version >= '3.6.0' # Python 3.6 +pytest>=8.1.1 ; python_version < '4.0' and python_version >= '3.13' # Python 3.13+ +pytest>=8.1.1 ; python_version < '3.13' and python_version >= '3.12' # Python 3.12 +pytest>=8.1.1 ; python_version < '3.12' and python_version >= '3.11' # Python 3.11 +pytest>=8.1.1 ; python_version < '3.11' and python_version >= '3.10' # Python 3.10 +pytest>=8.1.1 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 +pytest>=8.1.1 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 +pytest>=4.6.0 ; python_version < '3.8' and python_version >= '3.7' # Python 3.7 pytest>=4.6.0, <= 6.1.2 ; python_version < '3.6.0' and python_version >= '3.5.0' # Python 3.5 pytest>=4.6.0, <= 4.6.11 ; python_version < '3.5.0' and python_version >= '3.4.0' # Python 3.4 pytest>=4.6.0, <= 4.6.11 ; python_version < '2.8.0' and python_version >= '2.7.0' # Python 2.7 + ## #pytest-cov >= 2.6.0 ; python_version >= '3.7.0' #pytest-cov >= 2.6.0, <= 2.8.1 ; python_version < '3.7.0' and python_version >= '3.6.0' diff --git a/tests/test_pyflann.py b/tests/test_pyflann.py index 704b190..b671e12 100755 --- a/tests/test_pyflann.py +++ b/tests/test_pyflann.py @@ -280,8 +280,7 @@ def test_pyflann_tune(): """ Example: >>> # ENABLE_DOCTEST - >>> result = test_pyflann_tune() - >>> print(result) + >>> test_pyflann_tune() """ print('Create random qpts and database data') if FLANN_CLS is None: @@ -313,8 +312,7 @@ def test_pyflann_tune(): print(ub.hzcat(['index_untuned, dist_untuned = ', str(index_untuned), str(dist_untuned)])) print(dist_untuned >= dist_tuned) - - return tuned_params + print(f'tuned_params = {ub.urepr(tuned_params, nl=1)}') def test_pyflann_io(): From 393204e4058fb579860d8b6a24301007a4966897 Mon Sep 17 00:00:00 2001 From: joncrall Date: Sat, 13 Apr 2024 20:55:20 -0400 Subject: [PATCH 04/15] Update xcookie --- .github/workflows/tests.yml | 216 ++++++++-- .readthedocs.yml | 5 +- dev/setup_secrets.sh | 6 +- docs/source/conf.py | 836 +++++++++++++++++++++++++++++++----- publish.sh | 123 ++++-- pyproject.toml | 13 +- requirements.txt | 1 + setup.py | 46 +- 8 files changed, 1047 insertions(+), 199 deletions(-) mode change 100755 => 100644 setup.py diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 0c2cb72..1547c80 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -2,6 +2,7 @@ # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions # Based on ~/code/xcookie/xcookie/rc/tests.yml.in # Now based on ~/code/xcookie/xcookie/builders/github_actions.py +# See: https://github.com/Erotemic/xcookie name: PurePyCI @@ -12,14 +13,19 @@ on: jobs: lint_job: + ## + # Run quick linting and typing checks. + # To disable all linting add "linter=false" to the xcookie config. + # To disable type checks add "notypes" to the xcookie tags. 
+ ## runs-on: ubuntu-latest steps: - name: Checkout source - uses: actions/checkout@v3 - - name: Set up Python 3.11 for linting - uses: actions/setup-python@v4.5.0 + uses: actions/checkout@v4.1.1 + - name: Set up Python 3.12 for linting + uses: actions/setup-python@v5.0.0 with: - python-version: '3.11' + python-version: '3.12' - name: Install dependencies run: |- python -m pip install --upgrade pip @@ -29,21 +35,25 @@ jobs: # stop the build if there are Python syntax errors or undefined names flake8 ./vtool_ibeis --count --select=E9,F63,F7,F82 --show-source --statistics build_and_test_sdist: + ## + # Build the pure python package from source and test it in the + # same environment. + ## name: Build sdist runs-on: ubuntu-latest steps: - name: Checkout source - uses: actions/checkout@v3 - - name: Set up Python 3.11 - uses: actions/setup-python@v4.5.0 + uses: actions/checkout@v4.1.1 + - name: Set up Python 3.12 + uses: actions/setup-python@v5.0.0 with: - python-version: '3.11' + python-version: '3.12' - name: Upgrade pip run: |- python -m pip install --upgrade pip - python -m pip install -r requirements/tests.txt - python -m pip install -r requirements/runtime.txt - python -m pip install -r requirements/headless.txt + python -m pip install --prefer-binary -r requirements/tests.txt + python -m pip install --prefer-binary -r requirements/runtime.txt + python -m pip install --prefer-binary -r requirements/headless.txt - name: Build sdist shell: bash run: |- @@ -52,7 +62,7 @@ jobs: python -m build --sdist --outdir wheelhouse - name: Install sdist run: |- - ls -al ./wheelhouse + ls -al wheelhouse pip install --prefer-binary wheelhouse/vtool_ibeis*.tar.gz -v - name: Test minimal loose sdist run: |- @@ -66,7 +76,7 @@ jobs: # Get path to installed package MOD_DPATH=$(python -c "import vtool_ibeis, os; print(os.path.dirname(vtool_ibeis.__file__))") echo "MOD_DPATH = $MOD_DPATH" - python -m pytest --cov={self.mod_name} $MOD_DPATH ../tests + python -m pytest --verbose --cov=vtool_ibeis $MOD_DPATH ../tests cd .. - name: Test full loose sdist run: |- @@ -81,45 +91,51 @@ jobs: # Get path to installed package MOD_DPATH=$(python -c "import vtool_ibeis, os; print(os.path.dirname(vtool_ibeis.__file__))") echo "MOD_DPATH = $MOD_DPATH" - python -m pytest --cov={self.mod_name} $MOD_DPATH ../tests + python -m pytest --verbose --cov=vtool_ibeis $MOD_DPATH ../tests cd .. - - name: Upload sdist artifact - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v3.1.3 + name: Upload sdist artifact with: - name: wheels - path: ./wheelhouse/*.tar.gz + name: sdist_wheels + path: wheelhouse/*.tar.gz build_purepy_wheels: + ## + # Download and test the pure-python wheels that were build in the + # build_purepy_wheels and test them in this independent environment. 
+ ## name: ${{ matrix.python-version }} on ${{ matrix.os }}, arch=${{ matrix.arch }} with ${{ matrix.install-extras }} runs-on: ${{ matrix.os }} strategy: + fail-fast: false matrix: os: - ubuntu-latest python-version: - - '3.11' + - '3.12' arch: - auto steps: - name: Checkout source - uses: actions/checkout@v3 + uses: actions/checkout@v4.1.1 - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 if: runner.os == 'Linux' && matrix.arch != 'auto' with: platforms: all - name: Setup Python - uses: actions/setup-python@v4.5.0 + uses: actions/setup-python@v5.0.0 with: python-version: ${{ matrix.python-version }} - name: Build pure wheel shell: bash run: |- - python -m pip install setuptools>=0.8 wheel build + python -m pip install setuptools>=0.8 wheel build twine python -m build --wheel --outdir wheelhouse + python -m twine check ./wheelhouse/vtool_ibeis*.whl - name: Show built files shell: bash run: ls -la wheelhouse - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v3.1.3 name: Upload wheels artifact with: name: wheels @@ -130,20 +146,19 @@ jobs: needs: - build_purepy_wheels strategy: + fail-fast: false matrix: + # Xcookie generates an explicit list of environments that will be used + # for testing instead of using the more concise matrix notation. include: - - python-version: '3.7' + - python-version: '3.8' install-extras: tests-strict,runtime-strict,headless-strict os: ubuntu-latest arch: auto - - python-version: '3.11' + - python-version: '3.12' install-extras: tests-strict,runtime-strict,optional-strict,headless-strict os: ubuntu-latest arch: auto - - python-version: '3.7' - install-extras: tests,optional,headless - os: ubuntu-latest - arch: auto - python-version: '3.8' install-extras: tests,optional,headless os: ubuntu-latest @@ -160,19 +175,23 @@ jobs: install-extras: tests,optional,headless os: ubuntu-latest arch: auto + - python-version: '3.12' + install-extras: tests,optional,headless + os: ubuntu-latest + arch: auto steps: - name: Checkout source - uses: actions/checkout@v3 + uses: actions/checkout@v4.1.1 - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 if: runner.os == 'Linux' && matrix.arch != 'auto' with: platforms: all - name: Setup Python - uses: actions/setup-python@v4.5.0 + uses: actions/setup-python@v5.0.0 with: python-version: ${{ matrix.python-version }} - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v2.1.1 name: Download wheels with: name: wheels @@ -189,6 +208,9 @@ jobs: pip install tomli pkginfo export WHEEL_FPATH=$(python -c "import pathlib; print(str(sorted(pathlib.Path('wheelhouse').glob('vtool_ibeis*.whl'))[-1]).replace(chr(92), chr(47)))") export MOD_VERSION=$(python -c "from pkginfo import Wheel; print(Wheel('$WHEEL_FPATH').version)") + echo "$WHEEL_FPATH=WHEEL_FPATH" + echo "$INSTALL_EXTRAS=INSTALL_EXTRAS" + echo "$MOD_VERSION=MOD_VERSION" pip install --prefer-binary "vtool_ibeis[$INSTALL_EXTRAS]==$MOD_VERSION" -f wheelhouse echo "Install finished." 
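    # Aside: a standalone sketch of the version lookup performed in the step
    # above; the wheel filename here is a made-up placeholder and `pkginfo`
    # must be installed first (`pip install pkginfo`):
    #
    #   python -c "from pkginfo import Wheel; print(Wheel('wheelhouse/vtool_ibeis-2.3.0-py3-none-any.whl').version)"
    #
    # Pinning `pip install "vtool_ibeis[...]==$MOD_VERSION" -f wheelhouse` to
    # that version forces pip to resolve against the freshly built wheel.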
- name: Test wheel ${{ matrix.install-extras }} @@ -206,6 +228,7 @@ jobs: ls -altr # Get the path to the installed package and run the tests export MOD_DPATH=$(python -c "import vtool_ibeis, os; print(os.path.dirname(vtool_ibeis.__file__))") + export MOD_NAME=vtool_ibeis echo " --- MOD_DPATH = $MOD_DPATH @@ -213,7 +236,7 @@ jobs: running the pytest command inside the workspace --- " - python -m pytest -p pytester -p no:doctest --xdoctest --cov-config ../pyproject.toml --cov-report term --cov="vtool_ibeis" "$MOD_DPATH" ../tests + python -m pytest --verbose -p pytester -p no:doctest --xdoctest --cov-config ../pyproject.toml --cov-report term --durations=100 --cov="$MOD_NAME" "$MOD_DPATH" ../tests echo "pytest command finished, moving the coverage file to the repo root" ls -al # Move coverage file to a new name @@ -236,10 +259,11 @@ jobs: echo '### The cwd should now have a coverage.xml' ls -altr pwd - - uses: codecov/codecov-action@v3 + - uses: codecov/codecov-action@v4.0.1 name: Codecov Upload with: file: ./coverage.xml + token: ${{ secrets.CODECOV_TOKEN }} test_deploy: name: Uploading Test to PyPi runs-on: ubuntu-latest @@ -250,19 +274,24 @@ jobs: - test_purepy_wheels steps: - name: Checkout source - uses: actions/checkout@v3 - - uses: actions/download-artifact@v3 - name: Download wheels and sdist + uses: actions/checkout@v4.1.1 + - uses: actions/download-artifact@v2.1.1 + name: Download wheels with: name: wheels path: wheelhouse + - uses: actions/download-artifact@v2.1.1 + name: Download sdist + with: + name: sdist_wheels + path: wheelhouse - name: Show files to upload shell: bash run: ls -la wheelhouse - name: Sign and Publish env: TWINE_REPOSITORY_URL: https://test.pypi.org/legacy/ - TWINE_USERNAME: ${{ secrets.TEST_TWINE_USERNAME }} + TWINE_USERNAME: __token__ TWINE_PASSWORD: ${{ secrets.TEST_TWINE_PASSWORD }} CI_SECRET: ${{ secrets.CI_SECRET }} run: |- @@ -283,7 +312,33 @@ jobs: pip install urllib3 requests[security] twine GPG_KEYID=$(cat dev/public_gpg_key) echo "GPG_KEYID = '$GPG_KEYID'" - DO_GPG=True GPG_KEYID=$GPG_KEYID TWINE_REPOSITORY_URL=${TWINE_REPOSITORY_URL} TWINE_PASSWORD=$TWINE_PASSWORD TWINE_USERNAME=$TWINE_USERNAME GPG_EXECUTABLE=$GPG_EXECUTABLE DO_UPLOAD=True DO_TAG=False ./publish.sh + GPG_SIGN_CMD="$GPG_EXECUTABLE --batch --yes --detach-sign --armor --local-user $GPG_KEYID" + WHEEL_PATHS=(wheelhouse/*.whl wheelhouse/*.tar.gz) + WHEEL_PATHS_STR=$(printf '"%s" ' "${WHEEL_PATHS[@]}") + echo "$WHEEL_PATHS_STR" + for WHEEL_PATH in "${WHEEL_PATHS[@]}" + do + echo "------" + echo "WHEEL_PATH = $WHEEL_PATH" + $GPG_SIGN_CMD --output $WHEEL_PATH.asc $WHEEL_PATH + $GPG_EXECUTABLE --verify $WHEEL_PATH.asc $WHEEL_PATH || echo "hack, the first run of gpg very fails" + $GPG_EXECUTABLE --verify $WHEEL_PATH.asc $WHEEL_PATH + done + ls -la wheelhouse + pip install opentimestamps-client + ots stamp wheelhouse/*.whl wheelhouse/*.tar.gz wheelhouse/*.asc + ls -la wheelhouse + twine upload --username __token__ --password "$TWINE_PASSWORD" --repository-url "$TWINE_REPOSITORY_URL" wheelhouse/*.whl wheelhouse/*.tar.gz --skip-existing --verbose || { echo "failed to twine upload" ; exit 1; } + - uses: actions/upload-artifact@v3.1.3 + name: Upload deploy artifacts + with: + name: deploy_artifacts + path: |- + wheelhouse/*.whl + wheelhouse/*.zip + wheelhouse/*.tar.gz + wheelhouse/*.asc + wheelhouse/*.ots live_deploy: name: Uploading Live to PyPi runs-on: ubuntu-latest @@ -294,19 +349,24 @@ jobs: - test_purepy_wheels steps: - name: Checkout source - uses: actions/checkout@v3 - - uses: 
actions/download-artifact@v3 - name: Download wheels and sdist + uses: actions/checkout@v4.1.1 + - uses: actions/download-artifact@v2.1.1 + name: Download wheels with: name: wheels path: wheelhouse + - uses: actions/download-artifact@v2.1.1 + name: Download sdist + with: + name: sdist_wheels + path: wheelhouse - name: Show files to upload shell: bash run: ls -la wheelhouse - name: Sign and Publish env: TWINE_REPOSITORY_URL: https://upload.pypi.org/legacy/ - TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }} + TWINE_USERNAME: __token__ TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }} CI_SECRET: ${{ secrets.CI_SECRET }} run: |- @@ -327,7 +387,71 @@ jobs: pip install urllib3 requests[security] twine GPG_KEYID=$(cat dev/public_gpg_key) echo "GPG_KEYID = '$GPG_KEYID'" - DO_GPG=True GPG_KEYID=$GPG_KEYID TWINE_REPOSITORY_URL=${TWINE_REPOSITORY_URL} TWINE_PASSWORD=$TWINE_PASSWORD TWINE_USERNAME=$TWINE_USERNAME GPG_EXECUTABLE=$GPG_EXECUTABLE DO_UPLOAD=True DO_TAG=False ./publish.sh + GPG_SIGN_CMD="$GPG_EXECUTABLE --batch --yes --detach-sign --armor --local-user $GPG_KEYID" + WHEEL_PATHS=(wheelhouse/*.whl wheelhouse/*.tar.gz) + WHEEL_PATHS_STR=$(printf '"%s" ' "${WHEEL_PATHS[@]}") + echo "$WHEEL_PATHS_STR" + for WHEEL_PATH in "${WHEEL_PATHS[@]}" + do + echo "------" + echo "WHEEL_PATH = $WHEEL_PATH" + $GPG_SIGN_CMD --output $WHEEL_PATH.asc $WHEEL_PATH + $GPG_EXECUTABLE --verify $WHEEL_PATH.asc $WHEEL_PATH || echo "hack, the first run of gpg very fails" + $GPG_EXECUTABLE --verify $WHEEL_PATH.asc $WHEEL_PATH + done + ls -la wheelhouse + pip install opentimestamps-client + ots stamp wheelhouse/*.whl wheelhouse/*.tar.gz wheelhouse/*.asc + ls -la wheelhouse + twine upload --username __token__ --password "$TWINE_PASSWORD" --repository-url "$TWINE_REPOSITORY_URL" wheelhouse/*.whl wheelhouse/*.tar.gz --skip-existing --verbose || { echo "failed to twine upload" ; exit 1; } + - uses: actions/upload-artifact@v3.1.3 + name: Upload deploy artifacts + with: + name: deploy_artifacts + path: |- + wheelhouse/*.whl + wheelhouse/*.zip + wheelhouse/*.tar.gz + wheelhouse/*.asc + wheelhouse/*.ots + release: + name: Create Github Release + if: github.event_name == 'push' && (startsWith(github.event.ref, 'refs/tags') || startsWith(github.event.ref, 'refs/heads/release')) + runs-on: ubuntu-latest + permissions: + contents: write + needs: + - live_deploy + steps: + - name: Checkout source + uses: actions/checkout@v4.1.1 + - uses: actions/download-artifact@v2.1.1 + name: Download artifacts + with: + name: deploy_artifacts + path: wheelhouse + - name: Show files to release + shell: bash + run: ls -la wheelhouse + - run: 'echo "Automatic Release Notes. 
TODO: improve" > ${{ github.workspace }}-CHANGELOG.txt' + - uses: softprops/action-gh-release@v1 + name: Create Release + id: create_release + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + body_path: ${{ github.workspace }}-CHANGELOG.txt + tag_name: ${{ github.ref }} + release_name: Release ${{ github.ref }} + body: Automatic Release + draft: true + prerelease: false + files: |- + wheelhouse/*.whl + wheelhouse/*.asc + wheelhouse/*.ots + wheelhouse/*.zip + wheelhouse/*.tar.gz ### diff --git a/.readthedocs.yml b/.readthedocs.yml index a785ad3..75041f6 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -7,11 +7,14 @@ # Required version: 2 +build: + os: "ubuntu-22.04" + tools: + python: "3.11" sphinx: configuration: docs/source/conf.py formats: all python: - version: 3.7 install: - requirements: requirements/headless.txt - requirements: requirements/docs.txt diff --git a/dev/setup_secrets.sh b/dev/setup_secrets.sh index e2094dd..6321e5a 100644 --- a/dev/setup_secrets.sh +++ b/dev/setup_secrets.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash __doc__=' ============================ SETUP CI SECRET INSTRUCTIONS @@ -166,7 +166,9 @@ upload_github_secrets(){ load_secrets unset GITHUB_TOKEN #printf "%s" "$GITHUB_TOKEN" | gh auth login --hostname Github.com --with-token - gh auth login + if ! gh auth status ; then + gh auth login + fi source dev/secrets_configuration.sh gh secret set "TWINE_USERNAME" -b"${!VARNAME_TWINE_USERNAME}" gh secret set "TEST_TWINE_USERNAME" -b"${!VARNAME_TEST_TWINE_USERNAME}" diff --git a/docs/source/conf.py b/docs/source/conf.py index fc59e87..bad86b9 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,11 +1,15 @@ """ Notes: + Based on template code in: + ~/code/xcookie/xcookie/builders/docs.py + ~/code/xcookie/xcookie/rc/conf_ext.py + http://docs.readthedocs.io/en/latest/getting_started.html pip install sphinx sphinx-autobuild sphinx_rtd_theme sphinxcontrib-napoleon cd ~/code/vtool_ibeis - mkdir docs + mkdir -p docs cd docs sphinx-quickstart @@ -13,37 +17,79 @@ # need to edit the conf.py cd ~/code/vtool_ibeis/docs - sphinx-apidoc -f -o ~/code/vtool_ibeis/docs/source ~/code/vtool_ibeis/vtool_ibeis --separate + sphinx-apidoc --private --separate -f -o ~/code/vtool_ibeis/docs/source/auto ~/code/vtool_ibeis/vtool_ibeis + + # Note: the module should importable before running this + # (e.g. install it in developer mode or munge the PYTHONPATH) make html + git add source/auto/*.rst + Also: To turn on PR checks https://docs.readthedocs.io/en/stable/guides/autobuild-docs-for-pull-requests.html - https://readthedocs.org/dashboard/vtool_ibeis/advanced/ + https://readthedocs.org/dashboard/vtool-ibeis/advanced/ ensure your github account is connected to readthedocs https://readthedocs.org/accounts/social/connections/ ### For gitlab + To enable the read-the-docs go to https://readthedocs.org/dashboard/ and login + The user will need to enable the repo on their readthedocs account: https://readthedocs.org/dashboard/import/manual/? - To enable the read-the-docs go to https://readthedocs.org/dashboard/ and login + Enter the following information: + Set the Repository NAME: vtool_ibeis + Set the Repository URL: https://github.com/Erotemic/vtool_ibeis Make sure you have a .readthedocs.yml file - Click import project: (for github you can select, but gitlab you need to import manually) - Set the Repository NAME: $REPO_NAME - Set the Repository URL: $REPO_URL + For gitlab you also need to setup an integrations. 
Navigate to: + + https://readthedocs.org/dashboard/vtool-ibeis/integrations/create/ + + Then add gitlab incoming webhook and copy the URL (make sure + you copy the real url and not the text so https is included), + specifically: + + In the "Integration type:" dropdown menu, select + "Gitlab incoming webhook" + + Click "Add integration" + + Copy the text in the "Webhook URL" box to be used later. + + Copy the text in the "Secret" box to be used later. + + Then go to + + https://github.com/Erotemic/vtool_ibeis/hooks + + Click "Add new webhook". + + Copy the text previously saved from the "Webhook URL" box + in the readthedocs form into the "URL" box in the gitlab + form. + + Copy the text previously saved from the "Secret" box + in the readthedocs form into the "Secret token" box in the + gitlab form. + + For trigger permissions select the following checkboxes: + push events, + tag push events, + merge request events + + Click the "Add webhook" button. - For gitlab you also need to setup an integrations and add gitlab - incoming webhook Then go to $REPO_URL/hooks and add the URL + See Docs for more details https://docs.readthedocs.io/en/stable/integrations.html Will also need to activate the main branch: - https://readthedocs.org/projects/vtool_ibeis/versions/ + https://readthedocs.org/projects/vtool-ibeis/versions/ """ # # Configuration file for the Sphinx documentation builder. @@ -90,14 +136,19 @@ def visit_Assign(self, node): return visitor.version project = 'vtool_ibeis' -copyright = '2022, Jon Crall' -author = 'Jon Crall' +copyright = '2024, Jon Crall Jason Parham Hendrik Weideman Avi Weinstock Zackary Rutfield Chuck Stewart' +author = 'Jon Crall Jason Parham Hendrik Weideman Avi Weinstock Zackary Rutfield Chuck Stewart' modname = 'vtool_ibeis' -modpath = join(dirname(dirname(dirname(__file__))), modname, '__init__.py') +repo_dpath = dirname(dirname(dirname(__file__))) +mod_dpath = join(repo_dpath, 'vtool_ibeis') +src_dpath = dirname(mod_dpath) +modpath = join(mod_dpath, '__init__.py') release = parse_version(modpath) version = '.'.join(release.split('.')[0:2]) +# Hack to ensure the module is importable +# sys.path.insert(0, os.path.abspath(src_dpath)) # -- General configuration --------------------------------------------------- @@ -109,13 +160,18 @@ def visit_Assign(self, node): # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ + # 'autoapi.extension', 'sphinx.ext.autodoc', - 'sphinx.ext.viewcode', - 'sphinx.ext.napoleon', + 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx', + 'sphinx.ext.napoleon', 'sphinx.ext.todo', - 'sphinx.ext.autosummary', - # 'myst_parser', # TODO + 'sphinx.ext.viewcode', + 'myst_parser', # For markdown docs + 'sphinx.ext.imgconverter', # For building latexpdf + 'sphinx.ext.githubpages', + # 'sphinxcontrib.redirects', + 'sphinx_reredirects', ] todo_include_todos = True @@ -123,11 +179,37 @@ def visit_Assign(self, node): napoleon_use_param = False napoleon_use_ivar = True +#autoapi_type = 'python' +#autoapi_dirs = [mod_dpath] + autodoc_inherit_docstrings = False +# Hack for geowatch, todo configure +autosummary_mock_imports = [ + 'geowatch.utils.lightning_ext._jsonargparse_ext_ge_4_24_and_lt_4_xx', + 'geowatch.utils.lightning_ext._jsonargparse_ext_ge_4_22_and_lt_4_24', + 'geowatch.utils.lightning_ext._jsonargparse_ext_ge_4_21_and_lt_4_22', + 'geowatch.tasks.fusion.datamodules.temporal_sampling.affinity_sampling', + 'geowatch.tasks.depth_pcd.model', + 'geowatch.tasks.cold.export_change_map', +] + autodoc_member_order = 'bysource' +autoclass_content = 'both' # autodoc_mock_imports = ['torch', 'torchvision', 'visdom'] +# autoapi_modules = { +# modname: { +# 'override': False, +# 'output': 'auto' +# } +# } +# autoapi_dirs = [f'../../src/{modname}'] +# autoapi_keep_files = True + +# References: +# https://stackoverflow.com/questions/21538983/specifying-targets-for-intersphinx-links-to-numpy-scipy-and-matplotlib + intersphinx_mapping = { # 'pytorch': ('http://pytorch.org/docs/master/', None), 'python': ('https://docs.python.org/3', None), @@ -144,7 +226,24 @@ def visit_Assign(self, node): 'xdoctest': ('https://xdoctest.readthedocs.io/en/latest/', None), 'networkx': ('https://networkx.org/documentation/stable/', None), 'scriptconfig': ('https://scriptconfig.readthedocs.io/en/latest/', None), - + 'rich': ('https://rich.readthedocs.io/en/latest/', None), + + 'numpy': ('https://numpy.org/doc/stable/', None), + 'sympy': ('https://docs.sympy.org/latest/', None), + 'scikit-learn': ('https://scikit-learn.org/stable/', None), + 'pandas': ('https://pandas.pydata.org/docs/', None), + 'matplotlib': ('https://matplotlib.org/stable/', None), + + 'pytest': ('https://docs.pytest.org/en/latest/', None), + 'platformdirs': ('https://platformdirs.readthedocs.io/en/latest/', None), + + 'timerit': ('https://timerit.readthedocs.io/en/latest/', None), + 'progiter': ('https://progiter.readthedocs.io/en/latest/', None), + 'dateutil': ('https://dateutil.readthedocs.io/en/latest/', None), + # 'pytest._pytest.doctest': ('https://docs.pytest.org/en/latest/_modules/_pytest/doctest.html', None), + # 'colorama': ('https://pypi.org/project/colorama/', None), + # 'cv2' : ('http://docs.opencv.org/2.4/', None), + # 'h5py' : ('http://docs.h5py.org/en/latest/', None) } __dev_note__ = """ python -m sphinx.ext.intersphinx https://docs.python.org/3/objects.inv @@ -154,6 +253,11 @@ def visit_Assign(self, node): python -m sphinx.ext.intersphinx https://kwimage.readthedocs.io/en/latest/objects.inv python -m sphinx.ext.intersphinx https://ubelt.readthedocs.io/en/latest/objects.inv python -m sphinx.ext.intersphinx https://networkx.org/documentation/stable/objects.inv + +sphobjinv suggest -t 90 -u https://readthedocs.org/projects/pytest/reference/objects.inv +"signal.convolve2d" + +python -m sphinx.ext.intersphinx https://pygments-doc.readthedocs.io/en/latest/objects.inv """ @@ -199,6 +303,7 @@ def visit_Assign(self, node): 
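# Aside: a minimal illustration of what the intersphinx mapping above buys;
# the docstring is hypothetical, not taken from this package. Because 'ubelt'
# is listed in `intersphinx_mapping`, a reference such as
#
#     def demo():
#         """
#         Memoize results with :class:`ubelt.Cacher`.
#         """
#
# renders as a hyperlink into ubelt's hosted documentation, resolved through
# the `objects.inv` inventories that the commands in `__dev_note__` inspect.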
 html_theme_options = {
     'collapse_navigation': False,
     'display_version': True,
+    'navigation_depth': -1,
     # 'logo_only': True,
 }
 # html_logo = '.static/vtool_ibeis.svg'
@@ -223,11 +328,26 @@
 # -- Options for HTMLHelp output ---------------------------------------------

 # Output file base name for HTML help builder.
-htmlhelp_basename = 'vtool_ibeisdoc'
+htmlhelp_basename = project + 'doc'


 # -- Options for LaTeX output ------------------------------------------------

+# References:
+# https://tex.stackexchange.com/questions/546246/centos-8-the-font-freeserif-cannot-be-found
+
+"""
+# https://www.sphinx-doc.org/en/master/usage/builders/index.html#sphinx.builders.latex.LaTeXBuilder
+# https://tex.stackexchange.com/a/570691/83399
+sudo apt install fonts-freefont-otf texlive-luatex texlive-latex-extra texlive-fonts-recommended texlive-latex-recommended tex-gyre latexmk
+make latexpdf LATEXMKOPTS="-shell-escape --synctex=-1 -src-specials -interaction=nonstopmode"
+make latexpdf LATEXMKOPTS="-lualatex -interaction=nonstopmode"
+make LATEXMKOPTS="-lualatex -interaction=nonstopmode"
+
+"""
+# latex_engine = 'lualatex'
+# latex_engine = 'xelatex'
+
 latex_elements = {
     # The paper size ('letterpaper' or 'a4paper').
     #
@@ -251,7 +371,7 @@
 # author, documentclass [howto, manual, or own class]).
 latex_documents = [
     (master_doc, 'vtool_ibeis.tex', 'vtool_ibeis Documentation',
-     'Jon Crall', 'manual'),
+     'Jon Crall Jason Parham Hendrik Weideman Avi Weinstock Zackary Rutfield Chuck Stewart', 'manual'),
 ]


@@ -278,116 +398,636 @@
 # -- Extension configuration -------------------------------------------------
-
-
 from sphinx.domains.python import PythonDomain # NOQA
 # from sphinx.application import Sphinx # NOQA
 from typing import Any, List # NOQA

+# HACK TO PREVENT EXCESSIVE TIME.
+# TODO: FIXME FOR REAL
+MAX_TIME_MINUTES = None
+if MAX_TIME_MINUTES:
+    import ubelt # NOQA
+    TIMER = ubelt.Timer()
+    TIMER.tic()
+
+
 class PatchedPythonDomain(PythonDomain):
     """
     References:
         https://github.com/sphinx-doc/sphinx/issues/3866
     """
     def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
-        # TODO: can use this to resolve references nicely
-        # if target.startswith('ub.'):
-        #     target = 'ubelt.' + target[3]
+        """
+        Helps resolve cross-references
+        """
+        if target.startswith('ub.'):
+            target = 'ubelt.' + target[3:]
+        if target.startswith('xdoc.'):
+            target = 'xdoctest.' + target[5:]
         return_value = super(PatchedPythonDomain, self).resolve_xref(
             env, fromdocname, builder, typ, target, node, contnode)
         return return_value


-def process(app, what_: str, name: str, obj: Any, options: Any, lines:
-            List[str]) -> None:
+class GoogleStyleDocstringProcessor:
+    """
+    A small extension that runs after napoleon and reformats erotemic-flavored
+    google-style docstrings for sphinx.
+    """
+
+    def __init__(self, autobuild=1):
+        self.debug = 0
+        self.registry = {}
+        if autobuild:
+            self._register_builtins()
+
+    def register_section(self, tag, alias=None):
+        """
+        Decorator that adds a custom processing function for a non-standard
+        google style tag. The decorated function should accept a list of
+        docstring lines, where the first one will be the google-style tag that
+        likely needs to be replaced, and then return the appropriate sphinx
+        format (TODO what is the name? Is it just RST?).
+        """
+        alias = [] if alias is None else alias
+        alias = [alias] if not isinstance(alias, (list, tuple, set)) else alias
+        alias.append(tag)
+        alias = tuple(alias)
+        # TODO: better tag patterns
+        def _wrap(func):
+            self.registry[tag] = {
+                'tag': tag,
+                'alias': alias,
+                'func': func,
+            }
+            return func
+        return _wrap

+    def _register_builtins(self):
+        """
+        Adds definitions I like of CommandLine, TextArt, and Ignore
+        """
+
+        @self.register_section(tag='CommandLine')
+        def commandline(lines):
+            new_lines = []
+            new_lines.append('.. rubric:: CommandLine')
+            new_lines.append('')
+            new_lines.append('.. code-block:: bash')
+            new_lines.append('')
+            new_lines.extend(lines[1:])
+            return new_lines
+
+        @self.register_section(tag='SpecialExample', alias=['Benchmark', 'Sympy', 'Doctest'])
+        def benchmark(lines):
+            import textwrap
+            new_lines = []
+            tag = lines[0].replace(':', '').strip()
+            # new_lines.append(lines[0])  # TODO: it would be nice to change the tagline.
+            # new_lines.append('')
+            new_lines.append('.. rubric:: {}'.format(tag))
+            new_lines.append('')
+            new_text = textwrap.dedent('\n'.join(lines[1:]))
+            redone = new_text.split('\n')
+            new_lines.extend(redone)
+            # import ubelt as ub
+            # print('new_lines = {}'.format(ub.urepr(new_lines, nl=1)))
+            # new_lines.append('')
+            return new_lines
+
+        @self.register_section(tag='TextArt', alias=['Ascii'])
+        def text_art(lines):
+            new_lines = []
+            new_lines.append('.. rubric:: TextArt')
+            new_lines.append('')
+            new_lines.append('.. code-block:: bash')
+            new_lines.append('')
+            new_lines.extend(lines[1:])
+            return new_lines
+
+        # @self.register_section(tag='TODO', alias=['.. todo::'])
+        # def todo_section(lines):
+        #     """
+        #     Fixup todo sections
+        #     """
+        #     import xdev
+        #     xdev.embed()
+        #     import ubelt as ub
+        #     print('lines = {}'.format(ub.urepr(lines, nl=1)))
+        #     return new_lines
+
+        @self.register_section(tag='Ignore')
+        def ignore(lines):
+            return []
+
+    def process(self, lines):
+        """
+        Example:
+            >>> import ubelt as ub
+            >>> self = GoogleStyleDocstringProcessor()
+            >>> lines = ['Hello world',
+            >>>          '',
+            >>>          'CommandLine:',
+            >>>          ' hi',
+            >>>          '',
+            >>>          'CommandLine:',
+            >>>          '',
+            >>>          ' bye',
+            >>>          '',
+            >>>          'TextArt:',
+            >>>          '',
+            >>>          ' 1',
+            >>>          ' 2',
+            >>>          '',
+            >>>          ' 345',
+            >>>          '',
+            >>>          'Foobar:',
+            >>>          '',
+            >>>          'TextArt:']
+            >>> new_lines = self.process(lines[:])
+            >>> print(chr(10).join(new_lines))
+        """
+        orig_lines = lines[:]
+        new_lines = []
+        curr_mode = '__doc__'
+        accum = []
+
+        def accept():
+            """ called when we finish reading a section """
+            if curr_mode == '__doc__':
+                # Keep the lines as-is
+                new_lines.extend(accum)
+            else:
+                # Process this section with the given function
+                regitem = self.registry[curr_mode]
+                func = regitem['func']
+                fixed = func(accum)
+                new_lines.extend(fixed)
+            # Reset the accumulator for the next section
+            accum[:] = []
+
+        for line in orig_lines:
+
+            found = None
+            for regitem in self.registry.values():
+                if line.startswith(regitem['alias']):
+                    found = regitem['tag']
+                    break
+            if not found and line and not line.startswith(' '):
+                # if the line startswith anything but a space, we are no longer
+                # in the previous nested scope. NOTE: This assumption may not
+                # be general, but it works for my code.
+                found = '__doc__'
+
+            if found:
+                # New section is found, accept the previous one and start
+                # accumulating the new one.
+                accept()
+                curr_mode = found
+
+            accum.append(line)
+
+        # Finalize the last section
+        accept()
+
+        lines[:] = new_lines
+        # make sure there is a blank line at the end
+        if lines and lines[-1]:
+            lines.append('')
+
+        return lines
+
+    def process_docstring_callback(self, app, what_: str, name: str, obj: Any,
+                                   options: Any, lines: List[str]) -> None:
+        """
+        Callback to be registered to autodoc-process-docstring
+
+        Custom process to transform docstring lines. Removes "Ignore" blocks.
+
+        Args:
+            app (sphinx.application.Sphinx): the Sphinx application object
+
+            what (str):
+                the type of the object which the docstring belongs to (one of
+                "module", "class", "exception", "function", "method", "attribute")
+
+            name (str): the fully qualified name of the object
+
+            obj: the object itself
+
+            options: the options given to the directive: an object with
+                attributes inherited_members, undoc_members, show_inheritance
+                and noindex that are true if the flag option of same name was
+                given to the auto directive
+
+            lines (List[str]): the lines of the docstring, see above
+
+        References:
+            https://www.sphinx-doc.org/en/1.5.1/_modules/sphinx/ext/autodoc.html
+            https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
+        """
+        if self.debug:
+            print(f'ProcessDocstring: name={name}, what_={what_}, num_lines={len(lines)}')
+
+        # print('BEFORE:')
+        # import ubelt as ub
+        # print('lines = {}'.format(ub.urepr(lines, nl=1)))
+
+        self.process(lines)
+
+        # docstr = '\n'.join(lines)
+        # if 'Convert the Mask' in docstr:
+        #     import xdev
+        #     xdev.embed()
+
+        # if 'keys in this dictionary ' in docstr:
+        #     import xdev
+        #     xdev.embed()
+
+        render_doc_images = 0
+
+        if MAX_TIME_MINUTES and TIMER.toc() > (60 * MAX_TIME_MINUTES):
+            render_doc_images = False  # FIXME too slow on RTD
+
+        if render_doc_images:
+            # DEVELOPING
+            if any('REQUIRES(--show)' in line for line in lines):
+                # import xdev
+                # xdev.embed()
+                create_doctest_figure(app, obj, name, lines)
+
+        FIX_EXAMPLE_FORMATTING = 1
+        if FIX_EXAMPLE_FORMATTING:
+            for idx, line in enumerate(lines):
+                if line == "Example:":
+                    lines[idx] = "**Example:**"
+                    lines.insert(idx + 1, "")
+
+        REFORMAT_SECTIONS = 0
+        if REFORMAT_SECTIONS:
+            REFORMAT_RETURNS = 0
+            REFORMAT_PARAMS = 0
+
+            docstr = SphinxDocstring(lines)
+
+            if REFORMAT_PARAMS:
+                for found in docstr.find_tagged_lines('Parameters'):
+                    print(found['text'])
+                    edit_slice = found['edit_slice']
+
+                    # TODO: figure out how to do this.
+ + # # file = 'foo.rst' + # import rstparse + # rst = rstparse.Parser() + # import io + # rst.read(io.StringIO(found['text'])) + # rst.parse() + # for line in rst.lines: + # print(line) + + # # found['text'] + # import docutils + + # settings = docutils.frontend.OptionParser( + # components=(docutils.parsers.rst.Parser,) + # ).get_default_values() + # document = docutils.utils.new_document('', settings) + # from docutils.parsers import rst + # rst.Parser().parse(found['text'], document) + + if REFORMAT_RETURNS: + for found in docstr.find_tagged_lines('returns'): + # FIXME: account for new slice with -2 offset + edit_slice = found['edit_slice'] + text = found['text'] + new_lines = [] + for para in text.split('\n\n'): + indent = para[:len(para) - len(para.lstrip())] + new_paragraph = indent + paragraph(para) + new_lines.append(new_paragraph) + new_lines.append('') + new_lines = new_lines[:-1] + lines[edit_slice] = new_lines + + # print('AFTER:') + # print('lines = {}'.format(ub.urepr(lines, nl=1))) + + # if name == 'kwimage.Affine.translate': + # import sys + # sys.exit(1) + + +class SphinxDocstring: """ - Custom process to transform docstring lines Remove "Ignore" blocks + Helper to parse and modify sphinx docstrings + """ + def __init__(docstr, lines): + docstr.lines = lines + + # FORMAT THE RETURNS SECTION A BIT NICER + import re + tag_pat = re.compile(r'^:(\w*):') + directive_pat = re.compile(r'^.. (\w*)::\s*(\w*)') + + # Split by sphinx types, mark the line offset where they start / stop + sphinx_parts = [] + for idx, line in enumerate(lines): + tag_match = tag_pat.search(line) + directive_match = directive_pat.search(line) + if tag_match: + tag = tag_match.groups()[0] + sphinx_parts.append({ + 'tag': tag, 'start_offset': idx, + 'type': 'tag', + }) + elif directive_match: + tag = directive_match.groups()[0] + sphinx_parts.append({ + 'tag': tag, 'start_offset': idx, + 'type': 'directive', + }) + + prev_offset = len(lines) + for part in sphinx_parts[::-1]: + part['end_offset'] = prev_offset + prev_offset = part['start_offset'] + + docstr.sphinx_parts = sphinx_parts + + if 0: + for line in lines: + print(line) + + def find_tagged_lines(docstr, tag): + for part in docstr.sphinx_parts[::-1]: + if part['tag'] == tag: + edit_slice = slice(part['start_offset'], part['end_offset']) + return_section = docstr.lines[edit_slice] + text = '\n'.join(return_section) + found = { + 'edit_slice': edit_slice, + 'text': text, + } + yield found + + +def paragraph(text): + r""" + Wraps multi-line strings and restructures the text to remove all newlines, + heading, trailing, and double spaces. + + Useful for writing log messages Args: - app (sphinx.application.Sphinx): the Sphinx application object + text (str): typically a multiline string - what (str): - the type of the object which the docstring belongs to (one of - "module", "class", "exception", "function", "method", "attribute") + Returns: + str: the reduced text block + """ + import re + out = re.sub(r'\s\s*', ' ', text).strip() + return out - name (str): the fully qualified name of the object - obj: the object itself +def create_doctest_figure(app, obj, name, lines): + """ + The idea is that each doctest that produces a figure should generate that + and then that figure should be part of the docs. + """ + import xdoctest + import sys + import types + if isinstance(obj, types.ModuleType): + module = obj + else: + module = sys.modules[obj.__module__] + # TODO: read settings from pyproject.toml? 
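# Aside: expected behavior of the `paragraph` helper defined above. Since
# `re.sub(r'\s\s*', ' ', text)` collapses every whitespace run (newlines
# included) into a single space before stripping, for example:
#
#     paragraph('Wraps  multi-line\n    strings\n\ninto one line.')
#     # -> 'Wraps multi-line strings into one line.'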
+ if '--show' not in sys.argv: + sys.argv.append('--show') + if '--nointeract' not in sys.argv: + sys.argv.append('--nointeract') + modpath = module.__file__ + + # print(doctest.format_src()) + import pathlib + # HACK: write to the srcdir + doc_outdir = pathlib.Path(app.outdir) + doc_srcdir = pathlib.Path(app.srcdir) + doc_static_outdir = doc_outdir / '_static' + doc_static_srcdir = doc_srcdir / '_static' + src_fig_dpath = (doc_static_srcdir / 'images') + src_fig_dpath.mkdir(exist_ok=True, parents=True) + out_fig_dpath = (doc_static_outdir / 'images') + out_fig_dpath.mkdir(exist_ok=True, parents=True) + + # fig_dpath = (doc_outdir / 'autofigs' / name).mkdir(exist_ok=True) + + fig_num = 1 + + import kwplot + kwplot.autompl(force='agg') + plt = kwplot.autoplt() + + docstr = '\n'.join(lines) + + # TODO: The freeform parser does not work correctly here. + # We need to parse out the sphinx (epdoc)? individual examples + # so we can get different figures. But we can hack it for now. + + import re + split_parts = re.split('({}\\s*\n)'.format(re.escape('.. rubric:: Example')), docstr) + # split_parts = docstr.split('.. rubric:: Example') + + # import xdev + # xdev.embed() + + def doctest_line_offsets(doctest): + # Where the doctests starts and ends relative to the file + start_line_offset = doctest.lineno - 1 + last_part = doctest._parts[-1] + last_line_offset = start_line_offset + last_part.line_offset + last_part.n_lines - 1 + offsets = { + 'start': start_line_offset, + 'end': last_line_offset, + 'stop': last_line_offset + 1, + } + return offsets + + # from xdoctest import utils + # part_lines = utils.add_line_numbers(docstr.split('\n'), n_digits=3, start=0) + # print('\n'.join(part_lines)) + + to_insert_fpaths = [] + curr_line_offset = 0 + for part in split_parts: + num_lines = part.count('\n') + + doctests = list(xdoctest.core.parse_docstr_examples( + part, modpath=modpath, callname=name, + # style='google' + )) + # print(doctests) + + # doctests = list(xdoctest.core.parse_docstr_examples( + # docstr, modpath=modpath, callname=name)) + + for doctest in doctests: + if '--show' in part: + ... + # print('-- SHOW TEST---')/) + # kwplot.close_figures() + try: + import pytest # NOQA + except ImportError: + pass + try: + from xdoctest.exceptions import Skipped + except ImportError: # nocover + # Define dummy skipped exception if pytest is not available + class Skipped(Exception): + pass + try: + doctest.mode = 'native' + doctest.run(verbose=0, on_error='raise') + ... 
+ except Skipped: + print(f'Skip doctest={doctest}') + except Exception as ex: + print(f'ex={ex}') + print(f'Error in doctest={doctest}') + + offsets = doctest_line_offsets(doctest) + doctest_line_end = curr_line_offset + offsets['stop'] + insert_line_index = doctest_line_end + + figures = kwplot.all_figures() + for fig in figures: + fig_num += 1 + # path_name = path_sanatize(name) + path_name = (name).replace('.', '_') + fig_fpath = src_fig_dpath / f'fig_{path_name}_{fig_num:03d}.jpeg' + fig.savefig(fig_fpath) + print(f'Wrote figure: {fig_fpath}') + to_insert_fpaths.append({ + 'insert_line_index': insert_line_index, + 'fpath': fig_fpath, + }) + + for fig in figures: + plt.close(fig) + # kwplot.close_figures(figures) + + curr_line_offset += (num_lines) + + # if len(doctests) > 1: + # doctests + # import xdev + # xdev.embed() - options: the options given to the directive: an object with - attributes inherited_members, undoc_members, show_inheritance - and noindex that are true if the flag option of same name was - given to the auto directive + INSERT_AT = 'end' + INSERT_AT = 'inline' - lines (List[str]): the lines of the docstring, see above + end_index = len(lines) + # Reverse order for inserts + import shutil + for info in to_insert_fpaths[::-1]: + src_abs_fpath = info['fpath'] - References: - https://www.sphinx-doc.org/en/1.5.1/_modules/sphinx/ext/autodoc.html - https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html - """ - # if what and what_ not in what: - # return - orig_lines = lines[:] + rel_to_static_fpath = src_abs_fpath.relative_to(doc_static_srcdir) + # dst_abs_fpath = doc_static_outdir / rel_to_static_fpath + # dst_abs_fpath.parent.mkdir(parents=True, exist_ok=True) - # text = '\n'.join(lines) - # if 'Example' in text and 'CommandLine' in text: - # import xdev - # xdev.embed() + rel_to_root_fpath = src_abs_fpath.relative_to(doc_srcdir) - ignore_tags = tuple(['Ignore']) + dst_abs_fpath1 = doc_outdir / rel_to_root_fpath + dst_abs_fpath1.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(src_abs_fpath, dst_abs_fpath1) - mode = None - # buffer = None - new_lines = [] - for i, line in enumerate(orig_lines): - - # See if the line triggers a mode change - if line.startswith(ignore_tags): - mode = 'ignore' - elif line.startswith('CommandLine'): - mode = 'cmdline' - elif line and not line.startswith(' '): - # if the line startswith anything but a space, we are no - # longer in the previous nested scope - mode = None - - if mode is None: - new_lines.append(line) - elif mode == 'ignore': - # print('IGNORE line = {!r}'.format(line)) - pass - elif mode == 'cmdline': - if line.startswith('CommandLine'): - new_lines.append('.. rubric:: CommandLine') - new_lines.append('') - new_lines.append('.. code-block:: bash') - new_lines.append('') - # new_lines.append(' # CommandLine') - else: - # new_lines.append(line.strip()) - new_lines.append(line) + dst_abs_fpath2 = doc_outdir / rel_to_static_fpath + dst_abs_fpath2.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(src_abs_fpath, dst_abs_fpath2) + + dst_abs_fpath3 = doc_srcdir / rel_to_static_fpath + dst_abs_fpath3.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(src_abs_fpath, dst_abs_fpath3) + + if INSERT_AT == 'inline': + # Try to insert after test + insert_index = info['insert_line_index'] + elif INSERT_AT == 'end': + insert_index = end_index else: - raise KeyError(mode) + raise KeyError(INSERT_AT) + lines.insert(insert_index, '.. image:: {}'.format('..' / rel_to_root_fpath)) + # lines.insert(insert_index, '.. 
image:: {}'.format(rel_to_root_fpath)) + # lines.insert(insert_index, '.. image:: {}'.format(rel_to_static_fpath)) + lines.insert(insert_index, '') + - lines[:] = new_lines - # make sure there is a blank line at the end - if lines and lines[-1]: - lines.append('') +def postprocess_hyperlinks(app, doctree, docname): + """ + Extension to fixup hyperlinks. + This should be connected to the Sphinx application's + "autodoc-process-docstring" event. + """ + # Your hyperlink postprocessing logic here + from docutils import nodes + import pathlib + for node in doctree.traverse(nodes.reference): + if 'refuri' in node.attributes: + refuri = node.attributes['refuri'] + if '.rst' in refuri: + if 'source' in node.document: + fpath = pathlib.Path(node.document['source']) + parent_dpath = fpath.parent + if (parent_dpath / refuri).exists(): + node.attributes['refuri'] = refuri.replace('.rst', '.html') + else: + raise AssertionError + + +def fix_rst_todo_section(lines): + new_lines = [] + for line in lines: + ... + ... def setup(app): + import sphinx + app : sphinx.application.Sphinx = app app.add_domain(PatchedPythonDomain, override=True) - if 1: - # New Way - # what = None - app.connect('autodoc-process-docstring', process) - else: - # OLD WAY - # https://stackoverflow.com/questions/26534184/can-sphinx-ignore-certain-tags-in-python-docstrings - # Register a sphinx.ext.autodoc.between listener to ignore everything - # between lines that contain the word IGNORE - # from sphinx.ext.autodoc import between - # app.connect('autodoc-process-docstring', between('^ *Ignore:$', exclude=True)) - pass + + app.connect("doctree-resolved", postprocess_hyperlinks) + + docstring_processor = GoogleStyleDocstringProcessor() + # https://stackoverflow.com/questions/26534184/can-sphinx-ignore-certain-tags-in-python-docstrings + app.connect('autodoc-process-docstring', docstring_processor.process_docstring_callback) + + def copy(src, dst): + import shutil + print(f'Copy {src} -> {dst}') + assert src.exists() + if not dst.parent.exists(): + dst.parent.mkdir() + shutil.copy(src, dst) + + ### Hack for kwcoco: TODO: figure out a way for the user to configure this. + HACK_FOR_KWCOCO = 0 + if HACK_FOR_KWCOCO: + import pathlib + doc_outdir = pathlib.Path(app.outdir) / 'auto' + doc_srcdir = pathlib.Path(app.srcdir) / 'auto' + + mod_dpath = doc_srcdir / '../../../kwcoco' + + src_fpath = (mod_dpath / 'coco_schema.json') + copy(src_fpath, doc_outdir / src_fpath.name) + copy(src_fpath, doc_srcdir / src_fpath.name) + + src_fpath = (mod_dpath / 'coco_schema_informal.rst') + copy(src_fpath, doc_outdir / src_fpath.name) + copy(src_fpath, doc_srcdir / src_fpath.name) return app diff --git a/publish.sh b/publish.sh index 09b22a8..237ee0d 100755 --- a/publish.sh +++ b/publish.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash __doc__=' Script to publish a new version of this library on PyPI. @@ -24,6 +24,10 @@ Args: If True, sign the packages with a GPG key specified by `GPG_KEYID`. defaults to auto. + DO_OTS (bool) : + If True, make an opentimestamp for the package and signature (if + available) + DO_UPLOAD (bool) : If True, upload the packages to the pypi server specified by `TWINE_REPOSITORY_URL`. 
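# Aside: a hypothetical invocation exercising the flags documented above
# (values are examples only; DO_OTS additionally requires the `ots` client,
# installable via `pip install opentimestamps-client`):
#
#   DO_BUILD=True DO_GPG=True DO_OTS=True DO_UPLOAD=False DO_TAG=False ./publish.sh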
@@ -111,9 +115,9 @@ check_variable(){ normalize_boolean(){ ARG=$1 ARG=$(echo "$ARG" | awk '{print tolower($0)}') - if [ "$ARG" = "true" ] || [ "$ARG" = "1" ] || [ "$ARG" = "yes" ] || [ "$ARG" = "on" ]; then + if [ "$ARG" = "true" ] || [ "$ARG" = "1" ] || [ "$ARG" = "yes" ] || [ "$ARG" = "y" ] || [ "$ARG" = "on" ]; then echo "True" - elif [ "$ARG" = "false" ] || [ "$ARG" = "0" ] || [ "$ARG" = "no" ] || [ "$ARG" = "off" ]; then + elif [ "$ARG" = "false" ] || [ "$ARG" = "0" ] || [ "$ARG" = "no" ] || [ "$ARG" = "n" ] || [ "$ARG" = "off" ]; then echo "False" else echo "$ARG" @@ -138,11 +142,21 @@ DO_UPLOAD=${DO_UPLOAD:=$ARG_1} DO_TAG=${DO_TAG:=$ARG_1} DO_GPG=${DO_GPG:="auto"} -# Verify that we want to build if [ "$DO_GPG" == "auto" ]; then DO_GPG="True" fi +DO_OTS=${DO_OTS:="auto"} +if [ "$DO_OTS" == "auto" ]; then + # Do opentimestamp if it is available + # python -m pip install opentimestamps-client + if type ots ; then + DO_OTS="True" + else + DO_OTS="False" + fi +fi + DO_BUILD=${DO_BUILD:="auto"} # Verify that we want to build if [ "$DO_BUILD" == "auto" ]; then @@ -150,6 +164,7 @@ if [ "$DO_BUILD" == "auto" ]; then fi DO_GPG=$(normalize_boolean "$DO_GPG") +DO_OTS=$(normalize_boolean "$DO_OTS") DO_BUILD=$(normalize_boolean "$DO_BUILD") DO_UPLOAD=$(normalize_boolean "$DO_UPLOAD") DO_TAG=$(normalize_boolean "$DO_TAG") @@ -237,6 +252,7 @@ GPG_KEYID = '$GPG_KEYID' DO_UPLOAD=${DO_UPLOAD} DO_TAG=${DO_TAG} DO_GPG=${DO_GPG} +DO_OTS=${DO_OTS} DO_BUILD=${DO_BUILD} MODE_LIST_STR=${MODE_LIST_STR} " @@ -375,7 +391,7 @@ ls_array(){ } -WHEEL_PATHS=() +WHEEL_FPATHS=() for _MODE in "${MODE_LIST[@]}" do if [[ "$_MODE" == "sdist" ]]; then @@ -393,35 +409,35 @@ do for new_item in "${_NEW_WHEEL_PATHS[@]}" do if [[ "$new_item" != "" ]]; then - WHEEL_PATHS+=("$new_item") + WHEEL_FPATHS+=("$new_item") fi done done # Dedup the paths -readarray -t WHEEL_PATHS < <(printf '%s\n' "${WHEEL_PATHS[@]}" | sort -u) +readarray -t WHEEL_FPATHS < <(printf '%s\n' "${WHEEL_FPATHS[@]}" | sort -u) -WHEEL_PATHS_STR=$(printf '"%s" ' "${WHEEL_PATHS[@]}") +WHEEL_PATHS_STR=$(printf '"%s" ' "${WHEEL_FPATHS[@]}") echo "WHEEL_PATHS_STR = $WHEEL_PATHS_STR" echo " MODE=$MODE VERSION='$VERSION' -WHEEL_PATHS='$WHEEL_PATHS_STR' +WHEEL_FPATHS='$WHEEL_PATHS_STR' " - +WHEEL_SIGNATURE_FPATHS=() if [ "$DO_GPG" == "True" ]; then echo " === === " - for WHEEL_PATH in "${WHEEL_PATHS[@]}" + for WHEEL_FPATH in "${WHEEL_FPATHS[@]}" do - echo "WHEEL_PATH = $WHEEL_PATH" - check_variable WHEEL_PATH + echo "WHEEL_FPATH = $WHEEL_FPATH" + check_variable WHEEL_FPATH # https://stackoverflow.com/questions/45188811/how-to-gpg-sign-a-file-that-is-built-by-travis-ci # secure gpg --export-secret-keys > all.gpg @@ -432,13 +448,15 @@ if [ "$DO_GPG" == "True" ]; then echo "Signing wheels" GPG_SIGN_CMD="$GPG_EXECUTABLE --batch --yes --detach-sign --armor --local-user $GPG_KEYID" echo "GPG_SIGN_CMD = $GPG_SIGN_CMD" - $GPG_SIGN_CMD --output "$WHEEL_PATH".asc "$WHEEL_PATH" + $GPG_SIGN_CMD --output "$WHEEL_FPATH".asc "$WHEEL_FPATH" echo "Checking wheels" - twine check "$WHEEL_PATH".asc "$WHEEL_PATH" || { echo 'could not check wheels' ; exit 1; } + twine check "$WHEEL_FPATH".asc "$WHEEL_FPATH" || { echo 'could not check wheels' ; exit 1; } echo "Verifying wheels" - $GPG_EXECUTABLE --verify "$WHEEL_PATH".asc "$WHEEL_PATH" || { echo 'could not verify wheels' ; exit 1; } + $GPG_EXECUTABLE --verify "$WHEEL_FPATH".asc "$WHEEL_FPATH" || { echo 'could not verify wheels' ; exit 1; } + + WHEEL_SIGNATURE_FPATHS+=("$WHEEL_FPATH".asc) done echo " === === @@ -448,6 +466,27 @@ else fi + +if 
[ "$DO_OTS" == "True" ]; then + + echo " + === === + " + if [ "$DO_GPG" == "True" ]; then + # Stamp the wheels and the signatures + ots stamp "${WHEEL_FPATHS[@]}" "${WHEEL_SIGNATURE_FPATHS[@]}" + else + # Stamp only the wheels + ots stamp "${WHEEL_FPATHS[@]}" + fi + echo " + === === + " +else + echo "DO_OTS=False, Skipping OTS sign" +fi + + if [[ "$DO_TAG" == "True" ]]; then TAG_NAME="v${VERSION}" # if we messed up we can delete the tag @@ -467,17 +506,11 @@ if [[ "$DO_UPLOAD" == "True" ]]; then check_variable TWINE_USERNAME check_variable TWINE_PASSWORD "hide" - for WHEEL_PATH in "${WHEEL_PATHS[@]}" + for WHEEL_FPATH in "${WHEEL_FPATHS[@]}" do - if [ "$DO_GPG" == "True" ]; then - twine upload --username "$TWINE_USERNAME" "--password=$TWINE_PASSWORD" \ - --repository-url "$TWINE_REPOSITORY_URL" \ - --sign "$WHEEL_PATH".asc "$WHEEL_PATH" --skip-existing --verbose || { echo 'failed to twine upload' ; exit 1; } - else - twine upload --username "$TWINE_USERNAME" "--password=$TWINE_PASSWORD" \ - --repository-url "$TWINE_REPOSITORY_URL" \ - "$WHEEL_PATH" --skip-existing --verbose || { echo 'failed to twine upload' ; exit 1; } - fi + twine upload --username "$TWINE_USERNAME" "--password=$TWINE_PASSWORD" \ + --repository-url "$TWINE_REPOSITORY_URL" \ + "$WHEEL_FPATH" --skip-existing --verbose || { echo 'failed to twine upload' ; exit 1; } done echo """ !!! FINISH: LIVE RUN !!! @@ -488,7 +521,7 @@ else DEPLOY_REMOTE = '$DEPLOY_REMOTE' DO_UPLOAD = '$DO_UPLOAD' - WHEEL_PATH = '$WHEEL_PATH' + WHEEL_FPATH = '$WHEEL_FPATH' WHEEL_PATHS_STR = '$WHEEL_PATHS_STR' MODE_LIST_STR = '$MODE_LIST_STR' @@ -502,3 +535,39 @@ else !!! FINISH: DRY RUN !!! """ fi + +__devel__=' +# Checking to see how easy it is to upload packages to gitlab. +# This logic should go in the CI script, not sure if it belongs here. + + +export HOST=https://gitlab.kitware.com +export GROUP_NAME=computer-vision +export PROJECT_NAME=geowatch +PROJECT_VERSION=$(geowatch --version) +echo "$PROJECT_VERSION" + +load_secrets +export PRIVATE_GITLAB_TOKEN=$(git_token_for "$HOST") +TMP_DIR=$(mktemp -d -t ci-XXXXXXXXXX) + +curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups" > "$TMP_DIR/all_group_info" +GROUP_ID=$(cat "$TMP_DIR/all_group_info" | jq ". 
| map(select(.name==\"$GROUP_NAME\")) | .[0].id") +echo "GROUP_ID = $GROUP_ID" + +curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID" > "$TMP_DIR/group_info" +PROJ_ID=$(cat "$TMP_DIR/group_info" | jq ".projects | map(select(.name==\"$PROJECT_NAME\")) | .[0].id") +echo "PROJ_ID = $PROJ_ID" + +ls_array DIST_FPATHS "dist/*" + +for FPATH in "${DIST_FPATHS[@]}" +do + FNAME=$(basename $FPATH) + echo $FNAME + curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" \ + --upload-file $FPATH \ + "https://gitlab.kitware.com/api/v4/projects/$PROJ_ID/packages/generic/$PROJECT_NAME/$PROJECT_VERSION/$FNAME" +done + +' diff --git a/pyproject.toml b/pyproject.toml index d982a69..c4a3d84 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,16 +15,21 @@ os = [ "linux" ] ci_pypy_versions = [] author = ['Jon Crall', 'Jason Parham', 'Hendrik Weideman', 'Avi Weinstock', 'Zackary Rutfield', 'Chuck Stewart'] author_email="erotemic@gmail.com" -min_python = 3.7 +min_python = 3.8 version = "{mod_dpath}/__init__.py::__version__" license = "Apache 2" dev_status = "beta" [tool.pytest.ini_options] -addopts = "-p no:doctest --xdoctest --xdoctest-style=google --ignore-glob=setup.py" -norecursedirs = ".git ignore build __pycache__ dev _skbuild" -filterwarnings = [ "default", "ignore:.*No cfgstr given in Cacher constructor or call.*:Warning", "ignore:.*Define the __nice__ method for.*:Warning", "ignore:.*private pytest class or function.*:Warning",] +addopts = "-p no:doctest --xdoctest --xdoctest-style=google --ignore-glob=setup.py --ignore-glob=docs" +norecursedirs = ".git ignore build __pycache__ dev _skbuild docs" +filterwarnings = [ + "default", + "ignore:.*No cfgstr given in Cacher constructor or call.*:Warning", + "ignore:.*Define the __nice__ method for.*:Warning", + "ignore:.*private pytest class or function.*:Warning", +] [tool.coverage.run] branch = true diff --git a/requirements.txt b/requirements.txt index bc18a50..7de5aca 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ -r requirements/runtime.txt -r requirements/tests.txt -r requirements/optional.txt +-r requirements/build.txt \ No newline at end of file diff --git a/setup.py b/setup.py old mode 100755 new mode 100644 index ca5ded6..2a7f180 --- a/setup.py +++ b/setup.py @@ -2,7 +2,8 @@ # Generated by ~/code/xcookie/xcookie/builders/setup.py # based on part ~/code/xcookie/xcookie/rc/setup.py.in import sys -from os.path import exists +import re +from os.path import exists, dirname, join from setuptools import find_packages from setuptools import setup @@ -53,8 +54,6 @@ def parse_description(): pandoc --from=markdown --to=rst --output=README.rst README.md python -c "import setup; print(setup.parse_description())" """ - from os.path import dirname, join, exists - readme_fpath = join(dirname(__file__), "README.rst") # This breaks on pip install, so check that it exists. 
if exists(readme_fpath): @@ -77,10 +76,10 @@ def parse_requirements(fname="requirements.txt", versions=False): Returns: List[str]: list of requirements items - """ - from os.path import exists, dirname, join - import re + CommandLine: + python -c "import setup, ubelt; print(ubelt.urepr(setup.parse_requirements()))" + """ require_fpath = fname def parse_line(line, dpath=""): @@ -198,35 +197,40 @@ def gen_packages_items(): NAME = "vtool_ibeis" INIT_PATH = "vtool_ibeis/__init__.py" -VERSION = parse_version("vtool_ibeis/__init__.py") +VERSION = parse_version(INIT_PATH) if __name__ == "__main__": setupkw = {} - setupkw["install_requires"] = parse_requirements("requirements/runtime.txt") + setupkw["install_requires"] = parse_requirements( + "requirements/runtime.txt", versions="loose" + ) setupkw["extras_require"] = { - "all": parse_requirements("requirements.txt"), - "tests": parse_requirements("requirements/tests.txt"), - "optional": parse_requirements("requirements/optional.txt"), - "headless": parse_requirements("requirements/headless.txt"), - "graphics": parse_requirements("requirements/graphics.txt"), - # Strict versions + "all": parse_requirements("requirements.txt", versions="loose"), + "headless": parse_requirements("requirements/headless.txt", versions="loose"), + "graphics": parse_requirements("requirements/graphics.txt", versions="loose"), + "build": parse_requirements("requirements/build.txt", versions="loose"), + "docs": parse_requirements("requirements/docs.txt", versions="loose"), + "optional": parse_requirements("requirements/optional.txt", versions="loose"), + "runtime": parse_requirements("requirements/runtime.txt", versions="loose"), + "tests": parse_requirements("requirements/tests.txt", versions="loose"), + "all-strict": parse_requirements("requirements.txt", versions="strict"), "headless-strict": parse_requirements( "requirements/headless.txt", versions="strict" ), "graphics-strict": parse_requirements( "requirements/graphics.txt", versions="strict" ), - "all-strict": parse_requirements("requirements.txt", versions="strict"), + "build-strict": parse_requirements("requirements/build.txt", versions="strict"), + "docs-strict": parse_requirements("requirements/docs.txt", versions="strict"), + "optional-strict": parse_requirements( + "requirements/optional.txt", versions="strict" + ), "runtime-strict": parse_requirements( "requirements/runtime.txt", versions="strict" ), "tests-strict": parse_requirements("requirements/tests.txt", versions="strict"), - "optional-strict": parse_requirements( - "requirements/optional.txt", versions="strict" - ), } - setupkw["name"] = NAME setupkw["version"] = VERSION setupkw[ @@ -239,17 +243,17 @@ def gen_packages_items(): setupkw["long_description_content_type"] = "text/x-rst" setupkw["license"] = "Apache 2" setupkw["packages"] = find_packages(".") - setupkw["python_requires"] = ">=3.7" + setupkw["python_requires"] = ">=3.8" setupkw["classifiers"] = [ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Utilities", "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", ] setup(**setupkw) From 11920ab1d5f64758cd2f6c27bd9cfc927488bff9 Mon Sep 17 00:00:00 2001 From: joncrall Date: Sat, 13 Apr 2024 21:29:49 -0400 
Subject: [PATCH 05/15] Use new test images --- vtool_ibeis/histogram.py | 4 +-- vtool_ibeis/image.py | 50 ++++++++++++++++++------------------ vtool_ibeis/image_filters.py | 16 +++++++----- vtool_ibeis/patch.py | 32 ++++++++--------------- 4 files changed, 47 insertions(+), 55 deletions(-) diff --git a/vtool_ibeis/histogram.py b/vtool_ibeis/histogram.py index 9158fca..ce45ad2 100755 --- a/vtool_ibeis/histogram.py +++ b/vtool_ibeis/histogram.py @@ -975,8 +975,8 @@ def show_ori_image_ondisk(): >>> pt.show_if_requested() """ #if img_fpath is not None: - # img_fpath = ut.get_argval('--fpath', type_=str, default=ut.grab_test_imgpath('star.png')) - # img_fpath = ut.get_argval('--fpath', type_=str, default=ut.grab_test_imgpath('star.png')) + # img_fpath = ut.get_argval('--fpath', type_=str, default=ut.grab_test_imgpath('astro')) + # img_fpath = ut.get_argval('--fpath', type_=str, default=ut.grab_test_imgpath('astro')) # img = vt.imread(img_fpath) # ori_img_fpath = ut.get_argval('--fpath-ori', type_=str, # default=ut.augpath(img_fpath, '_ori')) diff --git a/vtool_ibeis/image.py b/vtool_ibeis/image.py index 2364582..2e0a6d3 100755 --- a/vtool_ibeis/image.py +++ b/vtool_ibeis/image.py @@ -1106,7 +1106,7 @@ def rotate_image_ondisk(img_fpath, theta, out_fpath=None, **kwargs): >>> # DISABLE_DOCTEST >>> from vtool_ibeis.image import * # NOQA >>> # build test data - >>> img_fpath = ut.grab_test_imgpath('star.png') + >>> img_fpath = ut.grab_test_imgpath('astro') >>> theta = TAU * 3 / 8 >>> # execute function >>> out_fpath = None @@ -1508,9 +1508,9 @@ def padded_resize(img, target_size=(64, 64), interpolation=None): >>> # ENABLE_DOCTEST >>> from vtool_ibeis.image import * # NOQA >>> import vtool_ibeis as vt - >>> imgA = vt.imread(ut.grab_test_imgpath('carl.jpg')) - >>> imgB = vt.imread(ut.grab_test_imgpath('ada.jpg')) - >>> imgC = vt.imread(ut.grab_test_imgpath('carl.jpg'), grayscale=True) + >>> imgA = vt.imread(ut.grab_test_imgpath('carl')) + >>> imgB = vt.imread(ut.grab_test_imgpath('astro')) + >>> imgC = vt.imread(ut.grab_test_imgpath('carl'), grayscale=True) >>> #target_size = (64, 64) >>> target_size = (1024, 1024) >>> img3_list = [padded_resize(img, target_size) for img in [imgA, imgB, imgC]] @@ -1554,7 +1554,7 @@ def embed_in_square_image(img, target_size, img_origin=(.5, .5), >>> # DISABLE_DOCTEST >>> from vtool_ibeis.image import * # NOQA >>> import vtool_ibeis as vt - >>> img_fpath = ut.grab_test_imgpath('carl.jpg') + >>> img_fpath = ut.grab_test_imgpath('carl') >>> img = vt.imread(img_fpath) >>> target_size = tuple(np.array(vt.get_size(img)) * 3) >>> img_origin = (.5, .5) @@ -1707,7 +1707,7 @@ def resize_to_maxdims(img, max_dsize=(64, 64), >>> # ENABLE_DOCTEST >>> from vtool_ibeis.image import * # NOQA >>> import vtool_ibeis as vt - >>> img_fpath = ut.grab_test_imgpath('carl.jpg') + >>> img_fpath = ut.grab_test_imgpath('carl') >>> img = vt.imread(img_fpath) >>> max_dsize = (1024, 1024) >>> img2 = resize_to_maxdims(img, max_dsize) @@ -1737,7 +1737,7 @@ def resize_thumb(img, max_dsize=(64, 64), interpolation=None): >>> from vtool_ibeis.image import * # NOQA >>> import vtool_ibeis as vt >>> # build test data - >>> img_fpath = ut.grab_test_imgpath('carl.jpg') + >>> img_fpath = ut.grab_test_imgpath('carl') >>> img = vt.imread(img_fpath) >>> max_dsize = (64, 64) >>> # execute function @@ -2008,11 +2008,11 @@ def saveImage(self, fileName, scale=32.0): def testdata_imglist(): # build test data import vtool_ibeis as vt - img1 = vt.imread(ut.grab_test_imgpath('carl.jpg')) - img2 = 
vt.imread(ut.grab_test_imgpath('astro.png')) - img3 = vt.imread(ut.grab_test_imgpath('ada.jpg')) - img4 = vt.imread(ut.grab_test_imgpath('jeff.png')) - img5 = vt.imread(ut.grab_test_imgpath('star.png')) + img1 = vt.imread(ut.grab_test_imgpath('carl')) + img2 = vt.imread(ut.grab_test_imgpath('astro')) + img3 = vt.imread(ut.grab_test_imgpath('stars')) + img4 = vt.imread(ut.grab_test_imgpath('pm5644')) + img5 = vt.imread(ut.grab_test_imgpath('parrot')) img_list = [img1, img2, img3, img4, img5] return img_list @@ -2423,9 +2423,9 @@ def ensure_3channel(patch): >>> # ENABLE_DOCTEST >>> from vtool_ibeis.image import * # NOQA >>> import vtool_ibeis as vt - >>> patch1 = vt.imread(ut.grab_test_imgpath('astro.png'))[0:512, 0:500, :] - >>> patch2 = vt.imread(ut.grab_test_imgpath('ada.jpg'))[:, :, 0:1] - >>> patch3 = vt.imread(ut.grab_test_imgpath('jeff.png'))[0:390, 0:400, 0] + >>> patch1 = vt.imread(ut.grab_test_imgpath('astro'))[0:512, 0:500, :] + >>> patch2 = vt.imread(ut.grab_test_imgpath('carl'))[:, :, 0:1] + >>> patch3 = vt.imread(ut.grab_test_imgpath('paraview'))[0:390, 0:400, 0] >>> res1 = ensure_3channel(patch1) >>> res2 = ensure_3channel(patch2) >>> res3 = ensure_3channel(patch3) @@ -2489,8 +2489,8 @@ def stack_images(img1, img2, vert=None, modifysize=False, return_sf=False, >>> from vtool_ibeis.image import * # NOQA >>> import vtool_ibeis as vt >>> # build test data - >>> img1 = vt.imread(ut.grab_test_imgpath('carl.jpg')) - >>> img2 = vt.imread(ut.grab_test_imgpath('astro.png')) + >>> img1 = vt.imread(ut.grab_test_imgpath('carl')) + >>> img2 = vt.imread(ut.grab_test_imgpath('astro')) >>> vert = True >>> modifysize = False >>> # execute function @@ -2645,11 +2645,11 @@ def stack_image_recurse(img_list1, img_list2=None, vert=True, modifysize=False, >>> from vtool_ibeis.image import * # NOQA >>> import vtool_ibeis as vt >>> # build test data - >>> img1 = vt.imread(ut.grab_test_imgpath('carl.jpg')) - >>> img2 = vt.imread(ut.grab_test_imgpath('astro.png')) - >>> img3 = vt.imread(ut.grab_test_imgpath('ada.jpg')) - >>> img4 = vt.imread(ut.grab_test_imgpath('jeff.png')) - >>> img5 = vt.imread(ut.grab_test_imgpath('star.png')) + >>> img1 = vt.imread(ut.grab_test_imgpath('carl')) + >>> img2 = vt.imread(ut.grab_test_imgpath('astro')) + >>> img3 = vt.imread(ut.grab_test_imgpath('paraview')) + >>> img4 = vt.imread(ut.grab_test_imgpath('lowcontrast')) + >>> img5 = vt.imread(ut.grab_test_imgpath('stars')) >>> img_list1 = [img1, img2, img3, img4, img5] >>> img_list2 = None >>> vert = True @@ -2658,7 +2658,7 @@ def stack_image_recurse(img_list1, img_list2=None, vert=True, modifysize=False, >>> # verify results >>> # xdoctest: +REQUIRES(--show) >>> import plottool_ibeis as pt - >>> imshow(imgB) + >>> pt.imshow(imgB) >>> #wh1 = img1.shape[0:2][::-1] >>> #wh2 = img2.shape[0:2][::-1] >>> #pt.draw_bbox((0, 0) + wh1, bbox_color=(1, 0, 0)) @@ -2732,8 +2732,8 @@ def filterflags_valid_images(gpaths, valid_formats=None, Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.image import * # NOQA - >>> gpaths = [ut.grab_test_imgpath('carl.jpg'), - >>> ut.grab_test_imgpath('astro.png')] + >>> gpaths = [ut.grab_test_imgpath('carl'), + >>> ut.grab_test_imgpath('astro')] >>> flags = filterflags_valid_images(gpaths) >>> assert all(flags) """ diff --git a/vtool_ibeis/image_filters.py b/vtool_ibeis/image_filters.py index f7f2f86..69f3dd6 100755 --- a/vtool_ibeis/image_filters.py +++ b/vtool_ibeis/image_filters.py @@ -14,7 +14,8 @@ class IntensityPreproc(object): Doctest: >>> from vtool_ibeis.image_filters import * >>> import 
vtool_ibeis as vt - >>> chipBGR = vt.imread(ut.grab_file_url('http://i.imgur.com/qVWQaex.jpg')) + >>> #chipBGR = vt.imread(ut.grab_file_url('http://i.imgur.com/qVWQaex.jpg')) + >>> chipBGR = vt.imread(ut.grab_test_imgpath('astro')) >>> filter_list = [ >>> ('medianblur', {}), >>> ('adapteq', {}), @@ -71,7 +72,9 @@ def manta_matcher_filters(chipBGR): >>> from ibeis.core_annots import * # NOQA >>> import ibeis >>> ibs = ibeis.opendb('Mantas') - >>> chipBGR = vt.imread(ut.grab_file_url('http://i.imgur.com/qVWQaex.jpg')) + >>> #chipBGR = vt.imread(ut.grab_file_url('http://i.imgur.com/qVWQaex.jpg')) + >>> chipBGR = vt.imread(ut.grab_test_imgpath('astro')) + """ chipLAB = cv2.cvtColor(chipBGR, cv2.COLOR_BGR2LAB) @@ -99,7 +102,8 @@ def adapteq_fn(chipBGR): >>> from vtool_ibeis.image_filters import * >>> import vtool_ibeis as vt >>> import utool as ut - >>> chipBGR = vt.imread(ut.grab_file_url('http://i.imgur.com/qVWQaex.jpg')) + >>> #chipBGR = vt.imread(ut.grab_file_url('http://i.imgur.com/qVWQaex.jpg')) + >>> chipBGR = vt.imread(ut.grab_test_imgpath('astro')) >>> chip2 = adapteq_fn(chipBGR) >>> # xdoctest: +REQUIRES(--show) >>> import plottool_ibeis as pt @@ -124,8 +128,9 @@ def medianfilter_fn(chipBGR): >>> from vtool_ibeis.image_filters import * >>> import vtool_ibeis as vt >>> import utool as ut - >>> chipBGR = vt.imread(ut.grab_file_url('http://i.imgur.com/qVWQaex.jpg')) - >>> chip2 = adapteq_fn(chipBGR) + >>> #chipBGR = vt.imread(ut.grab_file_url('http://i.imgur.com/qVWQaex.jpg')) + >>> chipBGR = vt.imread(ut.grab_test_imgpath('astro')) + >>> chip2 = medianfilter_fn(chipBGR) >>> # xdoctest: +REQUIRES(--show) >>> import plottool_ibeis as pt >>> pt.imshow(chipBGR, pnum=(1, 2, 1), fnum=1) @@ -199,7 +204,6 @@ def grabcut_fn(chipBGR): return seg_chipBGR - if __name__ == '__main__': """ CommandLine: diff --git a/vtool_ibeis/patch.py b/vtool_ibeis/patch.py index 7fc4e10..05570d7 100755 --- a/vtool_ibeis/patch.py +++ b/vtool_ibeis/patch.py @@ -1065,18 +1065,14 @@ def draw_kp_ori_steps(): >>> draw_kp_ori_steps() >>> pt.show_if_requested() """ - #from vtool_ibeis.patch import * # NOQA - #import vtool_ibeis as vt # build test data import utool as ut import plottool_ibeis as pt - import vtool_ibeis as vt if True: from ibeis.scripts.thesis import TMP_RC import matplotlib as mpl mpl.rcParams.update(TMP_RC) - #import vtool_ibeis as vt np.random.seed(0) USE_COMMANLINE = True if USE_COMMANLINE: @@ -1085,24 +1081,16 @@ def draw_kp_ori_steps(): kp = kpts[fx] else: fx = 0 - USE_EXTERN_STAR = False - if USE_EXTERN_STAR: - img_fpath = ut.grab_test_imgpath('star.png') - imgBGR = vt.imread(img_fpath) - kpts, vecs = vt.extract_features(img_fpath) - kp = np.array([ 3.14742985e+01, 2.95660381e+01, 1.96057682e+01, -5.11199608e-03, 2.05653343e+01, 0.00000000e+00], - dtype=np.float32) - else: - #imgBGR = get_test_patch('stripe', jitter=True) - #imgBGR = get_test_patch('star', jitter=True) - imgBGR = get_test_patch('star2', jitter=True) - #imgBGR = get_test_patch('cross', jitter=False) - #imgBGR = cv2.resize(imgBGR, (41, 41), interpolation=cv2.INTER_LANCZOS4) - imgBGR = cv2.resize(imgBGR, (41, 41), interpolation=cv2.INTER_CUBIC) - theta = 0 # 3.4 # TAU / 16 - #kpts = make_test_image_keypoints(imgBGR, scale=.9, theta=theta) - kpts = make_test_image_keypoints(imgBGR, scale=.3, theta=theta, shift=(.3, .1)) - kp = kpts[0] + #imgBGR = get_test_patch('stripe', jitter=True) + #imgBGR = get_test_patch('star', jitter=True) + imgBGR = get_test_patch('star2', jitter=True) + #imgBGR = get_test_patch('cross', jitter=False) + #imgBGR = 
cv2.resize(imgBGR, (41, 41), interpolation=cv2.INTER_LANCZOS4) + imgBGR = cv2.resize(imgBGR, (41, 41), interpolation=cv2.INTER_CUBIC) + theta = 0 # 3.4 # TAU / 16 + #kpts = make_test_image_keypoints(imgBGR, scale=.9, theta=theta) + kpts = make_test_image_keypoints(imgBGR, scale=.3, theta=theta, shift=(.3, .1)) + kp = kpts[0] bins = 36 maxima_thresh = .8 converge_lists = [] From 9da292dd0d07100cf5729f37104ce439cdb48939 Mon Sep 17 00:00:00 2001 From: joncrall Date: Sat, 13 Apr 2024 21:37:49 -0400 Subject: [PATCH 06/15] Better test data --- vtool_ibeis/features.py | 5 ++--- vtool_ibeis/image.py | 26 +++++++++++++------------- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/vtool_ibeis/features.py b/vtool_ibeis/features.py index d811035..0bd5861 100755 --- a/vtool_ibeis/features.py +++ b/vtool_ibeis/features.py @@ -109,8 +109,7 @@ def detect_opencv_keypoints(): import vtool_ibeis as vt import numpy as np # NOQA - #img_fpath = ut.grab_test_imgpath(ub.argval('--fname', default='astro')) - img_fpath = ut.grab_test_imgpath(ub.argval('--fname', default='zebra.png')) + img_fpath = ut.grab_test_imgpath(ub.argval('--fname', default='astro')) imgBGR = vt.imread(img_fpath) imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY) @@ -251,7 +250,7 @@ def compress(self, flags, inplace=False): info = {key: list(ub.compress(val, flags)) for key, val in self.info.items()} return Keypoints(subarr, info) - img_fpath = ut.grab_test_imgpath(ub.argval('--fname', default='zebra.png')) + img_fpath = ut.grab_test_imgpath(ub.argval('--fname', default='astro')) imgBGR = vt.imread(img_fpath) imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY) # http://docs.opencv.org/master/d3/d28/classcv_1_1MSER.html#gsc.tab=0 diff --git a/vtool_ibeis/image.py b/vtool_ibeis/image.py index 2e0a6d3..d5352b6 100755 --- a/vtool_ibeis/image.py +++ b/vtool_ibeis/image.py @@ -289,12 +289,12 @@ def imread(img_fpath, grayscale=False, orient=False, flags=None, Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.image import * # NOQA - >>> img_fpath = ut.grab_test_imgpath('carl.jpg') + >>> img_fpath = ut.grab_test_imgpath('carl') >>> imgBGR1 = imread(img_fpath, grayscale=False) >>> imgBGR2 = imread(img_fpath, grayscale=True) >>> imgBGR3 = imread(img_fpath, orient=True) - >>> assert imgBGR1.shape == (250, 300, 3) - >>> assert imgBGR2.shape == (250, 300) + >>> assert imgBGR1.shape == (448, 328, 3) + >>> assert imgBGR2.shape == (448, 328) >>> assert np.all(imgBGR1 == imgBGR3) >>> # xdoctest: +REQUIRES(--show) >>> import plottool_ibeis as pt @@ -332,7 +332,7 @@ def imread(img_fpath, grayscale=False, orient=False, flags=None, (2736, 3648, 3) Example: - >>> # ENABLE_DOCTEST + >>> # xdoctest +SKIP("networking") >>> from vtool_ibeis.image import * # NOQA >>> url = 'http://www.sherv.net/cm/emo/funny/2/big-dancing-banana-smiley-emoticon.gif' >>> img_fpath = ut.grab_file_url(url) @@ -540,10 +540,10 @@ def imwrite(img_fpath, imgBGR, fallback=False): >>> from vtool_ibeis.image import * # NOQA >>> import vtool_ibeis as vt >>> import utool as ut - >>> img_fpath1 = ut.grab_test_imgpath('zebra.png') + >>> img_fpath1 = ut.grab_test_imgpath('astro') >>> imgBGR = vt.imread(img_fpath1) >>> img_dpath = ub.ensure_app_cache_dir('vtool_ibeis', 'testwrite') - >>> img_fpath2 = ut.unixjoin(img_dpath, 'zebra.png') + >>> img_fpath2 = ut.unixjoin(img_dpath, 'astro.png') >>> fallback = False >>> imwrite(img_fpath2, imgBGR, fallback=fallback) >>> imgBGR2 = vt.imread(img_fpath2) @@ -555,7 +555,7 @@ def imwrite(img_fpath, imgBGR, fallback=False): if fallback: try: 
imwrite_fallback(img_fpath, imgBGR) - except Exception as ex: + except Exception: pass msg = '[vt.image] ERROR writing: %s' % (img_fpath,) ut.printex(ex, msg, keys=['imgBGR.shape']) @@ -657,11 +657,11 @@ def open_image_size(image_fpath): Doctest: >>> from vtool_ibeis.image import * # NOQA - >>> image_fpath = ut.grab_test_imgpath('patsy.jpg') + >>> image_fpath = ut.grab_test_imgpath('carl') >>> size = open_image_size(image_fpath) >>> result = ('size = %s' % (str(size),)) >>> print(result) - size = (800, 441) + size = (328, 448) Ignore: # Confirm that Image.open is a lazy load @@ -730,7 +730,7 @@ def warpAffine(img, Aff, dsize, assume_float01=True): >>> # DISABLE_DOCTEST >>> from vtool_ibeis.image import * # NOQA >>> import vtool_ibeis as vt - >>> img_fpath = ut.grab_test_imgpath('carl.jpg') + >>> img_fpath = ut.grab_test_imgpath('carl') >>> img = vt.imread(img_fpath) >>> Aff = vt.rotation_mat3x3(TAU / 8) >>> dsize = vt.get_size(img) @@ -1422,9 +1422,9 @@ def convert_colorspace(img, colorspace, src_colorspace='BGR'): >>> # DISABLE_DOCTEST >>> from vtool_ibeis.image import * # NOQA >>> import vtool_ibeis as vt - >>> img_fpath = ut.grab_test_imgpath('zebra.png') - >>> img_fpath = ut.grab_file_url('http://itsnasb.com/wp-content/uploads/2013/03/lisa-frank-logo1.jpg') - >>> img_fpath = ut.grab_test_imgpath('carl.jpg') + >>> #img_fpath = ut.grab_file_url('http://itsnasb.com/wp-content/uploads/2013/03/lisa-frank-logo1.jpg') + >>> #img_fpath = ut.grab_test_imgpath('carl') + >>> img_fpath = ut.grab_test_imgpath('pm5644') >>> img = vt.imread(img_fpath) >>> img_float = vt.rectify_to_float01(img, np.float32) >>> colorspace = 'LAB' From e6a44ef2136a116963fee3b9b1b72f4eea2c1777 Mon Sep 17 00:00:00 2001 From: joncrall Date: Sat, 13 Apr 2024 21:47:46 -0400 Subject: [PATCH 07/15] Avoid 3.12 for now --- .github/workflows/tests.yml | 16 ++++++---------- pyproject.toml | 1 + setup.py | 1 - 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 1547c80..7480174 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -22,10 +22,10 @@ jobs: steps: - name: Checkout source uses: actions/checkout@v4.1.1 - - name: Set up Python 3.12 for linting + - name: Set up Python 3.11 for linting uses: actions/setup-python@v5.0.0 with: - python-version: '3.12' + python-version: '3.11' - name: Install dependencies run: |- python -m pip install --upgrade pip @@ -44,10 +44,10 @@ jobs: steps: - name: Checkout source uses: actions/checkout@v4.1.1 - - name: Set up Python 3.12 + - name: Set up Python 3.11 uses: actions/setup-python@v5.0.0 with: - python-version: '3.12' + python-version: '3.11' - name: Upgrade pip run: |- python -m pip install --upgrade pip @@ -111,7 +111,7 @@ jobs: os: - ubuntu-latest python-version: - - '3.12' + - '3.11' arch: - auto steps: @@ -155,7 +155,7 @@ jobs: install-extras: tests-strict,runtime-strict,headless-strict os: ubuntu-latest arch: auto - - python-version: '3.12' + - python-version: '3.11' install-extras: tests-strict,runtime-strict,optional-strict,headless-strict os: ubuntu-latest arch: auto @@ -175,10 +175,6 @@ jobs: install-extras: tests,optional,headless os: ubuntu-latest arch: auto - - python-version: '3.12' - install-extras: tests,optional,headless - os: ubuntu-latest - arch: auto steps: - name: Checkout source uses: actions/checkout@v4.1.1 diff --git a/pyproject.toml b/pyproject.toml index c4a3d84..57a9957 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,7 @@ ci_pypy_versions = [] author = 
['Jon Crall', 'Jason Parham', 'Hendrik Weideman', 'Avi Weinstock', 'Zackary Rutfield', 'Chuck Stewart'] author_email="erotemic@gmail.com" min_python = 3.8 +max_python = 3.11 version = "{mod_dpath}/__init__.py::__version__" license = "Apache 2" dev_status = "beta" diff --git a/setup.py b/setup.py index 2a7f180..b46e77f 100644 --- a/setup.py +++ b/setup.py @@ -254,6 +254,5 @@ def gen_packages_items(): "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", ] setup(**setupkw) From 1a12096be0618b144e32ee0cbe87d9e623040c63 Mon Sep 17 00:00:00 2001 From: joncrall Date: Sat, 13 Apr 2024 21:57:35 -0400 Subject: [PATCH 08/15] Move off of old demodata --- dev/broken/_dep_matching.py | 1413 --------------------------- tests/test_spatial_verification.py | 2 +- tests/test_sver_wrapper.py | 129 --- vtool_ibeis/coverage_grid.py | 4 +- vtool_ibeis/coverage_kpts.py | 27 +- vtool_ibeis/demodata.py | 6 +- vtool_ibeis/inspect_matches.py | 4 +- vtool_ibeis/matching.py | 16 +- vtool_ibeis/nearest_neighbors.py | 6 +- vtool_ibeis/segmentation.py | 2 +- vtool_ibeis/spatial_verification.py | 4 +- 11 files changed, 38 insertions(+), 1575 deletions(-) delete mode 100644 dev/broken/_dep_matching.py delete mode 100644 tests/test_sver_wrapper.py diff --git a/dev/broken/_dep_matching.py b/dev/broken/_dep_matching.py deleted file mode 100644 index 1126be2..0000000 --- a/dev/broken/_dep_matching.py +++ /dev/null @@ -1,1413 +0,0 @@ - - -def marge_matches(fm_A, fm_B, fsv_A, fsv_B): - """ combines feature matches from two matching algorithms - - Args: - fm_A (ndarray[ndims=2]): type A feature matches - fm_B (ndarray[ndims=2]): type B feature matches - fsv_A (ndarray[ndims=2]): type A feature scores - fsv_B (ndarray[ndims=2]): type B feature scores - - Returns: - tuple: (fm_both, fs_both) - - CommandLine: - python -m vtool_ibeis.matching --test-marge_matches - - Example: - >>> # ENABLE_DOCTEST - >>> from vtool_ibeis.matching import * # NOQA - >>> fm_A = np.array([[ 15, 17], [ 54, 29], [ 95, 111], [ 25, 125], [ 97, 125]], dtype=np.int32) - >>> fm_B = np.array([[ 11, 21], [ 15, 17], [ 25, 125], [ 30, 32]], dtype=np.int32) - >>> fsv_A = np.array([[ .1, .2], [1.0, .9], [.8, .2], [.1, .1], [1.0, .9]], dtype=np.float32) - >>> fsv_B = np.array([[.12], [.3], [.5], [.7]], dtype=np.float32) - >>> # execute function - >>> (fm_both, fs_both) = marge_matches(fm_A, fm_B, fsv_A, fsv_B) - >>> # verify results - >>> result = ub.repr2((fm_both, fs_both), precision=3) - >>> print(result) - ( - np.array([[ 15, 17], - [ 25, 125], - [ 54, 29], - [ 95, 111], - [ 97, 125], - [ 11, 21], - [ 30, 32]], dtype=np.int32), - np.array([[ 0.1 , 0.2 , 0.3 ], - [ 0.1 , 0.1 , 0.5 ], - [ 1. , 0.9 , nan], - [ 0.8 , 0.2 , nan], - [ 1. 
, 0.9 , nan], - [ nan, nan, 0.12], - [ nan, nan, 0.7 ]], dtype=np.float64), - ) - """ - # Flag rows found in both fmA and fmB - # that are intersecting (both) or unique (only) - import vtool_ibeis as vt - flags_both_A, flags_both_B = vt.intersect2d_flags(fm_A, fm_B) - flags_only_A = np.logical_not(flags_both_A) - flags_only_B = np.logical_not(flags_both_B) - # independent matches - fm_both_AB = fm_A.compress(flags_both_A, axis=0) - fm_only_A = fm_A.compress(flags_only_A, axis=0) - fm_only_B = fm_B.compress(flags_only_B, axis=0) - # independent scores - fsv_both_A = fsv_A.compress(flags_both_A, axis=0) - fsv_both_B = fsv_B.compress(flags_both_B, axis=0) - fsv_only_A = fsv_A.compress(flags_only_A, axis=0) - fsv_only_B = fsv_B.compress(flags_only_B, axis=0) - # build merge offsets - offset1 = len(fm_both_AB) - offset2 = offset1 + len(fm_only_A) - offset3 = offset2 + len(fm_only_B) - # Merge feature matches - fm_merged = np.vstack([fm_both_AB, fm_only_A, fm_only_B]) - # Merge feature scores - num_rows = fm_merged.shape[0] - num_cols_A = fsv_A.shape[1] - num_cols_B = fsv_B.shape[1] - num_cols = num_cols_A + num_cols_B - fsv_merged = np.full((num_rows, num_cols), np.nan) - fsv_merged[0:offset1, 0:num_cols_A] = fsv_both_A - fsv_merged[0:offset1, num_cols_A:] = fsv_both_B - fsv_merged[offset1:offset2, 0:num_cols_A] = fsv_only_A - fsv_merged[offset2:offset3, num_cols_A:] = fsv_only_B - return fm_merged, fsv_merged - - -def ensure_fsv_list(fsv_list): - """ ensure fs is at least Nx1 """ - return [fsv[:, None] if len(fsv.shape) == 1 else fsv - for fsv in fsv_list] - - -from __future__ import absolute_import, division, print_function -#from six.moves import range -import utool as ut -import six # NOQA -import numpy as np -#from vtool_ibeis import keypoint as ktool -from vtool_ibeis import coverage_kpts -from vtool_ibeis import spatial_verification as sver -from vtool_ibeis import matching -#import numpy.linalg as npl -#import scipy.sparse as sps -#import scipy.sparse.linalg as spsl -#from numpy.core.umath_tests import matrix_multiply -#import vtool_ibeis.keypoint as ktool -#import vtool_ibeis.linalg as ltool -#profile = ut.profile - - -def assign_nearest_neighbors(vecs1, vecs2, K=2): - import vtool_ibeis as vt - from vtool_ibeis._pyflann_backend import pyflann - checks = 800 - flann_params = { - 'algorithm': 'kdtree', - 'trees': 8 - } - #pseudo_max_dist_sqrd = (np.sqrt(2) * 512) ** 2 - #pseudo_max_dist_sqrd = 2 * (512 ** 2) - flann = vt.flann_cache(vecs1, flann_params=flann_params) - try: - fx2_to_fx1, fx2_to_dist = matching.normalized_nearest_neighbors(flann, vecs2, K, checks) - #fx2_to_fx1, _fx2_to_dist = flann.nn_index(vecs2, num_neighbors=K, checks=checks) - except pyflann.FLANNException: - print('vecs1.shape = %r' % (vecs1.shape,)) - print('vecs2.shape = %r' % (vecs2.shape,)) - print('vecs1.dtype = %r' % (vecs1.dtype,)) - print('vecs2.dtype = %r' % (vecs2.dtype,)) - raise - #fx2_to_dist = np.divide(_fx2_to_dist, pseudo_max_dist_sqrd) - return fx2_to_fx1, fx2_to_dist - - -def baseline_vsone_ratio_matcher(testtup, cfgdict={}): - r""" - spatially constrained ratio matching - - CommandLine: - python -m vtool_ibeis.constrained_matching --test-spatially_constrianed_matcher - - Example: - >>> # DISABLE_DOCTEST - >>> import plottool_ibeis as pt - >>> from vtool_ibeis.constrained_matching import * # NOQA - >>> import vtool_ibeis as vt - >>> testtup = testdata_matcher() - >>> # execute function - >>> basetup, base_meta = baseline_vsone_ratio_matcher(testtup) - >>> # verify results - >>> print(basetup) - """ 
- rchip1, rchip2, kpts1, vecs1, kpts2, vecs2, dlen_sqrd2 = testtup - return baseline_vsone_ratio_matcher_(kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, cfgdict={}) - - -def spatially_constrianed_matcher(testtup, basetup, cfgdict={}): - r""" - spatially constrained ratio matching - - CommandLine: - python -m vtool_ibeis.constrained_matching --test-spatially_constrianed_matcher - - Example: - >>> # DISABLE_DOCTEST - >>> import plottool_ibeis as pt - >>> from vtool_ibeis.constrained_matching import * # NOQA - >>> import vtool_ibeis as vt - >>> testtup = testdata_matcher() - >>> basetup, base_meta = baseline_vsone_ratio_matcher(testtup) - >>> # execute function - >>> nexttup, next_meta = spatially_constrianed_matcher(testtup, basetup) - >>> # verify results - >>> print(nexttup) - """ - (rchip1, rchip2, kpts1, vecs1, kpts2, vecs2, dlen_sqrd2) = testtup - (fm_ORIG, fs_ORIG, fm_RAT, fs_RAT, fm_SV, fs_SV, H_RAT) = basetup - return spatially_constrianed_matcher_(kpts1, vecs1, kpts2, vecs2, - dlen_sqrd2, H_RAT, cfgdict={}) - - -def baseline_vsone_ratio_matcher_(kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, cfgdict={}): - r""" - Args: - vecs1 (ndarray[uint8_t, ndim=2]): SIFT descriptors - vecs2 (ndarray[uint8_t, ndim=2]): SIFT descriptors - kpts1 (ndarray[float32_t, ndim=2]): keypoints - kpts2 (ndarray[float32_t, ndim=2]): keypoints - - Ignore: - %pylab qt4 - import plottool_ibeis as pt - pt.imshow(rchip1) - pt.draw_kpts2(kpts1) - - pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm=fm, fs=fs) - pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm=fm, fs=fs) - """ - #import vtool_ibeis as vt - sver_xy_thresh = cfgdict.get('sver_xy_thresh', .01) - ratio_thresh = cfgdict.get('ratio_thresh', .625) - #ratio_thresh = .99 - # GET NEAREST NEIGHBORS - fx2_to_fx1, fx2_to_dist = assign_nearest_neighbors(vecs1, vecs2, K=2) - assigntup = matching.assign_unconstrained_matches(fx2_to_fx1, fx2_to_dist) - fx2_match, fx1_match, fx1_norm, match_dist, norm_dist = assigntup - fm_ORIG = np.vstack((fx1_match, fx2_match)).T - fs_ORIG = 1 - np.divide(match_dist, norm_dist) - # APPLY RATIO TEST - fm_RAT, fs_RAT, fm_norm_RAT = matching.ratio_test(fx2_match, fx1_match, fx1_norm, match_dist, norm_dist, ratio_thresh) - # SPATIAL VERIFICATION FILTER - #with ut.EmbedOnException(): - match_weights = np.ones(len(fm_RAT)) - svtup = sver.spatially_verify_kpts(kpts1, kpts2, fm_RAT, sver_xy_thresh, dlen_sqrd2, match_weights=match_weights) - if svtup is not None: - (homog_inliers, homog_errors, H_RAT) = svtup[0:3] - else: - H_RAT = np.eye(3) - homog_inliers = [] - fm_SV = fm_RAT[homog_inliers] - fs_SV = fs_RAT[homog_inliers] - fm_norm_SV = fm_norm_RAT[homog_inliers] - - base_tup = (fm_ORIG, fs_ORIG, fm_RAT, fs_RAT, fm_SV, fs_SV, H_RAT) - base_meta = (fm_norm_RAT, fm_norm_SV) - return base_tup, base_meta - - -def spatially_constrianed_matcher_(kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, - H_RAT, cfgdict={}): - #import vtool_ibeis as vt - - #match_xy_thresh = .1 - #sver_xy_thresh = .01 - #ratio_thresh2 = .8 - # Observation, scores don't change above K=7 - # on easy test case - #search_K = 7 # 3 - search_K = cfgdict.get('search_K', 7) - ratio_thresh2 = cfgdict.get('ratio_thresh2', .8) - sver_xy_thresh2 = cfgdict.get('sver_xy_thresh2', .01) - normalizer_mode = cfgdict.get('normalizer_mode', 'far') - match_xy_thresh = cfgdict.get('match_xy_thresh', .1) - - # ASSIGN CANDIDATES - # Get candidate nearest neighbors - fx2_to_fx1, fx2_to_dist = assign_nearest_neighbors(vecs1, vecs2, K=search_K) - - # COMPUTE CONSTRAINTS - #normalizer_mode = 'far' - constrain_tup = 
spatially_constrain_matches(dlen_sqrd2, kpts1, kpts2, H_RAT, - fx2_to_fx1, fx2_to_dist, - match_xy_thresh, - normalizer_mode=normalizer_mode) - (fm_SC, fm_norm_SC, match_dist, norm_dist) = constrain_tup - fx2_match = fm_SC.T[1] - fx1_match = fm_SC.T[1] - fx1_norm = fm_norm_SC.T[1] - - fm_SCR, fs_SCR, fm_norm_SCR = matching.ratio_test(fx2_match, fx1_match, - fx1_norm, match_dist, - norm_dist, ratio_thresh2) - fs_SC = 1 - np.divide(match_dist, norm_dist) # NOQA - #fm_SCR, fs_SCR, fm_norm_SCR = ratio_test2(match_dist, norm_dist, fm_SC, - # fm_norm_SC, ratio_thresh2) - - # Another round of verification - match_weights = np.ones(len(fm_SCR)) - svtup = sver.spatially_verify_kpts(kpts1, kpts2, fm_SCR, sver_xy_thresh2, dlen_sqrd2, match_weights=match_weights) - if svtup is not None: - (homog_inliers, homog_errors, H_SCR) = svtup[0:3] - else: - H_SCR = np.eye(3) - homog_inliers = [] - fm_SCRSV = fm_SCR[homog_inliers] - fs_SCRSV = fs_SCR[homog_inliers] - - fm_norm_SVSCR = fm_norm_SCR[homog_inliers] - - nexttup = (fm_SC, fs_SC, fm_SCR, fs_SCR, fm_SCRSV, fs_SCRSV, H_SCR) - next_meta = (fm_norm_SC, fm_norm_SCR, fm_norm_SVSCR) - return nexttup, next_meta - - -#def ratio_test(fx2_to_fx1, fx2_to_dist, ratio_thresh): -# fx2_to_ratio = np.divide(fx2_to_dist.T[0], fx2_to_dist.T[1]) -# fx2_to_isvalid = fx2_to_ratio < ratio_thresh -# fx2_m = np.where(fx2_to_isvalid)[0] -# fx1_m = fx2_to_fx1.T[0].take(fx2_m) -# fs_RAT = np.subtract(1.0, fx2_to_ratio.take(fx2_m)) -# fm_RAT = np.vstack((fx1_m, fx2_m)).T -# # return normalizer info as well -# fx1_m_normalizer = fx2_to_fx1.T[1].take(fx2_m) -# fm_norm_RAT = np.vstack((fx1_m_normalizer, fx2_m)).T -# return fm_RAT, fs_RAT, fm_norm_RAT - - -#def ratio_test2(match_dist_list, norm_dist_list, fm_SC, fm_norm_SC, ratio_thresh2=.8): -# ratio_list = np.divide(match_dist_list, norm_dist_list) -# #ratio_thresh = .625 -# #ratio_thresh = .725 -# isvalid_list = np.less(ratio_list, ratio_thresh2) -# valid_ratios = ratio_list[isvalid_list] -# fm_SCR = fm_SC[isvalid_list] -# fs_SCR = np.subtract(1.0, valid_ratios) # NOQA -# fm_norm_SCR = fm_norm_SC[isvalid_list] -# #fm_SCR = np.vstack((fx1_m, fx2_m)).T # NOQA -# return fm_SCR, fs_SCR, fm_norm_SCR - - -def spatially_constrain_matches(dlen_sqrd2, kpts1, kpts2, H_RAT, - fx2_to_fx1, fx2_to_dist, - match_xy_thresh, normalizer_mode='far'): - r""" - helper for spatially_constrianed_matcher - OLD FUNCTION - - Args: - dlen_sqrd2 (?): - kpts1 (ndarray[float32_t, ndim=2]): keypoints - kpts2 (ndarray[float32_t, ndim=2]): keypoints - H_RAT (ndarray[float64_t, ndim=2]): homography/perspective matrix - fx2_to_fx1 (ndarray): - fx2_to_dist (ndarray): - match_xy_thresh (?): threshold is specified as a fraction of the diagonal chip length - normalizer_mode (str): - """ - # Find the normalized spatial error of all candidate matches - ##### - - # Filter out matches that could not be constrained - - if normalizer_mode == 'plus': - norm_xy_bounds = (0, np.inf) - elif normalizer_mode == 'far': - norm_xy_bounds = (match_xy_thresh, np.inf) - elif normalizer_mode == 'nearby': - norm_xy_bounds = (0, match_xy_thresh) - else: - raise AssertionError('normalizer_mode=%r' % (normalizer_mode,)) - - assigntup = matching.assign_spatially_constrained_matches( - dlen_sqrd2, kpts1, kpts2, H_RAT, fx2_to_fx1, fx2_to_dist, - match_xy_thresh, norm_xy_bounds=norm_xy_bounds) - - fx2_match, fx1_match, fx1_norm, match_dist, norm_dist = assigntup - - fm_constrained = np.vstack((fx1_match, fx2_match)).T - # return noramlizers as well - fm_norm_constrained = np.vstack((fx1_norm, 
fx2_match)).T - - constraintup = (fm_constrained, fm_norm_constrained, match_dist, norm_dist) - return constraintup - - -def compute_forgroundness(fpath1, kpts1, species='zebra_plains'): - """ - hack in foregroundness - """ - import pyrf - import vtool_ibeis as vt - from os.path import exists - # hack for getting a model (not entirely ibeis independent) - trees_path = ut.get_app_resource_dir('ibeis', 'detectmodels', 'rf', species) - tree_fpath_list = ut.glob(trees_path, '*.txt') - detector = pyrf.Random_Forest_Detector() - # TODO; might need to downsample - forest = detector.forest(tree_fpath_list, verbose=False) - gpath_list = [fpath1] - output_gpath_list = [gpath + '.' + species + '.probchip.png' for gpath in gpath_list] - detectkw = { - 'scale_list': [1.15, 1.0, 0.85, 0.7, 0.55, 0.4, 0.25, 0.1], - 'output_gpath_list': output_gpath_list, - 'mode': 1, # mode one outputs probimage - } - results_iter = detector.detect(forest, gpath_list, **detectkw) - results_list = list(results_iter) # NOQA - probchip_list = [vt.imread(gpath, grayscale=True) if exists(gpath) else None for gpath in output_gpath_list] - #vtpatch.get_warped_patches() - fgweights_list = [] - kpts_list = [kpts1] - for probchip, kpts in zip(probchip_list, kpts_list): - patch_list = [vt.get_warped_patch(probchip, kp)[0].astype(np.float32) / 255.0 for kp in kpts] - weight_list = [vt.gaussian_average_patch(patch) for patch in patch_list] - #weight_list = [patch.sum() / (patch.size) for patch in patch_list] - weights = np.array(weight_list, dtype=np.float32) - fgweights_list.append(weights) - fgweights = fgweights_list[0] - detector.free_forest(forest) - return fgweights - - -def compute_distinctivness(vecs_list, species='zebra_plains'): - """ - hack in distinctivness - """ - from ibeis.algo.hots import distinctiveness_normalizer - cachedir = ut.get_app_resource_dir('ibeis', 'distinctiveness_model') - dstcnvs_normer = distinctiveness_normalizer.DistinctivnessNormalizer(species, cachedir=cachedir) - dstcnvs_normer.load(cachedir) - dstncvs_list = [dstcnvs_normer.get_distinctiveness(vecs) for vecs in vecs_list] - return dstncvs_list - - -@six.add_metaclass(ut.ReloadingMetaclass) -class Annot(object): - """ - fpath1 = ut.grab_test_imgpath(fname1) - fpath2 = ut.grab_test_imgpath(fname2) - annot1 = Annot(fpath1) - annot2 = Annot(fpath2) - annot = annot1 - - """ - def __init__(annot, fpath, species='zebra_plains'): - annot.fpath = fpath - annot.species = species - annot.kpts = None - annot.vecs = None - annot.rchip = None - annot.dstncvs = None - annot.fgweights = None - annot.dstncvs_mask = None - annot.fgweight_mask = None - annot.load() - - def show(annot): - import plottool_ibeis as pt - pt.imshow(annot.rchip) - pt.draw_kpts2(annot.kpts) - - def show_dstncvs_mask(annot, title='wd', update=True, **kwargs): - import plottool_ibeis as pt - pt.imshow(annot.dstncvs_mask * 255.0, update=update, title=title, **kwargs) - - def show_fgweight_mask(annot, title='fg', update=True, **kwargs): - import plottool_ibeis as pt - pt.imshow(annot.fgweight_mask * 255.0, update=update, title=title, **kwargs) - - def load(annot): - from vtool_ibeis import image as gtool - from vtool_ibeis import features as feattool - kpts, vecs = feattool.extract_features(annot.fpath) - annot.kpts = kpts - annot.vecs = vecs - annot.rchip = gtool.imread(annot.fpath) - annot.dstncvs = compute_distinctivness([annot.vecs], annot.species)[0] - annot.fgweights = compute_forgroundness(annot.fpath, annot.kpts, annot.species) - annot.chipshape = annot.rchip.shape - annot.dlen_sqrd = 
annot.chipshape[0] ** 2 + annot.chipshape[1] ** 2 - - def lazy_compute(annot): - if annot.dstncvs_mask is None: - annot.compute_dstncvs_mask() - if annot.fgweight_mask is None: - annot.compute_fgweight_mask() - - def compute_fgweight_mask(annot): - keys = ['kpts', 'chipshape', 'fgweights'] - kpts, chipshape, fgweights = ut.dict_take(annot.__dict__, keys) - chipsize = chipshape[0:2][::-1] - fgweight_mask = coverage_kpts.make_kpts_coverage_mask( - kpts, chipsize, fgweights, mode='max', resize=True, return_patch=False) - annot.fgweight_mask = fgweight_mask - - def compute_dstncvs_mask(annot): - keys = ['kpts', 'chipshape', 'dstncvs'] - kpts, chipshape, dstncvs = ut.dict_take(annot.__dict__, keys) - chipsize = chipshape[0:2][::-1] - dstncvs_mask = coverage_kpts.make_kpts_coverage_mask( - kpts, chipsize, dstncvs, mode='max', resize=True, return_patch=False) - annot.dstncvs_mask = dstncvs_mask - - def baseline_match(annot, annot2): - cfgdict = {} - annot1 = annot - keys = ['kpts', 'vecs'] - kpts1, vecs1 = ut.dict_take(annot1.__dict__, keys) - kpts2, vecs2 = ut.dict_take(annot2.__dict__, keys) - dlen_sqrd2 = annot2.dlen_sqrd - basetup, base_meta = baseline_vsone_ratio_matcher_(kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, cfgdict) - (fm_ORIG, fs_ORIG, fm_RAT, fs_RAT, fm_SV, fs_SV, H_RAT) = basetup - (fm_norm_RAT, fm_norm_SV) = base_meta - match_ORIG = AnnotMatch(annot1, annot2, fm_ORIG, fs_ORIG, 'ORIG') # NOQA - match_RAT = AnnotMatch(annot1, annot2, fm_RAT, fs_RAT, 'RAT', fm_norm_RAT) # NOQA - match_SV = AnnotMatch(annot1, annot2, fm_SV, fs_SV, 'SV', fm_norm_SV) - match_SV.H = H_RAT - return match_ORIG, match_RAT, match_SV - - def constrained_match(annot, match_SV): - cfgdict = {} - annot1 = match_SV.annot1 - assert annot1 is annot - annot2 = match_SV.annot2 - keys = ['kpts', 'vecs'] - kpts1, vecs1 = ut.dict_take(annot1.__dict__, keys) - kpts2, vecs2 = ut.dict_take(annot2.__dict__, keys) - dlen_sqrd2 = annot2.dlen_sqrd - H_RAT = match_SV.H - nexttup, next_meta = spatially_constrianed_matcher_(kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, H_RAT, cfgdict) - (fm_SC, fs_SC, fm_SCR, fs_SCR, fm_SCRSV, fs_SCRSV, H_SCR) = nexttup - (fm_norm_SC, fm_norm_SCR, fm_norm_SCRSV) = next_meta - match_SC = AnnotMatch(annot1, annot2, fm_SC, fs_SC, 'SC', fm_norm_SC) # NOQA - match_SCR = AnnotMatch(annot1, annot2, fm_SCR, fs_SCR, 'SCR', fm_norm_SCR) # NOQA - match_SCRSV = AnnotMatch(annot1, annot2, fm_SCRSV, fs_SCRSV, 'SCRSV', fm_norm_SCRSV) - match_SCRSV.H = H_SCR - return match_SC, match_SCR, match_SCRSV - - -@six.add_metaclass(ut.ReloadingMetaclass) -class AnnotMatch(object): - r""" - - Example1: - >>> from vtool_ibeis.constrained_matching import * # NOQA - >>> fname1, fname2 = 'easy1.png', 'easy2.png' - >>> fpath1 = ut.grab_test_imgpath(fname1) - >>> fpath2 = ut.grab_test_imgpath(fname2) - >>> annot1, annot2 = Annot(fpath1), Annot(fpath2) - >>> match_ORIG, match_RAT, match_SV = annot1.baseline_match(annot2) - >>> match = match_SV - >>> match_SC, match_SCR, match_SCRSV = annot1.constrained_match(match_SV) - >>> match = match_SCR - >>> # ___ - >>> match_list = [match_ORIG, match_RAT, match_SV, match_SC, match_SCR, match_SCRSV] - >>> # false match - >>> fname3 = 'hard3.png' - >>> fpath3 = ut.grab_test_imgpath(fname3) - >>> annot3 = Annot(fpath3) - >>> match_tn_list = [] - >>> match_tn_list.extend(annot1.baseline_match(annot3)) - >>> match_SV_fn = match_tn_list[-1] - >>> match_tn_list.extend(annot1.constrained_match(match_SV_fn)) - >>> # ___ - >>> print('___________') - >>> for match in match_list: - >>> match.print_scores() - 
>>> print('___________') - >>> for match_tn in match_tn_list: - >>> match_tn.print_scores() - >>> print('___________') - >>> for match, match_tn in zip(match_list, match_tn_list): - >>> match.print_score_diffs(match_tn) - - Ignore:: - match.show_matches(fnum=1, update=True) - - match.show_normalizers(fnum=2, update=True) - """ - def __init__(match, annot1, annot2, fm, fs, key=None, fm_norm=None): - match.key = key - match.annot1 = annot1 - match.annot2 = annot2 - match.fm = fm - match.fs = fs - match.fm_norm = fm_norm - - # Matching coverage of annot2 - match.coverage_mask2 = None - - # Scalar scores of theis match - match.num_matches = None - match.sum_score = None - match.ave_score = None - match.weight_ave_score = None - match.coverage_score = None - match.weighted_coverage_score = None - - def compute_scores(match): - match.num_matches = len(match.fm) - match.sum_score = match.fs.sum() - match.ave_score = match.fs.sum() / match.fs.shape[0] - match.weight_ave_score = match.compute_weighte_average_score() - match.coverage_score = match.coverage_mask2.sum() / np.prod(match.coverage_mask2.shape) - match.weighted_coverage_score = match.compute_weighted_coverage_score() - - def compute_weighte_average_score(match): - """ old scoring measure """ - import vtool_ibeis as vt - # Get distinctivness and forground of matching points - fx1_list, fx2_list = match.fm.T - annot1 = match.annot1 - annot2 = match.annot2 - dstncvs1 = annot1.dstncvs.take(fx1_list) - dstncvs2 = annot2.dstncvs.take(fx2_list) - fgweight1 = annot1.fgweights.take(fx1_list) - fgweight2 = annot2.fgweights.take(fx2_list) - dstncvs = np.sqrt(dstncvs1 * dstncvs2) - fgweight = np.sqrt(fgweight1 * fgweight2) - fsv = np.vstack((match.fs, dstncvs, fgweight)).T - fs_new = vt.weighted_average_scoring(fsv, [0], [1, 2]) - weight_ave_score = fs_new.sum() - return weight_ave_score - - def lazy_compute(match): - match.annot2.lazy_compute() - if match.coverage_mask2 is None: - match.compute_coverage_mask() - match.compute_scores() - - def compute_weighted_coverage_score(match): - weight_mask = np.sqrt(match.annot2.dstncvs_mask * match.annot2.fgweight_mask) - conerage_score = (match.coverage_mask2.sum() / weight_mask.sum()) - return conerage_score - - def compute_coverage_mask(match): - """ compute matching coverage of annot """ - fm = match.fm - fs = match.fs - kpts2 = match.annot2.kpts - chipshape2 = match.annot2.chipshape - chipsize2 = chipshape2[0:2][::-1] - kpts2_m = kpts2.take(fm.T[1], axis=0) - coverage_mask2 = coverage_kpts.make_kpts_coverage_mask( - kpts2_m, chipsize2, fs, mode='max', resize=True, return_patch=False) - match.coverage_mask2 = coverage_mask2 - - # --- INFO --- - - def print_scores(match): - match.lazy_compute() - score_keys = ['num_matches', 'sum_score', 'ave_score', - 'weight_ave_score', 'coverage_score', - 'weighted_coverage_score'] - msglist = [] - for key in score_keys: - msglist.append(' * %s = %6.2f' % (key, match.__dict__[key])) - msglist_aligned = ut.align_lines(msglist, '=') - msg = '\n'.join(msglist_aligned) - print('key = %r' % (match.key,)) - print(msg) - - def print_score_diffs(match, match_tn): - score_keys = ['num_matches', 'sum_score', 'ave_score', - 'weight_ave_score', 'coverage_score', - 'weighted_coverage_score'] - msglist = [' * = , , , '] - for key in score_keys: - score = match.__dict__[key] - score_tn = match_tn.__dict__[key] - score_diff = score - score_tn - score_factor = score / score_tn - msglist.append(' * %s = %6.2f, %6.2f, %6.2f, %6.2f' % (key, score, score_tn, score_diff, score_factor)) - 
msglist_aligned = ut.align_lines(msglist, '=') - msg = '\n'.join(msglist_aligned) - print('key = %r' % (match.key,)) - print(msg) - - def show_matches(match, fnum=None, pnum=None, update=True): - import plottool_ibeis as pt - from plottool_ibeis import plot_helpers as ph - # hack keys out of namespace - keys = ['rchip', 'kpts'] - rchip1, kpts1 = ut.dict_take(match.annot1.__dict__, keys) - rchip2, kpts2 = ut.dict_take(match.annot2.__dict__, keys) - fs, fm = match.fs, match.fm - cmap = 'hot' - draw_lines = True - if fnum is None: - fnum = pt.next_fnum() - pt.figure(fnum=fnum, pnum=pnum) - #doclf=True, docla=True) - ax, xywh1, xywh2 = pt.show_chipmatch2( - rchip1, rchip2, kpts1, kpts2, fm=fm, fs=fs, fnum=fnum, cmap=cmap, - draw_lines=draw_lines) - ph.set_plotdat(ax, 'viztype', 'matches') - ph.set_plotdat(ax, 'key', match.key) - title = match.key + '\n num=%d, sum=%.2f' % (len(fm), sum(fs)) - pt.set_title(title) - if update: - pt.update() - return ax, xywh1, xywh2 - - def show_normalizers(match, fnum=None, pnum=None, update=True): - import plottool_ibeis as pt - from plottool_ibeis import plot_helpers as ph - # hack keys out of namespace - keys = ['rchip', 'kpts'] - rchip1, kpts1 = ut.dict_take(match.annot1.__dict__, keys) - rchip2, kpts2 = ut.dict_take(match.annot2.__dict__, keys) - fs, fm = match.fs, match.fm_norm - cmap = 'cool' - draw_lines = True - if fnum is None: - fnum = pt.next_fnum() - pt.figure(fnum=fnum, pnum=pnum) - #doclf=True, docla=True) - ax, xywh1, xywh2 = pt.show_chipmatch2( - rchip1, rchip2, kpts1, kpts2, fm=fm, fs=fs, fnum=fnum, cmap=cmap, - draw_lines=draw_lines) - ph.set_plotdat(ax, 'viztype', 'matches') - ph.set_plotdat(ax, 'key', match.key) - title = match.key + '\n num=%d, sum=%.2f' % (len(fm), sum(fs)) - pt.set_title(title) - if update: - pt.update() - return ax, xywh1, xywh2 - - -def testdata_matcher(fname1='easy1.png', fname2='easy2.png'): - """" - fname1 = 'easy1.png' - fname2 = 'hard3.png' - - annot1 = Annot(fpath1) - annot2 = Annot(fpath2) - """ - import utool as ut - from vtool_ibeis import image as gtool - from vtool_ibeis import features as feattool - fpath1 = ut.grab_test_imgpath(fname1) - fpath2 = ut.grab_test_imgpath(fname2) - kpts1, vecs1 = feattool.extract_features(fpath1) - kpts2, vecs2 = feattool.extract_features(fpath2) - rchip1 = gtool.imread(fpath1) - rchip2 = gtool.imread(fpath2) - #chip1_shape = vt.gtool.open_image_size(fpath1) - chip2_shape = gtool.open_image_size(fpath2) - dlen_sqrd2 = chip2_shape[0] ** 2 + chip2_shape[1] ** 2 - testtup = (rchip1, rchip2, kpts1, vecs1, kpts2, vecs2, dlen_sqrd2) - - return testtup - - -if __name__ == '__main__': - """ - CommandLine: - python -m vtool_ibeis.constrained_matching - python -m vtool_ibeis.constrained_matching --allexamples - python -m vtool_ibeis.constrained_matching --allexamples --noface --nosrc - """ - import multiprocessing - multiprocessing.freeze_support() # for win32 - import utool as ut # NOQA - ut.doctest_funcs() - - -from __future__ import absolute_import, division, print_function -import utool as ut -import six # NOQA -import numpy as np # NOQA -from vtool_ibeis import keypoint as ktool # NOQA -from vtool_ibeis import spatial_verification as sver # NOQA -from vtool_ibeis import constrained_matching -""" -Todo tomorrow: - -add coverage as option to IBEIS -add spatially constrained matching as option to IBEIS - -""" - - -def param_interaction(): - r""" - CommandLine: - python -m vtool_ibeis.test_constrained_matching --test-param_interaction - - Notes: - python -m 
vtool_ibeis.test_constrained_matching --test-param_interaction
-        setparam normalizer_mode=nearby
-        setparam normalizer_mode=far
-        setparam ratio_thresh=.625
-        setparam ratio_thresh=.5
-
-        setparam ratio_thresh2=.625
-        normalizer_mode=plus
-
-    Example:
-        >>> # DISABLE_DOCTEST
-        >>> from vtool_ibeis.test_constrained_matching import *  # NOQA
-        >>> # build test data
-        >>> # execute function
-        >>> testtup = param_interaction()
-        >>> # verify results
-        >>> result = str(testtup)
-        >>> print(result)
-    """
-    import plottool_ibeis as pt
-    USE_IBEIS = False and ut.is_developer()
-    if USE_IBEIS:
-        from ibeis.algo.hots import devcases
-        index = 2
-        fpath1, fpath2, fpath3 = devcases.get_dev_test_fpaths(index)
-        testtup1 = testdata_matcher(fpath1, fpath2)
-        testtup2 = testdata_matcher(fpath1, fpath3)
-    else:
-        testtup1 = testdata_matcher('easy1.png', 'easy2.png')
-        testtup2 = testdata_matcher('easy1.png', 'hard3.png')
-    testtup_list = [testtup1, testtup2]
-    simp_list = [SimpleMatcher(testtup) for testtup in testtup_list]
-    varied_dict = dict([
-        ('sver_xy_thresh', .1),
-        ('ratio_thresh', .625),
-        ('search_K', 7),
-        ('ratio_thresh2', .625),
-        ('sver_xy_thresh2', .01),
-        ('normalizer_mode', ['nearby', 'far', 'plus'][1]),
-        ('match_xy_thresh', .1),
-    ])
-    cfgdict_list = ut.all_dict_combinations(varied_dict)
-    tried_configs = []
-
-    # DEFINE CUSTOM INTERACTIONS
-    custom_actions, valid_vizmodes, viz_index_, offset_fnum_ = make_custom_interactions(simp_list)
-    # /DEFINE CUSTOM INTERACTIONS
-
-    for cfgdict in ut.InteractiveIter(cfgdict_list,
-                                      #default_action='reload',
-                                      custom_actions=custom_actions,
-                                      wraparound=True):
-        for simp in simp_list:
-            simp.run_matching(cfgdict=cfgdict)
-        vizkey = valid_vizmodes[viz_index_[0]].replace('visualize_', '')
-        print('vizkey = %r' % (vizkey,))
-        for fnum_, simp in enumerate(simp_list):
-            fnum = fnum_ + offset_fnum_[0]
-            simp.visualize(vizkey, fnum=fnum)
-        tried_configs.append(cfgdict.copy())
-        print('Current Config = ')
-        print(ut.repr2(cfgdict))
-        pt.present()
-        pt.update()
-
-
-def make_custom_interactions(simp_list):
-    valid_vizmodes = ut.filter_startswith(dir(SimpleMatcher), 'visualize_')
-    viz_index_ = [valid_vizmodes.index('visualize_matches')]
-    def toggle_vizmode(iiter, actionkey, value, viz_index_=viz_index_):
-        viz_index_[0] = (viz_index_[0] + 1) % len(valid_vizmodes)
-        print('toggling')
-
-    def set_param(iiter, actionkey, value, viz_index_=viz_index_):
-        """
-        value = 'search_K=3'
-        """
-        paramkey, paramval = value.split('=')
-        print('parsing value=%r' % (value,))
-        def strip_quotes(str_):
-            dq = ut.DOUBLE_QUOTE
-            sq = ut.SINGLE_QUOTE
-            return str_.strip(dq).strip(sq).strip(dq)
-        # Sanitize
-        paramkey = strip_quotes(paramkey.strip())
-        paramval = ut.smart_cast2(strip_quotes(paramval.strip()))
-        print('setting cfgdict[%r]=%r' % (paramkey, paramval))
-        iiter.iterable[iiter.index][paramkey] = paramval
-
-    offset_fnum_ = [0]
-    def offset_fnum(iiter, actionkey, value, offset_fnum_=offset_fnum_):
-        offset_fnum_[0] += len(simp_list)
-
-    custom_actions = [
-        ('toggle', ['t'], 'toggles between ' + ut.conj_phrase(valid_vizmodes, 'and'), toggle_vizmode),
-        ('offset_fnum', ['offset_fnum', 'o'], 'offset the figure number (keeps old figures)', offset_fnum),
-        ('set_param', ['setparam', 's'], 'sets a config param using key=val format.
eg: setparam ratio_thresh=.1', set_param),
-    ]
-    return custom_actions, valid_vizmodes, viz_index_, offset_fnum_
-
-
-def testdata_matcher(fname1='easy1.png', fname2='easy2.png'):
-    """
-    fname1 = 'easy1.png'
-    fname2 = 'hard3.png'
-
-    python -m vtool_ibeis.test_constrained_matching --test-visualize_matches --show
-
-    Args:
-        fname1 (str): (default = 'easy1.png')
-        fname2 (str): (default = 'easy2.png')
-
-    Returns:
-        tuple: testtup
-
-    CommandLine:
-        python -m vtool_ibeis.test_constrained_matching --test-testdata_matcher
-
-    Example:
-        >>> # DISABLE_DOCTEST
-        >>> from vtool_ibeis.test_constrained_matching import *  # NOQA
-        >>> fname1 = 'easy1.png'
-        >>> fname2 = 'easy2.png'
-        >>> testtup = testdata_matcher(fname1, fname2)
-        >>> result = ('testtup = %s' % (str(testtup),))
-        >>> print(result)
-    """
-    import utool as ut
-    #import vtool_ibeis as vt
-    from vtool_ibeis import image as gtool
-    from vtool_ibeis import features as feattool
-    fpath1 = ut.grab_test_imgpath(fname1)
-    fpath2 = ut.grab_test_imgpath(fname2)
-    featkw = dict(rotation_invariance=True)
-    kpts1, vecs1 = feattool.extract_features(fpath1, **featkw)
-    kpts2, vecs2 = feattool.extract_features(fpath2, **featkw)
-    #if featkw['rotation_invariance']:
-    #    print('ori stats 1 ' + ut.get_stats_str(vt.get_oris(kpts2)))
-    #    print('ori stats 2 ' + ut.get_stats_str(vt.get_oris(kpts1)))
-    rchip1 = gtool.imread(fpath1)
-    rchip2 = gtool.imread(fpath2)
-    #chip1_shape = vt.gtool.open_image_size(fpath1)
-    chip2_shape = gtool.open_image_size(fpath2)
-    dlen_sqrd2 = chip2_shape[0] ** 2 + chip2_shape[1] ** 2
-    testtup = (rchip1, rchip2, kpts1, vecs1, kpts2, vecs2, dlen_sqrd2)
-    return testtup
-
-
-class SimpleMatcher(object):
-    def __init__(simp, testtup):
-        simp.testtup = None
-        simp.basetup = None
-        simp.nexttup = None
-        if testtup is not None:
-            simp.load_data(testtup)
-
-    def load_data(simp, testtup):
-        simp.testtup = testtup
-
-    def run_matching(simp, testtup=None, cfgdict={}):
-        if testtup is None:
-            testtup = simp.testtup
-        basetup, base_meta = constrained_matching.baseline_vsone_ratio_matcher(testtup, cfgdict)
-        nexttup, next_meta = constrained_matching.spatially_constrianed_matcher(testtup, basetup, cfgdict)
-        simp.nexttup = nexttup
-        simp.basetup = basetup
-        simp.testtup = testtup
-        simp.base_meta = base_meta
-        simp.next_meta = next_meta
-
-    def setstate_testdata(simp):
-        testtup = testdata_matcher()
-        simp.run_matching(testtup)
-
-    def visualize(simp, key, **kwargs):
-        visualize_method = getattr(simp, 'visualize_' + key)
-        return visualize_method(**kwargs)
-
-    def start_new_viz(simp, nRows, nCols, fnum=None):
-        import plottool_ibeis as pt
-        rchip1, rchip2, kpts1, vecs1, kpts2, vecs2, dlen_sqrd2 = simp.testtup
-        fm_ORIG, fs_ORIG, fm_RAT, fs_RAT, fm_SV, fs_SV, H_RAT = simp.basetup
-        fm_SC, fs_SC, fm_SCR, fs_SCR, fm_SCRSV, fs_SCRSV, H_SCR = simp.nexttup
-        fm_norm_RAT, fm_norm_SV = simp.base_meta
-        fm_norm_SC, fm_norm_SCR, fm_norm_SVSCR = simp.next_meta
-
-        locals_ = ut.delete_dict_keys(locals(), ['title'])
-
-        keytitle_tups = [
-            ('ORIG', 'initial neighbors'),
-            ('RAT', 'ratio filtered'),
-            ('SV', 'ratio filtered + SV'),
-            ('SC', 'spatially constrained'),
-            ('SCR', 'spatially constrained + ratio'),
-            ('SCRSV', 'spatially constrained + SV'),
-        ]
-        keytitle_dict = dict(keytitle_tups)
-        key_list = ut.get_list_column(keytitle_tups, 0)
-        matchtup_dict = {
-            key: (locals_['fm_' + key], locals_['fs_' + key])
-            for key in key_list
-        }
-        normtup_dict = {
-            key: locals_.get('fm_norm_' + key, None)
-            for key in key_list
-        }
-
-        next_pnum =
pt.make_pnum_nextgen(nRows=nRows, nCols=nCols) - if fnum is None: - fnum = pt.next_fnum() - INTERACTIVE = True - if INTERACTIVE: - from plottool_ibeis import interact_helpers as ih - fig = ih.begin_interaction('qres', fnum) - ih.connect_callback(fig, 'button_press_event', on_single_match_clicked) - else: - pt.figure(fnum=fnum, doclf=True, docla=True) - - def show_matches_(key, **kwargs): - assert key in key_list, 'unknown key=%r' % (key,) - showkw = locals_.copy() - pnum = next_pnum() - showkw['pnum'] = pnum - showkw['fnum'] = fnum - showkw.update(kwargs) - _fm, _fs = matchtup_dict[key] - title = keytitle_dict[key] - if kwargs.get('coverage'): - from vtool_ibeis import coverage_kpts - kpts2, rchip2 = ut.dict_get(locals_, ('kpts2', 'rchip2')) - kpts2_m = kpts2.take(_fm.T[1], axis=0) - chipshape2 = rchip2.shape - chipsize2 = chipshape2[0:2][::-1] - coverage_mask = coverage_kpts.make_kpts_coverage_mask(kpts2_m, chipsize2, fx2_score=_fs, resize=True, return_patch=False) - pt.imshow(coverage_mask * 255, pnum=pnum, fnum=fnum) - else: - if kwargs.get('norm', False): - _fm = normtup_dict[key] - assert _fm is not None, key - showkw['cmap'] = 'cool' - title += ' normalizers' - show_matches(_fm, _fs, title=title, key=key, **showkw) - # state hack - #show_matches_.next_pnum = next_pnum - return show_matches_ - - def visualize_matches(simp, **kwargs): - r""" - CommandLine: - python -m vtool_ibeis.test_constrained_matching --test-visualize_matches --show - - Example: - >>> # DISABLE_DOCTEST - >>> from vtool_ibeis.test_constrained_matching import * # NOQA - >>> import plottool_ibeis as pt - >>> simp = SimpleMatcher(testdata_matcher()) - >>> simp.run_matching() - >>> result = simp.visualize_matches() - >>> pt.show_if_requested() - """ - nRows = 2 - nCols = 3 - show_matches_ = simp.start_new_viz(nRows, nCols, **kwargs) - - show_matches_('ORIG') - show_matches_('RAT') - show_matches_('SV') - show_matches_('SC') - show_matches_('SCR') - show_matches_('SCRSV') - - def visualize_normalizers(simp, **kwargs): - """ - CommandLine: - python -m vtool_ibeis.test_constrained_matching --test-visualize_normalizers --show - - Example: - >>> # DISABLE_DOCTEST - >>> from vtool_ibeis.test_constrained_matching import * # NOQA - >>> import plottool_ibeis as pt - >>> simp = SimpleMatcher(testdata_matcher()) - >>> simp.run_matching() - >>> result = simp.visualize_normalizers() - >>> pt.show_if_requested() - """ - nRows = 2 - nCols = 2 - show_matches_ = simp.start_new_viz(nRows, nCols, **kwargs) - - show_matches_('RAT') - show_matches_('SCR') - - show_matches_('RAT', norm=True) - show_matches_('SCR', norm=True) - - #show_matches_(fm_RAT, fs_RAT, title='ratio filtered') - #show_matches_(fm_SCR, fs_SCR, title='constrained matches') - - #show_matches_(fm_norm_RAT, fs_RAT, title='ratio normalizers', cmap='cool') - #show_matches_(fm_norm_SCR, fs_SCR, title='constrained normalizers', cmap='cool') - - def visualize_coverage(simp, **kwargs): - """ - CommandLine: - python -m vtool_ibeis.test_constrained_matching --test-visualize_coverage --show - - Example: - >>> # DISABLE_DOCTEST - >>> from vtool_ibeis.test_constrained_matching import * # NOQA - >>> import plottool_ibeis as pt - >>> simp = SimpleMatcher(testdata_matcher()) - >>> simp.run_matching() - >>> result = simp.visualize_coverage() - >>> pt.show_if_requested() - """ - nRows = 2 - nCols = 2 - show_matches_ = simp.start_new_viz(nRows, nCols, **kwargs) - - show_matches_('SV', draw_lines=False) - show_matches_('SCRSV', draw_lines=False) - show_matches_('SV', coverage=True) - 
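# The coverage=True panels draw the coverage mask instead of match lines.
# Roughly, show_matches_ does the following for those panels (a sketch
# reusing the names bound in its body above):
#     kpts2_m = kpts2.take(_fm.T[1], axis=0)
#     mask = coverage_kpts.make_kpts_coverage_mask(
#         kpts2_m, chipsize2, fx2_score=_fs, resize=True, return_patch=False)
#     pt.imshow(mask * 255)  # brighter regions are better covered by matches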
show_matches_('SCRSV', coverage=True) - - -def show_matches(fm, fs, fnum=1, pnum=None, title='', key=None, simp=None, - cmap='hot', draw_lines=True, **locals_): - #locals_ = locals() - import plottool_ibeis as pt - from plottool_ibeis import plot_helpers as ph - # hack keys out of namespace - keys = 'rchip1, rchip2, kpts1, kpts2'.split(', ') - rchip1, rchip2, kpts1, kpts2 = ut.dict_take(locals_, keys) - pt.figure(fnum=fnum, pnum=pnum) - #doclf=True, docla=True) - ax, xywh1, xywh2 = pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm=fm, - fs=fs, fnum=fnum, cmap=cmap, - draw_lines=draw_lines, ori=True) - ph.set_plotdat(ax, 'viztype', 'matches') - ph.set_plotdat(ax, 'simp', simp) - ph.set_plotdat(ax, 'key', key) - title = title + '\n num=%d, sum=%.2f' % (len(fm), sum(fs)) - pt.set_title(title) - return ax, xywh1, xywh2 - #pt.set_figtitle(title) - # if update: - #pt.iup() - - -#def ishow_matches(fm, fs, fnum=1, pnum=None, title='', cmap='hot', **locals_): -# # TODO make things clickable -def on_single_match_clicked(event): - from plottool_ibeis import interact_helpers as ih - from plottool_ibeis import plot_helpers as ph - """ result interaction mpl event callback slot """ - print('[viz] clicked result') - if ih.clicked_outside_axis(event): - pass - else: - ax = event.inaxes - viztype = ph.get_plotdat(ax, 'viztype', '') - #printDBG(str(event.__dict__)) - # Clicked a specific matches - if viztype.startswith('matches'): - #aid2 = ph.get_plotdat(ax, 'aid2', None) - # Ctrl-Click - evkey = '' if event.key is None else event.key - simp = ph.get_plotdat(ax, 'simp', None) - key = ph.get_plotdat(ax, 'key', None) - print('evkey = %r' % evkey) - if evkey.find('control') == 0: - print('[viz] result control clicked') - pass - # Left-Click - else: - print(simp) - print(key) - print('[viz] result clicked') - pass - ph.draw() - - -def show_example(): - r""" - CommandLine: - python -m vtool_ibeis.test_constrained_matching --test-show_example --show - - Example: - >>> # DISABLE_DOCTEST - >>> from vtool_ibeis.test_constrained_matching import * # NOQA - >>> import plottool_ibeis as pt - >>> # build test data - >>> # execute function - >>> result = show_example() - >>> # verify results - >>> print(result) - >>> pt.present() - >>> pt.show_if_requested() - """ - #ut.util_grabdata.get_valid_test_imgkeys() - testtup1 = testdata_matcher('easy1.png', 'easy2.png') - testtup2 = testdata_matcher('easy1.png', 'hard3.png') - simp1 = SimpleMatcher(testtup1) - simp2 = SimpleMatcher(testtup2) - simp1.run_matching() - simp2.run_matching() - simp1.visualize_matches() - simp2.visualize_matches() - #simp1.visualize_normalizers() - #simp2.visualize_normalizers() - #simp1.param_interaction() - - -if __name__ == '__main__': - """ - CommandLine: - python -m vtool_ibeis.test_constrained_matching - python -m vtool_ibeis.test_constrained_matching --allexamples - python -m vtool_ibeis.test_constrained_matching --allexamples --noface --nosrc - """ - import multiprocessing - multiprocessing.freeze_support() # for win32 - import utool as ut # NOQA - ut.doctest_funcs() - - -def spatially_constrained_ratio_match(flann, vecs2, kpts1, kpts2, H, chip2_dlen_sqrd, - match_xy_thresh=1.0, scr_ratio_thresh=.625, scr_K=7, - norm_xy_bounds=(0.0, 1.0), - fm_dtype=np.int32, fs_dtype=np.float32): - """ - performs nearest neighbors, then assigns based on spatial constraints, the - last step performs a ratio test. 
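    In outline, the body below chains three steps (a sketch; see the code
    for the exact keyword arguments):

        fx2_to_fx1, fx2_to_dist = normalized_nearest_neighbors(flann, vecs2, scr_K)
        fm, fx1_norm, match_dist, norm_dist = assign_spatially_constrained_matches(
            chip2_dlen_sqrd, kpts1, kpts2, H, fx2_to_fx1, fx2_to_dist, match_xy_thresh)
        scr_tup = ratio_test(fm, fx1_norm, match_dist, norm_dist, scr_ratio_thresh)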
-
-    H - a homography H that maps image1 space into image2 space
-    H should map from query to database chip (1 to 2)
-    """
-    assert H.shape == (3, 3)
-    # Find several of image2's features nearest matches in image1
-    fx2_to_fx1, fx2_to_dist = normalized_nearest_neighbors(flann, vecs2, scr_K, checks=800)
-    # Then find those which satisfy the constraints
-    assigntup = assign_spatially_constrained_matches(
-        chip2_dlen_sqrd, kpts1, kpts2, H, fx2_to_fx1, fx2_to_dist,
-        match_xy_thresh, norm_xy_bounds=norm_xy_bounds)
-    fm, fx1_norm, match_dist, norm_dist = assigntup
-    # filter assignments via the ratio test
-    scr_tup = ratio_test(fm, fx1_norm, match_dist, norm_dist, scr_ratio_thresh,
-                         fm_dtype=fm_dtype, fs_dtype=fs_dtype)
-    return scr_tup
-
-
-def ratio_test(fm, fx1_norm, match_dist, norm_dist,
-               ratio_thresh=.625, fm_dtype=np.int32, fs_dtype=np.float32):
-    r"""
-    Lowe's ratio test for one-vs-one feature matches.
-
-    Assumes reverse matches (image2 to image1) and returns (image1 to image2)
-    matches. Generalized to accept any match or normalizer, not just K=1 and K=2.
-
-    Args:
-        fm (ndarray): assigned match indices as (fx1, fx2) pairs
-        fx1_norm (ndarray): normalizing feature indices in image 1
-        match_dist (ndarray): descriptor distances of the assigned matches
-        norm_dist (ndarray): descriptor distances of the normalizers
-        ratio_thresh (float): maximum allowed match/normalizer distance ratio
-        fm_dtype (type): dtype of the output match indices
-        fs_dtype (type): dtype of the output match scores
-
-    Returns:
-        tuple: (fm_RAT, fs_RAT, fm_norm_RAT)
-
-    CommandLine:
-        python -m vtool_ibeis.matching --test-ratio_test
-
-    Example:
-        >>> # ENABLE_DOCTEST
-        >>> from vtool_ibeis.matching import *  # NOQA
-        >>> fx2_match = np.array([0, 1, 2, 3, 4, 5], dtype=np.int32)
-        >>> fx1_match = np.array([77, 116, 122, 1075, 530, 45], dtype=np.int32)
-        >>> fm = np.vstack((fx1_match, fx2_match)).T
-        >>> fx1_norm = np.array([971, 120, 128, 692, 45, 530], dtype=np.int32)
-        >>> match_dist = np.array([ 0.059, 0.021, 0.039, 0.15 , 0.227, 0.216])
-        >>> norm_dist = np.array([ 0.239, 0.241, 0.248, 0.151, 0.244, 0.236])
-        >>> ratio_thresh = .625
-        >>> ratio_tup = ratio_test(fm, fx1_norm, match_dist, norm_dist, ratio_thresh)
-        >>> result = ut.repr3(ratio_tup, precision=3)
-        >>> print(result)
-        (
-            np.array([[ 77,   0],
-                      [116,   1],
-                      [122,   2]], dtype=np.int32),
-            np.array([ 0.753,  0.913,  0.843], dtype=np.float32),
-            np.array([[971,   0],
-                      [120,   1],
-                      [128,   2]], dtype=np.int32),
-        )
-    """
-    fx2_to_ratio = np.divide(match_dist, norm_dist).astype(fs_dtype)
-    fx2_to_isvalid = np.less(fx2_to_ratio, ratio_thresh)
-    fm_RAT = fm.compress(fx2_to_isvalid, axis=0).astype(fm_dtype)
-    fx1_norm_RAT = fx1_norm.compress(fx2_to_isvalid).astype(fm_dtype)
-    # Turn the ratio into a score
-    fs_RAT = np.subtract(1.0, fx2_to_ratio.compress(fx2_to_isvalid))
-    # return normalizer info as well
-    fm_norm_RAT = np.vstack((fx1_norm_RAT, fm_RAT.T[1])).T
-    ratio_tup = MatchTup3(fm_RAT, fs_RAT, fm_norm_RAT)
-    return ratio_tup
-
-
-def unconstrained_ratio_match(flann, vecs2, unc_ratio_thresh=.625,
-                              fm_dtype=np.int32, fs_dtype=np.float32):
-    """ Lowe's ratio matching
-
-    from vtool_ibeis.matching import *  # NOQA
-    fs_dtype = rat_kwargs.get('fs_dtype', np.float32)
-    fm_dtype = rat_kwargs.get('fm_dtype', np.int32)
-    unc_ratio_thresh = rat_kwargs.get('unc_ratio_thresh', .625)
-
-    """
-    fx2_to_fx1, fx2_to_dist = normalized_nearest_neighbors(
-        flann, vecs2, K=2, checks=800)
-    #ut.embed()
-    assigntup = assign_unconstrained_matches(fx2_to_fx1, fx2_to_dist, 1)
-    fm, fx1_norm, match_dist, norm_dist = assigntup
-    ratio_tup = ratio_test(fm, fx1_norm, match_dist, norm_dist,
-                           unc_ratio_thresh, fm_dtype=fm_dtype,
-                           fs_dtype=fs_dtype)
-    return ratio_tup
-
-
-def assign_spatially_constrained_matches(chip2_dlen_sqrd, kpts1, kpts2, H,
-                                         fx2_to_fx1, fx2_to_dist,
-                                         match_xy_thresh,
-                                         norm_xy_bounds=(0.0, 1.0)):
-    r"""
-    Assigns spatially constrained vsone matches using the results of nearest
-    neighbors.
-
-    Args:
-        chip2_dlen_sqrd (float): squared diagonal length of chip2
-        kpts1 (ndarray[float32_t, ndim=2]): keypoints
-        kpts2 (ndarray[float32_t, ndim=2]): keypoints
-        H (ndarray[float64_t, ndim=2]): homography/perspective matrix that
-            maps image1 space into image2 space
-        fx2_to_fx1 (ndarray): image2s nearest feature indices in image1
-        fx2_to_dist (ndarray): corresponding nearest neighbor distances
-        match_xy_thresh (float): spatial error threshold for matches
-        norm_xy_bounds (tuple): spatial error bounds for normalizers
-
-    Returns:
-        tuple: assigntup(
-            fx2_match, - matching feature indices in image 2
-            fx1_match, - matching feature indices in image 1
-            fx1_norm,  - normalizing indices in image 1
-            match_dist, - descriptor distances between fx2_match and fx1_match
-            norm_dist, - descriptor distances between fx2_match and fx1_norm
-            )
-
-    CommandLine:
-        python -m vtool_ibeis.matching assign_spatially_constrained_matches
-
-    Example:
-        >>> # ENABLE_DOCTEST
-        >>> from vtool_ibeis.matching import *  # NOQA
-        >>> kpts1 = np.array([[  6.,    4.,   15.84,   4.66,   7.24,   0. ],
-        ...                   [  9.,    3.,   20.09,   5.76,   6.2 ,   0. ],
-        ...                   [  1.,    1.,   12.96,   1.73,   8.77,   0. ],])
-        >>> kpts2 = np.array([[  2.,    1.,   12.11,   0.38,   8.04,   0. ],
-        ...                   [  5.,    1.,   22.4 ,   1.31,   5.04,   0. ],
-        ...                   [  6.,    1.,   19.25,   1.74,   4.72,   0. ],])
-        >>> match_xy_thresh = .37
-        >>> chip2_dlen_sqrd = 1400
-        >>> norm_xy_bounds = (0.0, 1.0)
-        >>> H = np.array([[ 2, 0, 0],
-        >>>               [ 0, 1, 0],
-        >>>               [ 0, 0, 1]])
-        >>> fx2_to_fx1 = np.array([[2, 1, 0],
-        >>>                        [0, 1, 2],
-        >>>                        [2, 0, 1]], dtype=np.int32)
-        >>> fx2_to_dist = np.array([[.40, .80, .85],
-        >>>                         [.30, .50, .60],
-        >>>                         [.80, .90, .91]], dtype=np.float32)
-        >>> # verify results
-        >>> assigntup = assign_spatially_constrained_matches(
-        >>>     chip2_dlen_sqrd, kpts1, kpts2, H, fx2_to_fx1, fx2_to_dist,
-        >>>     match_xy_thresh, norm_xy_bounds)
-        >>> fm, fx1_norm, match_dist, norm_dist = assigntup
-        >>> result = ub.repr2(assigntup, precision=3, nobr=True)
-        >>> print(result)
-        np.array([[2, 0],
-                  [0, 1],
-                  [2, 2]], dtype=np.int32),
-        np.array([1, 1, 0], dtype=np.int32),
-        np.array([ 0.4,  0.3,  0.8], dtype=np.float32),
-        np.array([ 0.8,  0.5,  0.9], dtype=np.float32),
-    """
-    import vtool_ibeis as vt
-    index_dtype = fx2_to_fx1.dtype
-    # Find spatial errors of keypoints under current homography
-    # (kpts1 mapped into image2 space)
-    fx2_to_xyerr_sqrd = vt.get_match_spatial_squared_error(kpts1, kpts2, H, fx2_to_fx1)
-    fx2_to_xyerr = np.sqrt(fx2_to_xyerr_sqrd)
-    fx2_to_xyerr_norm = np.divide(fx2_to_xyerr, np.sqrt(chip2_dlen_sqrd))
-
-    # Find matches and normalizers that satisfy spatial constraints
-    fx2_to_valid_match = ut.inbounds(fx2_to_xyerr_norm, 0.0, match_xy_thresh, eq=True)
-    fx2_to_valid_normalizer = ut.inbounds(fx2_to_xyerr_norm, *norm_xy_bounds, eq=True)
-    fx2_to_fx1_match_col = vt.find_first_true_indices(fx2_to_valid_match)
-    fx2_to_fx1_norm_col = vt.find_next_true_indices(fx2_to_valid_normalizer,
-                                                    fx2_to_fx1_match_col)
-
-    assert fx2_to_fx1_match_col != fx2_to_fx1_norm_col, 'normalizers are matches!'
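# Column selection sketch: assuming vt.find_first_true_indices returns the
# first True column per row and vt.find_next_true_indices returns the next
# True column strictly after it (or None), a row with
#     fx2_to_valid_match[i]      == [False, True, True]
#     fx2_to_valid_normalizer[i] == [False, True, True]
# gets match column 1 and normalizer column 2, so each normalizer lies
# spatially farther out than its match, which is what the assert guarantees.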
-
-    fx2_to_hasmatch = [pos is not None for pos in fx2_to_fx1_norm_col]
-    # IMAGE 2 Matching Features
-    fx2_match = np.where(fx2_to_hasmatch)[0].astype(index_dtype)
-    match_col_list = np.array(ut.take(fx2_to_fx1_match_col, fx2_match),
-                              dtype=fx2_match.dtype)
-    norm_col_list = np.array(ut.take(fx2_to_fx1_norm_col, fx2_match),
-                             dtype=fx2_match.dtype)
-
-    # We now have 2d coordinates into fx2_to_fx1
-    # Convert into 1d coordinates for flat indexing into fx2_to_fx1
-    _match_index_2d = np.vstack((fx2_match, match_col_list))
-    _norm_index_2d = np.vstack((fx2_match, norm_col_list))
-    _shape2d = fx2_to_fx1.shape
-    match_index_1d = np.ravel_multi_index(_match_index_2d, _shape2d)
-    norm_index_1d = np.ravel_multi_index(_norm_index_2d, _shape2d)
-
-    # Find initial matches
-    # IMAGE 1 Matching Features
-    fx1_match = fx2_to_fx1.take(match_index_1d)
-    fx1_norm = fx2_to_fx1.take(norm_index_1d)
-    # compute constrained ratio score
-    match_dist = fx2_to_dist.take(match_index_1d)
-    norm_dist = fx2_to_dist.take(norm_index_1d)
-
-    # package and return
-    fm = np.vstack((fx1_match, fx2_match)).T
-    assigntup = fm, fx1_norm, match_dist, norm_dist
-    return assigntup
-
-
-def gridsearch_match_operation(matches, op_name, basis):
-    import sklearn
-    import sklearn.metrics
-    y_true = np.array([m.annot1['nid'] == m.annot2['nid'] for m in matches])
-    grid = ut.all_dict_combinations(basis)
-    auc_list = []
-    for cfgdict in ut.ProgIter(grid, lbl='gridsearch', bs=False):
-        matches_ = [match.copy() for match in matches]
-        y_score = [getattr(m, op_name)(cfgdict=cfgdict).fs.sum()
-                   for m in matches_]
-        auc = sklearn.metrics.roc_auc_score(y_true, y_score)
-        print('cfgdict = %r' % (cfgdict,))
-        print('auc = %r' % (auc,))
-        auc_list.append(auc)
-    print(ut.repr4(ut.sort_dict(ut.dzip(grid, auc_list), 'vals',
-                                reverse=True)))
-    if len(basis) == 1:
-        # interpolate along basis
-        pass
diff --git a/tests/test_spatial_verification.py b/tests/test_spatial_verification.py
index 72231e1..05260df 100755
--- a/tests/test_spatial_verification.py
+++ b/tests/test_spatial_verification.py
@@ -131,7 +131,7 @@ def get_dummy_test_vars():
     return chip1, chip2, kpts1, kpts2, fm
 
 
-def get_dummy_test_vars1(fname1='easy1.png', fname2='easy2.png'):
+def get_dummy_test_vars1(fname1='tsukuba_r', fname2='tsukuba_l'):
     import utool as ut
     from vtool_ibeis import image as gtool
     from vtool_ibeis import features as feattool
diff --git a/tests/test_sver_wrapper.py b/tests/test_sver_wrapper.py
deleted file mode 100644
index cbceffc..0000000
--- a/tests/test_sver_wrapper.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# def test_sver_wrapper2():
-#     r"""
-#     Example:
-#         >>> # ENABLE_DOCTEST
-#         >>> from vtool_ibeis.sver_c_wrapper import *  # NOQA
-#         >>> result = test_sver_wrapper2()
-#         >>> print(result)
-#     """
-#     import vtool_ibeis
-#     import vtool_ibeis.tests.testdata_nondeterm_sver
-#     kpts1, kpts2, fm, xy_thresh, scale_thresh, ori_thresh, dlen_sqrd2, min_nInliers, match_weights, full_homog_checks = vtool_ibeis.tests.testdata_nondeterm_sver.testdata_nondeterm_sver()
-#     inliers_list = []
-#     homog_inliers_list = []
-
-#     for x in range(10):
-#         sv_tup = vtool_ibeis.spatially_verify_kpts(
-#             kpts1, kpts2, fm, xy_thresh, scale_thresh, ori_thresh,
-#             dlen_sqrd2, min_nInliers, match_weights=match_weights,
-#             full_homog_checks=full_homog_checks, returnAff=True)
-#         aff_inliers = sv_tup[3]
-#         inliers_list.append(str(aff_inliers))
-#         homog_inliers_list.append(str(sv_tup[0]))
-
-#         #print(sv_tup[0])
-#         #print(sv_tup[3])
-#     print('unique cases affine inliers: ' + ub.repr2(list(set(inliers_list))))
-# 
print('unique cases homog inliers: ' + ub.repr2(list(set(homog_inliers_list)))) - - -# def test_sver_wrapper(): -# """ -# Test to ensure cpp and python agree and that cpp is faster - -# Example: -# >>> # ENABLE_DOCTEST -# >>> from vtool_ibeis.sver_c_wrapper import * # NOQA -# >>> test_sver_wrapper() - -# Ignore: -# %timeit call_python_version(*args) -# %timeit get_affine_inliers_cpp(*args) -# """ -# import vtool_ibeis.spatial_verification as sver -# import vtool_ibeis.demodata as demodata -# xy_thresh_sqrd = ktool.KPTS_DTYPE(.4) -# scale_thresh_sqrd = ktool.KPTS_DTYPE(2.0) -# ori_thresh = ktool.KPTS_DTYPE(TAU / 4.0) -# keys = 'xy_thresh_sqrd, scale_thresh_sqrd, ori_thresh'.split(', ') -# print(ub.repr2(ut.dict_subset(locals(), keys))) - -# def report_errors(): -# pass - -# if ut.get_argflag('--demodata'): -# testtup = demodata.testdata_dummy_matches() -# (kpts1, kpts2, fm_input, fs_input, rchip1, rchip2) = testtup -# fm_input = fm_input.astype(fm_dtype) -# #fm_input = fm_input[0:10].astype(fm_dtype) -# #fs_input = fs_input[0:10].astype(np.float32) -# else: -# fname1 = ut.get_argval('--fname1', type_=str, default='easy1.png') -# fname2 = ut.get_argval('--fname2', type_=str, default='easy2.png') -# testtup = demodata.testdata_ratio_matches(fname1, fname2) -# (kpts1, kpts2, fm_input, fs_input, rchip1, rchip2) = testtup - -# # pack up call to aff hypothesis -# import vtool_ibeis as vt -# import scipy.stats.mstats -# scales1 = vt.get_scales(kpts1.take(fm_input.T[0], axis=0)) -# scales2 = vt.get_scales(kpts2.take(fm_input.T[1], axis=0)) -# #fs_input = 1 / scipy.stats.mstats.gmean(np.vstack((scales1, scales2))) -# fs_input = scipy.stats.mstats.gmean(np.vstack((scales1, scales2))) -# print('fs_input = ' + ub.repr2(fs_input)) -# #fs_input[0:-9] = 0 -# #fs_input = np.ones(len(fm_input), dtype=fs_dtype) -# #ut.embed() -# #fs_input = scales1 * scales2 -# args = (kpts1, kpts2, fm_input, fs_input, xy_thresh_sqrd, scale_thresh_sqrd, ori_thresh) - -# ex_list = [] - -# try: -# with ut.Indenter('[TEST1] '): -# inlier_tup = vt.compare_implementations( -# sver.get_affine_inliers, -# get_affine_inliers_cpp, -# args, lbl1='py', lbl2='c', -# output_lbl=('aff_inliers_list', 'aff_errors_list', 'Aff_mats') -# ) -# out_inliers, out_errors, out_mats = inlier_tup -# except AssertionError as ex: -# ex_list.append(ex) -# raise - -# try: -# import functools -# with ut.Indenter('[TEST2] '): -# bestinlier_tup = vt.compare_implementations( -# functools.partial(sver.get_best_affine_inliers, forcepy=True), -# get_best_affine_inliers_cpp, -# args, show_output=True, lbl1='py', lbl2='c', -# output_lbl=('bestinliers', 'besterror', 'bestmat') -# ) -# bestinliers, besterror, bestmat = bestinlier_tup -# except AssertionError as ex: -# ex_list.append(ex) -# raise - -# if len(ex_list) > 0: -# raise AssertionError('some tests failed. 
see previous stdout') - -# #num_inliers_list = np.array(map(len, out_inliers_c)) -# #best_argx = num_inliers_list.argmax() -# ##best_inliers_py = out_inliers_py[best_argx] -# #best_inliers_c = out_inliers_c[best_argx] -# if ut.show_was_requested(): -# import plottool_ibeis as pt -# fm_output = fm_input.take(bestinliers, axis=0) -# fnum = pt.next_fnum() -# pt.figure(fnum=fnum, doclf=True, docla=True) -# pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm_input, ell_linewidth=5, fnum=fnum, pnum=(2, 1, 1)) -# pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm_output, ell_linewidth=5, fnum=fnum, pnum=(2, 1, 2)) -# pt.show_if_requested() - - -# def call_hello(): -# lib = C.cdll['./sver.so'] -# hello = lib['hello_world'] -# hello() diff --git a/vtool_ibeis/coverage_grid.py b/vtool_ibeis/coverage_grid.py index 0455657..37debb2 100644 --- a/vtool_ibeis/coverage_grid.py +++ b/vtool_ibeis/coverage_grid.py @@ -34,7 +34,7 @@ def make_grid_coverage_mask(kpts, chipsize, weights, pxl_per_bin=4, >>> from vtool_ibeis.coverage_grid import * # NOQA >>> import vtool_ibeis as vt >>> # build test data - >>> kpts, chipsize, weights = coverage_kpts.testdata_coverage('easy1.png') + >>> kpts, chipsize, weights = coverage_kpts.testdata_coverage('tsukuba_l') >>> pxl_per_bin = 4 >>> grid_steps = 2 >>> # execute function @@ -323,7 +323,7 @@ def gridsearch_coverage_grid_mask(): """ import plottool_ibeis as pt cfgdict_list, cfglbl_list = get_coverage_grid_gridsearch_configs() - kpts, chipsize, weights = coverage_kpts.testdata_coverage('easy1.png') + kpts, chipsize, weights = coverage_kpts.testdata_coverage('astro') gridmask_list = [ 255 * make_grid_coverage_mask(kpts, chipsize, weights, **cfgdict) for cfgdict in ub.ProgIter(cfgdict_list, desc='coverage grid') diff --git a/vtool_ibeis/coverage_kpts.py b/vtool_ibeis/coverage_kpts.py index 9669155..4fa8546 100755 --- a/vtool_ibeis/coverage_kpts.py +++ b/vtool_ibeis/coverage_kpts.py @@ -434,7 +434,8 @@ def gridsearch_kpts_coverage_mask(): """ import plottool_ibeis as pt cfgdict_list, cfglbl_list = get_coverage_kpts_gridsearch_configs() - kpts, chipsize, weights = testdata_coverage('easy1.png') + # kpts, chipsize, weights = testdata_coverage('easy1.png') + kpts, chipsize, weights = testdata_coverage('astro') imgmask_list = [ 255 * make_kpts_coverage_mask(kpts, chipsize, weights, return_patch=False, **cfgdict) @@ -459,18 +460,18 @@ def testdata_coverage(fname=None): # build test data kpts, vecs = vt.demodata.get_testdata_kpts(fname, with_vecs=True) # HACK IN DISTINCTIVENESS - if fname is not None: - from ibeis.algo.hots import distinctiveness_normalizer - cachedir = ub.ensure_app_cache_dir('ibeis', 'distinctiveness_model') - species = 'zebra_plains' - dstcnvs_normer = distinctiveness_normalizer.DistinctivnessNormalizer(species, cachedir=cachedir) - dstcnvs_normer.load(cachedir) - weights = dstcnvs_normer.get_distinctiveness(vecs) - else: - kpts = np.vstack((kpts, [0, 0, 1, 1, 1, 0])) - kpts = np.vstack((kpts, [0.01, 10, 1, 1, 1, 0])) - kpts = np.vstack((kpts, [0.94, 11.5, 1, 1, 1, 0])) - weights = np.ones(len(kpts)) + # if fname is not None: + # from ibeis.algo.hots import distinctiveness_normalizer + # cachedir = ub.ensure_app_cache_dir('ibeis', 'distinctiveness_model') + # species = 'zebra_plains' + # dstcnvs_normer = distinctiveness_normalizer.DistinctivnessNormalizer(species, cachedir=cachedir) + # dstcnvs_normer.load(cachedir) + # weights = dstcnvs_normer.get_distinctiveness(vecs) + # else: + kpts = np.vstack((kpts, [0, 0, 1, 1, 1, 0])) + kpts = np.vstack((kpts, [0.01, 
10, 1, 1, 1, 0])) + kpts = np.vstack((kpts, [0.94, 11.5, 1, 1, 1, 0])) + weights = np.ones(len(kpts)) chipsize = tuple(vt.iceil(vt.get_kpts_image_extent(kpts)[2:4]).tolist()) return kpts, chipsize, weights diff --git a/vtool_ibeis/demodata.py b/vtool_ibeis/demodata.py index 5145f97..fcd855d 100755 --- a/vtool_ibeis/demodata.py +++ b/vtool_ibeis/demodata.py @@ -633,7 +633,7 @@ def get_testdata_kpts(fname=None, with_vecs=False): return kpts -def testdata_ratio_matches(fname1='easy1.png', fname2='easy2.png', **kwargs): +def testdata_ratio_matches(fname1='tsukuba_r', fname2='tsukuba_l', **kwargs): r""" Runs simple ratio-test matching between two images. Technically this is not demodata data. @@ -659,8 +659,8 @@ def testdata_ratio_matches(fname1='easy1.png', fname2='easy2.png', **kwargs): >>> # xdoctest: +REQUIRES(module:pyhesaff) >>> from vtool_ibeis.demodata import * # NOQA >>> import vtool_ibeis as vt - >>> fname1 = ut.get_argval('--fname1', type_=str, default='easy1.png') - >>> fname2 = ut.get_argval('--fname2', type_=str, default='easy2.png') + >>> fname1 = ut.get_argval('--fname1', type_=str, default='tsukuba_l') + >>> fname2 = ut.get_argval('--fname2', type_=str, default='tsukuba_r') >>> default_dict = vt.get_extract_features_default_params() >>> default_dict['ratio_thresh'] = .625 >>> kwargs = ut.argparse_dict(default_dict) diff --git a/vtool_ibeis/inspect_matches.py b/vtool_ibeis/inspect_matches.py index 89894a2..ca35194 100644 --- a/vtool_ibeis/inspect_matches.py +++ b/vtool_ibeis/inspect_matches.py @@ -69,8 +69,8 @@ class MatchInspector(INSPECT_BASE): >>> import vtool_ibeis as vt >>> gt.ensure_qapp() >>> ut.qtensure() - >>> annot1 = lazy_test_annot('easy1.png') - >>> annot2 = lazy_test_annot('easy2.png') + >>> annot1 = lazy_test_annot('tsukuba_r') + >>> annot2 = lazy_test_annot('tsukuba_l') >>> match = vt.PairwiseMatch(annot1, annot2) >>> self = MatchInspector(match=match) >>> self.show() diff --git a/vtool_ibeis/matching.py b/vtool_ibeis/matching.py index 38fe5cc..2a11357 100644 --- a/vtool_ibeis/matching.py +++ b/vtool_ibeis/matching.py @@ -107,8 +107,8 @@ def demodata_match(cfgdict={}, apply=True, use_cache=True, recompute=False): enabled=use_cache ) match = cacher.tryload() - annot1 = lazy_test_annot('easy1.png') - annot2 = lazy_test_annot('easy2.png') + annot1 = lazy_test_annot('tsukuba_l') + annot2 = lazy_test_annot('tsukuba_r') if match is None or recompute: match = vt.PairwiseMatch(annot1, annot2) if apply: @@ -141,8 +141,10 @@ class PairwiseMatch(ub.NiceRepr): >>> # xdoctest: +REQUIRES(module:pyhesaff) >>> from vtool_ibeis.matching import * # NOQA >>> import vtool_ibeis as vt - >>> imgR = vt.imread(ut.grab_test_imgpath('easy1.png')) - >>> imgL = vt.imread(ut.grab_test_imgpath('easy2.png')) + >>> #imgR = vt.imread(ut.grab_test_imgpath('easy1.png')) + >>> #imgL = vt.imread(ut.grab_test_imgpath('easy2.png')) + >>> imgR = vt.imread(ut.grab_test_imgpath('tsukuba_r')) + >>> imgL = vt.imread(ut.grab_test_imgpath('tsukuba_l')) >>> annot1 = {'rchip': imgR} >>> annot2 = {'rchip': imgL} >>> match = vt.PairwiseMatch(annot1, annot2) @@ -162,8 +164,8 @@ class PairwiseMatch(ub.NiceRepr): >>> match.ishow() >>> from vtool_ibeis.matching import * # NOQA >>> import vtool_ibeis as vt - >>> imgR = vt.imread(ut.grab_test_imgpath('easy1.png')) - >>> imgL = vt.imread(ut.grab_test_imgpath('easy2.png')) + >>> imgR = vt.imread(ut.grab_test_imgpath('tsukuba_r')) + >>> imgL = vt.imread(ut.grab_test_imgpath('tsukuba_l')) >>> annot1 = {'rchip': imgR} >>> annot2 = {'rchip': imgL} >>> match = 
vt.PairwiseMatch(annot1, annot2) @@ -1449,7 +1451,7 @@ def ensure_metadata_feats(annot, cfgdict={}): >>> # ENABLE_DOCTEST >>> # xdoctest: +REQUIRES(module:pyhesaff) >>> from vtool_ibeis.matching import * # NOQA - >>> rchip_fpath = ut.grab_test_imgpath('easy1.png') + >>> rchip_fpath = ut.grab_test_imgpath('astro') >>> annot = ut.LazyDict({'rchip_fpath': rchip_fpath}) >>> cfgdict = {} >>> ensure_metadata_feats(annot, cfgdict) diff --git a/vtool_ibeis/nearest_neighbors.py b/vtool_ibeis/nearest_neighbors.py index f3d8217..c6a5026 100755 --- a/vtool_ibeis/nearest_neighbors.py +++ b/vtool_ibeis/nearest_neighbors.py @@ -109,8 +109,10 @@ def test_cv2_flann(): from vtool_ibeis import demodata import plottool_ibeis as pt import vtool_ibeis as vt - img1 = vt.imread(ut.grab_test_imgpath('easy1.png')) - img2 = vt.imread(ut.grab_test_imgpath('easy2.png')) + # img1 = vt.imread(ut.grab_test_imgpath('easy1.png')) + # img2 = vt.imread(ut.grab_test_imgpath('easy2.png')) + img1 = vt.imread(ut.grab_test_imgpath('tsukuba_l')) + img2 = vt.imread(ut.grab_test_imgpath('tsukuba_r')) stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15) disparity = stereo.compute(img1, img2) diff --git a/vtool_ibeis/segmentation.py b/vtool_ibeis/segmentation.py index b86146e..5a551f5 100755 --- a/vtool_ibeis/segmentation.py +++ b/vtool_ibeis/segmentation.py @@ -85,7 +85,7 @@ def demo_grabcut(bgr_img): >>> import utool as ut >>> import plottool_ibeis as pt >>> import vtool_ibeis as vt - >>> img_fpath = ut.grab_test_imgpath('easy1.png') + >>> img_fpath = ut.grab_test_imgpath('astro') >>> bgr_img = vt.imread(img_fpath) >>> # execute function >>> print(bgr_img.shape) diff --git a/vtool_ibeis/spatial_verification.py b/vtool_ibeis/spatial_verification.py index 54c1663..a1e9c96 100755 --- a/vtool_ibeis/spatial_verification.py +++ b/vtool_ibeis/spatial_verification.py @@ -374,8 +374,8 @@ def testdata_matching_affine_inliers(): ori_thresh = 1.57 xy_thresh_sqrd = dlen_sqrd2 * xy_thresh featkw = ut.argparse_dict(vt.get_extract_features_default_params()) - fname1 = ut.get_argval('--fname1', type_=str, default='easy1.png') - fname2 = ut.get_argval('--fname2', type_=str, default='easy2.png') + fname1 = ut.get_argval('--fname1', type_=str, default='tsukuba_l') + fname2 = ut.get_argval('--fname2', type_=str, default='tsukuba_r') (kpts1, kpts2, fm, fs, rchip1, rchip2) = demodata.testdata_ratio_matches(fname1, fname2, **featkw) aff_inliers, aff_errors, Aff = get_best_affine_inliers_( kpts1, kpts2, fm, fs, xy_thresh_sqrd, scale_thresh, ori_thresh) From 9904d66b4e9be17202f1feccaa7897fdd4f9de35 Mon Sep 17 00:00:00 2001 From: joncrall Date: Sat, 13 Apr 2024 22:01:20 -0400 Subject: [PATCH 09/15] More demodata fixes --- vtool_ibeis/coverage_kpts.py | 6 +++--- vtool_ibeis/geometry.py | 6 +++--- vtool_ibeis/image.py | 6 +++--- vtool_ibeis/patch.py | 2 +- vtool_ibeis/quality_classifier.py | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/vtool_ibeis/coverage_kpts.py b/vtool_ibeis/coverage_kpts.py index 4fa8546..99da5b5 100755 --- a/vtool_ibeis/coverage_kpts.py +++ b/vtool_ibeis/coverage_kpts.py @@ -37,7 +37,7 @@ def make_kpts_heatmask(kpts, chipsize, cmap='plasma'): >>> from vtool_ibeis.coverage_kpts import * # NOQA >>> import vtool_ibeis as vt >>> import pyhesaff - >>> img_fpath = ut.grab_test_imgpath('carl.jpg') + >>> img_fpath = ut.grab_test_imgpath('carl') >>> (kpts, vecs) = pyhesaff.detect_feats(img_fpath) >>> chip = vt.imread(img_fpath) >>> kpts = kpts[0:100] @@ -129,7 +129,7 @@ def make_kpts_coverage_mask( >>> import 
vtool_ibeis as vt >>> import plottool_ibeis as pt >>> import pyhesaff - >>> img_fpath = ut.grab_test_imgpath('carl.jpg') + >>> img_fpath = ut.grab_test_imgpath('carl') >>> (kpts, vecs) = pyhesaff.detect_feats(img_fpath) >>> kpts = kpts[::10] >>> chip = vt.imread(img_fpath) @@ -210,7 +210,7 @@ def warp_patch_onto_kpts( >>> from vtool_ibeis.coverage_kpts import * # NOQA >>> import vtool_ibeis as vt >>> import pyhesaff - >>> img_fpath = ut.grab_test_imgpath('carl.jpg') + >>> img_fpath = ut.grab_test_imgpath('carl') >>> (kpts, vecs) = pyhesaff.detect_feats(img_fpath) >>> kpts = kpts[::15] >>> chip = vt.imread(img_fpath) diff --git a/vtool_ibeis/geometry.py b/vtool_ibeis/geometry.py index a6470dc..6abcf92 100755 --- a/vtool_ibeis/geometry.py +++ b/vtool_ibeis/geometry.py @@ -67,7 +67,7 @@ def draw_border(img_in, color=(0, 128, 255), thickness=2, out=None): >>> # ENABLE_DOCTEST >>> from vtool_ibeis.geometry import * # NOQA >>> import vtool_ibeis as vt - >>> img_in = vt.imread(ut.grab_test_imgpath('carl.jpg')) + >>> img_in = vt.imread(ut.grab_test_imgpath('carl')) >>> color = (0, 128, 255) >>> thickness = 20 >>> out = None @@ -115,7 +115,7 @@ def draw_verts(img_in, verts, color=(0, 128, 255), thickness=2, out=None): >>> import plottool_ibeis as pt >>> import vtool_ibeis as vt >>> # build test data - >>> img_in = vt.imread(ut.grab_test_imgpath('carl.jpg')) + >>> img_in = vt.imread(ut.grab_test_imgpath('carl')) >>> verts = ((10, 10), (10, 100), (100, 100), (100, 10)) >>> color = (0, 128, 255) >>> thickness = 2 @@ -137,7 +137,7 @@ def draw_verts(img_in, verts, color=(0, 128, 255), thickness=2, out=None): >>> import plottool_ibeis as pt >>> import vtool_ibeis as vt >>> # build test data - >>> img_in = vt.imread(ut.grab_test_imgpath('carl.jpg')) + >>> img_in = vt.imread(ut.grab_test_imgpath('carl')) >>> verts = ((10, 10), (10, 100), (100, 100), (100, 10)) >>> color = (0, 128, 255) >>> thickness = 2 diff --git a/vtool_ibeis/image.py b/vtool_ibeis/image.py index d5352b6..79ec823 100755 --- a/vtool_ibeis/image.py +++ b/vtool_ibeis/image.py @@ -289,12 +289,12 @@ def imread(img_fpath, grayscale=False, orient=False, flags=None, Example: >>> # ENABLE_DOCTEST >>> from vtool_ibeis.image import * # NOQA - >>> img_fpath = ut.grab_test_imgpath('carl') + >>> img_fpath = ut.grab_test_imgpath('astro') >>> imgBGR1 = imread(img_fpath, grayscale=False) >>> imgBGR2 = imread(img_fpath, grayscale=True) >>> imgBGR3 = imread(img_fpath, orient=True) - >>> assert imgBGR1.shape == (448, 328, 3) - >>> assert imgBGR2.shape == (448, 328) + >>> assert imgBGR1.shape == (512, 512, 3) + >>> assert imgBGR2.shape == (512, 512) >>> assert np.all(imgBGR1 == imgBGR3) >>> # xdoctest: +REQUIRES(--show) >>> import plottool_ibeis as pt diff --git a/vtool_ibeis/patch.py b/vtool_ibeis/patch.py index 05570d7..2e20a4e 100755 --- a/vtool_ibeis/patch.py +++ b/vtool_ibeis/patch.py @@ -570,7 +570,7 @@ def get_warped_patches(img, kpts, flags=cv2.INTER_LANCZOS4, >>> from vtool_ibeis.patch import * # NOQA >>> import vtool_ibeis as vt >>> # build test data - >>> img_fpath = ut.grab_test_imgpath('carl.jpg') + >>> img_fpath = ut.grab_test_imgpath('carl') >>> img = vt.imread(img_fpath) >>> use_cpp = ut.get_argflag('--use_cpp') >>> kpts, desc = vt.extract_features(img_fpath) diff --git a/vtool_ibeis/quality_classifier.py b/vtool_ibeis/quality_classifier.py index c12419d..6ea4c5f 100644 --- a/vtool_ibeis/quality_classifier.py +++ b/vtool_ibeis/quality_classifier.py @@ -19,7 +19,7 @@ def compute_average_contrast(img): >>> # ENABLE_DOCTEST >>> from 
vtool_ibeis.quality_classifier import * # NOQA >>> import vtool_ibeis as vt - >>> img_fpath = ut.grab_test_imgpath('carl.jpg') + >>> img_fpath = ut.grab_test_imgpath('carl') >>> img = vt.imread(img_fpath, grayscale=True) >>> average_contrast, gradmag_sqrd = compute_average_contrast(img) >>> # xdoctest: +REQUIRES(module:plottool_ibeis) @@ -104,7 +104,7 @@ def fourier_devtest(img): >>> # DISABLE_DOCTEST >>> from vtool_ibeis.quality_classifier import * # NOQA >>> import vtool_ibeis as vt - >>> img_fpath = ut.grab_test_imgpath('carl.jpg') + >>> img_fpath = ut.grab_test_imgpath('carl') >>> img = vt.imread(img_fpath, grayscale=True) >>> magnitude_spectrum = fourier_devtest(img) """ From 17da36619fd1ac3593a821510ece697274fcbac6 Mon Sep 17 00:00:00 2001 From: joncrall Date: Sat, 13 Apr 2024 22:24:50 -0400 Subject: [PATCH 10/15] More testdata fixes --- vtool_ibeis/spatial_verification.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/vtool_ibeis/spatial_verification.py b/vtool_ibeis/spatial_verification.py index a1e9c96..48999a6 100755 --- a/vtool_ibeis/spatial_verification.py +++ b/vtool_ibeis/spatial_verification.py @@ -958,8 +958,8 @@ def spatially_verify_kpts(kpts1, kpts2, fm, >>> from vtool_ibeis.spatial_verification import * >>> import vtool_ibeis.demodata as demodata >>> import vtool_ibeis as vt - >>> fname1 = ut.get_argval('--fname1', type_=str, default='easy1.png') - >>> fname2 = ut.get_argval('--fname2', type_=str, default='easy2.png') + >>> fname1 = ut.get_argval('--fname1', type_=str, default='tsukuba_r') + >>> fname2 = ut.get_argval('--fname2', type_=str, default='tsukuba_l') >>> default_dict = vt.get_extract_features_default_params() >>> default_dict['ratio_thresh'] = .625 >>> kwargs = ut.argparse_dict(default_dict) @@ -993,8 +993,6 @@ def spatially_verify_kpts(kpts1, kpts2, fm, >>> aff_tup = (aff_inliers, Aff) >>> pt.draw_sv.show_sv(rchip1, rchip2, kpts1, kpts2, fm, aff_tup=aff_tup, homog_tup=homog_tup, refine_method=refine_method) >>> pt.show_if_requested() - tuple(numpy.ndarray, tuple(numpy.ndarray*3), numpy.ndarray, numpy.ndarray, tuple(numpy.ndarray*3), numpy.ndarray) - """ if len(fm) == 0: if VERBOSE_SVER: From 113c85ca59ddeb8062445edeceec064f39a79813 Mon Sep 17 00:00:00 2001 From: joncrall Date: Sat, 13 Apr 2024 22:30:48 -0400 Subject: [PATCH 11/15] Fix numpy req --- requirements/runtime.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/runtime.txt b/requirements/runtime.txt index a48a169..a8bfaf7 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -19,7 +19,7 @@ Pillow>=8.3.2 ; python_version < '3.7' and python_version >= '3.6' # Pyth # xdev availpkg numpy --refresh numpy>=1.26.0 ; python_version < '4.0' and python_version >= '3.12' # Python 3.12+ -numpy>=1.23.2 ; python_version < '3.12' and python_version >= '3.11' # Python 3.11 +numpy>=1.24.0 ; python_version < '3.12' and python_version >= '3.11' # Python 3.11 numpy>=1.21.6 ; python_version < '3.11' and python_version >= '3.10' # Python 3.10 numpy>=1.19.3 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 numpy>=1.19.2 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 From d8cd9921b05bf1457a9134afed6ed121a7600139 Mon Sep 17 00:00:00 2001 From: joncrall Date: Sat, 13 Apr 2024 22:39:15 -0400 Subject: [PATCH 12/15] Avoid minimum reqs without binary wheels --- README.rst | 8 +------- requirements/runtime.txt | 9 ++++++--- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/README.rst b/README.rst index 
8bb9c6f..8f542bd 100644 --- a/README.rst +++ b/README.rst @@ -1,7 +1,7 @@ vtool_ibeis =========== -|Pypi| |Downloads| |Codecov| |Travis| |Appveyor| +|Pypi| |Downloads| |Codecov| Vision Tools - tools for computer vision. Part of the WildMe / IBEIS Project. @@ -27,12 +27,6 @@ Repos relevant to the ibeis project: * https://github.com/Erotemic/ibeis -.. |CircleCI| image:: https://circleci.com/gh/Erotemic/vtool_ibeis.svg?style=svg - :target: https://circleci.com/gh/Erotemic/vtool_ibeis -.. |Travis| image:: https://img.shields.io/travis/Erotemic/vtool_ibeis/master.svg?label=Travis%20CI - :target: https://travis-ci.org/Erotemic/vtool_ibeis?branch=master -.. |Appveyor| image:: https://ci.appveyor.com/api/projects/status/github/Erotemic/vtool_ibeis?branch=master&svg=True - :target: https://ci.appveyor.com/project/Erotemic/vtool_ibeis/branch/master .. |Codecov| image:: https://codecov.io/github/Erotemic/vtool_ibeis/badge.svg?branch=master&service=github :target: https://codecov.io/github/Erotemic/vtool_ibeis?branch=master .. |Pypi| image:: https://img.shields.io/pypi/v/vtool_ibeis.svg diff --git a/requirements/runtime.txt b/requirements/runtime.txt index a8bfaf7..7c75676 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -10,7 +10,8 @@ networkx>=2.5 ; python_version < '3.7' and python_version >= '3.6' # Py networkx>=2.3 ; python_version < '3.6' and python_version >= '3.5' # Python 3.5 networkx>=1.11 ; python_version < '3.5' and python_version >= '2.7' # Python 2.7 -Pillow>=9.4.0 ; python_version < '4.0' and python_version >= '3.11' # Python 3.11+ +Pillow>=10.0.0 ; python_version < '4.0' and python_version >= '3.12' # Python 3.12+ +Pillow>=9.4.0 ; python_version < '3.12' and python_version >= '3.11' # Python 3.11 Pillow>=9.1.0 ; python_version < '3.11' and python_version >= '3.10' # Python 3.10 Pillow>=8.3.2 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 Pillow>=8.3.2 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 @@ -35,7 +36,8 @@ scipy>=1.6.0 ; python_version < '3.8' and python_version >= '3.7' # Pytho six >= 1.10.0 -scikit-image>=0.19.3 ; python_version < '4.0' and python_version >= '3.11' # Python 3.11+ +scikit-image>=0.22.0 ; python_version < '4.0' and python_version >= '3.12' # Python 3.12+ +scikit-image>=0.20.0 ; python_version < '3.12' and python_version >= '3.11' # Python 3.11 scikit-image>=0.19.0 ; python_version < '3.11' and python_version >= '3.10' # Python 3.10 scikit-image>=0.18.0 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 scikit-image>=0.17.2 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 @@ -50,7 +52,8 @@ scikit-learn>=1.0.2 ; python_version < '3.9' and python_version >= '3.8' scikit-learn>=0.24.1 ; python_version < '3.8' and python_version >= '3.7' # Python 3.7 scikit-learn>=0.24.1 ; python_version < '3.7' and python_version >= '3.6' # Python 3.6 -statsmodels>=0.13.3 ; python_version < '4.0' and python_version >= '3.11' # Python 3.11+ +statsmodels>=0.14.0 ; python_version < '4.0' and python_version >= '3.12' # Python 3.12+ +statsmodels>=0.13.3 ; python_version < '3.12' and python_version >= '3.11' # Python 3.11 statsmodels>=0.13.1 ; python_version < '3.11' and python_version >= '3.10' # Python 3.10 statsmodels>=0.13.1 ; python_version < '3.10' and python_version >= '3.9' # Python 3.9 statsmodels>=0.13.1 ; python_version < '3.9' and python_version >= '3.8' # Python 3.8 From 9b5d24dba036dcd291a5a6f898a1d0f522a7cae6 Mon Sep 17 00:00:00 2001 From: joncrall Date: Sat, 13 Apr 2024 
23:10:59 -0400
Subject: [PATCH 13/15] Bump networkx req

---
 requirements/runtime.txt | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/requirements/runtime.txt b/requirements/runtime.txt
index 7c75676..f99a194 100644
--- a/requirements/runtime.txt
+++ b/requirements/runtime.txt
@@ -4,7 +4,8 @@
 # python ~/local/tools/supported_python_versions_pip.py statsmodels
 # python ~/local/tools/supported_python_versions_pip.py numpy
 
-networkx>=2.7     ; python_version >= '3.8'   # Python 3.8+
+networkx>=2.8     ; python_version < '4.0' and python_version >= '3.11'   # Python 3.11+
+networkx>=2.7     ; python_version < '3.11' and python_version >= '3.8'   # Python 3.8-3.10
 networkx>=2.6.2   ; python_version < '3.8' and python_version >= '3.7'   # Python 3.7
 networkx>=2.5     ; python_version < '3.7' and python_version >= '3.6'   # Python 3.6
 networkx>=2.3     ; python_version < '3.6' and python_version >= '3.5'   # Python 3.5

From a17228538cdeec7a1b4f51e19a40c1f524b1ea42 Mon Sep 17 00:00:00 2001
From: joncrall
Date: Sun, 14 Apr 2024 01:03:44 -0400
Subject: [PATCH 14/15] Tag Helper for Release

---
 .github/workflows/tests.yml | 6 ++++++
 setup.py                    | 0
 2 files changed, 6 insertions(+)
 mode change 100644 => 100755 setup.py

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 7480174..f664e95 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -430,6 +430,12 @@ jobs:
           shell: bash
           run: ls -la wheelhouse
       - run: 'echo "Automatic Release Notes. TODO: improve" > ${{ github.workspace }}-CHANGELOG.txt'
+      - name: Tag Release Commit
+        if: (startsWith(github.event.ref, 'refs/heads/release'))
+        run: |-
+          export VERSION=$(python -c "import setup; print(setup.VERSION)")
+          git tag "v$VERSION"
+          git push origin "v$VERSION"
      - uses: softprops/action-gh-release@v1
        name: Create Release
        id: create_release
diff --git a/setup.py b/setup.py
old mode 100644
new mode 100755

From 78462facbb9757dabaae59a5a5789232e6defd6b Mon Sep 17 00:00:00 2001
From: joncrall
Date: Sun, 14 Apr 2024 01:04:25 -0400
Subject: [PATCH 15/15] Update secrets

---
 dev/ci_public_gpg_key.pgp.enc     | 98 +++++++++++++++----------------
 dev/ci_secret_gpg_subkeys.pgp.enc | 68 ++++++++++-----------
 dev/gpg_owner_trust.enc           | 23 ++++----
 3 files changed, 95 insertions(+), 94 deletions(-)

diff --git a/dev/ci_public_gpg_key.pgp.enc b/dev/ci_public_gpg_key.pgp.enc
index da0c912..0ad5729 100644
--- a/dev/ci_public_gpg_key.pgp.enc
+++ b/dev/ci_public_gpg_key.pgp.enc
@@ -1,49 +1,49 @@
-U2FsdGVkX1/5oEAzIx7PjeBSifgxlso8hcjB7RY1/piOf/3QmSMfX+HhmvzSYjHc
-lEFQM39nZtk1hZSJDFfkOCOSBETMKeSFFgIEfdrPQnYXASE5PHNL/HZROICBdKdt
-9M/pjVN9J9QDkWWQcO71sdo8r+f55UrsBYOBeQrOu1BlM14lvYW7Zsl8zKTd5NHG
-cOe7UH5OeXkA1bIbKdKDIwQsuYjLycIeR6XATwNzB/CyKhW7HRh+kupYiVO/jAyS
-auT8zoAYt4kgz7vpZ+212BtRuiwnhCUvSQT5mNKKpKSLPmYDFSkrcXnntbVcOswP
-ocm4O2wHxUGybWOg8mJY6jSOe6/hZ92o5aMgAdPykgL5OHQ6HPvb3NesCDYRWw1Q
-WdEpMI8W+lKtlMjbFZ1YO9wXqsga6wosW+vGUN5kGjyXPM/RLbAaCwcjlqNO+G7u
-jMjWT7JyC0IJR4U0fOdo3OOM2RVUJAMySgg3MD54DWvDgCEOZmklyvQGHPE9iUmT
-s7j3stPwHbpGqbmgL5Sw9fzi0hh+5wRCo/0KTRJfA+Jq7m1ijFJcRgtzhpRss3A7
-qw25H3iy0aUgwez8l4Zj+P4zVx+OeFFou91TyYKKelFFv+nhK9NJEdCV/HA/Uasb
-uJlk7eYPpCsMY7a8InzjqJpTCcVj2bMIAD+UC0c5Mn9t47G9NfJpqnOb9PJhZwSp
-viS0q4jce+gcY7g9seEpuSuuIMX/u0sxQ5toh5kHd6EUk17AsWqm6DwuE8O3Lqpr
-0NITPSI6+XMc0D15K49DHPjLWPbXIOVEeYpAdOJ+GiNuF9SbexUlchtp0IgaMuyI
-/HUiZRAHFwyQMWKuQR+0ZlNBSwstj2aF/x6XLMNWREVFI2ye30o65edwgGUKNh1W
-pBEwbR2Maez7AECib/MCGPf38I8ssKM6dsA9eidUxvquZVPeSRJ2g/Up4WYfMnPF
-ssU6LTKQKw1xE+IXdPfO5HNU0QoooPlN6QAATFTX30Ut8IU0D7ZXj4RgN/j7tTbA
-D+OMdmchv/KLtVZUPHwxhwc7qWTliFk4EzSoI6ICuU8GLKD7fq0qhtVepl2DmbmT -TdLTxoFBonh4WlE3vPdpuzRkDQImA7getZ9cnC40SPp1g2jZ+K+MvF4tEjgeUpOV -hPmWTTxjER3ljBD6f5o64qqFiifwCb42+d3qX8U+BaVo1Yfjsr8rv/ByGeG8oMPM -Yo9gYJ5QJM25gKo1rIg++Hf/JPrMh5z/wobbqME1nr9DNoGrjSlA6FrDHNr5ux6e -at/C78PG/Zj6P6lHJUNmKJv9iW/Tq4Y+4etv+AfN4ZLKEE3jMpOCj866ASqm9waq -cBH4FRqUke3/aOuG/kUWO+D/AVuWJhT+FKDAZZXuJLv3N0NWvjzxZewiKXMYjzHj -GKUa+57t6i6vaKfjVDMyF9wpeGuhVqDAOoCmsZRlaK7KK4FmXFoHlXh4rfomEnPF -jzrl7JsqVavNWtA3D/AzfeTo+Pz1XYkmNcaQlQkchcWWulS3GsTaV8cYXFTqmpTS -ajgZtIDdcwu7GwRW2y3exBGwQPxoW39+bIVLeauesl8QRIJw9442xYjGiD93Hhq2 -270PdVv3toEL9ScO0A20NpXZ+29TOdh2AdUWkeGC2c8eCznMFZz/OuFS4NpjF7Gc -l8eJbBWjI1+C+3dgQT8Inb5H+c4oES5RDH/b/CPxi7O5/5ohJFzcH/ybY3SV0gXQ -M+BsEBo+QFrxIf/R+j4EfTKY6Vzfr66gXlnIr9U2cHZycF94dCCtU1xUkgwr88ox -aumtWU7Qo7QmmuZNzJiLXvBw1eeSxqYzAyN4C5oOHRj0g8ZjD2ZSuY6RjOKcOs8H -y03thbec6crw5tEclELQHx53TD/RgWoeO54c1T/gNayE22zCC645pM5sZ7rsu/rW -vtiP2jxP6KXge5sYRyK0vSRvtG+E3V1r4dbuqRec5bFzIgGXvdp2+6B3YZbSYeI8 -7r3hyHucmuDj7i9kmRLvKgAneL8Cfsvz7PjxCsz1v2WRF+Ez7SZFRFhpffABQ3LK -toNmvKcxTkRlZF8oEydExtyc2SLXrF1yuMBCdCgwxjE1E3dOQPSQ/50n54v4XwbM -GEx1ol0J0FUHEi3gUJ55LvcHnhE9kOrkrq/ZsGy4RrBf79SyZ2ejwr0AeORLLGki -kDQasrXU7uDrIV3mxVJQtpsYpS8Dd0WEBPGIfxr0k+1Gixxig8UyWAJmr871v1Js -8tK2s+tXHIUKLReMW060w19aV0pkfY+vuLjy+8sF5s+/DG7gl0ZWRMA4PKEd9UiE -HXHRF6xa5i3khuZzVOaKu+dCsvBVTMp7BluJE7a5PXc14BU9kSgZGPOoaoCdvlaU -myzdoIoxulcyDwzLoBBy1cCZhrqpTt1QncNd6XlgykAUwGEnlOLclct4bRR5mm3X -7Yqglc9+J3eSQ13xP5ovHRDO3nZd4a0Scb91Dojb6zB09x0bpItlitnhLInizeVd -NLeYkp881bLWxUGTN4Gy4ZKaLmUU2DEDNa8iy2yy8vqDXEbSlQDIbCIUPz8jn1SF -XC0B0CvR//1SP0ct4qmpuukjJrfYd8xc2eEl0qyZzskBALzAXc/L+Nb6g6MXpPmm -6UfqNvR0VFvG92N9XmDh+t/zQuQDOZUZjQG7O5dSoNS9TRpbh0zd9YNyAUBtNmG1 -9uEBGWkLqUFsX66DzpP94hXSwAALCZMBQyD08AFHvc1ToIBkUqqlMqODWR4CPFVO -DXFBN5QuK1eyTt+QPUCKe1GJKOP7H39nbA2PIGsPtDYgf4f7NEApm+wxZxHY35AR -UbS7HxO0RKTxOGGQ/SNrqx41SkshLxKp1eb9aVkGb71FzClL7WS9ujE0qNgtwYua -yyOqgw1vZm9zMjMP1DJ86wIO/ijU/decpoGDPcWXj2gH3iggY08m7ag53PDFKvWS -n2pv2xsp7hZE2Z2hhmwQfGTPIE/6ndpz3A27B8CP6OEnHChiC3+Nmt5moD02dHnN -sVOVPTTMwCDnGXgLWkupH69vzriE0obpB0cXysNswrLgl7fkRnPsrq05+75UOib8 -D+qAOgL0BLGgDDkNiJgEWg== +U2FsdGVkX1/+SABvW4ARSsu/3uNcmdidyb/jvs5jtJXx4ZQS0HkGS04qaP/S6zBh +zy3EF7sjf9VEPgn7zpGKs4CeGgxBFnXfsDhUCAwKZj3ZzVz2twZJsncG6+A6dFQp +RAdJejO3byyAQW+qycAlXoaMu92A7qJQa0IECccMgppoTPszxf9/xbZ/fiQeY0MB +Ae9xRo1BA/wN2OtIXGBYkV2N5rInMIxXIpBkSZ0tC/G2Azj0fn0ydTdwqp5nh8Ia +YD+fYd9rki71cjZa94ucygsW958FhQ2SF+yJ7RsHfUjt5fshYByW7Q86HElhoaMb +YojTof2tnObFogISANdpTQYg2avbq0bqJilJ0iQcAjeZaSTE9UXzjfrcrwo99SBY +aPSDZpm4eJCbfsy+kwKgjolMEy9tIJLu/De3cUKF4uW24K41gtfUCLb1PLmWvh/T +TV31gZA+anAbrkEBl7p5uxgsp/03Qc3USAWO4UH7R/oTe1IPKKo1snXk7I8HHsG+ +pQbKRCcsLjmHXWz/bzXT5uH7DX6fOmFp4xR4q8q5pLG/PPgz2DCs//MBTNe02FlD +q336cEMV7M9awaE+vz9I59toNv9ll10U0Dz6tM9SCgUTdcvtEkoskyotp2HHkdSi +BVhjW0gWAPUwJFadF7GA9dX/jMkCrrIBBxjUxgtZs+OaZ11ShqkiQYwFaYkBK8DL +piJHgX7h/QiDSRxjhr/cq9stUPNyA/jZYh7VXhuEkIg+kxpgC5zUdFqAqwobEH22 +Pb6HSnCGIe78N3iL2QjUrW+XsTQYJz7q9iUAFNUI8qYNqn2r6NwXjGjUq9hTREG1 +uyIf6NxTGdPPTK58DZPVYT0PR9ZsBqbh2RfC9TFp9W7kcJcYpasEQkTaG02M8awt +hR74gWNHde1fSpWz0fZSvkP+tajNj9oQbBhcGUXCpe7S0AOb7b+PRSXNvdLaRdvL +gS7H9KCCMW8Be8KZ0p048efCknFD7LYSzQz23sZ6fDZRxXhVuP+o/47Mlhxopark +dllG5a4FxGoScN/374UAd20bDE/iCyE7XVxd1UwS6ldHkbkZVQnwd1zESO310/dy +wW1Diafq0J/LUhqZ8o/DCTYWd9avdS6UsxCwLQQ7FzbqCgv4nmp+9zul8QIjuosS +SDUXmNuYKEt/3ht8DVhYPH+Svok2AxqzuEu8cZKBfbIhF8/LygGI876FnMw+CP6X +HF/vFOOEWDH3kQHIhLZ29hXVVOvMIhtmbaulpQEaaNM91p5qdtMgfac6rf8HlVkT +fnkrABRgyIxmrwbgqlZHPB+IzTVYPqjzknJtrYyXAIszo31O7ii724V0ICqrGxuh 
+54nm3U7Ev52tOnvoc7E4jXcRPlkJNf3rHhCaG8wpyW6Vjf0JJGep2k3P6H3pY18b +V1GJJEdtFzp48LlFp88orUi+Mcr1Zei5fpS927Ba77lSM0XFSBnAVO300Oq/IPRo +RmujJhWjjv4jqaTFLPQIvvttEcpnrVY41401Gbc60itFTkghVkprmO+X9tn9e6vr +T+T9ClhkeB7lQ4fjHikhssSjkBufvSON5ICq+Nosl+xjdGLnt0leqdSLKvkVDT+Z +LOAEAT2KYVKw+G62jelfJAonqVzdx3g9ppW/RRjzi8Z3Xi93h2jCLEqTujqefDNA +vUaVUlJkxvxa+h9Pd6yBd7qTQCMERPuOcCLSHNo82uf902DxavzhvKqYAHjPsYSS +HxBdSP/GahOKKx//f3ynIWiuHnODYEno8LzPDk9MWsyBLbDCguSsX3bf3f445Z/U +angKXedrsPfHdwZK6W7cf94E4m4OUeJBNUgffLIAO5n5An1t/6CBgeCfoMCvIP1J +OsCbN7ObI5sxnjW7/aiuC8/CQ0W95iJ/gzXMm/YfABA1pEr6Ul2WoVkPiOPmeZrC +SpTZTTRv/BrNivpqTA5Q6qAu4Eb4ux2nRF3P34v1pO1auAw3IhYT0nRKg07vn2jE +76oRrMfHTshyMyBlQwPzUKmBcpww3kSLZtW+AhDfJtsp0tWebqcMnLEYpH8UmReq +Di9FFoh9hTRqZxFRNd7UH/I/QSJ/+7xMCU6imINzFvyYsIKDEL7lSays96EYAAvV +HO17ryfOFFloIeK8OD0yTbLgFTdym6GOtoHnp/mJp9k6lm00YZjnkqVLVy2pWD2m +NGZVnF4gHdp18lmeex0yddaiFAJKx4DTyFVKSkTuin14W9jw6wMiML/5Os3u1FD0 +4fdrtWh1xbna3LxBHuHZIF3zzEO1HEYKvFoi3nvlGHERd5/YdaBTK1TioxhUODzf +DIgJtFKlctoyCGwOOKffSQEgma+hh3xlyku4LUvtkhDqwtINGt4uRQm6bwB8aYqF +z+Ae+JrjTtuAwlcvyNZWSbcA7j5BFKsyNP1mAMtkQCJpleK/PLOzigt5u55aQ7AK +LTpS2tkyqDQksaog844VmQIFCzXwrjZ/oV3pBLtBGlZnEPCMYKxJnwzbvzUjIzfh +20RShROUC3xHWSf7PmoOzVs3GRkaVdGlEBcvuRRv0An0P6HCIEgF+mE+I41GnJwy +HV3pbFdCuDNwfz/m4tbxPzyjUQwBVut9BLkRDl5jd44QdODpOcbZkhIHIpwjNJP3 +LSLd6mGg2uc4+QFc0dkEUBA29vX3yWQS3pgmjY/4CoMZwO//mxNSDZ0EBwGvVqsU +jSMbwwKbIc6azPnD0Uz0YbvRvuJdrIqB5s0khpWG83zO1DxPoPI64YvoRiTVHRtc +QY+Dpjd/i2NKaipEBAAc3lmMs6mvNyHPbbFNbCPiF+SmijLuVKV9YspOjRf0ilue +QXVXDcA91VxEz7ExS563a39KwDNJhSTSYrPxCiBgwqXyiYzdRcdTOlBnY/8m81ZE +BOy3FdNO9AIf4yhdgquoFrv1MMx5MDV6lcmXxjB9m1o1yN+Bq4p0TYY36wxt7K0L +tGFZHYxaBxYtcJVFrNFyBPUL/nJFJPRtkD/JCNQJk90hEPR11eNhNQI2p3d1v8mo ++5fRsYXyhxauzBOg1mS+kykfemYNdy2qBq0sMqOfl78t3zqyepMCfICEAOLz/MRj +Vnd1S1Kr6efKzoh5hAKTkQ== diff --git a/dev/ci_secret_gpg_subkeys.pgp.enc b/dev/ci_secret_gpg_subkeys.pgp.enc index 416a914..278a03a 100644 --- a/dev/ci_secret_gpg_subkeys.pgp.enc +++ b/dev/ci_secret_gpg_subkeys.pgp.enc @@ -1,34 +1,34 @@ -U2FsdGVkX196sgHSAL54JUQQUDH+8rhL2CvXrIEd0EYozYvpA1CkkcBZRSFJVXOP -wM4cY7HDDUE6Tm7mz2O8l9d1T0gNpSYxWu5BwOoPxJfpyWHVYBrEpDQthExO1act -PG4ybFhFB+rudr+4ueaCjpyZyHOgU0QGkPURYonNBZYIJQCN7m/uwNTiMKLdHj3D -eNxtPJmGHQRJ/eDecuWIpe89xU87q8KnaZqlsnTSWg7Zh3e/CGfh6GYo8exeorEI -6uIckoqMVP1v7kHgA/D3R5JbKxSMLfspqL+nRtrPJ8QWbZjADRd4pD/6sy2h2f5R -+HH9Pae4Nob3sS0abD8YB2JvhmlqvA1sCcH5feZ/hkg+KCQioNsupdVyURVWlJX9 -UWp+8Djmk0RqFe3YRU80wEgqCXXT8bF0C+aIq7737i1MJ56eeKzmXF/klpHSMmHL -RClLjET6saRoQ9Mg5jlcMQ2J8fgwDME2GXzy3WuACk6MFneLTJ88MpiDvQs8johw -ShM+lWid6tgXcy7e4FNYn6t7LDBa+u9Fu/1rGINg+7PVobwRr5gf5Ner3ehrgL5t -b0x19N5dfhDc48zyVG7Zgdp8NAYw+yQ8zy11xvF5c+ilOofbMfdkE50dyPA1IVAM -/lbNExPcAHjQOoOIHIWPKVR/JU42Xcvz+toAuRE1T3PvmZIksuJ0CeRG0fFriPJw -vjgtlC5uGda3ZVttOXUwGHMqkmg3IQ/q+ORQ0SNzZUTdhGW4TDqb6lqaA/yuFwUk -LnXhXlcS9OZWKUS9jEAJInLtdJbHjw+cxIiRIWTAjnosrLe7cI/pUTQX6uef6e4J -OqHQHyKI+nvOg/1O7MNZ1N+1KkccbNRJjL+mfqJ9CnUykoDg0kSUPRnbrvVuehO0 -JZxZzBfqxUAXyq3PMhtCajrthFynHjTkOLTb1mRGn+k/U+TOUnKwU5F0dJNfnttv -wofwndEtwp2z6+1rO4Wb3nvDYUF64dszCs8oH376evjxBYCT3EfOFv6SxuWc0kRK -vtrskAY4JNs2+VH4cy3+CNYkrXxbbAsg9CAlanUe7RpYHEH7fYhlu2YuAesof5jK -3WtdN5QeI/FbQeglzLEf7bn0otj1t6hKPxV9cKa/wdjdfgcTZ/J2Wp92D5D7JYqO -fgzpEmbgNhRiHQnv1u24jmyFjjduAn05FbTYycM/ETkVWBQr9HtvmuryW7vN8FAt -7BbZMjV/G9Qxt24M0CGqqEsSqbFQh8lXWya7xmUYCWdZOlFdMucKxOIyoH9idT8J -Y6A2A8C5tpxkXpIAvsqkXbCEHPM4/s1sB2otEDlIRvEXzP3c3C5HhYW75sC21gHR -d2YMB/03CdI4zFyAad0Cka2SzNKZHn0bE6plch3GKcyHVGvjCTqzuqSZ7nDPb8Mu -A7nUj6+DMzekA0giSOOyFCq57697x/KTZfSuBZMGz/UJ78KZhgNeTloBrHaHk0n4 
-SkD7PjdQoe9HuGXhuWgrnagGF2YYxIh4NV2G6GT9XNrX7EIY6Ut9WUGsCAthUZtp -cOtbkExDjwJnZVJP+TaEDZ5JN6rmeOT9QMXlfEeG9a7vrZI8hfmLaP5FwypP6dfC -DtkdOqp28BenJc9cfYeFgeM3h9jU90bRLny1pA5Wigo5lut5bKQCHu7W7LywPmMd -dgIq7p2NuA7u6c1WRBYmjgLfDYjyjWw6toDwxCMmYk3rchD5P061RlOGYmG62aae -1TuoP9BfOaTyPmtkbs0/J3ezaWeOifftkFst/qNS672F8fSgor5+laQxKn0FOr34 -LzCEzFfBayjDm6/7QNBwggyM25bfDTOYkV/YzBD7VPzGPDALAi9krux6GQIkCozk -/sqoh0K9EtJaUdPFvJFSpk1PR5xxM0RnhJjhEv3G4RIhxOs7aZWQ29qo3KBARuvz -uj/+TLJbENxfcw1BhPVwLjwN0sOKn1Baqq0fFLoANcgqkOInZZQyWfxkvYS/XnME -PZrhmVMBIZAOUIhwnsW0rwQD4/JE9vbFG1m096au8ktRWQAgf1ogJS3ThbknFhVn -YZcnsLGVy608iOyiUPqgeieZeiYAhRr69Kx3xi3lWMFTCIl9omlpy0XeyK13R237 -G2vjQU9/cbt+HG1J6Ohd4o7zYTJb3SJ6bbYOy+nxAnI/GzMRq1FP/vAwYQBjwr09 +U2FsdGVkX1/rtWDYOAnLdCAxFxVu+Rsuy31X951jRjPvbu4pZ44qA5bPWCuyuWM7 +vQkVXs3zqWZcxNBk4qzGZzvaAxaHWGVp3vgnbsmPRt9C5tQwltl4Iy7f2yivniJG +swCrn+66w/tTYVYrwlRSfJRUCBibfais2wtQxB7NO/JM4+mbG38pj4PGsP+RRw8I +g3UXVeGybO+1dwafIbSCL/hF/0vnCqiyksYyCTzr9nV/AKiNbexzAAeJFMFtk0pW +GJpskITzkCQMiYRe674FI4Yye/vHHllPWCFa+3ALrXczRjdsSpTu122kgzRosXhm +MV6pn9c4yQfnMb19VPLXtppP1q2wmPEeWf6Tw0JR60ge9LyYvzSENxaaqRPJWWJR +JG2peXnhYJUSB+0/Y9CYxfF0zVQLIpUMDw5g9IygchhjiPuDbJfzsHPTa432e8cl +JBxzIsTsF4S8z3Td6AoXtoEaMENvClrbp7I/pSdZGpQ01HtnbcsFm1b1aRq9AHrW +dpzuUV9f0HHmFYM+mle6cQ8T7qe7QoDEpGTO2duV5GjJ1WVRfkWRD1Rh7ZQk0Pit +G6zNFglQMhjQi02kMb5c0SA2NKPQJ3r1LJscaBEjR5Y3F0Q+AtoYDDXBwcsnhj4l +YhN1trGO0irvwQiBPW04ViW7BnPfgw5yJViD8J+d+5P4Hhat2dZsl7RXIehffvTV +1gxjm9VzQuSTheweCN+rwjy/0GhcvoQeQjNBZXToH8AVbN3OLkIY1jFigyhB6bZF +enaWkYx5gXAW5UVI/sAafg91JAlHsvPveImD+mFdbI5vtCTKMCzE/S9kQbWTIbY2 +o4gpfkFjHmYZdClYQkZgst35Lb6QauGp8ozd1LNzQ/CAIia9EEwA4ZfIQlOgaLGQ +6hRU+jibRvde/bZkB9B6f0U+dYSLQBRV3v9W3MYUwGNjgopyqAlUjp4j+UoE7WoO +1n/8yYVb1XurKY7rPz3mzxheIxUJJhAAz36Sn95Aq3vrAO9VQu9jwxDyRYXlqIL+ +TPZffKh/IQGHCE2UqTiBdSxjKODJDGFYAWvNSkermdgQbz0MNooYn02vGqfUga4n +didBxwYURw/JEaoh2uOf9L0YYvY+m+WowuU9F8x3/WjHsz8Um0wL/eoYI6oR7ShJ +BxdhIl1/bHiDL3xHdHjQ5s9SVEY3UVEWA7xDuG2Br/Izfr3kDwQjYwVH7p9bZHID +IuogG/2EevCTJCrRYrBc5YtRYTZCwe8XmZEjHMAhRzOLlvDGv2/SoHNzzjZnac28 +fnF4l9hPQn+4jC3dW5tO9iiQQnq/FihiyJNcU+R+d+KPV7g73NyhuTKQhIW3U2W1 +RdrBU9d5Gf1dN9w0mhI8leqCT8a9T87eeSjZIAkHLqjGGRjARR5QYDIkh5w4uaEJ +kOPCN02sHk4Z16h1d3d+n1G4uCdjIAh+SS401GnA1NW4rqZpps7WiY7cskyY/C+e +txbUcnFd7Ces7bmsX5qyrEp8q6V4OdbUiBfvDzZBd2dwj4g8yzuCZ67vihqrCyum +rYIymDVElkggkJFGvIm0BKkq3fw1ONTTU52mT5na8vMZclWNtKIToyECfPLWE2d6 +M8Ejdirn1LtnCfGqHei0yuvOMViPHtTLkwp+QHs2gIjRsIEfkwzKduCaeXcRsE91 +lLTzHh3sF5iMBGtlHhrWdOADpkp4BPAqP4amQFUFrCrNhnnnxa2CEc49dn6Jv2AT +HbI31qD0o4hZiJVrufZWuoWeqmMawtZLokkds/tAPgjBV507WFNKMEVqNJYM9WA9 +C5C3wZ+XPSL4vx+5CHXhxqBfDtW+7vlvVgipwi4ayhqOif15zE8Ttjljijuevf8w +8183yTqFgjP/6vzegtla+NWl/q9RdEte84gVU5I/ymL8uzPbgRhPhf9E4YIwAsOO +4HK7EBNjJWGtY8n62mstGnFchnN2fTZaCOarNqnOuWM8b+uuwYHVV873V6q+RECt +ThLHwEHWSDteMWYBIjUFaiPLSGgGfD2QnchfO/P0zGOjipyabtxLhHnlQ+DRutw0 +ZwdfmThEx7lEKQyqBWpTEEuv5CMoKQjEfP9wLBahCK+5J3Ia799o9jbGjIj8M6I/ +r2r20ss7yxux37EvS5WAfmKL5Yd1wR5a2yHVsEnsDyGh7WREx8chn5NqI1GXx2qS diff --git a/dev/gpg_owner_trust.enc b/dev/gpg_owner_trust.enc index 36629da..96a8377 100644 --- a/dev/gpg_owner_trust.enc +++ b/dev/gpg_owner_trust.enc @@ -1,11 +1,12 @@ -U2FsdGVkX18yBxdXn/WEsxApENo5QAn/0GfUrFo2ak5yobTZSNes4SZ9w+wYc6q7 -veu+beozOcFzdTyAqJLirKuQ611E1plRaDziK+kuEzq0EMgs4xJcNSf7fKyxaXah -mnM9bels46ctMft4mYEVyUK++4vQimYWA6d0P5p8enKKr/5xZ6MjXLzhKDR0AaFf -7T7o5KZo37ZABHrPYh+w70dtHy1SmUymOJiMbGnfFJmyrFPXC5+aF0mZ6VsWK/e9 -Q9HbxmxleWTA8Mdl4A63byFp5UmK4ZZUlUGK+9uDK/Tdji4x0q4aFCHmb/9FBRPT -KgHYZ0yzt8sJF9hXfspuikWDAgonMEXNLPc7XUW/asdOUxMRYwnmy8JzIYfJfUTC 
-CkyvGtBWn3IuG5H6/gMhPAxBd1uW6zDQTY6YshAEuUuNv815+9NjRCmE7w5951Tw -40DnDbpR5+6WjkPZn8ZXSPEf+Abtz05swuV9/JG9vS/mwyq8tSneSxJzJUJyUidz -YgIdvwwKGYqYwZIE+3ZG0GcFfQ8XgiCVOx9mxPB7Y6ZUfjgYyRTMyXRS2Q4Ay/WO -0M3Yz1x/M17ruIhYbLNcDGOBAORxjYwLysPW+PJCgzf0A452ROiYYTnV7krU+HMD -C5Z5YdrOy8qKFtNOM1mcuQ== +U2FsdGVkX19AM0Wl4Fc3ljMV6jUqimVH6wDMnxepRgKlwGQJoL8NhLc2X506HQGW +ONs+eQlZPXd6MucZ10oFZN28jJ3j/2oCMBBc1r8Dmvezu+V122Qf9NeWmlsp45Oz +AYUHjEylDCntPb7i5NyssQ0PVNSdaMOVTWIPvJ4IIItHdSziKkaKguzTvQiN/LlM +dM/aaUYqn7R2bZXKP/AB/781es+gK/dfWsNE8Adik971ioiNrRAl3FFqpfWWpANC +dt32CXAhd37X11R5vxzB6n+uJ/AmKVRM7S1D74Ks3X6vnCft34JzBlrzI5kXS/bB +DVApR1IiAyJeGsO576XASqnLrjxV/UBYLvWyjoIBRRjOgn1/mwAM3RmHMjWsJlbP +43Zlm46x86SWSYHwpbnb70qhk64bjcroJyN0tOgxIijY5iG+bQYFbSjAqZq8YoMI +RCtpQy64ZUIR3DKFafly6ZrMgPlyARsGSwtdGSfrQnpLHRNpCR71FklpQ4o/7pNr +Dnr5fF3OHfCuARx/AokOTE4tsjJfrjSUoElszUNOTLE5WaWqzadWGc9tlrgFnOiJ +h/psNCZpfdId54vxZJFy14FRrQshx05Szsei8XtHGZZLEJ4Mm1iv2jsJfQv0usUv +3gKCllKxboXzfbte9U3B7nhcGl3UN8tNYxdiPey0XkcYin5jr9bQ4qZCGbuiF6fP +KBW6iYuODdm8jJCu481A3g==