diff --git a/.github/workflows/update_opencv.yml b/.github/workflows/update_opencv.yml
deleted file mode 100644
index 53e98e4..0000000
--- a/.github/workflows/update_opencv.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-name: Update OpenCV
-
-on:
- workflow_dispatch:
- inputs:
- version:
- description: 'OpenCV Version (e.g. 4.9.0)'
- required: true
-
-jobs:
- build:
- runs-on: ubuntu-latest
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Download and unzip the OpenCV sdk
- run: |
- wget https://github.com/opencv/opencv/releases/download/${{ github.event.inputs.version }}/opencv-${{ github.event.inputs.version }}-android-sdk.zip
- unzip opencv-${{ github.event.inputs.version }}-android-sdk.zip
-
- - name: Update OpenCV Native SDK
- run: |
- rm -rd opencv/native
- mkdir opencv/native
- mkdir opencv/native/jni
- mkdir opencv/native/libs
- cp -r OpenCV-android-sdk/sdk/native/jni/include opencv/native/jni/
- cp -r OpenCV-android-sdk/sdk/native/libs opencv/native/
-
- - name: Update OpenCV Java SDK
- run: sed -i 's/ext.opencv_version = .*/ext.opencv_version = "${{ github.event.inputs.version }}"/g' build.gradle
-
- - name: Create Pull Request
- uses: peter-evans/create-pull-request@v6
- with:
- branch: feature/update_opencv
- title: Update of OpenCV
- commit-message: Update of OpenCV
- add-paths: |
- opencv/native/*
- build.gradle
-
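With this workflow gone, the vendored Android SDK no longer needs automated refreshes: updating OpenCV presumably reduces to bumping a Maven coordinate. A minimal sketch of the dependency that replaces the checked-in binaries, assuming the official `org.opencv:opencv` artifact on Maven Central (published from 4.9.0 onward); this excerpt does not show the project's actual dependency declaration, so the coordinates and version below are illustrative:

```groovy
// Hypothetical app/build.gradle fragment: the AAR carries the Java classes
// plus the prefab-exposed native libraries that the vendored SDK used to provide.
dependencies {
    implementation 'org.opencv:opencv:4.9.0'
}
```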
diff --git a/README.md b/README.md
index b0a96ed..df3a19d 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,6 @@ Copyright of the logo: The Coca-Cola Company
### How do I get set up?
* IDE: Android Studio (tested with 2023.3.1)
* Android SDK & NDK
-* Dependencies: OpenCV 4 library (included) [License](/opencv/LICENSE) [Copyright](/opencv/COPYRIGHT)
* Template image location: res/drawable Changeable in CameraPreviewView
### Default template image
diff --git a/app/CMakeLists.txt b/app/CMakeLists.txt
index c03b0be..475af29 100644
--- a/app/CMakeLists.txt
+++ b/app/CMakeLists.txt
@@ -1,57 +1,27 @@
-
-#
-# Copyright (C) The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
cmake_minimum_required(VERSION 3.4.1)
-project(FeatureMatchingNative)
+# register the project for the C++ language
+project(app CXX)
-# create a variable for accessing the opencv library dir
-set(
- lib_src_dir
- ${CMAKE_SOURCE_DIR}/../opencv/native
-)
+# find the opencv native sdk inside the AAR file / maven dependency (prerequisite: prefab = true)
+find_package(OpenCV REQUIRED CONFIG)
-# register the opencv header files...
-include_directories(${lib_src_dir}/jni/include)
-
-# register a new library to import...
+# register the custom c++ code of the app
add_library(
- lib_opencv
+ app
SHARED
- IMPORTED
+ src/main/cpp/native_opencv.cpp
)
-# ...and assign the corresponding location of the precompiled opencv library
+# compile with c++20
set_target_properties(
- lib_opencv
- PROPERTIES IMPORTED_LOCATION
- ${lib_src_dir}/libs/${ANDROID_ABI}/libopencv_java4.so
-)
-
-# register the custom c++ code of the app as library
-add_library(
- native_opencv
- SHARED
- src/main/cpp/native_opencv.cpp
+ app
+ PROPERTIES
+ CXX_STANDARD 20
)
# link the required libs together
target_link_libraries(
- native_opencv # includes the custom c++ code of the app
- lib_opencv # includes the precompiled opencv lib
- android # include android sdk
+ app # includes the custom c++ code of the app
+ OpenCV::opencv_java4 # includes the opencv lib from the maven dependency
log # include android logging sdk
)
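With `prefab = true` in app/build.gradle, the Android Gradle plugin generates a CMake config package from the native libraries packaged inside the OpenCV AAR; that is what lets `find_package(OpenCV REQUIRED CONFIG)` succeed and exposes `OpenCV::opencv_java4` as an imported target carrying both the headers and `libopencv_java4.so`. For orientation, a minimal sketch of a JNI entry point such a setup can serve; the repository's real `src/main/cpp/native_opencv.cpp` is not shown in this diff, so the package path and function name are hypothetical:

```cpp
// Hypothetical JNI function, not the repository's actual native_opencv.cpp.
#include <jni.h>
#include <opencv2/core/version.hpp> // resolved through the OpenCV::opencv_java4 imported target

extern "C" JNIEXPORT jstring JNICALL
Java_com_example_NativeBridge_opencvVersion(JNIEnv* env, jobject /* this */) {
    // CV_VERSION is a string literal like "4.9.0"; returning it proves
    // the prefab-linked OpenCV headers and library are wired up.
    return env->NewStringUTF(CV_VERSION);
}
```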
diff --git a/app/build.gradle b/app/build.gradle
index ee32a44..40ffb23 100644
--- a/app/build.gradle
+++ b/app/build.gradle
@@ -22,9 +22,10 @@ android {
}
externalNativeBuild {
cmake {
- cppFlags += "-std=c++20"
+ arguments "-DANDROID_STL=c++_shared"
}
}
+
}
buildTypes {
release {
@@ -38,6 +39,9 @@ android {
path 'CMakeLists.txt'
}
}
+ buildFeatures {
+ prefab true // allows accessing native libraries from the OpenCV AAR in C++
+ }
}
kotlin.jvmToolchain(java_version)
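Two details in this hunk belong together. The OpenCV AAR's `libopencv_java4.so` is built against the shared C++ runtime, so the app selects it too via `-DANDROID_STL=c++_shared`; and `-std=c++20` disappears from `cppFlags` because CMakeLists.txt now sets `CXX_STANDARD 20` on the target. Assembled for illustration from the fragments above (not a verbatim copy of the file), the relevant parts of the `android {}` block end up roughly as:

```groovy
// Illustrative assembly of the pieces this diff touches.
android {
    defaultConfig {
        externalNativeBuild {
            cmake {
                arguments "-DANDROID_STL=c++_shared" // match OpenCV's shared C++ runtime
            }
        }
    }
    externalNativeBuild {
        cmake { path 'CMakeLists.txt' }
    }
    buildFeatures {
        prefab true // expose native libraries from AAR dependencies to CMake
    }
}
```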
diff --git a/app/src/main/java/com/michaeltroger/featureMatchingNative/views/CameraPreviewView.kt b/app/src/main/java/com/michaeltroger/featureMatchingNative/views/CameraPreviewView.kt
index 3419a24..ee61b0a 100644
--- a/app/src/main/java/com/michaeltroger/featureMatchingNative/views/CameraPreviewView.kt
+++ b/app/src/main/java/com/michaeltroger/featureMatchingNative/views/CameraPreviewView.kt
@@ -255,7 +255,7 @@ class CameraPreviewView(
// loading C++ libraries
init {
- System.loadLibrary("native_opencv")
+ System.loadLibrary("app")
}
}
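The string passed to `System.loadLibrary` must match the CMake target name, since `add_library(app SHARED ...)` now produces `libapp.so` instead of `libnative_opencv.so`. A hedged Kotlin sketch pairing the library load with an `external` declaration for the hypothetical JNI function from the C++ sketch above (names remain illustrative):

```kotlin
package com.example

// Hypothetical counterpart to the C++ sketch above, not code from the repository.
class NativeBridge {
    external fun opencvVersion(): String

    companion object {
        init {
            // Must match the CMake target name: add_library(app SHARED ...).
            System.loadLibrary("app")
        }
    }
}
```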
diff --git a/opencv/COPYRIGHT b/opencv/COPYRIGHT
deleted file mode 100644
index 6b0b688..0000000
--- a/opencv/COPYRIGHT
+++ /dev/null
@@ -1,11 +0,0 @@
-Copyright (C) 2000-2022, Intel Corporation, all rights reserved.
-Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
-Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved.
-Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
-Copyright (C) 2015-2023, OpenCV Foundation, all rights reserved.
-Copyright (C) 2008-2016, Itseez Inc., all rights reserved.
-Copyright (C) 2019-2023, Xperience AI, all rights reserved.
-Copyright (C) 2019-2022, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
-Copyright (C) 2022-2023, Southern University of Science And Technology, all rights reserved.
-
-Third party copyrights are property of their respective owners.
diff --git a/opencv/LICENSE b/opencv/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/opencv/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/opencv/native/jni/include/opencv2/calib3d.hpp b/opencv/native/jni/include/opencv2/calib3d.hpp
deleted file mode 100644
index 7b15563..0000000
--- a/opencv/native/jni/include/opencv2/calib3d.hpp
+++ /dev/null
@@ -1,4103 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of the copyright holders may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef OPENCV_CALIB3D_HPP
-#define OPENCV_CALIB3D_HPP
-
-#include "opencv2/core.hpp"
-#include "opencv2/core/types.hpp"
-#include "opencv2/features2d.hpp"
-#include "opencv2/core/affine.hpp"
-
-/**
- @defgroup calib3d Camera Calibration and 3D Reconstruction
-
-The functions in this section use a so-called pinhole camera model. The view of a scene
-is obtained by projecting a scene's 3D point \f$P_w\f$ into the image plane using a perspective
-transformation which forms the corresponding pixel \f$p\f$. Both \f$P_w\f$ and \f$p\f$ are
-represented in homogeneous coordinates, i.e. as 3D and 2D homogeneous vector respectively. You will
-find a brief introduction to projective geometry, homogeneous vectors and homogeneous
-transformations at the end of this section's introduction. For more succinct notation, we often drop
-the 'homogeneous' and say vector instead of homogeneous vector.
-
-The distortion-free projective transformation given by a pinhole camera model is shown below.
-
-\f[s \; p = A \begin{bmatrix} R|t \end{bmatrix} P_w,\f]
-
-where \f$P_w\f$ is a 3D point expressed with respect to the world coordinate system,
-\f$p\f$ is a 2D pixel in the image plane, \f$A\f$ is the camera intrinsic matrix,
-\f$R\f$ and \f$t\f$ are the rotation and translation that describe the change of coordinates from
-world to camera coordinate systems (or camera frame) and \f$s\f$ is the projective transformation's
-arbitrary scaling and not part of the camera model.
-
-The camera intrinsic matrix \f$A\f$ (notation used as in @cite Zhang2000 and also generally notated
-as \f$K\f$) projects 3D points given in the camera coordinate system to 2D pixel coordinates, i.e.
-
-\f[p = A P_c.\f]
-
-The camera intrinsic matrix \f$A\f$ is composed of the focal lengths \f$f_x\f$ and \f$f_y\f$, which are
-expressed in pixel units, and the principal point \f$(c_x, c_y)\f$, that is usually close to the
-image center:
-
-\f[A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1},\f]
-
-and thus
-
-\f[s \vecthree{u}{v}{1} = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1} \vecthree{X_c}{Y_c}{Z_c}.\f]
-
-The matrix of intrinsic parameters does not depend on the scene viewed. So, once estimated, it can
-be re-used as long as the focal length is fixed (in case of a zoom lens). Thus, if an image from the
-camera is scaled by a factor, all of these parameters need to be scaled (multiplied/divided,
-respectively) by the same factor.
-
-The joint rotation-translation matrix \f$[R|t]\f$ is the matrix product of a projective
-transformation and a homogeneous transformation. The 3-by-4 projective transformation maps 3D points
-represented in camera coordinates to 2D points in the image plane and represented in normalized
-camera coordinates \f$x' = X_c / Z_c\f$ and \f$y' = Y_c / Z_c\f$:
-
-\f[Z_c \begin{bmatrix}
-x' \\
-y' \\
-1
-\end{bmatrix} = \begin{bmatrix}
-1 & 0 & 0 & 0 \\
-0 & 1 & 0 & 0 \\
-0 & 0 & 1 & 0
-\end{bmatrix}
-\begin{bmatrix}
-X_c \\
-Y_c \\
-Z_c \\
-1
-\end{bmatrix}.\f]
-
-The homogeneous transformation is encoded by the extrinsic parameters \f$R\f$ and \f$t\f$ and
-represents the change of basis from world coordinate system \f$w\f$ to the camera coordinate sytem
-\f$c\f$. Thus, given the representation of the point \f$P\f$ in world coordinates, \f$P_w\f$, we
-obtain \f$P\f$'s representation in the camera coordinate system, \f$P_c\f$, by
-
-\f[P_c = \begin{bmatrix}
-R & t \\
-0 & 1
-\end{bmatrix} P_w,\f]
-
-This homogeneous transformation is composed out of \f$R\f$, a 3-by-3 rotation matrix, and \f$t\f$, a
-3-by-1 translation vector:
-
-\f[\begin{bmatrix}
-R & t \\
-0 & 1
-\end{bmatrix} = \begin{bmatrix}
-r_{11} & r_{12} & r_{13} & t_x \\
-r_{21} & r_{22} & r_{23} & t_y \\
-r_{31} & r_{32} & r_{33} & t_z \\
-0 & 0 & 0 & 1
-\end{bmatrix},
-\f]
-
-and therefore
-
-\f[\begin{bmatrix}
-X_c \\
-Y_c \\
-Z_c \\
-1
-\end{bmatrix} = \begin{bmatrix}
-r_{11} & r_{12} & r_{13} & t_x \\
-r_{21} & r_{22} & r_{23} & t_y \\
-r_{31} & r_{32} & r_{33} & t_z \\
-0 & 0 & 0 & 1
-\end{bmatrix}
-\begin{bmatrix}
-X_w \\
-Y_w \\
-Z_w \\
-1
-\end{bmatrix}.\f]
-
-Combining the projective transformation and the homogeneous transformation, we obtain the projective
-transformation that maps 3D points in world coordinates into 2D points in the image plane and in
-normalized camera coordinates:
-
-\f[Z_c \begin{bmatrix}
-x' \\
-y' \\
-1
-\end{bmatrix} = \begin{bmatrix} R|t \end{bmatrix} \begin{bmatrix}
-X_w \\
-Y_w \\
-Z_w \\
-1
-\end{bmatrix} = \begin{bmatrix}
-r_{11} & r_{12} & r_{13} & t_x \\
-r_{21} & r_{22} & r_{23} & t_y \\
-r_{31} & r_{32} & r_{33} & t_z
-\end{bmatrix}
-\begin{bmatrix}
-X_w \\
-Y_w \\
-Z_w \\
-1
-\end{bmatrix},\f]
-
-with \f$x' = X_c / Z_c\f$ and \f$y' = Y_c / Z_c\f$. Putting the equations for instrincs and extrinsics together, we can write out
-\f$s \; p = A \begin{bmatrix} R|t \end{bmatrix} P_w\f$ as
-
-\f[s \vecthree{u}{v}{1} = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}
-\begin{bmatrix}
-r_{11} & r_{12} & r_{13} & t_x \\
-r_{21} & r_{22} & r_{23} & t_y \\
-r_{31} & r_{32} & r_{33} & t_z
-\end{bmatrix}
-\begin{bmatrix}
-X_w \\
-Y_w \\
-Z_w \\
-1
-\end{bmatrix}.\f]
-
-If \f$Z_c \ne 0\f$, the transformation above is equivalent to the following,
-
-\f[\begin{bmatrix}
-u \\
-v
-\end{bmatrix} = \begin{bmatrix}
-f_x X_c/Z_c + c_x \\
-f_y Y_c/Z_c + c_y
-\end{bmatrix}\f]
-
-with
-
-\f[\vecthree{X_c}{Y_c}{Z_c} = \begin{bmatrix}
-R|t
-\end{bmatrix} \begin{bmatrix}
-X_w \\
-Y_w \\
-Z_w \\
-1
-\end{bmatrix}.\f]
-
-The following figure illustrates the pinhole camera model.
-
-![Pinhole camera model](pics/pinhole_camera_model.png)
-
-Real lenses usually have some distortion, mostly radial distortion, and slight tangential distortion.
-So, the above model is extended as:
-
-\f[\begin{bmatrix}
-u \\
-v
-\end{bmatrix} = \begin{bmatrix}
-f_x x'' + c_x \\
-f_y y'' + c_y
-\end{bmatrix}\f]
-
-where
-
-\f[\begin{bmatrix}
-x'' \\
-y''
-\end{bmatrix} = \begin{bmatrix}
-x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + 2 p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4 \\
-y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\
-\end{bmatrix}\f]
-
-with
-
-\f[r^2 = x'^2 + y'^2\f]
-
-and
-
-\f[\begin{bmatrix}
-x'\\
-y'
-\end{bmatrix} = \begin{bmatrix}
-X_c/Z_c \\
-Y_c/Z_c
-\end{bmatrix},\f]
-
-if \f$Z_c \ne 0\f$.
-
-The distortion parameters are the radial coefficients \f$k_1\f$, \f$k_2\f$, \f$k_3\f$, \f$k_4\f$, \f$k_5\f$, and \f$k_6\f$
-,\f$p_1\f$ and \f$p_2\f$ are the tangential distortion coefficients, and \f$s_1\f$, \f$s_2\f$, \f$s_3\f$, and \f$s_4\f$,
-are the thin prism distortion coefficients. Higher-order coefficients are not considered in OpenCV.
-
-The next figures show two common types of radial distortion: barrel distortion
-(\f$ 1 + k_1 r^2 + k_2 r^4 + k_3 r^6 \f$ monotonically decreasing)
-and pincushion distortion (\f$ 1 + k_1 r^2 + k_2 r^4 + k_3 r^6 \f$ monotonically increasing).
-Radial distortion is always monotonic for real lenses,
-and if the estimator produces a non-monotonic result,
-this should be considered a calibration failure.
-More generally, radial distortion must be monotonic and the distortion function must be bijective.
-A failed estimation result may look deceptively good near the image center
-but will work poorly in e.g. AR/SFM applications.
-The optimization method used in OpenCV camera calibration does not include these constraints as
-the framework does not support the required integer programming and polynomial inequalities.
-See [issue #15992](https://github.com/opencv/opencv/issues/15992) for additional information.
-
-![](pics/distortion_examples.png)
-![](pics/distortion_examples2.png)
-
-In some cases, the image sensor may be tilted in order to focus an oblique plane in front of the
-camera (Scheimpflug principle). This can be useful for particle image velocimetry (PIV) or
-triangulation with a laser fan. The tilt causes a perspective distortion of \f$x''\f$ and
-\f$y''\f$. This distortion can be modeled in the following way, see e.g. @cite Louhichi07.
-
-\f[\begin{bmatrix}
-u \\
-v
-\end{bmatrix} = \begin{bmatrix}
-f_x x''' + c_x \\
-f_y y''' + c_y
-\end{bmatrix},\f]
-
-where
-
-\f[s\vecthree{x'''}{y'''}{1} =
-\vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}(\tau_x, \tau_y)}
-{0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)}
-{0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\f]
-
-and the matrix \f$R(\tau_x, \tau_y)\f$ is defined by two rotations with angular parameter
-\f$\tau_x\f$ and \f$\tau_y\f$, respectively,
-
-\f[
-R(\tau_x, \tau_y) =
-\vecthreethree{\cos(\tau_y)}{0}{-\sin(\tau_y)}{0}{1}{0}{\sin(\tau_y)}{0}{\cos(\tau_y)}
-\vecthreethree{1}{0}{0}{0}{\cos(\tau_x)}{\sin(\tau_x)}{0}{-\sin(\tau_x)}{\cos(\tau_x)} =
-\vecthreethree{\cos(\tau_y)}{\sin(\tau_y)\sin(\tau_x)}{-\sin(\tau_y)\cos(\tau_x)}
-{0}{\cos(\tau_x)}{\sin(\tau_x)}
-{\sin(\tau_y)}{-\cos(\tau_y)\sin(\tau_x)}{\cos(\tau_y)\cos(\tau_x)}.
-\f]
-
-In the functions below the coefficients are passed or returned as
-
-\f[(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f]
-
-vector. That is, if the vector contains four elements, it means that \f$k_3=0\f$ . The distortion
-coefficients do not depend on the scene viewed. Thus, they also belong to the intrinsic camera
-parameters. And they remain the same regardless of the captured image resolution. If, for example, a
-camera has been calibrated on images of 320 x 240 resolution, absolutely the same distortion
-coefficients can be used for 640 x 480 images from the same camera while \f$f_x\f$, \f$f_y\f$,
-\f$c_x\f$, and \f$c_y\f$ need to be scaled appropriately.
-
-The functions below use the above model to do the following:
-
-- Project 3D points to the image plane given intrinsic and extrinsic parameters.
-- Compute extrinsic parameters given intrinsic parameters, a few 3D points, and their
-projections.
-- Estimate intrinsic and extrinsic camera parameters from several views of a known calibration
-pattern (every view is described by several 3D-2D point correspondences).
-- Estimate the relative position and orientation of the stereo camera "heads" and compute the
-*rectification* transformation that makes the camera optical axes parallel.
-
-<B> Homogeneous Coordinates </B><br>
-Homogeneous Coordinates are a system of coordinates that are used in projective geometry. Their use
-allows to represent points at infinity by finite coordinates and simplifies formulas when compared
-to the cartesian counterparts, e.g. they have the advantage that affine transformations can be
-expressed as linear homogeneous transformation.
-
-One obtains the homogeneous vector \f$P_h\f$ by appending a 1 along an n-dimensional cartesian
-vector \f$P\f$ e.g. for a 3D cartesian vector the mapping \f$P \rightarrow P_h\f$ is:
-
-\f[\begin{bmatrix}
-X \\
-Y \\
-Z
-\end{bmatrix} \rightarrow \begin{bmatrix}
-X \\
-Y \\
-Z \\
-1
-\end{bmatrix}.\f]
-
-For the inverse mapping \f$P_h \rightarrow P\f$, one divides all elements of the homogeneous vector
-by its last element, e.g. for a 3D homogeneous vector one gets its 2D cartesian counterpart by:
-
-\f[\begin{bmatrix}
-X \\
-Y \\
-W
-\end{bmatrix} \rightarrow \begin{bmatrix}
-X / W \\
-Y / W
-\end{bmatrix},\f]
-
-if \f$W \ne 0\f$.
-
-Due to this mapping, all multiples \f$k P_h\f$, for \f$k \ne 0\f$, of a homogeneous point represent
-the same point \f$P_h\f$. An intuitive understanding of this property is that under a projective
-transformation, all multiples of \f$P_h\f$ are mapped to the same point. This is the physical
-observation one does for pinhole cameras, as all points along a ray through the camera's pinhole are
-projected to the same image point, e.g. all points along the red ray in the image of the pinhole
-camera model above would be mapped to the same image coordinate. This property is also the source
-for the scale ambiguity s in the equation of the pinhole camera model.
-
-As mentioned, by using homogeneous coordinates we can express any change of basis parameterized by
-\f$R\f$ and \f$t\f$ as a linear transformation, e.g. for the change of basis from coordinate system
-0 to coordinate system 1 becomes:
-
-\f[P_1 = R P_0 + t \rightarrow P_{h_1} = \begin{bmatrix}
-R & t \\
-0 & 1
-\end{bmatrix} P_{h_0}.\f]
-
-@note
- - Many functions in this module take a camera intrinsic matrix as an input parameter. Although all
- functions assume the same structure of this parameter, they may name it differently. The
- parameter's description, however, will be clear in that a camera intrinsic matrix with the structure
- shown above is required.
- - A calibration sample for 3 cameras in a horizontal position can be found at
- opencv_source_code/samples/cpp/3calibration.cpp
- - A calibration sample based on a sequence of images can be found at
- opencv_source_code/samples/cpp/calibration.cpp
- - A calibration sample in order to do 3D reconstruction can be found at
- opencv_source_code/samples/cpp/build3dmodel.cpp
- - A calibration example on stereo calibration can be found at
- opencv_source_code/samples/cpp/stereo_calib.cpp
- - A calibration example on stereo matching can be found at
- opencv_source_code/samples/cpp/stereo_match.cpp
- - (Python) A camera calibration sample can be found at
- opencv_source_code/samples/python/calibrate.py
-
- @{
- @defgroup calib3d_fisheye Fisheye camera model
-
- Definitions: Let P be a point in 3D of coordinates X in the world reference frame (stored in the
- matrix X) The coordinate vector of P in the camera reference frame is:
-
- \f[Xc = R X + T\f]
-
- where R is the rotation matrix corresponding to the rotation vector om: R = rodrigues(om); call x, y
- and z the 3 coordinates of Xc:
-
- \f[x = Xc_1 \\ y = Xc_2 \\ z = Xc_3\f]
-
- The pinhole projection coordinates of P is [a; b] where
-
- \f[a = x / z \ and \ b = y / z \\ r^2 = a^2 + b^2 \\ \theta = atan(r)\f]
-
- Fisheye distortion:
-
- \f[\theta_d = \theta (1 + k_1 \theta^2 + k_2 \theta^4 + k_3 \theta^6 + k_4 \theta^8)\f]
-
- The distorted point coordinates are [x'; y'] where
-
- \f[x' = (\theta_d / r) a \\ y' = (\theta_d / r) b \f]
-
- Finally, conversion into pixel coordinates: The final pixel coordinates vector [u; v] where:
-
- \f[u = f_x (x' + \alpha y') + c_x \\
- v = f_y y' + c_y\f]
-
- Summary:
- Generic camera model @cite Kannala2006 with perspective projection and without distortion correction
-
- @defgroup calib3d_c C API
-
- @}
- */
-
-namespace cv
-{
-
-//! @addtogroup calib3d
-//! @{
-
-//! type of the robust estimation algorithm
-enum { LMEDS = 4, //!< least-median of squares algorithm
- RANSAC = 8, //!< RANSAC algorithm
- RHO = 16, //!< RHO algorithm
- USAC_DEFAULT = 32, //!< USAC algorithm, default settings
- USAC_PARALLEL = 33, //!< USAC, parallel version
- USAC_FM_8PTS = 34, //!< USAC, fundamental matrix 8 points
- USAC_FAST = 35, //!< USAC, fast settings
- USAC_ACCURATE = 36, //!< USAC, accurate settings
- USAC_PROSAC = 37, //!< USAC, sorted points, runs PROSAC
- USAC_MAGSAC = 38 //!< USAC, runs MAGSAC++
- };
-
-enum SolvePnPMethod {
- SOLVEPNP_ITERATIVE = 0, //!< Pose refinement using non-linear Levenberg-Marquardt minimization scheme @cite Madsen04 @cite Eade13 \n
- //!< Initial solution for non-planar "objectPoints" needs at least 6 points and uses the DLT algorithm. \n
- //!< Initial solution for planar "objectPoints" needs at least 4 points and uses pose from homography decomposition.
- SOLVEPNP_EPNP = 1, //!< EPnP: Efficient Perspective-n-Point Camera Pose Estimation @cite lepetit2009epnp
- SOLVEPNP_P3P = 2, //!< Complete Solution Classification for the Perspective-Three-Point Problem @cite gao2003complete
- SOLVEPNP_DLS = 3, //!< **Broken implementation. Using this flag will fallback to EPnP.** \n
- //!< A Direct Least-Squares (DLS) Method for PnP @cite hesch2011direct
- SOLVEPNP_UPNP = 4, //!< **Broken implementation. Using this flag will fallback to EPnP.** \n
- //!< Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation @cite penate2013exhaustive
- SOLVEPNP_AP3P = 5, //!< An Efficient Algebraic Solution to the Perspective-Three-Point Problem @cite Ke17
- SOLVEPNP_IPPE = 6, //!< Infinitesimal Plane-Based Pose Estimation @cite Collins14 \n
- //!< Object points must be coplanar.
- SOLVEPNP_IPPE_SQUARE = 7, //!< Infinitesimal Plane-Based Pose Estimation @cite Collins14 \n
- //!< This is a special case suitable for marker pose estimation.\n
- //!< 4 coplanar object points must be defined in the following order:
- //!< - point 0: [-squareLength / 2, squareLength / 2, 0]
- //!< - point 1: [ squareLength / 2, squareLength / 2, 0]
- //!< - point 2: [ squareLength / 2, -squareLength / 2, 0]
- //!< - point 3: [-squareLength / 2, -squareLength / 2, 0]
- SOLVEPNP_SQPNP = 8, //!< SQPnP: A Consistently Fast and Globally OptimalSolution to the Perspective-n-Point Problem @cite Terzakis2020SQPnP
-#ifndef CV_DOXYGEN
- SOLVEPNP_MAX_COUNT //!< Used for count
-#endif
-};
-
-enum { CALIB_CB_ADAPTIVE_THRESH = 1,
- CALIB_CB_NORMALIZE_IMAGE = 2,
- CALIB_CB_FILTER_QUADS = 4,
- CALIB_CB_FAST_CHECK = 8,
- CALIB_CB_EXHAUSTIVE = 16,
- CALIB_CB_ACCURACY = 32,
- CALIB_CB_LARGER = 64,
- CALIB_CB_MARKER = 128,
- CALIB_CB_PLAIN = 256
- };
-
-enum { CALIB_CB_SYMMETRIC_GRID = 1,
- CALIB_CB_ASYMMETRIC_GRID = 2,
- CALIB_CB_CLUSTERING = 4
- };
-
-enum { CALIB_NINTRINSIC = 18,
- CALIB_USE_INTRINSIC_GUESS = 0x00001,
- CALIB_FIX_ASPECT_RATIO = 0x00002,
- CALIB_FIX_PRINCIPAL_POINT = 0x00004,
- CALIB_ZERO_TANGENT_DIST = 0x00008,
- CALIB_FIX_FOCAL_LENGTH = 0x00010,
- CALIB_FIX_K1 = 0x00020,
- CALIB_FIX_K2 = 0x00040,
- CALIB_FIX_K3 = 0x00080,
- CALIB_FIX_K4 = 0x00800,
- CALIB_FIX_K5 = 0x01000,
- CALIB_FIX_K6 = 0x02000,
- CALIB_RATIONAL_MODEL = 0x04000,
- CALIB_THIN_PRISM_MODEL = 0x08000,
- CALIB_FIX_S1_S2_S3_S4 = 0x10000,
- CALIB_TILTED_MODEL = 0x40000,
- CALIB_FIX_TAUX_TAUY = 0x80000,
- CALIB_USE_QR = 0x100000, //!< use QR instead of SVD decomposition for solving. Faster but potentially less precise
- CALIB_FIX_TANGENT_DIST = 0x200000,
- // only for stereo
- CALIB_FIX_INTRINSIC = 0x00100,
- CALIB_SAME_FOCAL_LENGTH = 0x00200,
- // for stereo rectification
- CALIB_ZERO_DISPARITY = 0x00400,
- CALIB_USE_LU = (1 << 17), //!< use LU instead of SVD decomposition for solving. much faster but potentially less precise
- CALIB_USE_EXTRINSIC_GUESS = (1 << 22) //!< for stereoCalibrate
- };
-
-//! the algorithm for finding fundamental matrix
-enum { FM_7POINT = 1, //!< 7-point algorithm
- FM_8POINT = 2, //!< 8-point algorithm
- FM_LMEDS = 4, //!< least-median algorithm. 7-point algorithm is used.
- FM_RANSAC = 8 //!< RANSAC algorithm. It needs at least 15 points. 7-point algorithm is used.
- };
-
-enum HandEyeCalibrationMethod
-{
- CALIB_HAND_EYE_TSAI = 0, //!< A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/Eye Calibration @cite Tsai89
- CALIB_HAND_EYE_PARK = 1, //!< Robot Sensor Calibration: Solving AX = XB on the Euclidean Group @cite Park94
- CALIB_HAND_EYE_HORAUD = 2, //!< Hand-eye Calibration @cite Horaud95
- CALIB_HAND_EYE_ANDREFF = 3, //!< On-line Hand-Eye Calibration @cite Andreff99
- CALIB_HAND_EYE_DANIILIDIS = 4 //!< Hand-Eye Calibration Using Dual Quaternions @cite Daniilidis98
-};
-
-enum RobotWorldHandEyeCalibrationMethod
-{
- CALIB_ROBOT_WORLD_HAND_EYE_SHAH = 0, //!< Solving the robot-world/hand-eye calibration problem using the kronecker product @cite Shah2013SolvingTR
- CALIB_ROBOT_WORLD_HAND_EYE_LI = 1 //!< Simultaneous robot-world and hand-eye calibration using dual-quaternions and kronecker product @cite Li2010SimultaneousRA
-};
-
-enum SamplingMethod { SAMPLING_UNIFORM=0, SAMPLING_PROGRESSIVE_NAPSAC=1, SAMPLING_NAPSAC=2,
- SAMPLING_PROSAC=3 };
-enum LocalOptimMethod {LOCAL_OPTIM_NULL=0, LOCAL_OPTIM_INNER_LO=1, LOCAL_OPTIM_INNER_AND_ITER_LO=2,
- LOCAL_OPTIM_GC=3, LOCAL_OPTIM_SIGMA=4};
-enum ScoreMethod {SCORE_METHOD_RANSAC=0, SCORE_METHOD_MSAC=1, SCORE_METHOD_MAGSAC=2, SCORE_METHOD_LMEDS=3};
-enum NeighborSearchMethod { NEIGH_FLANN_KNN=0, NEIGH_GRID=1, NEIGH_FLANN_RADIUS=2 };
-enum PolishingMethod { NONE_POLISHER=0, LSQ_POLISHER=1, MAGSAC=2, COV_POLISHER=3 };
-
-struct CV_EXPORTS_W_SIMPLE UsacParams
-{ // in alphabetical order
- CV_WRAP UsacParams();
- CV_PROP_RW double confidence;
- CV_PROP_RW bool isParallel;
- CV_PROP_RW int loIterations;
- CV_PROP_RW LocalOptimMethod loMethod;
- CV_PROP_RW int loSampleSize;
- CV_PROP_RW int maxIterations;
- CV_PROP_RW NeighborSearchMethod neighborsSearch;
- CV_PROP_RW int randomGeneratorState;
- CV_PROP_RW SamplingMethod sampler;
- CV_PROP_RW ScoreMethod score;
- CV_PROP_RW double threshold;
- CV_PROP_RW PolishingMethod final_polisher;
- CV_PROP_RW int final_polisher_iterations;
-};
-
-/** @brief Converts a rotation matrix to a rotation vector or vice versa.
-
-@param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
-@param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
-@param jacobian Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial
-derivatives of the output array components with respect to the input array components.
-
-\f[\begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos(\theta) I + (1- \cos{\theta} ) r r^T + \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\f]
-
-Inverse transformation can be also done easily, since
-
-\f[\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\f]
-
-A rotation vector is a convenient and most compact representation of a rotation matrix (since any
-rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry
-optimization procedures like @ref calibrateCamera, @ref stereoCalibrate, or @ref solvePnP .
-
-@note More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinate
-can be found in:
- - A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi @cite Gallego2014ACF
-
-@note Useful information on SE(3) and Lie Groups can be found in:
- - A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco @cite blanco2010tutorial
- - Lie Groups for 2D and 3D Transformation, Ethan Eade @cite Eade17
- - A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan @cite Sol2018AML
- */
-CV_EXPORTS_W void Rodrigues( InputArray src, OutputArray dst, OutputArray jacobian = noArray() );
-
-
-
-/** Levenberg-Marquardt solver. Starting with the specified vector of parameters it
- optimizes the target vector criteria "err"
- (finds local minima of each target vector component absolute value).
-
- When needed, it calls user-provided callback.
-*/
-class CV_EXPORTS LMSolver : public Algorithm
-{
-public:
- class CV_EXPORTS Callback
- {
- public:
- virtual ~Callback() {}
- /**
- computes error and Jacobian for the specified vector of parameters
-
- @param param the current vector of parameters
- @param err output vector of errors: err_i = actual_f_i - ideal_f_i
- @param J output Jacobian: J_ij = d(ideal_f_i)/d(param_j)
-
- when J=noArray(), it means that it does not need to be computed.
- Dimensionality of error vector and param vector can be different.
- The callback should explicitly allocate (with "create" method) each output array
- (unless it's noArray()).
- */
- virtual bool compute(InputArray param, OutputArray err, OutputArray J) const = 0;
- };
-
- /**
- Runs Levenberg-Marquardt algorithm using the passed vector of parameters as the start point.
- The final vector of parameters (whether the algorithm converged or not) is stored at the same
- vector. The method returns the number of iterations used. If it's equal to the previously specified
- maxIters, there is a big chance the algorithm did not converge.
-
- @param param initial/final vector of parameters.
-
- Note that the dimensionality of parameter space is defined by the size of param vector,
- and the dimensionality of optimized criteria is defined by the size of err vector
- computed by the callback.
- */
- virtual int run(InputOutputArray param) const = 0;
-
- /**
- Sets the maximum number of iterations
- @param maxIters the number of iterations
- */
- virtual void setMaxIters(int maxIters) = 0;
- /**
- Retrieves the current maximum number of iterations
- */
- virtual int getMaxIters() const = 0;
-
- /**
- Creates Levenberg-Marquard solver
-
- @param cb callback
- @param maxIters maximum number of iterations that can be further
- modified using setMaxIters() method.
- */
- static Ptr<LMSolver> create(const Ptr<LMSolver::Callback>& cb, int maxIters);
- static Ptr<LMSolver> create(const Ptr<LMSolver::Callback>& cb, int maxIters, double eps);
-};
-
-
-
-/** @example samples/cpp/tutorial_code/features2D/Homography/pose_from_homography.cpp
-An example program about pose estimation from coplanar points
-
-Check @ref tutorial_homography "the corresponding tutorial" for more details
-*/
-
-/** @brief Finds a perspective transformation between two planes.
-
-@param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
-or vector\<Point2f\> .
-@param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
-a vector\<Point2f\> .
-@param method Method used to compute a homography matrix. The following methods are possible:
-- **0** - a regular method using all the points, i.e., the least squares method
-- @ref RANSAC - RANSAC-based robust method
-- @ref LMEDS - Least-Median robust method
-- @ref RHO - PROSAC-based robust method
-@param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
-(used in the RANSAC and RHO methods only). That is, if
-\f[\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} \cdot \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\f]
-then the point \f$i\f$ is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
-it usually makes sense to set this parameter somewhere in the range of 1 to 10.
-@param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
-mask values are ignored.
-@param maxIters The maximum number of RANSAC iterations.
-@param confidence Confidence level, between 0 and 1.
-
-The function finds and returns the perspective transformation \f$H\f$ between the source and the
-destination planes:
-
-\f[s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\f]
-
-so that the back-projection error
-
-\f[\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\f]
-
-is minimized. If the parameter method is set to the default value 0, the function uses all the point
-pairs to compute an initial homography estimate with a simple least-squares scheme.
-
-However, if not all of the point pairs ( \f$srcPoints_i\f$, \f$dstPoints_i\f$ ) fit the rigid perspective
-transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
-you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
-random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
-using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
-computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
-LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
-the mask of inliers/outliers.
-
-Regardless of the method, robust or not, the computed homography matrix is refined further (using
-inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
-re-projection error even more.
-
-The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
-distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
-correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
-noise is rather small, use the default method (method=0).
-
-The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
-determined up to a scale. Thus, it is normalized so that \f$h_{33}=1\f$. Note that whenever an \f$H\f$ matrix
-cannot be estimated, an empty one will be returned.
-
-@sa
-getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
-perspectiveTransform
- */
-CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints,
- int method = 0, double ransacReprojThreshold = 3,
- OutputArray mask=noArray(), const int maxIters = 2000,
- const double confidence = 0.995);
-
-/** @overload */
-CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints,
- OutputArray mask, int method = 0, double ransacReprojThreshold = 3 );
-
-
-CV_EXPORTS_W Mat findHomography(InputArray srcPoints, InputArray dstPoints, OutputArray mask,
- const UsacParams &params);
-
-/** @brief Computes an RQ decomposition of 3x3 matrices.
-
-@param src 3x3 input matrix.
-@param mtxR Output 3x3 upper-triangular matrix.
-@param mtxQ Output 3x3 orthogonal matrix.
-@param Qx Optional output 3x3 rotation matrix around x-axis.
-@param Qy Optional output 3x3 rotation matrix around y-axis.
-@param Qz Optional output 3x3 rotation matrix around z-axis.
-
-The function computes a RQ decomposition using the given rotations. This function is used in
-#decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
-and a rotation matrix.
-
-It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
-degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
-sequence of rotations about the three principal axes that results in the same orientation of an
-object, e.g. see @cite Slabaugh . Returned three rotation matrices and corresponding three Euler angles
-are only one of the possible solutions.
- */
-CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ,
- OutputArray Qx = noArray(),
- OutputArray Qy = noArray(),
- OutputArray Qz = noArray());
-
-/** @brief Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
-
-@param projMatrix 3x4 input projection matrix P.
-@param cameraMatrix Output 3x3 camera intrinsic matrix \f$\cameramatrix{A}\f$.
-@param rotMatrix Output 3x3 external rotation matrix R.
-@param transVect Output 4x1 translation vector T.
-@param rotMatrixX Optional 3x3 rotation matrix around x-axis.
-@param rotMatrixY Optional 3x3 rotation matrix around y-axis.
-@param rotMatrixZ Optional 3x3 rotation matrix around z-axis.
-@param eulerAngles Optional three-element vector containing three Euler angles of rotation in
-degrees.
-
-The function computes a decomposition of a projection matrix into a calibration and a rotation
-matrix and the position of a camera.
-
-It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
-be used in OpenGL. Note, there is always more than one sequence of rotations about the three
-principal axes that results in the same orientation of an object, e.g. see @cite Slabaugh . Returned
-three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
-
-The function is based on #RQDecomp3x3 .
- */
-CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix,
- OutputArray rotMatrix, OutputArray transVect,
- OutputArray rotMatrixX = noArray(),
- OutputArray rotMatrixY = noArray(),
- OutputArray rotMatrixZ = noArray(),
- OutputArray eulerAngles =noArray() );
-
-/** @brief Computes partial derivatives of the matrix product for each multiplied matrix.
-
-@param A First multiplied matrix.
-@param B Second multiplied matrix.
-@param dABdA First output derivative matrix d(A\*B)/dA of size
-\f$\texttt{A.rows*B.cols} \times {A.rows*A.cols}\f$ .
-@param dABdB Second output derivative matrix d(A\*B)/dB of size
-\f$\texttt{A.rows*B.cols} \times {B.rows*B.cols}\f$ .
-
-The function computes partial derivatives of the elements of the matrix product \f$A*B\f$ with regard to
-the elements of each of the two input matrices. The function is used to compute the Jacobian
-matrices in #stereoCalibrate but can also be used in any other similar optimization function.
- */
-CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB );
-
-/** @brief Combines two rotation-and-shift transformations.
-
-@param rvec1 First rotation vector.
-@param tvec1 First translation vector.
-@param rvec2 Second rotation vector.
-@param tvec2 Second translation vector.
-@param rvec3 Output rotation vector of the superposition.
-@param tvec3 Output translation vector of the superposition.
-@param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
-@param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
-@param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
-@param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
-@param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
-@param dt3dt1 Optional output derivative of tvec3 with regard to tvec1
-@param dt3dr2 Optional output derivative of tvec3 with regard to rvec2
-@param dt3dt2 Optional output derivative of tvec3 with regard to tvec2
-
-The functions compute:
-
-\f[\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\f]
-
-where \f$\mathrm{rodrigues}\f$ denotes a rotation vector to a rotation matrix transformation, and
-\f$\mathrm{rodrigues}^{-1}\f$ denotes the inverse transformation. See #Rodrigues for details.
-
-Also, the functions can compute the derivatives of the output vectors with regards to the input
-vectors (see #matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
-your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
-function that contains a matrix multiplication.
- */
-CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1,
- InputArray rvec2, InputArray tvec2,
- OutputArray rvec3, OutputArray tvec3,
- OutputArray dr3dr1 = noArray(), OutputArray dr3dt1 = noArray(),
- OutputArray dr3dr2 = noArray(), OutputArray dr3dt2 = noArray(),
- OutputArray dt3dr1 = noArray(), OutputArray dt3dt1 = noArray(),
- OutputArray dt3dr2 = noArray(), OutputArray dt3dt2 = noArray() );
-
-/** @brief Projects 3D points to an image plane.
-
-@param objectPoints Array of object points expressed wrt. the world coordinate frame. A 3xN/Nx3
-1-channel or 1xN/Nx1 3-channel (or vector\<Point3f\> ), where N is the number of points in the view.
-@param rvec The rotation vector (@ref Rodrigues) that, together with tvec, performs a change of
-basis from world to camera coordinate system, see @ref calibrateCamera for details.
-@param tvec The translation vector, see parameter description above.
-@param cameraMatrix Camera intrinsic matrix \f$\cameramatrix{A}\f$ .
-@param distCoeffs Input vector of distortion coefficients
-\f$\distcoeffs\f$ . If the vector is empty, the zero distortion coefficients are assumed.
-@param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
-vector\<Point2f\> .
-@param jacobian Optional output 2Nx(10+\<numDistCoeffs\>) jacobian matrix of derivatives of image
-points with respect to components of the rotation vector, translation vector, focal lengths,
-coordinates of the principal point and the distortion coefficients. In the old interface different
-components of the jacobian are returned via different output parameters.
-@param aspectRatio Optional "fixed aspect ratio" parameter. If the parameter is not 0, the
-function assumes that the aspect ratio (\f$f_x / f_y\f$) is fixed and correspondingly adjusts the
-jacobian matrix.
-
-The function computes the 2D projections of 3D points to the image plane, given intrinsic and
-extrinsic camera parameters. Optionally, the function computes Jacobians -matrices of partial
-derivatives of image points coordinates (as functions of all the input parameters) with respect to
-the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
-optimization in @ref calibrateCamera, @ref solvePnP, and @ref stereoCalibrate. The function itself
-can also be used to compute a re-projection error, given the current intrinsic and extrinsic
-parameters.
-
-@note By setting rvec = tvec = \f$[0, 0, 0]\f$, or by setting cameraMatrix to a 3x3 identity matrix,
-or by passing zero distortion coefficients, one can get various useful partial cases of the
-function. This means, one can compute the distorted coordinates for a sparse set of points or apply
-a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
- */
-CV_EXPORTS_W void projectPoints( InputArray objectPoints,
- InputArray rvec, InputArray tvec,
- InputArray cameraMatrix, InputArray distCoeffs,
- OutputArray imagePoints,
- OutputArray jacobian = noArray(),
- double aspectRatio = 0 );
-
-/** @example samples/cpp/tutorial_code/features2D/Homography/homography_from_camera_displacement.cpp
-An example program about homography from the camera displacement
-
-Check @ref tutorial_homography "the corresponding tutorial" for more details
-*/
-
-/** @brief Finds an object pose from 3D-2D point correspondences.
-
-@see @ref calib3d_solvePnP
-
-This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
-coordinate frame to the camera coordinate frame, using different methods:
-- P3P methods (@ref SOLVEPNP_P3P, @ref SOLVEPNP_AP3P): need 4 input points to return a unique solution.
-- @ref SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar.
-- @ref SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
-Number of input points must be 4. Object points must be defined in the following order:
- - point 0: [-squareLength / 2, squareLength / 2, 0]
- - point 1: [ squareLength / 2, squareLength / 2, 0]
- - point 2: [ squareLength / 2, -squareLength / 2, 0]
- - point 3: [-squareLength / 2, -squareLength / 2, 0]
-- for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
-
-@param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
-1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
-@param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
-where N is the number of points. vector\<Point2d\> can be also passed here.
-@param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ .
-@param distCoeffs Input vector of distortion coefficients
-\f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are
-assumed.
-@param rvec Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
-the model coordinate system to the camera coordinate system.
-@param tvec Output translation vector.
-@param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
-the provided rvec and tvec values as initial approximations of the rotation and translation
-vectors, respectively, and further optimizes them.
-@param flags Method for solving a PnP problem: see @ref calib3d_solvePnP_flags
-
-More information about Perspective-n-Points is described in @ref calib3d_solvePnP
-
-@note
- - An example of how to use solvePnP for planar augmented reality can be found at
- opencv_source_code/samples/python/plane_ar.py
- - If you are using Python:
- - Numpy array slices won't work as input because solvePnP requires contiguous
- arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- modules/calib3d/src/solvepnp.cpp version 2.4.9)
- - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- which requires 2-channel information.
- - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- - The methods @ref SOLVEPNP_DLS and @ref SOLVEPNP_UPNP cannot be used as the current implementations are
- unstable and sometimes give completely wrong results. If you pass one of these two
- flags, @ref SOLVEPNP_EPNP method will be used instead.
- - The minimum number of points is 4 in the general case. In the case of @ref SOLVEPNP_P3P and @ref SOLVEPNP_AP3P
- methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- - With @ref SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
- are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- global solution to converge.
- - With @ref SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- - With @ref SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
- Number of input points must be 4. Object points must be defined in the following order:
- - point 0: [-squareLength / 2, squareLength / 2, 0]
- - point 1: [ squareLength / 2, squareLength / 2, 0]
- - point 2: [ squareLength / 2, -squareLength / 2, 0]
- - point 3: [-squareLength / 2, -squareLength / 2, 0]
- - With @ref SOLVEPNP_SQPNP input points must be >= 3
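-
-Sample usage (a minimal sketch; the correspondences and intrinsics are illustrative):
-@code
-    vector<Point3d> objectPoints = { {0,0,0}, {1,0,0}, {1,1,0}, {0,1,0} }; // planar model
-    vector<Point2d> imagePoints  = { {320,240}, {480,250}, {470,400}, {310,390} };
-    Mat cameraMatrix = (Mat_<double>(3,3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
-    Mat rvec, tvec;
-    bool found = solvePnP(objectPoints, imagePoints, cameraMatrix, noArray(), rvec, tvec);
-@endcode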
- */
-CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
- InputArray cameraMatrix, InputArray distCoeffs,
- OutputArray rvec, OutputArray tvec,
- bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE );
-
-/** @brief Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
-
-@see @ref calib3d_solvePnP
-
-@param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
-1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
-@param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
-where N is the number of points. vector\<Point2d\> can be also passed here.
-@param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ .
-@param distCoeffs Input vector of distortion coefficients
-\f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are
-assumed.
-@param rvec Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
-the model coordinate system to the camera coordinate system.
-@param tvec Output translation vector.
-@param useExtrinsicGuess Parameter used for @ref SOLVEPNP_ITERATIVE. If true (1), the function uses
-the provided rvec and tvec values as initial approximations of the rotation and translation
-vectors, respectively, and further optimizes them.
-@param iterationsCount Number of iterations.
-@param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
-is the maximum allowed distance between the observed and computed point projections to consider it
-an inlier.
-@param confidence The probability that the algorithm produces a useful result.
-@param inliers Output vector that contains indices of inliers in objectPoints and imagePoints .
-@param flags Method for solving a PnP problem (see @ref solvePnP ).
-
-The function estimates an object pose given a set of object points, their corresponding image
-projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
-a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
-projections imagePoints and the projected (using @ref projectPoints ) objectPoints. The use of RANSAC
-makes the function resistant to outliers.
-
-@note
- - An example of how to use solvePNPRansac for object detection can be found at
- opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
- - The default method used to estimate the camera pose for the Minimal Sample Sets step
- is #SOLVEPNP_EPNP. Exceptions are:
- - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
- - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
- - The method used to estimate the camera pose using all the inliers is defined by the
- flags parameters unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
- the method #SOLVEPNP_EPNP will be used instead.
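-
-Sample usage (a minimal sketch; the input arrays stand in for real detections):
-@code
-    vector<Point3d> objectPoints = ...; // 3D model points
-    vector<Point2d> imagePoints  = ...; // matched detections, possibly with outliers
-    Mat cameraMatrix = ...;             // intrinsics, e.g. from calibrateCamera
-    Mat rvec, tvec;
-    vector<int> inliers;
-    bool found = solvePnPRansac(objectPoints, imagePoints, cameraMatrix, noArray(),
-                                rvec, tvec, false, 100, 8.0f, 0.99, inliers);
-@endcode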
- */
-CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoints,
- InputArray cameraMatrix, InputArray distCoeffs,
- OutputArray rvec, OutputArray tvec,
- bool useExtrinsicGuess = false, int iterationsCount = 100,
- float reprojectionError = 8.0, double confidence = 0.99,
- OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE );
-
-
-/*
-Finds rotation and translation vector.
-If cameraMatrix is given then run P3P. Otherwise run linear P6P and output cameraMatrix too.
-*/
-CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoints,
- InputOutputArray cameraMatrix, InputArray distCoeffs,
- OutputArray rvec, OutputArray tvec, OutputArray inliers,
-                                  const UsacParams &params=UsacParams());
-
-/** @brief Finds an object pose from 3 3D-2D point correspondences.
-
-@see @ref calib3d_solvePnP
-
-@param objectPoints Array of object points in the object coordinate space, 3x3 1-channel or
-1x3/3x1 3-channel. vector\<Point3f\> can be also passed here.
-@param imagePoints Array of corresponding image points, 3x2 1-channel or 1x3/3x1 2-channel.
- vector\<Point2f\> can be also passed here.
-@param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ .
-@param distCoeffs Input vector of distortion coefficients
-\f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are
-assumed.
-@param rvecs Output rotation vectors (see @ref Rodrigues ) that, together with tvecs, brings points from
-the model coordinate system to the camera coordinate system. A P3P problem has up to 4 solutions.
-@param tvecs Output translation vectors.
-@param flags Method for solving a P3P problem:
-- @ref SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
-"Complete Solution Classification for the Perspective-Three-Point Problem" (@cite gao2003complete).
-- @ref SOLVEPNP_AP3P Method is based on the paper of T. Ke and S. Roumeliotis.
-"An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17).
-
-The function estimates the object pose given 3 object points, their corresponding image
-projections, as well as the camera intrinsic matrix and the distortion coefficients.
-
-@note
-The solutions are sorted by reprojection errors (lowest to highest).
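-
-Sample usage (a minimal sketch; the three correspondences are placeholders):
-@code
-    vector<Point3f> objectPoints = { {0,0,0}, {1,0,0}, {0,1,0} };
-    vector<Point2f> imagePoints  = ...; // the three matching detections
-    Mat cameraMatrix = ...;             // intrinsics
-    vector<Mat> rvecs, tvecs;           // up to 4 candidate poses
-    int nSolutions = solveP3P(objectPoints, imagePoints, cameraMatrix, noArray(),
-                              rvecs, tvecs, SOLVEPNP_P3P);
-@endcode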
- */
-CV_EXPORTS_W int solveP3P( InputArray objectPoints, InputArray imagePoints,
- InputArray cameraMatrix, InputArray distCoeffs,
- OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
- int flags );
-
-/** @brief Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
-to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution.
-
-@see @ref calib3d_solvePnP
-
-@param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
-where N is the number of points. vector\<Point3d\> can also be passed here.
-@param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
-where N is the number of points. vector\<Point2d\> can also be passed here.
-@param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ .
-@param distCoeffs Input vector of distortion coefficients
-\f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are
-assumed.
-@param rvec Input/Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
-the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
-@param tvec Input/Output translation vector. Input values are used as an initial solution.
-@param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
-
-The function refines the object pose given at least 3 object points, their corresponding image
-projections, an initial solution for the rotation and translation vector,
-as well as the camera intrinsic matrix and the distortion coefficients.
-The function minimizes the projection error with respect to the rotation and the translation vectors, according
-to a Levenberg-Marquardt iterative minimization @cite Madsen04 @cite Eade13 process.
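-
-Sample usage (a minimal sketch, assuming an initial pose from #solvePnP):
-@code
-    // objectPoints, imagePoints, cameraMatrix, distCoeffs as in #solvePnP;
-    // rvec and tvec hold an initial solution and are refined in place
-    solvePnPRefineLM(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
-@endcode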
- */
-CV_EXPORTS_W void solvePnPRefineLM( InputArray objectPoints, InputArray imagePoints,
- InputArray cameraMatrix, InputArray distCoeffs,
- InputOutputArray rvec, InputOutputArray tvec,
- TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON));
-
-/** @brief Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
-to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution.
-
-@see @ref calib3d_solvePnP
-
-@param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
-where N is the number of points. vector\<Point3d\> can also be passed here.
-@param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
-where N is the number of points. vector\<Point2d\> can also be passed here.
-@param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ .
-@param distCoeffs Input vector of distortion coefficients
-\f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are
-assumed.
-@param rvec Input/Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
-the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
-@param tvec Input/Output translation vector. Input values are used as an initial solution.
-@param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
-@param VVSlambda Gain for the virtual visual servoing control law, equivalent to the \f$\alpha\f$
-gain in the Damped Gauss-Newton formulation.
-
-The function refines the object pose given at least 3 object points, their corresponding image
-projections, an initial solution for the rotation and translation vector,
-as well as the camera intrinsic matrix and the distortion coefficients.
-The function minimizes the projection error with respect to the rotation and the translation vectors, using a
-virtual visual servoing (VVS) @cite Chaumette06 @cite Marchand16 scheme.
- */
-CV_EXPORTS_W void solvePnPRefineVVS( InputArray objectPoints, InputArray imagePoints,
- InputArray cameraMatrix, InputArray distCoeffs,
- InputOutputArray rvec, InputOutputArray tvec,
- TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON),
- double VVSlambda = 1);
-
-/** @brief Finds an object pose from 3D-2D point correspondences.
-
-@see @ref calib3d_solvePnP
-
-This function returns a list of all the possible solutions (a solution is a
-\<rotation vector, translation vector\> couple), depending on the number of input points and the chosen method:
-- P3P methods (@ref SOLVEPNP_P3P, @ref SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
-- @ref SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
-- @ref SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
-Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
- - point 0: [-squareLength / 2, squareLength / 2, 0]
- - point 1: [ squareLength / 2, squareLength / 2, 0]
- - point 2: [ squareLength / 2, -squareLength / 2, 0]
- - point 3: [-squareLength / 2, -squareLength / 2, 0]
-- for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
-Only 1 solution is returned.
-
-@param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
-1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
-@param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
-where N is the number of points. vector\<Point2d\> can be also passed here.
-@param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ .
-@param distCoeffs Input vector of distortion coefficients
-\f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are
-assumed.
-@param rvecs Vector of output rotation vectors (see @ref Rodrigues ) that, together with tvecs, brings points from
-the model coordinate system to the camera coordinate system.
-@param tvecs Vector of output translation vectors.
-@param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
-the provided rvec and tvec values as initial approximations of the rotation and translation
-vectors, respectively, and further optimizes them.
-@param flags Method for solving a PnP problem: see @ref calib3d_solvePnP_flags
-@param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is @ref SOLVEPNP_ITERATIVE
-and useExtrinsicGuess is set to true.
-@param tvec Translation vector used to initialize an iterative PnP refinement algorithm, when flag is @ref SOLVEPNP_ITERATIVE
-and useExtrinsicGuess is set to true.
-@param reprojectionError Optional vector of reprojection error, that is the RMS error
-(\f$ \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \f$) between the input image points
-and the 3D object points projected with the estimated pose.
-
-More information is described in @ref calib3d_solvePnP
-
-@note
- - An example of how to use solvePnP for planar augmented reality can be found at
- opencv_source_code/samples/python/plane_ar.py
- - If you are using Python:
- - Numpy array slices won't work as input because solvePnP requires contiguous
- arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
- modules/calib3d/src/solvepnp.cpp version 2.4.9)
- - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
- to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
- which requires 2-channel information.
- - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
- it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
- np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- - The methods @ref SOLVEPNP_DLS and @ref SOLVEPNP_UPNP cannot be used as the current implementations are
- unstable and sometimes give completely wrong results. If you pass one of these two
- flags, @ref SOLVEPNP_EPNP method will be used instead.
- - The minimum number of points is 4 in the general case. In the case of @ref SOLVEPNP_P3P and @ref SOLVEPNP_AP3P
- methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
- of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- - With @ref SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
- are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
- global solution to converge.
- - With @ref SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
- - With @ref SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
- Number of input points must be 4. Object points must be defined in the following order:
- - point 0: [-squareLength / 2, squareLength / 2, 0]
- - point 1: [ squareLength / 2, squareLength / 2, 0]
- - point 2: [ squareLength / 2, -squareLength / 2, 0]
- - point 3: [-squareLength / 2, -squareLength / 2, 0]
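-
-Sample usage (a minimal sketch retrieving both IPPE solutions and their RMS errors):
-@code
-    vector<Point3d> objectPoints = ...; // >= 4 coplanar model points
-    vector<Point2d> imagePoints  = ...; // corresponding detections
-    Mat cameraMatrix = ...;             // intrinsics
-    vector<Mat> rvecs, tvecs;
-    Mat reprojErrors;
-    int nSolutions = solvePnPGeneric(objectPoints, imagePoints, cameraMatrix, noArray(),
-                                     rvecs, tvecs, false, SOLVEPNP_IPPE,
-                                     noArray(), noArray(), reprojErrors);
-@endcode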
- */
-CV_EXPORTS_W int solvePnPGeneric( InputArray objectPoints, InputArray imagePoints,
- InputArray cameraMatrix, InputArray distCoeffs,
- OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
- bool useExtrinsicGuess = false, SolvePnPMethod flags = SOLVEPNP_ITERATIVE,
- InputArray rvec = noArray(), InputArray tvec = noArray(),
- OutputArray reprojectionError = noArray() );
-
-/** @brief Finds an initial camera intrinsic matrix from 3D-2D point correspondences.
-
-@param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern
-coordinate space. In the old interface all the per-view vectors are concatenated. See
-#calibrateCamera for details.
-@param imagePoints Vector of vectors of the projections of the calibration pattern points. In the
-old interface all the per-view vectors are concatenated.
-@param imageSize Image size in pixels used to initialize the principal point.
-@param aspectRatio If it is zero or negative, both \f$f_x\f$ and \f$f_y\f$ are estimated independently.
-Otherwise, \f$f_x = f_y \cdot \texttt{aspectRatio}\f$ .
-
-The function estimates and returns an initial camera intrinsic matrix for the camera calibration process.
-Currently, the function only supports planar calibration patterns, which are patterns where each
-object point has z-coordinate = 0.
- */
-CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
- InputArrayOfArrays imagePoints,
- Size imageSize, double aspectRatio = 1.0 );
-
-/** @brief Finds the positions of internal corners of the chessboard.
-
-@param image Source chessboard view. It must be an 8-bit grayscale or color image.
-@param patternSize Number of inner corners per a chessboard row and column
-( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
-@param corners Output array of detected corners.
-@param flags Various operation flags that can be zero or a combination of the following values:
-- @ref CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the image to black
-and white, rather than a fixed threshold level (computed from the average image brightness).
-- @ref CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with #equalizeHist before
-applying fixed or adaptive thresholding.
-- @ref CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, perimeter,
-square-like shape) to filter out false quads extracted at the contour retrieval stage.
-- @ref CALIB_CB_FAST_CHECK Run a fast check on the image that looks for chessboard corners,
-and shortcut the call if none is found. This can drastically speed up the call in the
-degenerate condition when no chessboard is observed.
-- @ref CALIB_CB_PLAIN All other flags are ignored. The input image is taken as is.
-No image processing is done to improve detection of the checkerboard. This speeds up the
-execution of the function but can lead to the checkerboard not being recognized if the image
-has not previously been binarized in an appropriate manner.
-
-The function attempts to determine whether the input image is a view of the chessboard pattern and
-locate the internal chessboard corners. The function returns a non-zero value if all of the corners
-are found and they are placed in a certain order (row by row, left to right in every row).
-Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
-a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
-squares touch each other. The detected coordinates are approximate, and to determine their positions
-more accurately, the function calls #cornerSubPix. You may also use the function #cornerSubPix with
-different parameters if returned coordinates are not accurate enough.
-
-Sample usage of detecting and drawing chessboard corners:
-@code
- Size patternsize(8,6); //interior number of corners
- Mat gray = ....; //source image
- vector<Point2f> corners; //this will be filled by the detected corners
-
- //CALIB_CB_FAST_CHECK saves a lot of time on images
- //that do not contain any chessboard corners
- bool patternfound = findChessboardCorners(gray, patternsize, corners,
- CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
- + CALIB_CB_FAST_CHECK);
-
- if(patternfound)
- cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
- TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
-
- drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
-@endcode
-@note The function requires white space (like a square-thick border, the wider the better) around
-the board to make the detection more robust in various environments. Otherwise, if there is no
-border and the background is dark, the outer black squares cannot be segmented properly and so the
-square grouping and ordering algorithm fails.
-
-Use gen_pattern.py (@ref tutorial_camera_calibration_pattern) to create a checkerboard.
- */
-CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners,
- int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE );
-
-/*
- Checks whether the image contains chessboard of the specific size or not.
- If yes, nonzero value is returned.
-*/
-CV_EXPORTS_W bool checkChessboard(InputArray img, Size size);
-
-/** @brief Finds the positions of internal corners of the chessboard using a sector based approach.
-
-@param image Source chessboard view. It must be an 8-bit grayscale or color image.
-@param patternSize Number of inner corners per a chessboard row and column
-( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
-@param corners Output array of detected corners.
-@param flags Various operation flags that can be zero or a combination of the following values:
-- @ref CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before detection.
-- @ref CALIB_CB_EXHAUSTIVE Run an exhaustive search to improve detection rate.
-- @ref CALIB_CB_ACCURACY Up sample input image to improve sub-pixel accuracy due to aliasing effects.
-- @ref CALIB_CB_LARGER The detected pattern is allowed to be larger than patternSize (see description).
-- @ref CALIB_CB_MARKER The detected pattern must have a marker (see description).
-This should be used if an accurate camera calibration is required.
-@param meta Optional output array of detected corners (CV_8UC1 and size = cv::Size(columns,rows)).
-Each entry stands for one corner of the pattern and can have one of the following values:
-- 0 = no meta data attached
-- 1 = left-top corner of a black cell
-- 2 = left-top corner of a white cell
-- 3 = left-top corner of a black cell with a white marker dot
-- 4 = left-top corner of a white cell with a black marker dot (pattern origin in case of markers otherwise first corner)
-
-The function is analogous to #findChessboardCorners but uses a localized radon
-transformation approximated by box filters, making it more robust to all sorts of
-noise and faster on larger images, and able to directly return the sub-pixel
-position of the internal chessboard corners. The method is based on the paper
-@cite duda2018 "Accurate Detection and Localization of Checkerboard Corners for
-Calibration", which demonstrates that the returned sub-pixel positions are more
-accurate than those returned by cornerSubPix, allowing a precise camera
-calibration for demanding applications.
-
-In case the flags @ref CALIB_CB_LARGER or @ref CALIB_CB_MARKER are given,
-the result can be recovered from the optional meta array. Both flags are
-helpful for using calibration patterns exceeding the field of view of the camera.
-These oversized patterns allow more accurate calibrations as corners close to
-the image borders can be utilized. For a
-consistent coordinate system across all images, the optional marker (see image
-below) can be used to move the origin of the board to the location where the
-black circle is located.
-
-@note The function requires a white border with roughly the same width as one
-of the checkerboard fields around the whole board to improve the detection in
-various environments. In addition, because of the localized radon
-transformation it is beneficial to use round corners for the field corners
-which are located on the outside of the board. The following figure illustrates
-a sample checkerboard optimized for the detection. However, any other checkerboard
-can be used as well.
-
-Use gen_pattern.py (@ref tutorial_camera_calibration_pattern) to create a checkerboard.
-![Checkerboard](pics/checkerboard_radon.png)
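-
-Sample usage (a minimal sketch; the pattern size is illustrative):
-@code
-    Mat gray = ...; // source image
-    Mat meta;
-    vector<Point2f> corners;
-    bool found = findChessboardCornersSB(gray, Size(9, 6), corners,
-                                         CALIB_CB_LARGER | CALIB_CB_MARKER, meta);
-@endcode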
- */
-CV_EXPORTS_AS(findChessboardCornersSBWithMeta)
-bool findChessboardCornersSB(InputArray image,Size patternSize, OutputArray corners,
- int flags,OutputArray meta);
-/** @overload */
-CV_EXPORTS_W inline
-bool findChessboardCornersSB(InputArray image, Size patternSize, OutputArray corners,
- int flags = 0)
-{
- return findChessboardCornersSB(image, patternSize, corners, flags, noArray());
-}
-
-/** @brief Estimates the sharpness of a detected chessboard.
-
-Image sharpness, as well as brightness, is a critical parameter for accurate
-camera calibration. To access these parameters for filtering out
-problematic calibration images, this method calculates edge profiles by traveling from
-black to white chessboard cell centers. Based on these profiles, the number of pixels
-required to transition from black to white is calculated. This width of the
-transition area is a good indication of how sharp the chessboard is imaged
-and should be below ~3.0 pixels.
-
-@param image Gray image used to find chessboard corners
-@param patternSize Size of a found chessboard pattern
-@param corners Corners found by #findChessboardCornersSB
-@param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength
-@param vertical By default edge responses for horizontal lines are calculated
-@param sharpness Optional output array with a sharpness value for calculated edge responses (see description)
-
-The optional sharpness array is of type CV_32FC1 and has for each calculated
-profile one row with the following five entries:
-* 0 = x coordinate of the underlying edge in the image
-* 1 = y coordinate of the underlying edge in the image
-* 2 = width of the transition area (sharpness)
-* 3 = signal strength in the black cell (min brightness)
-* 4 = signal strength in the white cell (max brightness)
-
-@return Scalar(average sharpness, average min brightness, average max brightness,0)
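-
-Sample usage (a minimal sketch, assuming gray, patternSize and corners from a prior
-#findChessboardCornersSB call):
-@code
-    Scalar stats = estimateChessboardSharpness(gray, patternSize, corners);
-    // stats[0] is the average transition width in pixels, ideally below ~3.0
-@endcode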
-*/
-CV_EXPORTS_W Scalar estimateChessboardSharpness(InputArray image, Size patternSize, InputArray corners,
- float rise_distance=0.8F,bool vertical=false,
- OutputArray sharpness=noArray());
-
-
-//! finds subpixel-accurate positions of the chessboard corners
-CV_EXPORTS_W bool find4QuadCornerSubpix( InputArray img, InputOutputArray corners, Size region_size );
-
-/** @brief Renders the detected chessboard corners.
-
-@param image Destination image. It must be an 8-bit color image.
-@param patternSize Number of inner corners per a chessboard row and column
-(patternSize = cv::Size(points_per_row,points_per_column)).
-@param corners Array of detected corners, the output of #findChessboardCorners.
-@param patternWasFound Parameter indicating whether the complete board was found or not. The
-return value of #findChessboardCorners should be passed here.
-
-The function draws individual chessboard corners detected either as red circles if the board was not
-found, or as colored corners connected with lines if the board was found.
- */
-CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize,
- InputArray corners, bool patternWasFound );
-
-/** @brief Draw axes of the world/object coordinate system from pose estimation. @sa solvePnP
-
-@param image Input/output image. It must have 1 or 3 channels. The number of channels is not altered.
-@param cameraMatrix Input 3x3 floating-point matrix of camera intrinsic parameters.
-\f$\cameramatrix{A}\f$
-@param distCoeffs Input vector of distortion coefficients
-\f$\distcoeffs\f$. If the vector is empty, the zero distortion coefficients are assumed.
-@param rvec Rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
-the model coordinate system to the camera coordinate system.
-@param tvec Translation vector.
-@param length Length of the painted axes in the same unit as tvec (usually in meters).
-@param thickness Line thickness of the painted axes.
-
-This function draws the axes of the world/object coordinate system w.r.t. the camera frame.
-OX is drawn in red, OY in green and OZ in blue.
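-
-Sample usage (a minimal sketch, assuming a pose from #solvePnP):
-@code
-    // draw 0.1-unit-long axes (same unit as tvec) with the default thickness
-    drawFrameAxes(image, cameraMatrix, distCoeffs, rvec, tvec, 0.1f);
-@endcode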
- */
-CV_EXPORTS_W void drawFrameAxes(InputOutputArray image, InputArray cameraMatrix, InputArray distCoeffs,
- InputArray rvec, InputArray tvec, float length, int thickness=3);
-
-struct CV_EXPORTS_W_SIMPLE CirclesGridFinderParameters
-{
- CV_WRAP CirclesGridFinderParameters();
- CV_PROP_RW cv::Size2f densityNeighborhoodSize;
- CV_PROP_RW float minDensity;
- CV_PROP_RW int kmeansAttempts;
- CV_PROP_RW int minDistanceToAddKeypoint;
- CV_PROP_RW int keypointScale;
- CV_PROP_RW float minGraphConfidence;
- CV_PROP_RW float vertexGain;
- CV_PROP_RW float vertexPenalty;
- CV_PROP_RW float existingVertexGain;
- CV_PROP_RW float edgeGain;
- CV_PROP_RW float edgePenalty;
- CV_PROP_RW float convexHullFactor;
- CV_PROP_RW float minRNGEdgeSwitchDist;
-
- enum GridType
- {
- SYMMETRIC_GRID, ASYMMETRIC_GRID
- };
- GridType gridType;
-
- CV_PROP_RW float squareSize; //!< Distance between two adjacent points. Used by CALIB_CB_CLUSTERING.
- CV_PROP_RW float maxRectifiedDistance; //!< Max deviation from prediction. Used by CALIB_CB_CLUSTERING.
-};
-
-#ifndef DISABLE_OPENCV_3_COMPATIBILITY
-typedef CirclesGridFinderParameters CirclesGridFinderParameters2;
-#endif
-
-/** @brief Finds centers in the grid of circles.
-
-@param image grid view of input circles; it must be an 8-bit grayscale or color image.
-@param patternSize number of circles per row and column
-( patternSize = Size(points_per_row, points_per_column) ).
-@param centers output array of detected centers.
-@param flags various operation flags that can be one of the following values:
-- @ref CALIB_CB_SYMMETRIC_GRID uses symmetric pattern of circles.
-- @ref CALIB_CB_ASYMMETRIC_GRID uses asymmetric pattern of circles.
-- @ref CALIB_CB_CLUSTERING uses a special algorithm for grid detection. It is more robust to
-perspective distortions but much more sensitive to background clutter.
-@param blobDetector feature detector that finds blobs like dark circles on light background.
- If `blobDetector` is NULL then `image` represents a Point2f array of candidates.
-@param parameters struct for finding circles in a grid pattern.
-
-The function attempts to determine whether the input image contains a grid of circles. If it does, the
-function locates centers of the circles. The function returns a non-zero value if all of the centers
-have been found and they have been placed in a certain order (row by row, left to right in every
-row). Otherwise, if the function fails to find all the corners or reorder them, it returns 0.
-
-Sample usage of detecting and drawing the centers of circles:
-@code
- Size patternsize(7,7); //number of centers
- Mat gray = ...; //source image
- vector<Point2f> centers; //this will be filled by the detected centers
-
- bool patternfound = findCirclesGrid(gray, patternsize, centers);
-
- drawChessboardCorners(img, patternsize, Mat(centers), patternfound);
-@endcode
-@note The function requires white space (like a square-thick border, the wider the better) around
-the board to make the detection more robust in various environments.
- */
-CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
- OutputArray centers, int flags,
-                                   const Ptr<FeatureDetector> &blobDetector,
- const CirclesGridFinderParameters& parameters);
-
-/** @overload */
-CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
- OutputArray centers, int flags = CALIB_CB_SYMMETRIC_GRID,
-                                   const Ptr<FeatureDetector> &blobDetector = SimpleBlobDetector::create());
-
-/** @brief Finds the camera intrinsic and extrinsic parameters from several views of a calibration
-pattern.
-
-@param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
-the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
-vector contains as many elements as the number of pattern views. If the same calibration pattern
-is shown in each view and it is fully visible, all the vectors will be the same. It is, however,
-possible to use partially occluded patterns or even different patterns in different views; then
-the vectors will differ. Although the points are 3D, they all lie in the calibration pattern's
-XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
-In the old interface all the vectors of object points from different views are concatenated
-together.
-@param imagePoints In the new interface it is a vector of vectors of the projections of calibration
-pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() and
-objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
-respectively. In the old interface all the vectors of image points from different views are
-concatenated together.
-@param imageSize Size of the image used only to initialize the camera intrinsic matrix.
-@param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
-\f$\cameramatrix{A}\f$ . If @ref CALIB_USE_INTRINSIC_GUESS
-and/or @ref CALIB_FIX_ASPECT_RATIO, @ref CALIB_FIX_PRINCIPAL_POINT or @ref CALIB_FIX_FOCAL_LENGTH
-are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.
-@param distCoeffs Input/output vector of distortion coefficients
-\f$\distcoeffs\f$.
-@param rvecs Output vector of rotation vectors (@ref Rodrigues ) estimated for each pattern view
-(e.g. std::vector<cv::Mat>). That is, each i-th rotation vector together with the corresponding
-i-th translation vector (see the next output parameter description) brings the calibration pattern
-from the object coordinate space (in which object points are specified) to the camera coordinate
-space. In more technical terms, the tuple of the i-th rotation and translation vector performs
-a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
-tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
-space.
-@param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
-description above.
-@param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
-parameters. Order of deviations values:
-\f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
- s_4, \tau_x, \tau_y)\f$ If one of the parameters is not estimated, its deviation equals zero.
-@param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
-parameters. Order of deviations values: \f$(R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\f$ where M is
-the number of pattern views. \f$R_i, T_i\f$ are concatenated 1x3 vectors.
-@param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
-@param flags Different flags that may be zero or a combination of the following values:
-- @ref CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
-fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
-center ( imageSize is used), and focal distances are computed in a least-squares fashion.
-Note, that if intrinsic parameters are known, there is no need to use this function just to
-estimate extrinsic parameters. Use @ref solvePnP instead.
-- @ref CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
-optimization. It stays at the center or at a different location specified when
- @ref CALIB_USE_INTRINSIC_GUESS is set too.
-- @ref CALIB_FIX_ASPECT_RATIO The functions consider only fy as a free parameter. The
-ratio fx/fy stays the same as in the input cameraMatrix . When
- @ref CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
-ignored, only their ratio is computed and used further.
-- @ref CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \f$(p_1, p_2)\f$ are set
-to zeros and stay zero.
-- @ref CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
- @ref CALIB_USE_INTRINSIC_GUESS is set.
-- @ref CALIB_FIX_K1,..., @ref CALIB_FIX_K6 The corresponding radial distortion
-coefficient is not changed during the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is
-set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
-- @ref CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide the
-backward compatibility, this extra flag should be explicitly specified to make the
-calibration function use the rational model and return 8 coefficients or more.
-- @ref CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
-backward compatibility, this extra flag should be explicitly specified to make the
-calibration function use the thin prism model and return 12 coefficients or more.
-- @ref CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
-the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
-supplied distCoeffs matrix is used. Otherwise, it is set to 0.
-- @ref CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
-backward compatibility, this extra flag should be explicitly specified to make the
-calibration function use the tilted sensor model and return 14 coefficients.
-- @ref CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
-the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
-supplied distCoeffs matrix is used. Otherwise, it is set to 0.
-@param criteria Termination criteria for the iterative optimization algorithm.
-
-@return the overall RMS re-projection error.
-
-The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
-views. The algorithm is based on @cite Zhang2000 and @cite BouguetMCT . The coordinates of 3D object
-points and their corresponding 2D projections in each view must be specified. That may be achieved
-by using an object with known geometry and easily detectable feature points. Such an object is
-called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
-a calibration rig (see @ref findChessboardCorners). Currently, initialization of intrinsic
-parameters (when @ref CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
-patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
-be used as long as initial cameraMatrix is provided.
-
-The algorithm performs the following steps:
-
-- Compute the initial intrinsic parameters (the option only available for planar calibration
- patterns) or read them from the input parameters. The distortion coefficients are all set to
- zeros initially unless some of CALIB_FIX_K? are specified.
-
-- Estimate the initial camera pose as if the intrinsic parameters have been already known. This is
- done using @ref solvePnP .
-
-- Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
- that is, the total sum of squared distances between the observed feature points imagePoints and
- the projected (using the current estimates for camera parameters and the poses) object points
- objectPoints. See @ref projectPoints for details.
-
-@note
- If you use a non-square (i.e. non-N-by-N) grid and @ref findChessboardCorners for calibration,
- and @ref calibrateCamera returns bad values (zero distortion coefficients, \f$c_x\f$ and
- \f$c_y\f$ very far from the image center, and/or large differences between \f$f_x\f$ and
- \f$f_y\f$ (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
- instead of using patternSize=cvSize(cols,rows) in @ref findChessboardCorners.
-
-@note
- The function may throw exceptions, if unsupported combination of parameters is provided or
- the system is underconstrained.
-
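-Sample usage (a minimal sketch; the per-view point lists are placeholders):
-@code
-    vector<vector<Point3f>> objectPoints = ...; // pattern points, one vector per view
-    vector<vector<Point2f>> imagePoints  = ...; // detected corners, one vector per view
-    Size imageSize = ...;                       // size of the calibration images
-    Mat cameraMatrix, distCoeffs;
-    vector<Mat> rvecs, tvecs;
-    double rms = calibrateCamera(objectPoints, imagePoints, imageSize,
-                                 cameraMatrix, distCoeffs, rvecs, tvecs);
-@endcode
-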
-@sa
- calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
- undistort
- */
-CV_EXPORTS_AS(calibrateCameraExtended) double calibrateCamera( InputArrayOfArrays objectPoints,
- InputArrayOfArrays imagePoints, Size imageSize,
- InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
- OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
- OutputArray stdDeviationsIntrinsics,
- OutputArray stdDeviationsExtrinsics,
- OutputArray perViewErrors,
- int flags = 0, TermCriteria criteria = TermCriteria(
- TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );
-
-/** @overload */
-CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
- InputArrayOfArrays imagePoints, Size imageSize,
- InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
- OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
- int flags = 0, TermCriteria criteria = TermCriteria(
- TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );
-
-/** @brief Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
-
-This function is an extension of #calibrateCamera with the method of releasing object which was
-proposed in @cite strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
-targets (calibration plates), this method can dramatically improve the precision of the estimated
-camera parameters. Both the object-releasing method and standard method are supported by this
-function. Use the parameter **iFixedPoint** for method selection. In the internal implementation,
-#calibrateCamera is a wrapper for this function.
-
-@param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
-coordinate space. See #calibrateCamera for details. If the method of releasing object is to be used,
-the identical calibration board must be used in each view, it must be fully visible, and all
-objectPoints[i] must be the same and all points should be roughly close to a plane. **The calibration
-target has to be rigid, or at least static if the camera (rather than the calibration target) is
-shifted for grabbing images.**
-@param imagePoints Vector of vectors of the projections of calibration pattern points. See
-#calibrateCamera for details.
-@param imageSize Size of the image used only to initialize the intrinsic camera matrix.
-@param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
-a switch for calibration method selection. If the object-releasing method is to be used, pass the
-parameter in the range [1, objectPoints[0].size()-2]; a value out of this range selects the
-standard calibration method. Usually the top-right corner point of the calibration
-board grid is recommended to be fixed when the object-releasing method is utilized. According to
-@cite strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
-and objectPoints[0].back.z are used. With object-releasing method, accurate rvecs, tvecs and
-newObjPoints are only possible if coordinates of these three fixed points are accurate enough.
-@param cameraMatrix Output 3x3 floating-point camera matrix. See #calibrateCamera for details.
-@param distCoeffs Output vector of distortion coefficients. See #calibrateCamera for details.
-@param rvecs Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
-for details.
-@param tvecs Output vector of translation vectors estimated for each pattern view.
-@param newObjPoints The updated output vector of calibration pattern points. The coordinates might
-be scaled based on three fixed points. The returned coordinates are accurate only if the above
-mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This parameter
-is ignored with standard calibration method.
-@param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
-See #calibrateCamera for details.
-@param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
-See #calibrateCamera for details.
-@param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
-of calibration pattern points. It has the same size and order as objectPoints[0] vector. This
-parameter is ignored with standard calibration method.
-@param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
-@param flags Different flags that may be zero or a combination of some predefined values. See
-#calibrateCamera for details. If the method of releasing object is used, the calibration time may
-be much longer. CALIB_USE_QR or CALIB_USE_LU could be used for faster calibration, at the cost of
-potentially less precision and stability in some rare cases.
-@param criteria Termination criteria for the iterative optimization algorithm.
-
-@return the overall RMS re-projection error.
-
-The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
-views. The algorithm is based on @cite Zhang2000, @cite BouguetMCT and @cite strobl2011iccv. See
-#calibrateCamera for other detailed explanations.
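-
-Sample usage (a minimal sketch; inputs as in the #calibrateCamera example):
-@code
-    int iFixedPoint = ...; // e.g. index of the top-right corner point, in [1, objectPoints[0].size()-2]
-    Mat cameraMatrix, distCoeffs, newObjPoints;
-    vector<Mat> rvecs, tvecs;
-    double rms = calibrateCameraRO(objectPoints, imagePoints, imageSize, iFixedPoint,
-                                   cameraMatrix, distCoeffs, rvecs, tvecs, newObjPoints);
-@endcode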
-@sa
- calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
- */
-CV_EXPORTS_AS(calibrateCameraROExtended) double calibrateCameraRO( InputArrayOfArrays objectPoints,
- InputArrayOfArrays imagePoints, Size imageSize, int iFixedPoint,
- InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
- OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
- OutputArray newObjPoints,
- OutputArray stdDeviationsIntrinsics,
- OutputArray stdDeviationsExtrinsics,
- OutputArray stdDeviationsObjPoints,
- OutputArray perViewErrors,
- int flags = 0, TermCriteria criteria = TermCriteria(
- TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );
-
-/** @overload */
-CV_EXPORTS_W double calibrateCameraRO( InputArrayOfArrays objectPoints,
- InputArrayOfArrays imagePoints, Size imageSize, int iFixedPoint,
- InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
- OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
- OutputArray newObjPoints,
- int flags = 0, TermCriteria criteria = TermCriteria(
- TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );
-
-/** @brief Computes useful camera characteristics from the camera intrinsic matrix.
-
-@param cameraMatrix Input camera intrinsic matrix that can be estimated by #calibrateCamera or
-#stereoCalibrate .
-@param imageSize Input image size in pixels.
-@param apertureWidth Physical width in mm of the sensor.
-@param apertureHeight Physical height in mm of the sensor.
-@param fovx Output field of view in degrees along the horizontal sensor axis.
-@param fovy Output field of view in degrees along the vertical sensor axis.
-@param focalLength Focal length of the lens in mm.
-@param principalPoint Principal point in mm.
-@param aspectRatio \f$f_y/f_x\f$
-
-The function computes various useful camera characteristics from the previously estimated camera
-matrix.
-
-@note
- Do keep in mind that the unit 'mm' stands for whatever unit of measure one chooses for
- the chessboard pitch (it can thus be any value).
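-
-Sample usage (a minimal sketch; the sensor dimensions are illustrative):
-@code
-    Mat cameraMatrix = ...; // e.g. from calibrateCamera
-    double fovx, fovy, focalLength, aspectRatio;
-    Point2d principalPoint;
-    calibrationMatrixValues(cameraMatrix, Size(640, 480), 6.0, 4.5,
-                            fovx, fovy, focalLength, principalPoint, aspectRatio);
-@endcode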
- */
-CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix, Size imageSize,
- double apertureWidth, double apertureHeight,
- CV_OUT double& fovx, CV_OUT double& fovy,
- CV_OUT double& focalLength, CV_OUT Point2d& principalPoint,
- CV_OUT double& aspectRatio );
-
-/** @brief Calibrates a stereo camera set up. This function finds the intrinsic parameters
-for each of the two cameras and the extrinsic parameters between the two cameras.
-
-@param objectPoints Vector of vectors of the calibration pattern points. The same structure as
-in @ref calibrateCamera. For each pattern view, both cameras need to see the same object
-points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
-equal as well as objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
-be equal for each i.
-@param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
-observed by the first camera. The same structure as in @ref calibrateCamera.
-@param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
-observed by the second camera. The same structure as in @ref calibrateCamera.
-@param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
-@ref calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
-@param distCoeffs1 Input/output vector of distortion coefficients, the same as in
-@ref calibrateCamera.
-@param cameraMatrix2 Input/output camera intrinsic matrix for the second camera. See description for
-cameraMatrix1.
-@param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
-description for distCoeffs1.
-@param imageSize Size of the image used only to initialize the camera intrinsic matrices.
-@param R Output rotation matrix. Together with the translation vector T, this matrix brings
-points given in the first camera's coordinate system to points in the second camera's
-coordinate system. In more technical terms, the tuple of R and T performs a change of basis
-from the first camera's coordinate system to the second camera's coordinate system. Due to its
-duality, this tuple is equivalent to the position of the first camera with respect to the
-second camera coordinate system.
-@param T Output translation vector, see description above.
-@param E Output essential matrix.
-@param F Output fundamental matrix.
-@param rvecs Output vector of rotation vectors ( @ref Rodrigues ) estimated for each pattern view in the
-coordinate system of the first camera of the stereo pair (e.g. std::vector<cv::Mat>). In more detail, each
-i-th rotation vector together with the corresponding i-th translation vector (see the next output parameter
-description) brings the calibration pattern from the object coordinate space (in which object points are
-specified) to the camera coordinate space of the first camera of the stereo pair. In more technical terms,
-the tuple of the i-th rotation and translation vector performs a change of basis from object coordinate space
-to camera coordinate space of the first camera of the stereo pair.
-@param tvecs Output vector of translation vectors estimated for each pattern view, see parameter description
-of previous output parameter ( rvecs ).
-@param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
-@param flags Different flags that may be zero or a combination of the following values:
-- @ref CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
-matrices are estimated.
-- @ref CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
-according to the specified flags. Initial values are provided by the user.
-- @ref CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
-Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
-- @ref CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
-- @ref CALIB_FIX_FOCAL_LENGTH Fix \f$f^{(j)}_x\f$ and \f$f^{(j)}_y\f$ .
-- @ref CALIB_FIX_ASPECT_RATIO Optimize \f$f^{(j)}_y\f$ . Fix the ratio \f$f^{(j)}_x/f^{(j)}_y\f$ .
-- @ref CALIB_SAME_FOCAL_LENGTH Enforce \f$f^{(0)}_x=f^{(1)}_x\f$ and \f$f^{(0)}_y=f^{(1)}_y\f$ .
-- @ref CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
-zeros and fix there.
-- @ref CALIB_FIX_K1,..., @ref CALIB_FIX_K6 Do not change the corresponding radial
-distortion coefficient during the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set,
-the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
-- @ref CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide the backward
-compatibility, this extra flag should be explicitly specified to make the calibration
-function use the rational model and return 8 coefficients. If the flag is not set, the
-function computes and returns only 5 distortion coefficients.
-- @ref CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
-backward compatibility, this extra flag should be explicitly specified to make the
-calibration function use the thin prism model and return 12 coefficients. If the flag is not
-set, the function computes and returns only 5 distortion coefficients.
-- @ref CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
-the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
-supplied distCoeffs matrix is used. Otherwise, it is set to 0.
-- @ref CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
-backward compatibility, this extra flag should be explicitly specified to make the
-calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
-set, the function computes and returns only 5 distortion coefficients.
-- @ref CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
-the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
-supplied distCoeffs matrix is used. Otherwise, it is set to 0.
-@param criteria Termination criteria for the iterative optimization algorithm.
-
-The function estimates the transformation between two cameras making a stereo pair. If one computes
-the poses of an object relative to the first camera and to the second camera,
-( \f$R_1\f$,\f$T_1\f$ ) and (\f$R_2\f$,\f$T_2\f$), respectively, for a stereo camera where the
-relative position and orientation between the two cameras are fixed, then those poses definitely
-relate to each other. This means, if the relative position and orientation (\f$R\f$,\f$T\f$) of the
-two cameras is known, it is possible to compute (\f$R_2\f$,\f$T_2\f$) when (\f$R_1\f$,\f$T_1\f$) is
-given. This is what the described function does. It computes (\f$R\f$,\f$T\f$) such that:
-
-\f[R_2=R R_1\f]
-\f[T_2=R T_1 + T.\f]
-
-Therefore, one can compute the coordinate representation of a 3D point for the second camera's
-coordinate system when given the point's coordinate representation in the first camera's coordinate
-system:
-
-\f[\begin{bmatrix}
-X_2 \\
-Y_2 \\
-Z_2 \\
-1
-\end{bmatrix} = \begin{bmatrix}
-R & T \\
-0 & 1
-\end{bmatrix} \begin{bmatrix}
-X_1 \\
-Y_1 \\
-Z_1 \\
-1
-\end{bmatrix}.\f]
-
-
-Optionally, it computes the essential matrix E:
-
-\f[E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R\f]
-
-where \f$T_i\f$ are components of the translation vector \f$T\f$ : \f$T=[T_0, T_1, T_2]^T\f$ .
-And the function can also compute the fundamental matrix F:
-
-\f[F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}\f]
-
-Besides the stereo-related information, the function can also perform a full calibration of each of
-the two cameras. However, due to the high dimensionality of the parameter space and noise in the
-input data, the function can diverge from the correct solution. If the intrinsic parameters can be
-estimated with high accuracy for each of the cameras individually (for example, using
-#calibrateCamera ), it is recommended to do so and then pass the @ref CALIB_FIX_INTRINSIC flag to the
-function along with the computed intrinsic parameters. Otherwise, if all the parameters are
-estimated at once, it makes sense to restrict some parameters, for example, pass
- @ref CALIB_SAME_FOCAL_LENGTH and @ref CALIB_ZERO_TANGENT_DIST flags, which is usually a
-reasonable assumption.
-
-Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the
-points in all the available views from both cameras. The function returns the final value of the
-re-projection error.
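-
-As a usage illustration, a minimal sketch could look as follows (objectPoints, imagePoints1,
-imagePoints2 and imageSize are assumed to be prepared by the caller, and the intrinsics are
-assumed to be precomputed with #calibrateCamera):
-@code
-    Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2; // precomputed intrinsics
-    Mat R, T, E, F;
-    double rms = stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
-                                 cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
-                                 imageSize, R, T, E, F, CALIB_FIX_INTRINSIC);
-
-    // a point expressed in the first camera's frame, mapped to the second camera's frame:
-    // X2 = R * X1 + T
-    Mat X1 = (Mat_<double>(3, 1) << 0.1, 0.2, 1.0); // arbitrary example point
-    Mat X2 = R * X1 + T;
-@endcode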
- */
-CV_EXPORTS_AS(stereoCalibrateExtended) double stereoCalibrate( InputArrayOfArrays objectPoints,
- InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
- InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1,
- InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2,
- Size imageSize, InputOutputArray R, InputOutputArray T, OutputArray E, OutputArray F,
- OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, OutputArray perViewErrors, int flags = CALIB_FIX_INTRINSIC,
- TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6) );
-
-/// @overload
-CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
- InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
- InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1,
- InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2,
- Size imageSize, OutputArray R,OutputArray T, OutputArray E, OutputArray F,
- int flags = CALIB_FIX_INTRINSIC,
- TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6) );
-
-/// @overload
-CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
- InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
- InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1,
- InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2,
- Size imageSize, InputOutputArray R, InputOutputArray T, OutputArray E, OutputArray F,
- OutputArray perViewErrors, int flags = CALIB_FIX_INTRINSIC,
- TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6) );
-
-/** @brief Computes rectification transforms for each head of a calibrated stereo camera.
-
-@param cameraMatrix1 First camera intrinsic matrix.
-@param distCoeffs1 First camera distortion parameters.
-@param cameraMatrix2 Second camera intrinsic matrix.
-@param distCoeffs2 Second camera distortion parameters.
-@param imageSize Size of the image used for stereo calibration.
-@param R Rotation matrix from the coordinate system of the first camera to the second camera,
-see @ref stereoCalibrate.
-@param T Translation vector from the coordinate system of the first camera to the second camera,
-see @ref stereoCalibrate.
-@param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
-brings points given in the unrectified first camera's coordinate system to points in the rectified
-first camera's coordinate system. In more technical terms, it performs a change of basis from the
-unrectified first camera's coordinate system to the rectified first camera's coordinate system.
-@param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
-brings points given in the unrectified second camera's coordinate system to points in the rectified
-second camera's coordinate system. In more technical terms, it performs a change of basis from the
-unrectified second camera's coordinate system to the rectified second camera's coordinate system.
-@param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
-camera, i.e. it projects points given in the rectified first camera coordinate system into the
-rectified first camera's image.
-@param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
-camera, i.e. it projects points given in the rectified first camera coordinate system into the
-rectified second camera's image.
-@param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see @ref reprojectImageTo3D).
-@param flags Operation flags that may be zero or @ref CALIB_ZERO_DISPARITY . If the flag is set,
-the function makes the principal points of each camera have the same pixel coordinates in the
-rectified views. If the flag is not set, the function may still shift the images in the
-horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
-useful image area.
-@param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
-scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
-images are zoomed and shifted so that only valid pixels are visible (no black areas after
-rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
-pixels from the original images from the cameras are retained in the rectified images (no source
-image pixels are lost). Any intermediate value yields an intermediate result between
-those two extreme cases.
-@param newImageSize New image resolution after rectification. The same size should be passed to
-#initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
-is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
-preserve details in the original image, especially when there is a big radial distortion.
-@param validPixROI1 Optional output rectangles inside the rectified images where all the pixels
-are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
-(see the picture below).
-@param validPixROI2 Optional output rectangles inside the rectified images where all the pixels
-are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
-(see the picture below).
-
-The function computes the rotation matrices for each camera that (virtually) make both camera image
-planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
-the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
-as input. As output, it provides two rotation matrices and also two projection matrices in the new
-coordinates. The function distinguishes the following two cases:
-
-- **Horizontal stereo**: the first and the second camera views are shifted relative to each other
- mainly along the x-axis (with possible small vertical shift). In the rectified images, the
- corresponding epipolar lines in the left and right cameras are horizontal and have the same
- y-coordinate. P1 and P2 look like:
-
- \f[\texttt{P1} = \begin{bmatrix}
- f & 0 & cx_1 & 0 \\
- 0 & f & cy & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix}\f]
-
- \f[\texttt{P2} = \begin{bmatrix}
- f & 0 & cx_2 & T_x \cdot f \\
- 0 & f & cy & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix} ,\f]
-
- \f[\texttt{Q} = \begin{bmatrix}
- 1 & 0 & 0 & -cx_1 \\
- 0 & 1 & 0 & -cy \\
- 0 & 0 & 0 & f \\
- 0 & 0 & -\frac{1}{T_x} & \frac{cx_1 - cx_2}{T_x}
- \end{bmatrix} \f]
-
- where \f$T_x\f$ is a horizontal shift between the cameras and \f$cx_1=cx_2\f$ if
- @ref CALIB_ZERO_DISPARITY is set.
-
-- **Vertical stereo**: the first and the second camera views are shifted relative to each other
- mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
- lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
-
- \f[\texttt{P1} = \begin{bmatrix}
- f & 0 & cx & 0 \\
- 0 & f & cy_1 & 0 \\
- 0 & 0 & 1 & 0
- \end{bmatrix}\f]
-
- \f[\texttt{P2} = \begin{bmatrix}
- f & 0 & cx & 0 \\
- 0 & f & cy_2 & T_y \cdot f \\
- 0 & 0 & 1 & 0
- \end{bmatrix},\f]
-
- \f[\texttt{Q} = \begin{bmatrix}
- 1 & 0 & 0 & -cx \\
- 0 & 1 & 0 & -cy_1 \\
- 0 & 0 & 0 & f \\
- 0 & 0 & -\frac{1}{T_y} & \frac{cy_1 - cy_2}{T_y}
- \end{bmatrix} \f]
-
- where \f$T_y\f$ is a vertical shift between the cameras and \f$cy_1=cy_2\f$ if
- @ref CALIB_ZERO_DISPARITY is set.
-
-As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
-matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
-initialize the rectification map for each camera.
-
-See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
-the corresponding image regions. This means that the images are well rectified, which is what most
-stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
-their interiors are all valid pixels.
-
-![image](pics/stereo_undistort.jpg)
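-
-A minimal usage sketch (assuming cameraMatrix1/2, distCoeffs1/2, R and T come from a previous
-#stereoCalibrate run, and left/right are the captured images):
-@code
-    Mat R1, R2, P1, P2, Q;
-    Rect roi1, roi2;
-    stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
-                  imageSize, R, T, R1, R2, P1, P2, Q,
-                  CALIB_ZERO_DISPARITY, -1, imageSize, &roi1, &roi2);
-
-    // build the per-camera undistort+rectify maps and warp the images
-    Mat map11, map12, map21, map22, leftRect, rightRect;
-    initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, imageSize, CV_16SC2, map11, map12);
-    initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, imageSize, CV_16SC2, map21, map22);
-    remap(left,  leftRect,  map11, map12, INTER_LINEAR);
-    remap(right, rightRect, map21, map22, INTER_LINEAR);
-@endcode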
- */
-CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1,
- InputArray cameraMatrix2, InputArray distCoeffs2,
- Size imageSize, InputArray R, InputArray T,
- OutputArray R1, OutputArray R2,
- OutputArray P1, OutputArray P2,
- OutputArray Q, int flags = CALIB_ZERO_DISPARITY,
- double alpha = -1, Size newImageSize = Size(),
- CV_OUT Rect* validPixROI1 = 0, CV_OUT Rect* validPixROI2 = 0 );
-
-/** @brief Computes a rectification transform for an uncalibrated stereo camera.
-
-@param points1 Array of feature points in the first image.
-@param points2 The corresponding points in the second image. The same formats as in
-#findFundamentalMat are supported.
-@param F Input fundamental matrix. It can be computed from the same set of point pairs using
-#findFundamentalMat .
-@param imgSize Size of the image.
-@param H1 Output rectification homography matrix for the first image.
-@param H2 Output rectification homography matrix for the second image.
-@param threshold Optional threshold used to filter out the outliers. If the parameter is greater
-than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points
-for which \f$|\texttt{points2[i]}^T \cdot \texttt{F} \cdot \texttt{points1[i]}|>\texttt{threshold}\f$ )
-are rejected prior to computing the homographies. Otherwise, all the points are considered inliers.
-
-The function computes the rectification transformations without knowing intrinsic parameters of the
-cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
-related difference from #stereoRectify is that the function outputs not the rectification
-transformations in the object (3D) space, but the planar perspective transformations encoded by the
-homography matrices H1 and H2 . The function implements the algorithm @cite Hartley99 .
-
-@note
- While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
- depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
- it would be better to correct it before computing the fundamental matrix and calling this
- function. For example, distortion coefficients can be estimated for each head of stereo camera
- separately by using #calibrateCamera . Then, the images can be corrected using #undistort , or
- just the point coordinates can be corrected with #undistortPoints .
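-
-A minimal sketch (assuming points1/points2 are matched feature points and img1/img2 are the two
-input images):
-@code
-    Mat F = findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
-    Mat H1, H2;
-    stereoRectifyUncalibrated(points1, points2, F, img1.size(), H1, H2);
-
-    // the homographies act directly on the images
-    Mat img1Rect, img2Rect;
-    warpPerspective(img1, img1Rect, H1, img1.size());
-    warpPerspective(img2, img2Rect, H2, img2.size());
-@endcode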
- */
-CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2,
- InputArray F, Size imgSize,
- OutputArray H1, OutputArray H2,
- double threshold = 5 );
-
-//! computes the rectification transformations for a 3-head camera, where all the heads are on the same line.
-CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1,
- InputArray cameraMatrix2, InputArray distCoeffs2,
- InputArray cameraMatrix3, InputArray distCoeffs3,
- InputArrayOfArrays imgpt1, InputArrayOfArrays imgpt3,
- Size imageSize, InputArray R12, InputArray T12,
- InputArray R13, InputArray T13,
- OutputArray R1, OutputArray R2, OutputArray R3,
- OutputArray P1, OutputArray P2, OutputArray P3,
- OutputArray Q, double alpha, Size newImgSize,
- CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags );
-
-/** @brief Returns the new camera intrinsic matrix based on the free scaling parameter.
-
-@param cameraMatrix Input camera intrinsic matrix.
-@param distCoeffs Input vector of distortion coefficients
-\f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are
-assumed.
-@param imageSize Original image size.
-@param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
-valid) and 1 (when all the source image pixels are retained in the undistorted image). See
-#stereoRectify for details.
-@param newImgSize Image size after rectification. By default, it is set to imageSize .
-@param validPixROI Optional output rectangle that outlines all-good-pixels region in the
-undistorted image. See roi1, roi2 description in #stereoRectify .
-@param centerPrincipalPoint Optional flag that indicates whether in the new camera intrinsic matrix the
-principal point should be at the image center or not. By default, the principal point is chosen to
-best fit a subset of the source image (determined by alpha) to the corrected image.
-@return new_camera_matrix Output new camera intrinsic matrix.
-
-The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
-By varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original
-image pixels if there is valuable information in the corners (alpha=1), or get something in between.
-When alpha\>0 , the undistorted result is likely to have some black pixels corresponding to
-"virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
-coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
-#initUndistortRectifyMap to produce the maps for #remap .
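-
-A minimal sketch of the typical undistortion pipeline (cameraMatrix and distCoeffs are assumed to
-come from #calibrateCamera, and img is the distorted input image):
-@code
-    Rect validRoi;
-    Mat newCameraMatrix = getOptimalNewCameraMatrix(cameraMatrix, distCoeffs,
-                                                    img.size(), 1.0, img.size(), &validRoi);
-    Mat map1, map2, undistorted;
-    initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(), newCameraMatrix,
-                            img.size(), CV_16SC2, map1, map2);
-    remap(img, undistorted, map1, map2, INTER_LINEAR);
-    Mat cropped = undistorted(validRoi); // optionally crop to the all-good-pixels region
-@endcode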
- */
-CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs,
- Size imageSize, double alpha, Size newImgSize = Size(),
- CV_OUT Rect* validPixROI = 0,
- bool centerPrincipalPoint = false);
-
-/** @brief Computes Hand-Eye calibration: \f$_{}^{g}\textrm{T}_c\f$
-
-@param[in] R_gripper2base Rotation part extracted from the homogeneous matrix that transforms a point
-expressed in the gripper frame to the robot base frame (\f$_{}^{b}\textrm{T}_g\f$).
-This is a vector (`vector<Mat>`) that contains the rotations as `(3x3)` rotation matrices or
-`(3x1)` rotation vectors, for all the transformations from gripper frame to robot base frame.
-@param[in] t_gripper2base Translation part extracted from the homogeneous matrix that transforms a point
-expressed in the gripper frame to the robot base frame (\f$_{}^{b}\textrm{T}_g\f$).
-This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
-from gripper frame to robot base frame.
-@param[in] R_target2cam Rotation part extracted from the homogeneous matrix that transforms a point
-expressed in the target frame to the camera frame (\f$_{}^{c}\textrm{T}_t\f$).
-This is a vector (`vector<Mat>`) that contains the rotations as `(3x3)` rotation matrices or
-`(3x1)` rotation vectors, for all the transformations from calibration target frame to camera frame.
-@param[in] t_target2cam Translation part extracted from the homogeneous matrix that transforms a point
-expressed in the target frame to the camera frame (\f$_{}^{c}\textrm{T}_t\f$).
-This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
-from calibration target frame to camera frame.
-@param[out] R_cam2gripper Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
-expressed in the camera frame to the gripper frame (\f$_{}^{g}\textrm{T}_c\f$).
-@param[out] t_cam2gripper Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
-expressed in the camera frame to the gripper frame (\f$_{}^{g}\textrm{T}_c\f$).
-@param[in] method One of the implemented Hand-Eye calibration methods, see cv::HandEyeCalibrationMethod
-
-The function performs the Hand-Eye calibration using various methods. One approach consists in estimating first
-the rotation and then the translation (separable solutions); the following methods are implemented:
- - R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/Eye Calibration \cite Tsai89
- - F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group \cite Park94
- - R. Horaud, F. Dornaika Hand-Eye Calibration \cite Horaud95
-
-Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
-with the following implemented methods:
- - N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration \cite Andreff99
- - K. Daniilidis Hand-Eye Calibration Using Dual Quaternions \cite Daniilidis98
-
-The following picture describes the Hand-Eye calibration problem, where the transformation between a
-camera ("eye") and the robot gripper ("hand") on which it is mounted has to be estimated. This
-configuration is called eye-in-hand.
-
-The eye-to-hand configuration consists of a static camera observing a calibration pattern mounted on the robot
-end-effector. The transformation from the camera to the robot base frame can then be estimated by passing
-the suitable transformations to the function; see below.
-
-![](pics/hand-eye_figure.png)
-
-The calibration procedure is the following:
- - a static calibration pattern is used to estimate the transformation between the target frame
- and the camera frame
- - the robot gripper is moved in order to acquire several poses
- - for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
- instance the robot kinematics
-\f[
- \begin{bmatrix}
- X_b\\
- Y_b\\
- Z_b\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{b}\textrm{R}_g & _{}^{b}\textrm{t}_g \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_g\\
- Y_g\\
- Z_g\\
- 1
- \end{bmatrix}
-\f]
- - for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using
- for instance a pose estimation method (PnP) from 2D-3D point correspondences
-\f[
- \begin{bmatrix}
- X_c\\
- Y_c\\
- Z_c\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{c}\textrm{R}_t & _{}^{c}\textrm{t}_t \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_t\\
- Y_t\\
- Z_t\\
- 1
- \end{bmatrix}
-\f]
-
-The Hand-Eye calibration procedure returns the following homogeneous transformation
-\f[
- \begin{bmatrix}
- X_g\\
- Y_g\\
- Z_g\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{g}\textrm{R}_c & _{}^{g}\textrm{t}_c \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_c\\
- Y_c\\
- Z_c\\
- 1
- \end{bmatrix}
-\f]
-
-This problem is also known as solving the \f$\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}\f$ equation:
- - for an eye-in-hand configuration
-\f[
- \begin{align*}
- ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
- \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
-
- (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &=
- \hspace{0.1em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
-
- \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
- \end{align*}
-\f]
-
- - for an eye-to-hand configuration
-\f[
- \begin{align*}
- ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
- \hspace{0.1em} ^{g}{\textrm{T}_b}^{(2)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
-
- (^{g}{\textrm{T}_b}^{(2)})^{-1} \hspace{0.2em} ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c &=
- \hspace{0.1em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
-
- \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
- \end{align*}
-\f]
-
-\note
-Additional information can be found on this [website](http://campar.in.tum.de/Chair/HandEyeCalibration).
-\note
-A minimum of 2 motions with non-parallel rotation axes is necessary to determine the hand-eye transformation.
-So at least 3 different poses are required, but it is strongly recommended to use many more poses.
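-
-A minimal eye-in-hand sketch (the per-pose rotations/translations are assumed to be collected as
-described above, e.g. from the robot kinematics and #solvePnP):
-@code
-    std::vector<Mat> R_gripper2base, t_gripper2base; // robot poses, one entry per station
-    std::vector<Mat> R_target2cam,  t_target2cam;    // pattern poses seen by the camera
-    // ... fill the four vectors with at least 3 (better: many more) poses ...
-
-    Mat R_cam2gripper, t_cam2gripper;
-    calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam,
-                     R_cam2gripper, t_cam2gripper, CALIB_HAND_EYE_TSAI);
-@endcode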
-
- */
-CV_EXPORTS_W void calibrateHandEye( InputArrayOfArrays R_gripper2base, InputArrayOfArrays t_gripper2base,
- InputArrayOfArrays R_target2cam, InputArrayOfArrays t_target2cam,
- OutputArray R_cam2gripper, OutputArray t_cam2gripper,
- HandEyeCalibrationMethod method=CALIB_HAND_EYE_TSAI );
-
-/** @brief Computes Robot-World/Hand-Eye calibration: \f$_{}^{w}\textrm{T}_b\f$ and \f$_{}^{c}\textrm{T}_g\f$
-
-@param[in] R_world2cam Rotation part extracted from the homogeneous matrix that transforms a point
-expressed in the world frame to the camera frame (\f$_{}^{c}\textrm{T}_w\f$).
-This is a vector (`vector<Mat>`) that contains the rotations as `(3x3)` rotation matrices or
-`(3x1)` rotation vectors, for all the transformations from world frame to the camera frame.
-@param[in] t_world2cam Translation part extracted from the homogeneous matrix that transforms a point
-expressed in the world frame to the camera frame (\f$_{}^{c}\textrm{T}_w\f$).
-This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
-from world frame to the camera frame.
-@param[in] R_base2gripper Rotation part extracted from the homogeneous matrix that transforms a point
-expressed in the robot base frame to the gripper frame (\f$_{}^{g}\textrm{T}_b\f$).
-This is a vector (`vector<Mat>`) that contains the rotations as `(3x3)` rotation matrices or
-`(3x1)` rotation vectors, for all the transformations from robot base frame to the gripper frame.
-@param[in] t_base2gripper Translation part extracted from the homogeneous matrix that transforms a point
-expressed in the robot base frame to the gripper frame (\f$_{}^{g}\textrm{T}_b\f$).
-This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
-from robot base frame to the gripper frame.
-@param[out] R_base2world Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
-expressed in the robot base frame to the world frame (\f$_{}^{w}\textrm{T}_b\f$).
-@param[out] t_base2world Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
-expressed in the robot base frame to the world frame (\f$_{}^{w}\textrm{T}_b\f$).
-@param[out] R_gripper2cam Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
-expressed in the gripper frame to the camera frame (\f$_{}^{c}\textrm{T}_g\f$).
-@param[out] t_gripper2cam Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
-expressed in the gripper frame to the camera frame (\f$_{}^{c}\textrm{T}_g\f$).
-@param[in] method One of the implemented Robot-World/Hand-Eye calibration methods, see cv::RobotWorldHandEyeCalibrationMethod
-
-The function performs the Robot-World/Hand-Eye calibration using various methods. One approach consists in
-estimating first the rotation and then the translation (separable solutions):
- - M. Shah, Solving the robot-world/hand-eye calibration problem using the kronecker product \cite Shah2013SolvingTR
-
-Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
-with the following implemented method:
- - A. Li, L. Wang, and D. Wu, Simultaneous robot-world and hand-eye calibration using dual-quaternions and kronecker product \cite Li2010SimultaneousRA
-
-The following picture describes the Robot-World/Hand-Eye calibration problem where the transformations between a robot and a world frame
-and between a robot gripper ("hand") and a camera ("eye") mounted at the robot end-effector have to be estimated.
-
-![](pics/robot-world_hand-eye_figure.png)
-
-The calibration procedure is the following:
- - a static calibration pattern is used to estimate the transformation between the target frame
- and the camera frame
- - the robot gripper is moved in order to acquire several poses
- - for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
- instance the robot kinematics
-\f[
- \begin{bmatrix}
- X_g\\
- Y_g\\
- Z_g\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{g}\textrm{R}_b & _{}^{g}\textrm{t}_b \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_b\\
- Y_b\\
- Z_b\\
- 1
- \end{bmatrix}
-\f]
- - for each pose, the homogeneous transformation between the calibration target frame (the world frame) and the camera frame is recorded using
- for instance a pose estimation method (PnP) from 2D-3D point correspondences
-\f[
- \begin{bmatrix}
- X_c\\
- Y_c\\
- Z_c\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{c}\textrm{R}_w & _{}^{c}\textrm{t}_w \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_w\\
- Y_w\\
- Z_w\\
- 1
- \end{bmatrix}
-\f]
-
-The Robot-World/Hand-Eye calibration procedure returns the following homogeneous transformations
-\f[
- \begin{bmatrix}
- X_w\\
- Y_w\\
- Z_w\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{w}\textrm{R}_b & _{}^{w}\textrm{t}_b \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_b\\
- Y_b\\
- Z_b\\
- 1
- \end{bmatrix}
-\f]
-\f[
- \begin{bmatrix}
- X_c\\
- Y_c\\
- Z_c\\
- 1
- \end{bmatrix}
- =
- \begin{bmatrix}
- _{}^{c}\textrm{R}_g & _{}^{c}\textrm{t}_g \\
- 0_{1 \times 3} & 1
- \end{bmatrix}
- \begin{bmatrix}
- X_g\\
- Y_g\\
- Z_g\\
- 1
- \end{bmatrix}
-\f]
-
-This problem is also known as solving the \f$\mathbf{A}\mathbf{X}=\mathbf{Z}\mathbf{B}\f$ equation, with:
- - \f$\mathbf{A} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_w\f$
- - \f$\mathbf{X} \Leftrightarrow \hspace{0.1em} _{}^{w}\textrm{T}_b\f$
- - \f$\mathbf{Z} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_g\f$
- - \f$\mathbf{B} \Leftrightarrow \hspace{0.1em} _{}^{g}\textrm{T}_b\f$
-
-\note
-At least 3 measurements are required (the size of the input vectors must be greater than or equal to 3).
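-
-A minimal sketch, analogous to #calibrateHandEye (the input vectors are assumed to be filled with
-per-pose data as described above):
-@code
-    std::vector<Mat> R_world2cam, t_world2cam;       // target/world pose per station (e.g. PnP)
-    std::vector<Mat> R_base2gripper, t_base2gripper; // inverse robot pose per station
-    // ... fill the four vectors with at least 3 poses ...
-
-    Mat R_base2world, t_base2world, R_gripper2cam, t_gripper2cam;
-    calibrateRobotWorldHandEye(R_world2cam, t_world2cam, R_base2gripper, t_base2gripper,
-                               R_base2world, t_base2world, R_gripper2cam, t_gripper2cam,
-                               CALIB_ROBOT_WORLD_HAND_EYE_SHAH);
-@endcode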
-
- */
-CV_EXPORTS_W void calibrateRobotWorldHandEye( InputArrayOfArrays R_world2cam, InputArrayOfArrays t_world2cam,
- InputArrayOfArrays R_base2gripper, InputArrayOfArrays t_base2gripper,
- OutputArray R_base2world, OutputArray t_base2world,
- OutputArray R_gripper2cam, OutputArray t_gripper2cam,
- RobotWorldHandEyeCalibrationMethod method=CALIB_ROBOT_WORLD_HAND_EYE_SHAH );
-
-/** @brief Converts points from Euclidean to homogeneous space.
-
-@param src Input vector of N-dimensional points.
-@param dst Output vector of N+1-dimensional points.
-
-The function converts points from Euclidean to homogeneous space by appending 1's to the tuple of
-point coordinates. That is, each point (x1, x2, ..., xn) is converted to (x1, x2, ..., xn, 1).
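-
-For example, a tiny sketch (the inverse operation is #convertPointsFromHomogeneous):
-@code
-    std::vector<Point2f> pts2d = { Point2f(1, 2), Point2f(3, 4) };
-    Mat pts3d; // receives (1,2,1) and (3,4,1)
-    convertPointsToHomogeneous(pts2d, pts3d);
-@endcode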
- */
-CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst );
-
-/** @brief Converts points from homogeneous to Euclidean space.
-
-@param src Input vector of N-dimensional points.
-@param dst Output vector of N-1-dimensional points.
-
-The function converts points from homogeneous to Euclidean space using perspective projection. That is,
-each point (x1, x2, ... x(n-1), xn) is converted to (x1/xn, x2/xn, ..., x(n-1)/xn). When xn=0, the
-output point coordinates will be (0,0,0,...).
- */
-CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst );
-
-/** @brief Converts points to/from homogeneous coordinates.
-
-@param src Input array or vector of 2D, 3D, or 4D points.
-@param dst Output vector of 2D, 3D, or 4D points.
-
-The function converts 2D or 3D points from/to homogeneous coordinates by calling either
-#convertPointsToHomogeneous or #convertPointsFromHomogeneous.
-
-@note The function is obsolete. Use one of the previous two functions instead.
- */
-CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );
-
-/** @brief Calculates a fundamental matrix from the corresponding points in two images.
-
-@param points1 Array of N points from the first image. The point coordinates should be
-floating-point (single or double precision).
-@param points2 Array of the second image points of the same size and format as points1 .
-@param method Method for computing a fundamental matrix.
-- @ref FM_7POINT for a 7-point algorithm. \f$N = 7\f$
-- @ref FM_8POINT for an 8-point algorithm. \f$N \ge 8\f$
-- @ref FM_RANSAC for the RANSAC algorithm. \f$N \ge 8\f$
-- @ref FM_LMEDS for the LMedS algorithm. \f$N \ge 8\f$
-@param ransacReprojThreshold Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
-line in pixels, beyond which the point is considered an outlier and is not used for computing the
-final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
-point localization, image resolution, and the image noise.
-@param confidence Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
-of confidence (probability) that the estimated matrix is correct.
-@param[out] mask Optional output mask.
-@param maxIters The maximum number of robust method iterations.
-
-The epipolar geometry is described by the following equation:
-
-\f[[p_2; 1]^T F [p_1; 1] = 0\f]
-
-where \f$F\f$ is a fundamental matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the
-second images, respectively.
-
-The function calculates the fundamental matrix using one of four methods listed above and returns
-the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point
-algorithm, the function may return up to 3 solutions ( \f$9 \times 3\f$ matrix that stores all 3
-matrices sequentially).
-
-The calculated fundamental matrix may be passed further to #computeCorrespondEpilines that finds the
-epipolar lines corresponding to the specified points. It can also be passed to
-#stereoRectifyUncalibrated to compute the rectification transformation:
-@code
- // Example. Estimation of fundamental matrix using the RANSAC algorithm
- int point_count = 100;
- vector<Point2f> points1(point_count);
- vector<Point2f> points2(point_count);
-
- // initialize the points here ...
- for( int i = 0; i < point_count; i++ )
- {
- points1[i] = ...;
- points2[i] = ...;
- }
-
- Mat fundamental_matrix =
- findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
-@endcode
- */
-CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
- int method, double ransacReprojThreshold, double confidence,
- int maxIters, OutputArray mask = noArray() );
-
-/** @overload */
-CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
- int method = FM_RANSAC,
- double ransacReprojThreshold = 3., double confidence = 0.99,
- OutputArray mask = noArray() );
-
-/** @overload */
-CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
- OutputArray mask, int method = FM_RANSAC,
- double ransacReprojThreshold = 3., double confidence = 0.99 );
-
-
-CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
- OutputArray mask, const UsacParams &params);
-
-/** @brief Calculates an essential matrix from the corresponding points in two images.
-
-@param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
-be floating-point (single or double precision).
-@param points2 Array of the second image points of the same size and format as points1 .
-@param cameraMatrix Camera intrinsic matrix \f$\cameramatrix{A}\f$ .
-Note that this function assumes that points1 and points2 are feature points from cameras with the
-same camera intrinsic matrix. If this assumption does not hold for your use case, use
-#undistortPoints with `P = cv::noArray()` for both cameras to transform image points
-to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
-passing these coordinates, pass the identity matrix for this parameter.
-@param method Method for computing an essential matrix.
-- @ref RANSAC for the RANSAC algorithm.
-- @ref LMEDS for the LMedS algorithm.
-@param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
-confidence (probability) that the estimated matrix is correct.
-@param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
-line in pixels, beyond which the point is considered an outlier and is not used for computing the
-final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
-point localization, image resolution, and the image noise.
-@param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
-for the other points. The array is computed only in the RANSAC and LMedS methods.
-@param maxIters The maximum number of robust method iterations.
-
-This function estimates the essential matrix based on the five-point algorithm solver in @cite Nister03 .
-@cite SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
-
-\f[[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\f]
-
-where \f$E\f$ is an essential matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the
-second images, respectively. The result of this function may be passed further to
-#decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
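-
-A minimal sketch of the normalization mentioned above for two cameras with different intrinsics
-(K1, K2, distCoeffs1 and distCoeffs2 are assumed to be known from a prior calibration):
-@code
-    std::vector<Point2f> norm1, norm2;
-    undistortPoints(points1, norm1, K1, distCoeffs1); // P left empty -> normalized coordinates
-    undistortPoints(points2, norm2, K2, distCoeffs2);
-
-    // normalized coordinates are valid for the identity camera matrix; note that the RANSAC
-    // threshold is then expressed in normalized units rather than pixels
-    Mat E = findEssentialMat(norm1, norm2, Mat::eye(3, 3, CV_64F), RANSAC, 0.999, 0.001);
-@endcode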
- */
-CV_EXPORTS_W
-Mat findEssentialMat(
- InputArray points1, InputArray points2,
- InputArray cameraMatrix, int method = RANSAC,
- double prob = 0.999, double threshold = 1.0,
- int maxIters = 1000, OutputArray mask = noArray()
-);
-
-/** @overload */
-CV_EXPORTS
-Mat findEssentialMat(
- InputArray points1, InputArray points2,
- InputArray cameraMatrix, int method,
- double prob, double threshold,
- OutputArray mask
-); // TODO remove from OpenCV 5.0
-
-/** @overload
-@param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
-be floating-point (single or double precision).
-@param points2 Array of the second image points of the same size and format as points1 .
-@param focal Focal length of the camera. Note that this function assumes that points1 and points2
-are feature points from cameras with the same focal length and principal point.
-@param pp Principal point of the camera.
-@param method Method for computing a fundamental matrix.
-- @ref RANSAC for the RANSAC algorithm.
-- @ref LMEDS for the LMedS algorithm.
-@param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
-line in pixels, beyond which the point is considered an outlier and is not used for computing the
-final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
-point localization, image resolution, and the image noise.
-@param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
-confidence (probability) that the estimated matrix is correct.
-@param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
-for the other points. The array is computed only in the RANSAC and LMedS methods.
-@param maxIters The maximum number of robust method iterations.
-
-This function differs from the one above in that it computes the camera intrinsic matrix from the
-focal length and principal point:
-
-\f[A =
-\begin{bmatrix}
-f & 0 & x_{pp} \\
-0 & f & y_{pp} \\
-0 & 0 & 1
-\end{bmatrix}\f]
- */
-CV_EXPORTS_W
-Mat findEssentialMat(
- InputArray points1, InputArray points2,
- double focal = 1.0, Point2d pp = Point2d(0, 0),
- int method = RANSAC, double prob = 0.999,
- double threshold = 1.0, int maxIters = 1000,
- OutputArray mask = noArray()
-);
-
-/** @overload */
-CV_EXPORTS
-Mat findEssentialMat(
- InputArray points1, InputArray points2,
- double focal, Point2d pp,
- int method, double prob,
- double threshold, OutputArray mask
-); // TODO remove from OpenCV 5.0
-
-/** @brief Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
-
-@param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
-be floating-point (single or double precision).
-@param points2 Array of the second image points of the same size and format as points1 .
-@param cameraMatrix1 Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
-Note that this function assumes that points1 and points2 are feature points from cameras with the
-same camera matrix. If this assumption does not hold for your use case, use
-#undistortPoints with `P = cv::noArray()` for both cameras to transform image points
-to normalized image coordinates, which are valid for the identity camera matrix. When
-passing these coordinates, pass the identity matrix for this parameter.
-@param cameraMatrix2 Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
-Note that this function assumes that points1 and points2 are feature points from cameras with the
-same camera matrix. If this assumption does not hold for your use case, use
-#undistortPoints with `P = cv::noArray()` for both cameras to transform image points
-to normalized image coordinates, which are valid for the identity camera matrix. When
-passing these coordinates, pass the identity matrix for this parameter.
-@param distCoeffs1 Input vector of distortion coefficients
-\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
-of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
-@param distCoeffs2 Input vector of distortion coefficients
-\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
-of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
-@param method Method for computing an essential matrix.
-- @ref RANSAC for the RANSAC algorithm.
-- @ref LMEDS for the LMedS algorithm.
-@param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
-confidence (probability) that the estimated matrix is correct.
-@param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
-line in pixels, beyond which the point is considered an outlier and is not used for computing the
-final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
-point localization, image resolution, and the image noise.
-@param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
-for the other points. The array is computed only in the RANSAC and LMedS methods.
-
-This function estimates the essential matrix based on the five-point algorithm solver in @cite Nister03 .
-@cite SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
-
-\f[[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\f]
-
-where \f$E\f$ is an essential matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the
-second images, respectively. The result of this function may be passed further to
-#decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
- */
-CV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2,
- InputArray cameraMatrix1, InputArray distCoeffs1,
- InputArray cameraMatrix2, InputArray distCoeffs2,
- int method = RANSAC,
- double prob = 0.999, double threshold = 1.0,
- OutputArray mask = noArray() );
-
-
-CV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2,
- InputArray cameraMatrix1, InputArray cameraMatrix2,
- InputArray dist_coeff1, InputArray dist_coeff2, OutputArray mask,
- const UsacParams &params);
-
-/** @brief Decomposes an essential matrix into possible rotations and translation.
-
-@param E The input essential matrix.
-@param R1 One possible rotation matrix.
-@param R2 Another possible rotation matrix.
-@param t One possible translation.
-
-This function decomposes the essential matrix E using SVD decomposition @cite HartleyZ00. In
-general, four possible poses exist for the decomposition of E. They are \f$[R_1, t]\f$,
-\f$[R_1, -t]\f$, \f$[R_2, t]\f$, \f$[R_2, -t]\f$.
-
-If E gives the epipolar constraint \f$[p_2; 1]^T A^{-T} E A^{-1} [p_1; 1] = 0\f$ between the image
-points \f$p_1\f$ in the first image and \f$p_2\f$ in second image, then any of the tuples
-\f$[R_1, t]\f$, \f$[R_1, -t]\f$, \f$[R_2, t]\f$, \f$[R_2, -t]\f$ is a change of basis from the first
-camera's coordinate system to the second camera's coordinate system. However, by decomposing E, one
-can only get the direction of the translation. For this reason, the translation t is returned with
-unit length.
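-
-A minimal sketch (E is assumed to come from #findEssentialMat; in practice #recoverPose selects
-the valid pose among the four hypotheses automatically):
-@code
-    Mat R1, R2, t;
-    decomposeEssentialMat(E, R1, R2, t);
-    // the four pose hypotheses are: [R1, t], [R1, -t], [R2, t], [R2, -t]
-@endcode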
- */
-CV_EXPORTS_W void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t );
-
-/** @brief Recovers the relative camera rotation and the translation from corresponding points in two images
-from two different cameras, using the chirality check. Returns the number of inliers that pass the check.
-
-@param points1 Array of N 2D points from the first image. The point coordinates should be
-floating-point (single or double precision).
-@param points2 Array of the second image points of the same size and format as points1 .
-@param cameraMatrix1 Input/output camera matrix for the first camera, the same as in
-@ref calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
-@param distCoeffs1 Input/output vector of distortion coefficients, the same as in
-@ref calibrateCamera.
-@param cameraMatrix2 Input/output camera matrix for the second camera, the same as in
-@ref calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
-@param distCoeffs2 Input/output vector of distortion coefficients, the same as in
-@ref calibrateCamera.
-@param E The output essential matrix.
-@param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
-that performs a change of basis from the first camera's coordinate system to the second camera's
-coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
-described below.
-@param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and
-therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
-length.
-@param method Method for computing an essential matrix.
-- @ref RANSAC for the RANSAC algorithm.
-- @ref LMEDS for the LMedS algorithm.
-@param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
-confidence (probability) that the estimated matrix is correct.
-@param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
-line in pixels, beyond which the point is considered an outlier and is not used for computing the
-final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
-point localization, image resolution, and the image noise.
-@param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
-inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
-recover pose. In the output mask, only the inliers that pass the chirality check are kept.
-
-This function decomposes an essential matrix using @ref decomposeEssentialMat and then verifies
-possible pose hypotheses by doing the chirality check. The chirality check means that the
-triangulated 3D points should have positive depth. Some details can be found in @cite Nister03.
-
-This function can be used to process the output E and mask from @ref findEssentialMat. In this
-scenario, points1 and points2 are the same input as for #findEssentialMat :
-@code
- // Example. Estimation of the essential matrix and relative pose using the RANSAC algorithm
- int point_count = 100;
- vector<Point2f> points1(point_count);
- vector<Point2f> points2(point_count);
-
- // initialize the points here ...
- for( int i = 0; i < point_count; i++ )
- {
- points1[i] = ...;
- points2[i] = ...;
- }
-
- // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration.
- Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2;
-
- // Output: Essential matrix, relative rotation and relative translation.
- Mat E, R, t, mask;
-
- recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask);
-@endcode
- */
-CV_EXPORTS_W int recoverPose( InputArray points1, InputArray points2,
- InputArray cameraMatrix1, InputArray distCoeffs1,
- InputArray cameraMatrix2, InputArray distCoeffs2,
- OutputArray E, OutputArray R, OutputArray t,
- int method = cv::RANSAC, double prob = 0.999, double threshold = 1.0,
- InputOutputArray mask = noArray());
-
-/** @brief Recovers the relative camera rotation and the translation from an estimated essential
-matrix and the corresponding points in two images, using chirality check. Returns the number of
-inliers that pass the check.
-
-@param E The input essential matrix.
-@param points1 Array of N 2D points from the first image. The point coordinates should be
-floating-point (single or double precision).
-@param points2 Array of the second image points of the same size and format as points1 .
-@param cameraMatrix Camera intrinsic matrix \f$\cameramatrix{A}\f$ .
-Note that this function assumes that points1 and points2 are feature points from cameras with the
-same camera intrinsic matrix.
-@param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
-that performs a change of basis from the first camera's coordinate system to the second camera's
-coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
-described below.
-@param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and
-therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
-length.
-@param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
-inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
-recover pose. In the output mask, only the inliers that pass the chirality check are kept.
-
-This function decomposes an essential matrix using @ref decomposeEssentialMat and then verifies
-possible pose hypotheses by doing the chirality check. The chirality check means that the
-triangulated 3D points should have positive depth. Some details can be found in @cite Nister03.
-
-This function can be used to process the output E and mask from @ref findEssentialMat. In this
-scenario, points1 and points2 are the same input as for #findEssentialMat :
-@code
- // Example. Estimation of the essential matrix and relative pose using the RANSAC algorithm
- int point_count = 100;
- vector<Point2f> points1(point_count);
- vector<Point2f> points2(point_count);
-
- // initialize the points here ...
- for( int i = 0; i < point_count; i++ )
- {
- points1[i] = ...;
- points2[i] = ...;
- }
-
- // camera matrix with both focal lengths = 1, and principal point = (0, 0)
- Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
-
- Mat E, R, t, mask;
-
- E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
- recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
-@endcode
- */
-CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2,
- InputArray cameraMatrix, OutputArray R, OutputArray t,
- InputOutputArray mask = noArray() );
-
-/** @overload
-@param E The input essential matrix.
-@param points1 Array of N 2D points from the first image. The point coordinates should be
-floating-point (single or double precision).
-@param points2 Array of the second image points of the same size and format as points1 .
-@param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
-that performs a change of basis from the first camera's coordinate system to the second camera's
-coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
-description below.
-@param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and
-therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
-length.
-@param focal Focal length of the camera. Note that this function assumes that points1 and points2
-are feature points from cameras with the same focal length and principal point.
-@param pp Principal point of the camera.
-@param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
-inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
-recover pose. In the output mask, only the inliers that pass the chirality check are kept.
-
-This function differs from the one above in that it computes the camera intrinsic matrix from the
-focal length and principal point:
-
-\f[A =
-\begin{bmatrix}
-f & 0 & x_{pp} \\
-0 & f & y_{pp} \\
-0 & 0 & 1
-\end{bmatrix}\f]
- */
-CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2,
- OutputArray R, OutputArray t,
- double focal = 1.0, Point2d pp = Point2d(0, 0),
- InputOutputArray mask = noArray() );
-
-/** @overload
-@param E The input essential matrix.
-@param points1 Array of N 2D points from the first image. The point coordinates should be
-floating-point (single or double precision).
-@param points2 Array of the second image points of the same size and format as points1.
-@param cameraMatrix Camera intrinsic matrix \f$\cameramatrix{A}\f$ .
-Note that this function assumes that points1 and points2 are feature points from cameras with the
-same camera intrinsic matrix.
-@param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
-that performs a change of basis from the first camera's coordinate system to the second camera's
-coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
-description below.
-@param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and
-therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
-length.
-@param distanceThresh Threshold distance which is used to filter out far away points (i.e. infinite
-points).
-@param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
-inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
-recover pose. In the output mask, only the inliers that pass the chirality check are kept.
-@param triangulatedPoints 3D points which were reconstructed by triangulation.
-
-This function differs from the one above in that it outputs the triangulated 3D points that are used
-for the chirality check.
- */
-CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2,
- InputArray cameraMatrix, OutputArray R, OutputArray t, double distanceThresh, InputOutputArray mask = noArray(),
- OutputArray triangulatedPoints = noArray());
-
-/** @brief For points in an image of a stereo pair, computes the corresponding epilines in the other image.
-
-@param points Input points. \f$N \times 1\f$ or \f$1 \times N\f$ matrix of type CV_32FC2 or
-vector\<Point2f\>.
-@param whichImage Index of the image (1 or 2) that contains the points .
-@param F Fundamental matrix that can be estimated using #findFundamentalMat or #stereoRectify .
-@param lines Output vector of the epipolar lines corresponding to the points in the other image.
-Each line \f$ax + by + c=0\f$ is encoded by 3 numbers \f$(a, b, c)\f$ .
-
-For every point in one of the two images of a stereo pair, the function finds the equation of the
-corresponding epipolar line in the other image.
-
-From the fundamental matrix definition (see #findFundamentalMat ), line \f$l^{(2)}_i\f$ in the second
-image for the point \f$p^{(1)}_i\f$ in the first image (when whichImage=1 ) is computed as:
-
-\f[l^{(2)}_i = F p^{(1)}_i\f]
-
-And vice versa, when whichImage=2, \f$l^{(1)}_i\f$ is computed from \f$p^{(2)}_i\f$ as:
-
-\f[l^{(1)}_i = F^T p^{(2)}_i\f]
-
-Line coefficients are defined up to a scale. They are normalized so that \f$a_i^2+b_i^2=1\f$ .
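-
-A minimal sketch (points1 are points in the first image, and F is assumed to come from
-#findFundamentalMat):
-@code
-    std::vector<Vec3f> lines2; // epipolar lines in the second image, one per input point
-    computeCorrespondEpilines(points1, 1, F, lines2);
-    // each line (a, b, c) satisfies a*x + b*y + c = 0 for the matching point (x, y) in image 2
-@endcode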
- */
-CV_EXPORTS_W void computeCorrespondEpilines( InputArray points, int whichImage,
- InputArray F, OutputArray lines );
-
-/** @brief This function reconstructs 3-dimensional points (in homogeneous coordinates) by using
-their observations with a stereo camera.
-
-@param projMatr1 3x4 projection matrix of the first camera, i.e. this matrix projects 3D points
-given in the world's coordinate system into the first image.
-@param projMatr2 3x4 projection matrix of the second camera, i.e. this matrix projects 3D points
-given in the world's coordinate system into the second image.
-@param projPoints1 2xN array of feature points in the first image. In the case of the C++ version,
-it can be also a vector of feature points or a two-channel matrix of size 1xN or Nx1.
-@param projPoints2 2xN array of corresponding points in the second image. In the case of the C++
-version, it can be also a vector of feature points or a two-channel matrix of size 1xN or Nx1.
-@param points4D 4xN array of reconstructed points in homogeneous coordinates. These points are
-returned in the world's coordinate system.
-
-@note
- Keep in mind that all input data should be of float type in order for this function to work.
-
-@note
- If the projection matrices from @ref stereoRectify are used, then the returned points are
- represented in the first camera's rectified coordinate system.
-
-@sa
- reprojectImageTo3D
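-
-A minimal sketch (P1 and P2 are assumed to be the 3x4 projection matrices from #stereoRectify, or
-built as K [R|t]; pts1/pts2 are matched points of float type):
-@code
-    Mat points4D;
-    triangulatePoints(P1, P2, pts1, pts2, points4D); // 4xN, homogeneous
-
-    // convert to Euclidean 3D points
-    Mat points3D;
-    convertPointsFromHomogeneous(points4D.t(), points3D);
-@endcode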
- */
-CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,
- InputArray projPoints1, InputArray projPoints2,
- OutputArray points4D );
-
-/** @brief Refines coordinates of corresponding points.
-
-@param F 3x3 fundamental matrix.
-@param points1 1xN array containing the first set of points.
-@param points2 1xN array containing the second set of points.
-@param newPoints1 The optimized points1.
-@param newPoints2 The optimized points2.
-
-The function implements the Optimal Triangulation Method (see Multiple View Geometry @cite HartleyZ00 for details).
-For each given point correspondence points1[i] \<-\> points2[i], and a fundamental matrix F, it
-computes the corrected correspondences newPoints1[i] \<-\> newPoints2[i] that minimize the geometric
-error \f$d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2\f$ (where \f$d(a,b)\f$ is the
-geometric distance between points \f$a\f$ and \f$b\f$ ) subject to the epipolar constraint
-\f$newPoints2^T \cdot F \cdot newPoints1 = 0\f$ .
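-
-A minimal sketch (F is assumed to come from #findFundamentalMat; points1/points2 are 1xN
-two-channel arrays of corresponding points):
-@code
-    Mat newPoints1, newPoints2;
-    correctMatches(F, points1, points2, newPoints1, newPoints2);
-@endcode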
- */
-CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2,
- OutputArray newPoints1, OutputArray newPoints2 );
-
-/** @brief Filters off small noise blobs (speckles) in the disparity map
-
-@param img The input 16-bit signed disparity image
-@param newVal The disparity value used to paint-off the speckles
-@param maxSpeckleSize The maximum size of a blob for it to be considered a speckle. Larger blobs are not
-affected by the algorithm
-@param maxDiff Maximum difference between neighbor disparity pixels to put them into the same
-blob. Note that since StereoBM, StereoSGBM and possibly other algorithms return a fixed-point
-disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
-account when specifying this parameter value.
-@param buf The optional temporary buffer to avoid memory allocation within the function.
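-
-A minimal usage sketch (illustrative; the chosen values are assumptions):
-@code{.cpp}
-// disp: CV_16S disparity map from StereoBM/StereoSGBM, values scaled by 16,
-// so a 1-pixel tolerance between neighbors becomes maxDiff = 16
-cv::filterSpeckles(disp, /*newVal=*/0, /*maxSpeckleSize=*/200, /*maxDiff=*/16);
-@endcode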
- */
-CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal,
- int maxSpeckleSize, double maxDiff,
- InputOutputArray buf = noArray() );
-
-//! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by #stereoRectify)
-CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2,
- int minDisparity, int numberOfDisparities,
- int blockSize );
-
-//! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm
-CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost,
- int minDisparity, int numberOfDisparities,
- int disp12MaxDisp = 1 );
-
-/** @brief Reprojects a disparity image to 3D space.
-
-@param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
-floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
-fractional bits. If the disparity is 16-bit signed format, as computed by @ref StereoBM or
-@ref StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
-being used here.
-@param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
-_3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
-uses Q obtained by @ref stereoRectify, then the returned points are represented in the first
-camera's rectified coordinate system.
-@param Q \f$4 \times 4\f$ perspective transformation matrix that can be obtained with
-@ref stereoRectify.
-@param handleMissingValues Indicates whether the function should handle missing values (i.e.
-points where the disparity was not computed). If handleMissingValues=true, then pixels with the
-minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
-to 3D points with a very large Z value (currently set to 10000).
-@param ddepth The optional output array depth. If it is -1, the output image will have CV_32F
-depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
-
-The function transforms a single-channel disparity map to a 3-channel image representing a 3D
-surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
-computes:
-
-\f[\begin{bmatrix}
-X \\
-Y \\
-Z \\
-W
-\end{bmatrix} = Q \begin{bmatrix}
-x \\
-y \\
-\texttt{disparity} (x,y) \\
-1
-\end{bmatrix}.\f]
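-
-A minimal usage sketch (illustrative; follows the divide-by-16 advice above):
-@code{.cpp}
-// disp16S: CV_16S fixed-point disparity from StereoBM/StereoSGBM; Q: from stereoRectify
-cv::Mat disp32F, xyz;
-disp16S.convertTo(disp32F, CV_32F, 1.0 / 16.0);  // undo the 4 fractional bits
-cv::reprojectImageTo3D(disp32F, xyz, Q, /*handleMissingValues=*/true);
-@endcode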
-
-@sa
- To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
- */
-CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
- OutputArray _3dImage, InputArray Q,
- bool handleMissingValues = false,
- int ddepth = -1 );
-
-/** @brief Calculates the Sampson Distance between two points.
-
-The function cv::sampsonDistance calculates and returns the first order approximation of the geometric error as:
-\f[
-sd( \texttt{pt1} , \texttt{pt2} )=
-\frac{(\texttt{pt2}^t \cdot \texttt{F} \cdot \texttt{pt1})^2}
-{((\texttt{F} \cdot \texttt{pt1})(0))^2 +
-((\texttt{F} \cdot \texttt{pt1})(1))^2 +
-((\texttt{F}^t \cdot \texttt{pt2})(0))^2 +
-((\texttt{F}^t \cdot \texttt{pt2})(1))^2}
-\f]
-The fundamental matrix may be calculated using the #findFundamentalMat function. See @cite HartleyZ00 11.4.3 for details.
-@param pt1 first homogeneous 2d point
-@param pt2 second homogeneous 2d point
-@param F fundamental matrix
-@return The computed Sampson distance.
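-
-A minimal usage sketch (illustrative; the helper name is an assumption):
-@code{.cpp}
-// first-order geometric error of a single correspondence under F (CV_64F, 3x3)
-double pairError(const cv::Point2d& p1, const cv::Point2d& p2, const cv::Mat& F)
-{
-    cv::Vec3d h1(p1.x, p1.y, 1.0), h2(p2.x, p2.y, 1.0);  // homogeneous 2D points
-    return cv::sampsonDistance(h1, h2, F);
-}
-@endcode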
-*/
-CV_EXPORTS_W double sampsonDistance(InputArray pt1, InputArray pt2, InputArray F);
-
-/** @brief Computes an optimal affine transformation between two 3D point sets.
-
-It computes
-\f[
-\begin{bmatrix}
-x\\
-y\\
-z\\
-\end{bmatrix}
-=
-\begin{bmatrix}
-a_{11} & a_{12} & a_{13}\\
-a_{21} & a_{22} & a_{23}\\
-a_{31} & a_{32} & a_{33}\\
-\end{bmatrix}
-\begin{bmatrix}
-X\\
-Y\\
-Z\\
-\end{bmatrix}
-+
-\begin{bmatrix}
-b_1\\
-b_2\\
-b_3\\
-\end{bmatrix}
-\f]
-
-@param src First input 3D point set containing \f$(X,Y,Z)\f$.
-@param dst Second input 3D point set containing \f$(x,y,z)\f$.
-@param out Output 3D affine transformation matrix \f$3 \times 4\f$ of the form
-\f[
-\begin{bmatrix}
-a_{11} & a_{12} & a_{13} & b_1\\
-a_{21} & a_{22} & a_{23} & b_2\\
-a_{31} & a_{32} & a_{33} & b_3\\
-\end{bmatrix}
-\f]
-@param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
-@param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
-an inlier.
-@param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
-between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
-significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
-
-The function estimates an optimal 3D affine transformation between two 3D point sets using the
-RANSAC algorithm.
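-
-A minimal usage sketch (illustrative; variable names are assumptions):
-@code{.cpp}
-// src, dst: std::vector<cv::Point3f> of corresponding 3D points
-cv::Mat affine;                  // 3x4 [A | b] on success
-std::vector<uchar> inliers;      // 1 = inlier, 0 = outlier
-int ok = cv::estimateAffine3D(src, dst, affine, inliers);
-@endcode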
- */
-CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
- OutputArray out, OutputArray inliers,
- double ransacThreshold = 3, double confidence = 0.99);
-
-/** @brief Computes an optimal affine transformation between two 3D point sets.
-
-It computes \f$R,c,t\f$ minimizing \f$\sum_{i} \| dst_i - c \cdot R \cdot src_i - t \|^2\f$,
-where \f$R\f$ is a 3x3 rotation matrix, \f$t\f$ is a 3x1 translation vector and \f$c\f$ is a
-scalar scale factor. This is an implementation of the algorithm by Umeyama \cite umeyama1991least .
-The estimated affine transform has a uniform (homogeneous) scale, making it a subclass of affine
-transformations with 7 degrees of freedom. The paired point sets need to comprise at least 3
-points each.
-
-@param src First input 3D point set.
-@param dst Second input 3D point set.
-@param scale If null is passed, the scale parameter c will be assumed to be 1.0.
-Else the pointed-to variable will be set to the optimal scale.
-@param force_rotation If true, the returned rotation will never be a reflection.
-This might be unwanted, e.g. when optimizing a transform between a right- and a
-left-handed coordinate system.
-@return 3D affine transformation matrix \f$3 \times 4\f$ of the form
-\f[T =
-\begin{bmatrix}
-R & t\\
-\end{bmatrix}
-\f]
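-
-A minimal usage sketch (illustrative; variable names are assumptions):
-@code{.cpp}
-// src, dst: std::vector<cv::Point3f> with at least 3 paired points
-double scale = 0.0;
-cv::Mat T = cv::estimateAffine3D(src, dst, &scale, /*force_rotation=*/true);
-// T = [R | t] (3x4); the model is dst_i ~ scale * R * src_i + t
-@endcode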
-
- */
-CV_EXPORTS_W cv::Mat estimateAffine3D(InputArray src, InputArray dst,
- CV_OUT double* scale = nullptr, bool force_rotation = true);
-
-/** @brief Computes an optimal translation between two 3D point sets.
- *
- * It computes
- * \f[
- * \begin{bmatrix}
- * x\\
- * y\\
- * z\\
- * \end{bmatrix}
- * =
- * \begin{bmatrix}
- * X\\
- * Y\\
- * Z\\
- * \end{bmatrix}
- * +
- * \begin{bmatrix}
- * b_1\\
- * b_2\\
- * b_3\\
- * \end{bmatrix}
- * \f]
- *
- * @param src First input 3D point set containing \f$(X,Y,Z)\f$.
- * @param dst Second input 3D point set containing \f$(x,y,z)\f$.
- * @param out Output 3D translation vector \f$3 \times 1\f$ of the form
- * \f[
- * \begin{bmatrix}
- * b_1 \\
- * b_2 \\
- * b_3 \\
- * \end{bmatrix}
- * \f]
- * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
- * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
- * an inlier.
- * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
- * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
- * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
- *
- * The function estimates an optimal 3D translation between two 3D point sets using the
- * RANSAC algorithm.
- * */
-CV_EXPORTS_W int estimateTranslation3D(InputArray src, InputArray dst,
- OutputArray out, OutputArray inliers,
- double ransacThreshold = 3, double confidence = 0.99);
-
-/** @brief Computes an optimal affine transformation between two 2D point sets.
-
-It computes
-\f[
-\begin{bmatrix}
-x\\
-y\\
-\end{bmatrix}
-=
-\begin{bmatrix}
-a_{11} & a_{12}\\
-a_{21} & a_{22}\\
-\end{bmatrix}
-\begin{bmatrix}
-X\\
-Y\\
-\end{bmatrix}
-+
-\begin{bmatrix}
-b_1\\
-b_2\\
-\end{bmatrix}
-\f]
-
-@param from First input 2D point set containing \f$(X,Y)\f$.
-@param to Second input 2D point set containing \f$(x,y)\f$.
-@param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
-@param method Robust method used to compute transformation. The following methods are possible:
-- @ref RANSAC - RANSAC-based robust method
-- @ref LMEDS - Least-Median robust method
-RANSAC is the default method.
-@param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
-a point as an inlier. Applies only to RANSAC.
-@param maxIters The maximum number of robust method iterations.
-@param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
-between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
-significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
-@param refineIters Maximum number of iterations of refining algorithm (Levenberg-Marquardt).
-Passing 0 will disable refining, so the output matrix will be the output of the robust method.
-
-@return Output 2D affine transformation matrix \f$2 \times 3\f$ or empty matrix if transformation
-could not be estimated. The returned matrix has the following form:
-\f[
-\begin{bmatrix}
-a_{11} & a_{12} & b_1\\
-a_{21} & a_{22} & b_2\\
-\end{bmatrix}
-\f]
-
-The function estimates an optimal 2D affine transformation between two 2D point sets using the
-selected robust algorithm.
-
-The computed transformation is then refined further (using only inliers) with the
-Levenberg-Marquardt method to reduce the re-projection error even more.
-
-@note
-The RANSAC method can handle practically any ratio of outliers but needs a threshold to
-distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
-correctly only when there are more than 50% of inliers.
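-
-A minimal usage sketch (illustrative; variable names are assumptions, and cv::warpAffine
-comes from the imgproc module):
-@code{.cpp}
-// from, to: std::vector<cv::Point2f> of corresponding 2D points
-std::vector<uchar> inliers;
-cv::Mat A = cv::estimateAffine2D(from, to, inliers, cv::RANSAC);
-cv::Mat warped;
-if (!A.empty())                  // A is 2x3 and directly usable with warpAffine
-    cv::warpAffine(img, warped, A, img.size());
-@endcode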
-
-@sa estimateAffinePartial2D, getAffineTransform
-*/
-CV_EXPORTS_W cv::Mat estimateAffine2D(InputArray from, InputArray to, OutputArray inliers = noArray(),
- int method = RANSAC, double ransacReprojThreshold = 3,
- size_t maxIters = 2000, double confidence = 0.99,
- size_t refineIters = 10);
-
-
-CV_EXPORTS_W cv::Mat estimateAffine2D(InputArray pts1, InputArray pts2, OutputArray inliers,
- const UsacParams &params);
-
-/** @brief Computes an optimal limited affine transformation with 4 degrees of freedom between
-two 2D point sets.
-
-@param from First input 2D point set.
-@param to Second input 2D point set.
-@param inliers Output vector indicating which points are inliers.
-@param method Robust method used to compute transformation. The following methods are possible:
-- @ref RANSAC - RANSAC-based robust method
-- @ref LMEDS - Least-Median robust method
-RANSAC is the default method.
-@param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
-a point as an inlier. Applies only to RANSAC.
-@param maxIters The maximum number of robust method iterations.
-@param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
-between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
-significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
-@param refineIters Maximum number of iterations of refining algorithm (Levenberg-Marquardt).
-Passing 0 will disable refining, so the output matrix will be the output of the robust method.
-
-@return Output 2D affine transformation (4 degrees of freedom) matrix \f$2 \times 3\f$ or
-empty matrix if transformation could not be estimated.
-
-The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
-combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
-estimation.
-
-The computed transformation is then refined further (using only inliers) with the
-Levenberg-Marquardt method to reduce the re-projection error even more.
-
-Estimated transformation matrix is:
-\f[ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
- \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
-\end{bmatrix} \f]
-Where \f$ \theta \f$ is the rotation angle, \f$ s \f$ the scaling factor and \f$ t_x, t_y \f$ are
-translations in \f$ x, y \f$ axes respectively.
-
-@note
-The RANSAC method can handle practically any ratio of outliers but needs a threshold to
-distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
-correctly only when there are more than 50% of inliers.
-
-@sa estimateAffine2D, getAffineTransform
-*/
-CV_EXPORTS_W cv::Mat estimateAffinePartial2D(InputArray from, InputArray to, OutputArray inliers = noArray(),
- int method = RANSAC, double ransacReprojThreshold = 3,
- size_t maxIters = 2000, double confidence = 0.99,
- size_t refineIters = 10);
-
-/** @example samples/cpp/tutorial_code/features2D/Homography/decompose_homography.cpp
-An example program with homography decomposition.
-
-Check @ref tutorial_homography "the corresponding tutorial" for more details.
-*/
-
-/** @brief Decompose a homography matrix to rotation(s), translation(s) and plane normal(s).
-
-@param H The input homography matrix between two images.
-@param K The input camera intrinsic matrix.
-@param rotations Array of rotation matrices.
-@param translations Array of translation matrices.
-@param normals Array of plane normal matrices.
-
-This function extracts relative camera motion between two views of a planar object and returns up to
-four mathematical solution tuples of rotation, translation, and plane normal. The decomposition of
-the homography matrix H is described in detail in @cite Malis2007.
-
-If the homography H, induced by the plane, gives the constraint
-\f[s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\f] on the source image points
-\f$p_i\f$ and the destination image points \f$p'_i\f$, then the tuple of rotations[k] and
-translations[k] is a change of basis from the source camera's coordinate system to the destination
-camera's coordinate system. However, by decomposing H, one can only get the translation normalized
-by the (typically unknown) depth of the scene, i.e. its direction but with normalized length.
-
-If point correspondences are available, at least two solutions may further be invalidated, by
-applying positive depth constraint, i.e. all points must be in front of the camera.
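-
-A minimal usage sketch (illustrative; variable names are assumptions):
-@code{.cpp}
-// H: 3x3 homography between the two views; K: 3x3 camera intrinsic matrix
-std::vector<cv::Mat> Rs, ts, normals;
-int n = cv::decomposeHomographyMat(H, K, Rs, ts, normals);  // up to 4 solutions
-// with point correspondences, prune the solution set further, e.g. via
-// cv::filterHomographyDecompByVisibleRefpoints
-@endcode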
- */
-CV_EXPORTS_W int decomposeHomographyMat(InputArray H,
- InputArray K,
- OutputArrayOfArrays rotations,
- OutputArrayOfArrays translations,
- OutputArrayOfArrays normals);
-
-/** @brief Filters homography decompositions based on additional information.
-
-@param rotations Vector of rotation matrices.
-@param normals Vector of plane normal matrices.
-@param beforePoints Vector of (rectified) visible reference points before the homography is applied
-@param afterPoints Vector of (rectified) visible reference points after the homography is applied
-@param possibleSolutions Vector of int indices representing the viable solution set after filtering
-@param pointsMask optional Mat/Vector of 8u type representing the mask for the inliers as given by the #findHomography function
-
-This function is intended to filter the output of the #decomposeHomographyMat based on additional
-information as described in @cite Malis2007 . The summary of the method: the #decomposeHomographyMat function
-returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the
-sets of points visible in the camera frame before and after the homography transformation is applied,
-we can determine which are the true potential solutions and which are the opposites by verifying which
-homographies are consistent with all visible reference points being in front of the camera. The inputs
-are left unchanged; the filtered solution set is returned as indices into the existing one.
-
-*/
-CV_EXPORTS_W void filterHomographyDecompByVisibleRefpoints(InputArrayOfArrays rotations,
- InputArrayOfArrays normals,
- InputArray beforePoints,
- InputArray afterPoints,
- OutputArray possibleSolutions,
- InputArray pointsMask = noArray());
-
-/** @brief The base class for stereo correspondence algorithms.
- */
-class CV_EXPORTS_W StereoMatcher : public Algorithm
-{
-public:
- enum { DISP_SHIFT = 4,
- DISP_SCALE = (1 << DISP_SHIFT)
- };
-
- /** @brief Computes disparity map for the specified stereo pair
-
- @param left Left 8-bit single-channel image.
- @param right Right image of the same size and the same type as the left one.
- @param disparity Output disparity map. It has the same size as the input images. Some algorithms,
- like StereoBM or StereoSGBM compute 16-bit fixed-point disparity map (where each disparity value
- has 4 fractional bits), whereas other algorithms output 32-bit floating-point disparity map.
- */
- CV_WRAP virtual void compute( InputArray left, InputArray right,
- OutputArray disparity ) = 0;
-
- CV_WRAP virtual int getMinDisparity() const = 0;
- CV_WRAP virtual void setMinDisparity(int minDisparity) = 0;
-
- CV_WRAP virtual int getNumDisparities() const = 0;
- CV_WRAP virtual void setNumDisparities(int numDisparities) = 0;
-
- CV_WRAP virtual int getBlockSize() const = 0;
- CV_WRAP virtual void setBlockSize(int blockSize) = 0;
-
- CV_WRAP virtual int getSpeckleWindowSize() const = 0;
- CV_WRAP virtual void setSpeckleWindowSize(int speckleWindowSize) = 0;
-
- CV_WRAP virtual int getSpeckleRange() const = 0;
- CV_WRAP virtual void setSpeckleRange(int speckleRange) = 0;
-
- CV_WRAP virtual int getDisp12MaxDiff() const = 0;
- CV_WRAP virtual void setDisp12MaxDiff(int disp12MaxDiff) = 0;
-};
-
-
-/** @brief Class for computing stereo correspondence using the block matching algorithm, introduced and
-contributed to OpenCV by K. Konolige.
- */
-class CV_EXPORTS_W StereoBM : public StereoMatcher
-{
-public:
- enum { PREFILTER_NORMALIZED_RESPONSE = 0,
- PREFILTER_XSOBEL = 1
- };
-
- CV_WRAP virtual int getPreFilterType() const = 0;
- CV_WRAP virtual void setPreFilterType(int preFilterType) = 0;
-
- CV_WRAP virtual int getPreFilterSize() const = 0;
- CV_WRAP virtual void setPreFilterSize(int preFilterSize) = 0;
-
- CV_WRAP virtual int getPreFilterCap() const = 0;
- CV_WRAP virtual void setPreFilterCap(int preFilterCap) = 0;
-
- CV_WRAP virtual int getTextureThreshold() const = 0;
- CV_WRAP virtual void setTextureThreshold(int textureThreshold) = 0;
-
- CV_WRAP virtual int getUniquenessRatio() const = 0;
- CV_WRAP virtual void setUniquenessRatio(int uniquenessRatio) = 0;
-
- CV_WRAP virtual int getSmallerBlockSize() const = 0;
- CV_WRAP virtual void setSmallerBlockSize(int blockSize) = 0;
-
- CV_WRAP virtual Rect getROI1() const = 0;
- CV_WRAP virtual void setROI1(Rect roi1) = 0;
-
- CV_WRAP virtual Rect getROI2() const = 0;
- CV_WRAP virtual void setROI2(Rect roi2) = 0;
-
- /** @brief Creates StereoBM object
-
- @param numDisparities the disparity search range. For each pixel, the algorithm will find the best
- disparity from 0 (default minimum disparity) to numDisparities. The search range can then be
- shifted by changing the minimum disparity.
- @param blockSize the linear size of the blocks compared by the algorithm. The size should be odd
- (as the block is centered at the current pixel). A larger block size implies a smoother, though less
- accurate, disparity map. A smaller block size gives a more detailed disparity map, but there is a
- higher chance for the algorithm to find a wrong correspondence.
-
- The function creates a StereoBM object. You can then call StereoBM::compute() to compute disparity for
- a specific stereo pair.
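-
- A minimal usage sketch (illustrative; image names are assumptions):
- @code{.cpp}
- // leftGray, rightGray: rectified 8-bit single-channel images
- cv::Ptr<cv::StereoBM> bm = cv::StereoBM::create(/*numDisparities=*/96, /*blockSize=*/15);
- cv::Mat disp;                    // CV_16S, disparity values scaled by 16
- bm->compute(leftGray, rightGray, disp);
- @endcode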
- */
- CV_WRAP static Ptr<StereoBM> create(int numDisparities = 0, int blockSize = 21);
-};
-
-/** @brief The class implements the modified H. Hirschmuller algorithm @cite HH08 that differs from the original
-one as follows:
-
-- By default, the algorithm is single-pass, which means that you consider only 5 directions
-instead of 8. Set mode=StereoSGBM::MODE_HH in createStereoSGBM to run the full variant of the
-algorithm but beware that it may consume a lot of memory.
-- The algorithm matches blocks, not individual pixels. Though, setting blockSize=1 reduces the
-blocks to single pixels.
-- Mutual information cost function is not implemented. Instead, a simpler Birchfield-Tomasi
-sub-pixel metric from @cite BT98 is used. Though, the color images are supported as well.
-- Some pre- and post- processing steps from K. Konolige algorithm StereoBM are included, for
-example: pre-filtering (StereoBM::PREFILTER_XSOBEL type) and post-filtering (uniqueness
-check, quadratic interpolation and speckle filtering).
-
-@note
- - (Python) An example illustrating the use of the StereoSGBM matching algorithm can be found
- at opencv_source_code/samples/python/stereo_match.py
- */
-class CV_EXPORTS_W StereoSGBM : public StereoMatcher
-{
-public:
- enum
- {
- MODE_SGBM = 0,
- MODE_HH = 1,
- MODE_SGBM_3WAY = 2,
- MODE_HH4 = 3
- };
-
- CV_WRAP virtual int getPreFilterCap() const = 0;
- CV_WRAP virtual void setPreFilterCap(int preFilterCap) = 0;
-
- CV_WRAP virtual int getUniquenessRatio() const = 0;
- CV_WRAP virtual void setUniquenessRatio(int uniquenessRatio) = 0;
-
- CV_WRAP virtual int getP1() const = 0;
- CV_WRAP virtual void setP1(int P1) = 0;
-
- CV_WRAP virtual int getP2() const = 0;
- CV_WRAP virtual void setP2(int P2) = 0;
-
- CV_WRAP virtual int getMode() const = 0;
- CV_WRAP virtual void setMode(int mode) = 0;
-
- /** @brief Creates StereoSGBM object
-
- @param minDisparity Minimum possible disparity value. Normally, it is zero but sometimes
- rectification algorithms can shift images, so this parameter needs to be adjusted accordingly.
- @param numDisparities Maximum disparity minus minimum disparity. The value is always greater than
- zero. In the current implementation, this parameter must be divisible by 16.
- @param blockSize Matched block size. It must be an odd number \>=1 . Normally, it should be
- somewhere in the 3..11 range.
- @param P1 The first parameter controlling the disparity smoothness. See below.
- @param P2 The second parameter controlling the disparity smoothness. The larger the values are,
- the smoother the disparity is. P1 is the penalty on the disparity change by plus or minus 1
- between neighbor pixels. P2 is the penalty on the disparity change by more than 1 between neighbor
- pixels. The algorithm requires P2 \> P1 . See stereo_match.cpp sample where some reasonably good
- P1 and P2 values are shown (like 8\*number_of_image_channels\*blockSize\*blockSize and
- 32\*number_of_image_channels\*blockSize\*blockSize , respectively).
- @param disp12MaxDiff Maximum allowed difference (in integer pixel units) in the left-right
- disparity check. Set it to a non-positive value to disable the check.
- @param preFilterCap Truncation value for the prefiltered image pixels. The algorithm first
- computes x-derivative at each pixel and clips its value by [-preFilterCap, preFilterCap] interval.
- The result values are passed to the Birchfield-Tomasi pixel cost function.
- @param uniquenessRatio Margin in percentage by which the best (minimum) computed cost function
- value should "win" the second best value to consider the found match correct. Normally, a value
- within the 5-15 range is good enough.
- @param speckleWindowSize Maximum size of smooth disparity regions to consider their noise speckles
- and invalidate. Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the
- 50-200 range.
- @param speckleRange Maximum disparity variation within each connected component. If you do speckle
- filtering, set the parameter to a positive value; it will be implicitly multiplied by 16.
- Normally, 1 or 2 is good enough.
- @param mode Set it to StereoSGBM::MODE_HH to run the full-scale two-pass dynamic programming
- algorithm. It will consume O(W\*H\*numDisparities) bytes, which is large for 640x480 stereo and
- huge for HD-size pictures. By default, mode is set to StereoSGBM::MODE_SGBM.
-
- The method creates a StereoSGBM object. All the parameters have default values, so you only have to
- set StereoSGBM::numDisparities at minimum. The remaining parameters can be set to custom values if
- needed.
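-
- A minimal usage sketch (illustrative; uses the P1/P2 heuristic quoted above):
- @code{.cpp}
- // left, right: rectified images of the same size and type
- int cn = left.channels(), bs = 3;
- cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(
-     /*minDisparity=*/0, /*numDisparities=*/96, /*blockSize=*/bs,
-     /*P1=*/8 * cn * bs * bs, /*P2=*/32 * cn * bs * bs);
- cv::Mat disp;                    // CV_16S, disparity values scaled by 16
- sgbm->compute(left, right, disp);
- @endcode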
- */
- CV_WRAP static Ptr<StereoSGBM> create(int minDisparity = 0, int numDisparities = 16, int blockSize = 3,
- int P1 = 0, int P2 = 0, int disp12MaxDiff = 0,
- int preFilterCap = 0, int uniquenessRatio = 0,
- int speckleWindowSize = 0, int speckleRange = 0,
- int mode = StereoSGBM::MODE_SGBM);
-};
-
-
-//! cv::undistort mode
-enum UndistortTypes
-{
- PROJ_SPHERICAL_ORTHO = 0,
- PROJ_SPHERICAL_EQRECT = 1
-};
-
-/** @brief Transforms an image to compensate for lens distortion.
-
-The function transforms an image to compensate radial and tangential lens distortion.
-
-The function is simply a combination of #initUndistortRectifyMap (with unity R ) and #remap
-(with bilinear interpolation). See the former function for details of the transformation being
-performed.
-
-Those pixels in the destination image, for which there are no corresponding pixels in the source
-image, are filled with zeros (black color).
-
-A particular subset of the source image that will be visible in the corrected image can be regulated
-by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate
-newCameraMatrix depending on your requirements.
-
-The camera matrix and the distortion parameters can be determined using #calibrateCamera. If
-the resolution of images is different from the resolution used at the calibration stage, \f$f_x,
-f_y, c_x\f$ and \f$c_y\f$ need to be scaled accordingly, while the distortion coefficients remain
-the same.
-
-@param src Input (distorted) image.
-@param dst Output (corrected) image that has the same size and type as src .
-@param cameraMatrix Input camera matrix \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
-@param distCoeffs Input vector of distortion coefficients
-\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
-of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
-@param newCameraMatrix Camera matrix of the distorted image. By default, it is the same as
-cameraMatrix but you may additionally scale and shift the result by using a different matrix.
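-
-A minimal usage sketch (illustrative; variable names are assumptions):
-@code{.cpp}
-// K, dist: from calibrateCamera; alpha = 0 crops away invalid border pixels
-cv::Mat newK = cv::getOptimalNewCameraMatrix(K, dist, src.size(), /*alpha=*/0.0);
-cv::Mat dst;
-cv::undistort(src, dst, K, dist, newK);
-@endcode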
- */
-CV_EXPORTS_W void undistort( InputArray src, OutputArray dst,
- InputArray cameraMatrix,
- InputArray distCoeffs,
- InputArray newCameraMatrix = noArray() );
-
-/** @brief Computes the undistortion and rectification transformation map.
-
-The function computes the joint undistortion and rectification transformation and represents the
-result in the form of maps for #remap. The undistorted image looks like the original, as if it were
-captured with a camera using the camera matrix = newCameraMatrix and zero distortion. In case of a
-monocular camera, newCameraMatrix is usually equal to cameraMatrix, or it can be computed by
-#getOptimalNewCameraMatrix for a better control over scaling. In case of a stereo camera,
-newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify .
-
-Also, this new camera is oriented differently in the coordinate space, according to R. That, for
-example, helps to align two heads of a stereo camera so that the epipolar lines on both images
-become horizontal and have the same y- coordinate (in case of a horizontally aligned stereo camera).
-
-The function actually builds the maps for the inverse mapping algorithm that is used by #remap. That
-is, for each pixel \f$(u, v)\f$ in the destination (corrected and rectified) image, the function
-computes the corresponding coordinates in the source image (that is, in the original image from
-camera). The following process is applied:
-\f[
-\begin{array}{l}
-x \leftarrow (u - {c'}_x)/{f'}_x \\
-y \leftarrow (v - {c'}_y)/{f'}_y \\
-{[X\,Y\,W]} ^T \leftarrow R^{-1}*[x \, y \, 1]^T \\
-x' \leftarrow X/W \\
-y' \leftarrow Y/W \\
-r^2 \leftarrow x'^2 + y'^2 \\
-x'' \leftarrow x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}
-+ 2p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4\\
-y'' \leftarrow y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}
-+ p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\
-s\vecthree{x'''}{y'''}{1} =
-\vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}(\tau_x, \tau_y)}
-{0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)}
-{0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\\
-map_x(u,v) \leftarrow x''' f_x + c_x \\
-map_y(u,v) \leftarrow y''' f_y + c_y
-\end{array}
-\f]
-where \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
-are the distortion coefficients.
-
-In case of a stereo camera, this function is called twice: once for each camera head, after
-#stereoRectify, which in its turn is called after #stereoCalibrate. But if the stereo camera
-was not calibrated, it is still possible to compute the rectification transformations directly from
-the fundamental matrix using #stereoRectifyUncalibrated. For each camera, the function computes
-homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D
-space. R can be computed from H as
-\f[\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\f]
-where cameraMatrix can be chosen arbitrarily.
-
-@param cameraMatrix Input camera matrix \f$A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
-@param distCoeffs Input vector of distortion coefficients
-\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
-of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
-@param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2 ,
-computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation
-is assumed. In #initUndistortRectifyMap R is assumed to be an identity matrix.
-@param newCameraMatrix New camera matrix \f$A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\f$.
-@param size Undistorted image size.
-@param m1type Type of the first output map that can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps
-@param map1 The first output map.
-@param map2 The second output map.
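-
-A minimal usage sketch (illustrative; cv::remap comes from the imgproc module):
-@code{.cpp}
-cv::Mat map1, map2;
-cv::initUndistortRectifyMap(K, dist, cv::Mat() /*R = identity*/, newK,
-                            imageSize, CV_16SC2, map1, map2);
-cv::Mat rectified;
-cv::remap(src, rectified, map1, map2, cv::INTER_LINEAR);  // maps are reusable per frame
-@endcode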
- */
-CV_EXPORTS_W
-void initUndistortRectifyMap(InputArray cameraMatrix, InputArray distCoeffs,
- InputArray R, InputArray newCameraMatrix,
- Size size, int m1type, OutputArray map1, OutputArray map2);
-
-/** @brief Computes the projection and inverse-rectification transformation map. In essence, this is the inverse of
-#initUndistortRectifyMap to accommodate stereo-rectification of projectors ('inverse-cameras') in projector-camera pairs.
-
-The function computes the joint projection and inverse rectification transformation and represents the
-result in the form of maps for #remap. The projected image looks like a distorted version of the original which,
-once projected by a projector, should visually match the original. In case of a monocular camera, newCameraMatrix
-is usually equal to cameraMatrix, or it can be computed by
-#getOptimalNewCameraMatrix for a better control over scaling. In case of a projector-camera pair,
-newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify .
-
-The projector is oriented differently in the coordinate space, according to R. In case of projector-camera pairs,
-this helps align the projector (in the same manner as #initUndistortRectifyMap for the camera) to create a stereo-rectified pair. This
-allows epipolar lines on both images to become horizontal and have the same y-coordinate (in case of a horizontally aligned projector-camera pair).
-
-The function builds the maps for the inverse mapping algorithm that is used by #remap. That
-is, for each pixel \f$(u, v)\f$ in the destination (projected and inverse-rectified) image, the function
-computes the corresponding coordinates in the source image (that is, in the original digital image). The following process is applied:
-
-\f[
-\begin{array}{l}
-\text{newCameraMatrix}\\
-x \leftarrow (u - {c'}_x)/{f'}_x \\
-y \leftarrow (v - {c'}_y)/{f'}_y \\
-
-\\\text{Undistortion}
-\\\scriptsize{\textit{though equation shown is for radial undistortion, function implements cv::undistortPoints()}}\\
-r^2 \leftarrow x^2 + y^2 \\
-\theta \leftarrow \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}\\
-x' \leftarrow \frac{x}{\theta} \\
-y' \leftarrow \frac{y}{\theta} \\
-
-\\\text{Rectification}\\
-{[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
-x'' \leftarrow X/W \\
-y'' \leftarrow Y/W \\
-
-\\\text{cameraMatrix}\\
-map_x(u,v) \leftarrow x'' f_x + c_x \\
-map_y(u,v) \leftarrow y'' f_y + c_y
-\end{array}
-\f]
-where \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
-are the distortion coefficients vector distCoeffs.
-
-In case of a stereo-rectified projector-camera pair, this function is called for the projector while #initUndistortRectifyMap is called for the camera head.
-This is done after #stereoRectify, which in turn is called after #stereoCalibrate. If the projector-camera pair
-is not calibrated, it is still possible to compute the rectification transformations directly from
-the fundamental matrix using #stereoRectifyUncalibrated. For the projector and camera, the function computes
-homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D
-space. R can be computed from H as
-\f[\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\f]
-where cameraMatrix can be chosen arbitrarily.
-
-@param cameraMatrix Input camera matrix \f$A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
-@param distCoeffs Input vector of distortion coefficients
-\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
-of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
-@param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2,
-computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation
-is assumed.
-@param newCameraMatrix New camera matrix \f$A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\f$.
-@param size Distorted image size.
-@param m1type Type of the first output map. Can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps
-@param map1 The first output map for #remap.
-@param map2 The second output map for #remap.
- */
-CV_EXPORTS_W
-void initInverseRectificationMap( InputArray cameraMatrix, InputArray distCoeffs,
- InputArray R, InputArray newCameraMatrix,
- const Size& size, int m1type, OutputArray map1, OutputArray map2 );
-
-//! initializes maps for #remap for wide-angle
-CV_EXPORTS
-float initWideAngleProjMap(InputArray cameraMatrix, InputArray distCoeffs,
- Size imageSize, int destImageWidth,
- int m1type, OutputArray map1, OutputArray map2,
- enum UndistortTypes projType = PROJ_SPHERICAL_EQRECT, double alpha = 0);
-static inline
-float initWideAngleProjMap(InputArray cameraMatrix, InputArray distCoeffs,
- Size imageSize, int destImageWidth,
- int m1type, OutputArray map1, OutputArray map2,
- int projType, double alpha = 0)
-{
- return initWideAngleProjMap(cameraMatrix, distCoeffs, imageSize, destImageWidth,
- m1type, map1, map2, (UndistortTypes)projType, alpha);
-}
-
-/** @brief Returns the default new camera matrix.
-
-The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
-centerPrincipalPoint=false ), or the modified one (when centerPrincipalPoint=true).
-
-In the latter case, the new camera matrix will be:
-
-\f[\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5 \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5 \\ 0 && 0 && 1 \end{bmatrix} ,\f]
-
-where \f$f_x\f$ and \f$f_y\f$ are \f$(0,0)\f$ and \f$(1,1)\f$ elements of cameraMatrix, respectively.
-
-By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
-move the principal point. However, when you work with stereo, it is important to move the principal
-points in both views to the same y-coordinate (which is required by most of stereo correspondence
-algorithms), and maybe to the same x-coordinate too. So, you can form the new camera matrix for
-each view where the principal points are located at the center.
-
-@param cameraMatrix Input camera matrix.
-@param imgsize Camera view image size in pixels.
-@param centerPrincipalPoint Location of the principal point in the new camera matrix. The
-parameter indicates whether this location should be at the image center or not.
- */
-CV_EXPORTS_W
-Mat getDefaultNewCameraMatrix(InputArray cameraMatrix, Size imgsize = Size(),
- bool centerPrincipalPoint = false);
-
-/** @brief Computes the ideal point coordinates from the observed point coordinates.
-
-The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
-sparse set of points instead of a raster image. Also the function performs a reverse transformation
-to #projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
-planar object, it does, up to a translation vector, if the proper R is specified.
-
-For each observed point coordinate \f$(u, v)\f$ the function computes:
-\f[
-\begin{array}{l}
-x^{"} \leftarrow (u - c_x)/f_x \\
-y^{"} \leftarrow (v - c_y)/f_y \\
-(x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
-{[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
-x \leftarrow X/W \\
-y \leftarrow Y/W \\
-\text{only performed if P is specified:} \\
-u' \leftarrow x {f'}_x + {c'}_x \\
-v' \leftarrow y {f'}_y + {c'}_y
-\end{array}
-\f]
-
-where *undistort* is an approximate iterative algorithm that estimates the normalized original
-point coordinates out of the normalized distorted point coordinates ("normalized" means that the
-coordinates do not depend on the camera matrix).
-
-The function can be used for either a stereo camera head or a monocular camera (when R is empty).
-@param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
-vector\<Point2f\> ).
-@param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector\<Point2f\> ) after undistortion and reverse perspective
-transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
-@param cameraMatrix Camera matrix \f$\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
-@param distCoeffs Input vector of distortion coefficients
-\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
-of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
-@param R Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
-#stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.
-@param P New camera matrix (3x3) or new projection matrix (3x4) \f$\begin{bmatrix} {f'}_x & 0 & {c'}_x & t_x \\ 0 & {f'}_y & {c'}_y & t_y \\ 0 & 0 & 1 & t_z \end{bmatrix}\f$. P1 or P2 computed by
-#stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used.
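-
-A minimal usage sketch (illustrative): with R and P left empty, the output contains
-normalized point coordinates.
-@code{.cpp}
-// px: std::vector<cv::Point2f> of observed pixel coordinates
-std::vector<cv::Point2f> normalized;
-cv::undistortPoints(px, normalized, K, dist);
-@endcode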
- */
-CV_EXPORTS_W
-void undistortPoints(InputArray src, OutputArray dst,
- InputArray cameraMatrix, InputArray distCoeffs,
- InputArray R = noArray(), InputArray P = noArray());
-/** @overload
- @note Default version of #undistortPoints does 5 iterations to compute undistorted points.
- */
-CV_EXPORTS_AS(undistortPointsIter)
-void undistortPoints(InputArray src, OutputArray dst,
- InputArray cameraMatrix, InputArray distCoeffs,
- InputArray R, InputArray P, TermCriteria criteria);
-
-/**
- * @brief Compute undistorted image points position
- *
- * @param src Observed points position, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or
-CV_64FC2) (or vector\ ).
- * @param dst Output undistorted points position (1xN/Nx1 2-channel or vector\ ).
- * @param cameraMatrix Camera matrix \f$\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
- * @param distCoeffs Distortion coefficients
- */
-CV_EXPORTS_W
-void undistortImagePoints(InputArray src, OutputArray dst, InputArray cameraMatrix,
- InputArray distCoeffs,
- TermCriteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5,
- 0.01));
-
-//! @} calib3d
-
-/** @brief The methods in this namespace use a so-called fisheye camera model.
- @ingroup calib3d_fisheye
-*/
-namespace fisheye
-{
-//! @addtogroup calib3d_fisheye
-//! @{
-
- enum{
- CALIB_USE_INTRINSIC_GUESS = 1 << 0,
- CALIB_RECOMPUTE_EXTRINSIC = 1 << 1,
- CALIB_CHECK_COND = 1 << 2,
- CALIB_FIX_SKEW = 1 << 3,
- CALIB_FIX_K1 = 1 << 4,
- CALIB_FIX_K2 = 1 << 5,
- CALIB_FIX_K3 = 1 << 6,
- CALIB_FIX_K4 = 1 << 7,
- CALIB_FIX_INTRINSIC = 1 << 8,
- CALIB_FIX_PRINCIPAL_POINT = 1 << 9,
- CALIB_ZERO_DISPARITY = 1 << 10,
- CALIB_FIX_FOCAL_LENGTH = 1 << 11
- };
-
- /** @brief Projects points using fisheye model
-
- @param objectPoints Array of object points, 1xN/Nx1 3-channel (or vector\<Point3f\> ), where N is
- the number of points in the view.
- @param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or
- vector\<Point2f\>.
- @param affine
- @param K Camera intrinsic matrix \f$cameramatrix{K}\f$.
- @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$.
- @param alpha The skew coefficient.
- @param jacobian Optional output 2Nx15 jacobian matrix of derivatives of image points with respect
- to components of the focal lengths, coordinates of the principal point, distortion coefficients,
- rotation vector, translation vector, and the skew. In the old interface different components of
- the jacobian are returned via different output parameters.
-
- The function computes projections of 3D points to the image plane given intrinsic and extrinsic
- camera parameters. Optionally, the function computes Jacobians - matrices of partial derivatives of
- image points coordinates (as functions of all the input parameters) with respect to the particular
- parameters, intrinsic and/or extrinsic.
- */
- CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, const Affine3d& affine,
- InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());
-
- /** @overload */
- CV_EXPORTS_W void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec,
- InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());
-
- /** @brief Distorts 2D points using fisheye model.
-
- @param undistorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is
- the number of points in the view.
- @param K Camera intrinsic matrix \f$cameramatrix{K}\f$.
- @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$.
- @param alpha The skew coefficient.
- @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
-
- Note that the function assumes the camera intrinsic matrix of the undistorted points to be identity.
- This means if you want to distort image points you have to multiply them with \f$K^{-1}\f$.
- */
- CV_EXPORTS_W void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0);
-
- /** @brief Undistorts 2D points using fisheye model
-
- @param distorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is the
- number of points in the view.
- @param K Camera intrinsic matrix \f$cameramatrix{K}\f$.
- @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$.
- @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- 1-channel or 1x1 3-channel
- @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
- @param criteria Termination criteria
- @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
- */
- CV_EXPORTS_W void undistortPoints(InputArray distorted, OutputArray undistorted,
- InputArray K, InputArray D, InputArray R = noArray(), InputArray P = noArray(),
- TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 10, 1e-8));
-
- /** @brief Computes undistortion and rectification maps for image transform by #remap. If D is empty zero
- distortion is used; if R or P is empty, identity matrices are used.
-
- @param K Camera intrinsic matrix \f$cameramatrix{K}\f$.
- @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$.
- @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- 1-channel or 1x1 3-channel
- @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
- @param size Undistorted image size.
- @param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2 . See #convertMaps
- for details.
- @param map1 The first output map.
- @param map2 The second output map.
- */
- CV_EXPORTS_W void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P,
- const cv::Size& size, int m1type, OutputArray map1, OutputArray map2);
-
- /** @brief Transforms an image to compensate for fisheye lens distortion.
-
- @param distorted image with fisheye lens distortion.
- @param undistorted Output image with compensated fisheye lens distortion.
- @param K Camera intrinsic matrix \f$cameramatrix{K}\f$.
- @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$.
- @param Knew Camera intrinsic matrix of the distorted image. By default, it is the identity matrix but you
- may additionally scale and shift the result by using a different matrix.
- @param new_size the new size
-
- The function transforms an image to compensate radial and tangential lens distortion.
-
- The function is simply a combination of #fisheye::initUndistortRectifyMap (with unity R ) and #remap
- (with bilinear interpolation). See the former function for details of the transformation being
- performed.
-
- See below the results of undistortImage.
- - a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
- k_4, k_5, k_6) of distortion were optimized under calibration)
- - b\) result of #fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
- k_3, k_4) of fisheye distortion were optimized under calibration)
- - c\) original image was captured with fisheye lens
-
- Pictures a) and b) are almost the same. But if we consider points located far from the center
- of the image, we can notice that in image a) these points are distorted.
-
- ![image](pics/fisheye_undistorted.jpg)
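-
- A minimal usage sketch (illustrative; passing Knew = K keeps the original focal length):
- @code{.cpp}
- cv::Mat undist;
- cv::fisheye::undistortImage(img, undist, K, D, /*Knew=*/K, img.size());
- @endcode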
- */
- CV_EXPORTS_W void undistortImage(InputArray distorted, OutputArray undistorted,
- InputArray K, InputArray D, InputArray Knew = cv::noArray(), const Size& new_size = Size());
-
- /** @brief Estimates new camera intrinsic matrix for undistortion or rectification.
-
- @param K Camera intrinsic matrix \f$cameramatrix{K}\f$.
- @param image_size Size of the image
- @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$.
- @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
- 1-channel or 1x1 3-channel
- @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
- @param balance Sets the new focal length in range between the min focal length and the max focal
- length. Balance is in range of [0, 1].
- @param new_size the new size
- @param fov_scale Divisor for new focal length.
- */
- CV_EXPORTS_W void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R,
- OutputArray P, double balance = 0.0, const Size& new_size = Size(), double fov_scale = 1.0);
-
- /** @brief Performs camera calibration
-
- @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
- coordinate space.
- @param imagePoints vector of vectors of the projections of calibration pattern points.
- imagePoints.size() must be equal to objectPoints.size(), and imagePoints[i].size() must be equal
- to objectPoints[i].size() for each i.
- @param image_size Size of the image used only to initialize the camera intrinsic matrix.
- @param K Output 3x3 floating-point camera intrinsic matrix
- \f$\cameramatrix{A}\f$ . If
- @ref fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
- initialized before calling the function.
- @param D Output vector of distortion coefficients \f$\distcoeffsfisheye\f$.
- @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
- That is, each k-th rotation vector together with the corresponding k-th translation vector (see
- the next output parameter description) brings the calibration pattern from the model coordinate
- space (in which object points are specified) to the world coordinate space, that is, a real
- position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
- @param tvecs Output vector of translation vectors estimated for each pattern view.
- @param flags Different flags that may be zero or a combination of the following values:
- - @ref fisheye::CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
- fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- center ( imageSize is used), and focal distances are computed in a least-squares fashion.
- - @ref fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsic will be recomputed after each iteration
- of intrinsic optimization.
- - @ref fisheye::CALIB_CHECK_COND The functions will check validity of condition number.
- - @ref fisheye::CALIB_FIX_SKEW Skew coefficient (alpha) is set to zero and stays zero.
- - @ref fisheye::CALIB_FIX_K1,..., @ref fisheye::CALIB_FIX_K4 Selected distortion coefficients
- are set to zero and stay zero.
- - @ref fisheye::CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
-optimization. It stays at the center or at a different location specified when @ref fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
- - @ref fisheye::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
-optimization. It is the \f$max(width,height)/\pi\f$ or the provided \f$f_x\f$, \f$f_y\f$ when @ref fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
- @param criteria Termination criteria for the iterative optimization algorithm.
- */
- CV_EXPORTS_W double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size,
- InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0,
- TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));
-
- /** @brief Stereo rectification for fisheye camera model
-
- @param K1 First camera intrinsic matrix.
- @param D1 First camera distortion parameters.
- @param K2 Second camera intrinsic matrix.
- @param D2 Second camera distortion parameters.
- @param imageSize Size of the image used for stereo calibration.
- @param R Rotation matrix between the coordinate systems of the first and the second
- cameras.
- @param tvec Translation vector between coordinate systems of the cameras.
- @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
- @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
- @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
- camera.
- @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
- camera.
- @param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see #reprojectImageTo3D ).
- @param flags Operation flags that may be zero or @ref fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
- the function makes the principal points of each camera have the same pixel coordinates in the
- rectified views. And if the flag is not set, the function may still shift the images in the
- horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
- useful image area.
- @param newImageSize New image resolution after rectification. The same size should be passed to
- #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
- is passed (default), it is set to the original imageSize . Setting it to larger value can help you
- preserve details in the original image, especially when there is a big radial distortion.
- @param balance Sets the new focal length in range between the min focal length and the max focal
- length. Balance is in range of [0, 1].
- @param fov_scale Divisor for new focal length.
- */
- CV_EXPORTS_W void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec,
- OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags, const Size &newImageSize = Size(),
- double balance = 0.0, double fov_scale = 1.0);
-
- /** @brief Performs stereo calibration
-
- @param objectPoints Vector of vectors of the calibration pattern points.
- @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
- observed by the first camera.
- @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
- observed by the second camera.
- @param K1 Input/output first camera intrinsic matrix:
- \f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ . If
- any of @ref fisheye::CALIB_USE_INTRINSIC_GUESS , @ref fisheye::CALIB_FIX_INTRINSIC are specified,
- some or all of the matrix components must be initialized.
- @param D1 Input/output vector of distortion coefficients \f$\distcoeffsfisheye\f$ of 4 elements.
- @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 .
- @param D2 Input/output lens distortion coefficients for the second camera. The parameter is
- similar to D1 .
- @param imageSize Size of the image used only to initialize camera intrinsic matrix.
- @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
- @param T Output translation vector between the coordinate systems of the cameras.
- @param rvecs Output vector of rotation vectors ( @ref Rodrigues ) estimated for each pattern view in the
- coordinate system of the first camera of the stereo pair (e.g. std::vector<cv::Mat>). More in detail, each
- i-th rotation vector together with the corresponding i-th translation vector (see the next output parameter
- description) brings the calibration pattern from the object coordinate space (in which object points are
- specified) to the camera coordinate space of the first camera of the stereo pair. In more technical terms,
- the tuple of the i-th rotation and translation vector performs a change of basis from object coordinate space
- to camera coordinate space of the first camera of the stereo pair.
- @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter description
- of previous output parameter ( rvecs ).
- @param flags Different flags that may be zero or a combination of the following values:
- - @ref fisheye::CALIB_FIX_INTRINSIC Fix K1, K2 and D1, D2 so that only R, T matrices
- are estimated.
- - @ref fisheye::CALIB_USE_INTRINSIC_GUESS K1, K2 contains valid initial values of
- fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
- center (imageSize is used), and focal distances are computed in a least-squares fashion.
- - @ref fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsic will be recomputed after each iteration
- of intrinsic optimization.
- - @ref fisheye::CALIB_CHECK_COND The functions will check validity of condition number.
- - @ref fisheye::CALIB_FIX_SKEW Skew coefficient (alpha) is set to zero and stays zero.
- - @ref fisheye::CALIB_FIX_K1,..., @ref fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zero and stay
- zero.
- @param criteria Termination criteria for the iterative optimization algorithm.
- */
- CV_EXPORTS_W double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
- InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize,
- OutputArray R, OutputArray T, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = fisheye::CALIB_FIX_INTRINSIC,
- TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));
-
- /// @overload
- CV_EXPORTS_W double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
- InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize,
- OutputArray R, OutputArray T, int flags = fisheye::CALIB_FIX_INTRINSIC,
- TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));
-
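For illustration, a minimal sketch of the call shape of the second overload (an editorial example rather than part of the original header; it assumes `<opencv2/calib3d.hpp>` is included, the image size is a placeholder, and the three point vectors were filled by a pattern detector):

@code{.cpp}
    // per-view calibration pattern points and their detected projections
    std::vector<std::vector<cv::Point3f>> objectPoints;
    std::vector<std::vector<cv::Point2f>> imagePoints1, imagePoints2;
    // ... fill the three vectors from the detected calibration views ...

    cv::Matx33d K1, K2;   // output camera intrinsic matrices
    cv::Vec4d   D1, D2;   // output fisheye distortion coefficients
    cv::Matx33d R;        // output rotation between the two cameras
    cv::Vec3d   T;        // output translation between the two cameras
    double rms = cv::fisheye::stereoCalibrate(
        objectPoints, imagePoints1, imagePoints2,
        K1, D1, K2, D2, cv::Size(1280, 800), R, T,
        cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC | cv::fisheye::CALIB_FIX_SKEW);
@endcode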
-//! @} calib3d_fisheye
-} // end namespace fisheye
-
-} //end namespace cv
-
-#if 0 //def __cplusplus
-//////////////////////////////////////////////////////////////////////////////////////////
-class CV_EXPORTS CvLevMarq
-{
-public:
- CvLevMarq();
- CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
- cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
- bool completeSymmFlag=false );
- ~CvLevMarq();
- void init( int nparams, int nerrs, CvTermCriteria criteria=
- cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
- bool completeSymmFlag=false );
- bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
- bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
-
- void clear();
- void step();
- enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
-
- cv::Ptr<CvMat> mask;
- cv::Ptr<CvMat> prevParam;
- cv::Ptr<CvMat> param;
- cv::Ptr<CvMat> J;
- cv::Ptr<CvMat> err;
- cv::Ptr<CvMat> JtJ;
- cv::Ptr<CvMat> JtJN;
- cv::Ptr<CvMat> JtErr;
- cv::Ptr<CvMat> JtJV;
- cv::Ptr<CvMat> JtJW;
- double prevErrNorm, errNorm;
- int lambdaLg10;
- CvTermCriteria criteria;
- int state;
- int iters;
- bool completeSymmFlag;
- int solveMethod;
-};
-#endif
-
-#endif
diff --git a/opencv/native/jni/include/opencv2/calib3d/calib3d.hpp b/opencv/native/jni/include/opencv2/calib3d/calib3d.hpp
deleted file mode 100644
index b3da45e..0000000
--- a/opencv/native/jni/include/opencv2/calib3d/calib3d.hpp
+++ /dev/null
@@ -1,48 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of the copyright holders may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifdef __OPENCV_BUILD
-#error this is a compatibility header which should not be used inside the OpenCV library
-#endif
-
-#include "opencv2/calib3d.hpp"
diff --git a/opencv/native/jni/include/opencv2/calib3d/calib3d_c.h b/opencv/native/jni/include/opencv2/calib3d/calib3d_c.h
deleted file mode 100644
index e2af07b..0000000
--- a/opencv/native/jni/include/opencv2/calib3d/calib3d_c.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of the copyright holders may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef OPENCV_CALIB3D_C_H
-#define OPENCV_CALIB3D_C_H
-
-#include "opencv2/core/types_c.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Calculates fundamental matrix given a set of corresponding points */
-#define CV_FM_7POINT 1
-#define CV_FM_8POINT 2
-
-#define CV_LMEDS 4
-#define CV_RANSAC 8
-
-#define CV_FM_LMEDS_ONLY CV_LMEDS
-#define CV_FM_RANSAC_ONLY CV_RANSAC
-#define CV_FM_LMEDS CV_LMEDS
-#define CV_FM_RANSAC CV_RANSAC
-
-enum
-{
- CV_ITERATIVE = 0,
- CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
- CV_P3P = 2, // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
- CV_DLS = 3 // Joel A. Hesch and Stergios I. Roumeliotis. "A Direct Least-Squares (DLS) Method for PnP"
-};
-
-#define CV_CALIB_CB_ADAPTIVE_THRESH 1
-#define CV_CALIB_CB_NORMALIZE_IMAGE 2
-#define CV_CALIB_CB_FILTER_QUADS 4
-#define CV_CALIB_CB_FAST_CHECK 8
-
-#define CV_CALIB_USE_INTRINSIC_GUESS 1
-#define CV_CALIB_FIX_ASPECT_RATIO 2
-#define CV_CALIB_FIX_PRINCIPAL_POINT 4
-#define CV_CALIB_ZERO_TANGENT_DIST 8
-#define CV_CALIB_FIX_FOCAL_LENGTH 16
-#define CV_CALIB_FIX_K1 32
-#define CV_CALIB_FIX_K2 64
-#define CV_CALIB_FIX_K3 128
-#define CV_CALIB_FIX_K4 2048
-#define CV_CALIB_FIX_K5 4096
-#define CV_CALIB_FIX_K6 8192
-#define CV_CALIB_RATIONAL_MODEL 16384
-#define CV_CALIB_THIN_PRISM_MODEL 32768
-#define CV_CALIB_FIX_S1_S2_S3_S4 65536
-#define CV_CALIB_TILTED_MODEL 262144
-#define CV_CALIB_FIX_TAUX_TAUY 524288
-#define CV_CALIB_FIX_TANGENT_DIST 2097152
-
-#define CV_CALIB_NINTRINSIC 18
-
-#define CV_CALIB_FIX_INTRINSIC 256
-#define CV_CALIB_SAME_FOCAL_LENGTH 512
-
-#define CV_CALIB_ZERO_DISPARITY 1024
-
-/* stereo correspondence parameters and functions */
-#define CV_STEREO_BM_NORMALIZED_RESPONSE 0
-#define CV_STEREO_BM_XSOBEL 1
-
-#ifdef __cplusplus
-} // extern "C"
-
-//////////////////////////////////////////////////////////////////////////////////////////
-class CV_EXPORTS CvLevMarq
-{
-public:
- CvLevMarq();
- CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
- cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
- bool completeSymmFlag=false );
- ~CvLevMarq();
- void init( int nparams, int nerrs, CvTermCriteria criteria=
- cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
- bool completeSymmFlag=false );
- bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
- bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
-
- void clear();
- void step();
- enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
-
- cv::Ptr<CvMat> mask;
- cv::Ptr<CvMat> prevParam;
- cv::Ptr<CvMat> param;
- cv::Ptr<CvMat> J;
- cv::Ptr<CvMat> err;
- cv::Ptr<CvMat> JtJ;
- cv::Ptr<CvMat> JtJN;
- cv::Ptr<CvMat> JtErr;
- cv::Ptr<CvMat> JtJV;
- cv::Ptr<CvMat> JtJW;
- double prevErrNorm, errNorm;
- int lambdaLg10;
- CvTermCriteria criteria;
- int state;
- int iters;
- bool completeSymmFlag;
- int solveMethod;
-};
-
-#endif
-
-#endif /* OPENCV_CALIB3D_C_H */
diff --git a/opencv/native/jni/include/opencv2/core.hpp b/opencv/native/jni/include/opencv2/core.hpp
deleted file mode 100644
index bd5de32..0000000
--- a/opencv/native/jni/include/opencv2/core.hpp
+++ /dev/null
@@ -1,3396 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2015, Intel Corporation, all rights reserved.
-// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
-// Copyright (C) 2015, OpenCV Foundation, all rights reserved.
-// Copyright (C) 2015, Itseez Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of the copyright holders may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef OPENCV_CORE_HPP
-#define OPENCV_CORE_HPP
-
-#ifndef __cplusplus
-# error core.hpp header must be compiled as C++
-#endif
-
-#include "opencv2/core/cvdef.h"
-#include "opencv2/core/base.hpp"
-#include "opencv2/core/cvstd.hpp"
-#include "opencv2/core/traits.hpp"
-#include "opencv2/core/matx.hpp"
-#include "opencv2/core/types.hpp"
-#include "opencv2/core/mat.hpp"
-#include "opencv2/core/persistence.hpp"
-
-/**
-@defgroup core Core functionality
-@{
- @defgroup core_basic Basic structures
- @defgroup core_c C structures and operations
- @{
- @defgroup core_c_glue Connections with C++
- @}
- @defgroup core_array Operations on arrays
- @defgroup core_async Asynchronous API
- @defgroup core_xml XML/YAML Persistence
- @defgroup core_cluster Clustering
- @defgroup core_utils Utility and system functions and macros
- @{
- @defgroup core_logging Logging facilities
- @defgroup core_utils_sse SSE utilities
- @defgroup core_utils_neon NEON utilities
- @defgroup core_utils_vsx VSX utilities
- @defgroup core_utils_softfloat Softfloat support
- @defgroup core_utils_samples Utility functions for OpenCV samples
- @}
- @defgroup core_opengl OpenGL interoperability
- @defgroup core_ipp Intel IPP Asynchronous C/C++ Converters
- @defgroup core_optim Optimization Algorithms
- @defgroup core_directx DirectX interoperability
- @defgroup core_eigen Eigen support
- @defgroup core_opencl OpenCL support
- @defgroup core_va_intel Intel VA-API/OpenCL (CL-VA) interoperability
- @defgroup core_hal Hardware Acceleration Layer
- @{
- @defgroup core_hal_functions Functions
- @defgroup core_hal_interface Interface
- @defgroup core_hal_intrin Universal intrinsics
- @{
- @defgroup core_hal_intrin_impl Private implementation helpers
- @}
- @defgroup core_lowlevel_api Low-level API for external libraries / plugins
- @}
- @defgroup core_parallel Parallel Processing
- @{
- @defgroup core_parallel_backend Parallel backends API
- @}
-@}
- */
-
-namespace cv {
-
-//! @addtogroup core_utils
-//! @{
-
-/*! @brief Class passed to an error.
-
-This class encapsulates all or almost all necessary
-information about the error happened in the program. The exception is
-usually constructed and thrown implicitly via CV_Error and CV_Error_ macros.
-@see error
- */
-class CV_EXPORTS Exception : public std::exception
-{
-public:
- /*!
- Default constructor
- */
- Exception();
- /*!
- Full constructor. Normally the constructor is not called explicitly.
- Instead, the macros CV_Error(), CV_Error_() and CV_Assert() are used.
- */
- Exception(int _code, const String& _err, const String& _func, const String& _file, int _line);
- virtual ~Exception() throw();
-
- /*!
- \return the error description and the context as a text string.
- */
- virtual const char *what() const throw() CV_OVERRIDE;
- void formatMessage();
-
- String msg; ///< the formatted error message
-
- int code; ///< error code @see CVStatus
- String err; ///< error description
- String func; ///< function name. Available only when the compiler supports getting it
- String file; ///< source file name where the error has occurred
- int line; ///< line number in the source file where the error has occurred
-};
-
-/*! @brief Signals an error and raises the exception.
-
-By default the function prints information about the error to stderr,
-then it either stops if cv::setBreakOnError() had been called before or raises the exception.
-It is possible to alter error processing by using #redirectError().
-@param exc the exception raised.
-@deprecated drop this version
- */
-CV_EXPORTS CV_NORETURN void error(const Exception& exc);
-
-enum SortFlags { SORT_EVERY_ROW = 0, //!< each matrix row is sorted independently
- SORT_EVERY_COLUMN = 1, //!< each matrix column is sorted
- //!< independently; this flag and the previous one are
- //!< mutually exclusive.
- SORT_ASCENDING = 0, //!< each matrix row is sorted in the ascending
- //!< order.
- SORT_DESCENDING = 16 //!< each matrix row is sorted in the
- //!< descending order; this flag and the previous one are also
- //!< mutually exclusive.
- };
-
-//! @} core_utils
-
-//! @addtogroup core
-//! @{
-
-//! Covariation flags
-enum CovarFlags {
- /** The output covariance matrix is calculated as:
- \f[\texttt{scale} \cdot [ \texttt{vects} [0]- \texttt{mean} , \texttt{vects} [1]- \texttt{mean} ,...]^T \cdot [ \texttt{vects} [0]- \texttt{mean} , \texttt{vects} [1]- \texttt{mean} ,...],\f]
- The covariance matrix will be nsamples x nsamples. Such an unusual covariance matrix is used
- for fast PCA of a set of very large vectors (see, for example, the EigenFaces technique for
- face recognition). Eigenvalues of this "scrambled" matrix match the eigenvalues of the true
- covariance matrix. The "true" eigenvectors can be easily calculated from the eigenvectors of
- the "scrambled" covariance matrix. */
- COVAR_SCRAMBLED = 0,
- /**The output covariance matrix is calculated as:
- \f[\texttt{scale} \cdot [ \texttt{vects} [0]- \texttt{mean} , \texttt{vects} [1]- \texttt{mean} ,...] \cdot [ \texttt{vects} [0]- \texttt{mean} , \texttt{vects} [1]- \texttt{mean} ,...]^T,\f]
- covar will be a square matrix of the same size as the total number of elements in each input
- vector. One and only one of #COVAR_SCRAMBLED and #COVAR_NORMAL must be specified.*/
- COVAR_NORMAL = 1,
- /** If the flag is specified, the function does not calculate mean from
- the input vectors but, instead, uses the passed mean vector. This is useful if mean has been
- pre-calculated or known in advance, or if the covariance matrix is calculated by parts. In
- this case, mean is not a mean vector of the input sub-set of vectors but rather the mean
- vector of the whole set.*/
- COVAR_USE_AVG = 2,
- /** If the flag is specified, the covariance matrix is scaled. In the
- "normal" mode, scale is 1./nsamples . In the "scrambled" mode, scale is the reciprocal of the
- total number of elements in each input vector. By default (if the flag is not specified), the
- covariance matrix is not scaled ( scale=1 ).*/
- COVAR_SCALE = 4,
- /** If the flag is
- specified, all the input vectors are stored as rows of the samples matrix. mean should be a
- single-row vector in this case.*/
- COVAR_ROWS = 8,
- /** If the flag is
- specified, all the input vectors are stored as columns of the samples matrix. mean should be a
- single-column vector in this case.*/
- COVAR_COLS = 16
-};
-
-//! @addtogroup core_cluster
-//! @{
-
-//! k-Means flags
-enum KmeansFlags {
- /** Select random initial centers in each attempt.*/
- KMEANS_RANDOM_CENTERS = 0,
- /** Use kmeans++ center initialization by Arthur and Vassilvitskii [Arthur2007].*/
- KMEANS_PP_CENTERS = 2,
- /** During the first (and possibly the only) attempt, use the
- user-supplied labels instead of computing them from the initial centers. For the second and
- further attempts, use the random or semi-random centers. Use one of KMEANS_\*_CENTERS flag
- to specify the exact method.*/
- KMEANS_USE_INITIAL_LABELS = 1
-};
-
-//! @} core_cluster
-
-//! @addtogroup core_array
-//! @{
-
-enum ReduceTypes { REDUCE_SUM = 0, //!< the output is the sum of all rows/columns of the matrix.
- REDUCE_AVG = 1, //!< the output is the mean vector of all rows/columns of the matrix.
- REDUCE_MAX = 2, //!< the output is the maximum (column/row-wise) of all rows/columns of the matrix.
- REDUCE_MIN = 3, //!< the output is the minimum (column/row-wise) of all rows/columns of the matrix.
- REDUCE_SUM2 = 4 //!< the output is the sum of all squared rows/columns of the matrix.
- };
-
-//! @} core_array
-
-/** @brief Swaps two matrices
-*/
-CV_EXPORTS void swap(Mat& a, Mat& b);
-/** @overload */
-CV_EXPORTS void swap( UMat& a, UMat& b );
-
-//! @} core
-
-//! @addtogroup core_array
-//! @{
-
-/** @brief Computes the source location of an extrapolated pixel.
-
-The function computes and returns the coordinate of a donor pixel corresponding to the specified
-extrapolated pixel when using the specified extrapolation border mode. For example, if you use
-cv::BORDER_WRAP mode in the horizontal direction, cv::BORDER_REFLECT_101 in the vertical direction and
-want to compute value of the "virtual" pixel Point(-5, 100) in a floating-point image img , it
-looks like:
-@code{.cpp}
- float val = img.at<float>(borderInterpolate(100, img.rows, cv::BORDER_REFLECT_101),
- borderInterpolate(-5, img.cols, cv::BORDER_WRAP));
-@endcode
-Normally, the function is not called directly. It is used inside filtering functions and also in
-copyMakeBorder.
-@param p 0-based coordinate of the extrapolated pixel along one of the axes, likely \<0 or \>= len
-@param len Length of the array along the corresponding axis.
-@param borderType Border type, one of the #BorderTypes, except for #BORDER_TRANSPARENT and
-#BORDER_ISOLATED . When borderType==#BORDER_CONSTANT , the function always returns -1, regardless
-of p and len.
-
-@sa copyMakeBorder
-*/
-CV_EXPORTS_W int borderInterpolate(int p, int len, int borderType);
-
-/** @example samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp
-An example using copyMakeBorder function.
-Check @ref tutorial_copyMakeBorder "the corresponding tutorial" for more details
-*/
-
-/** @brief Forms a border around an image.
-
-The function copies the source image into the middle of the destination image. The areas to the
-left, to the right, above and below the copied source image will be filled with extrapolated
-pixels. This is not what filtering functions based on it do (they extrapolate pixels on-fly), but
-what other more complex functions, including your own, may do to simplify image boundary handling.
-
-The function supports the mode when src is already in the middle of dst . In this case, the
-function does not copy src itself but simply constructs the border, for example:
-
-@code{.cpp}
- // let border be the same in all directions
- int border=2;
- // constructs a larger image to fit both the image and the border
- Mat gray_buf(rgb.rows + border*2, rgb.cols + border*2, rgb.depth());
- // select the middle part of it w/o copying data
- Mat gray(gray_buf, Rect(border, border, rgb.cols, rgb.rows));
- // convert image from RGB to grayscale
- cvtColor(rgb, gray, COLOR_RGB2GRAY);
- // form a border in-place
- copyMakeBorder(gray, gray_buf, border, border,
- border, border, BORDER_REPLICATE);
- // now do some custom filtering ...
- ...
-@endcode
-@note When the source image is a part (ROI) of a bigger image, the function will try to use the
-pixels outside of the ROI to form a border. To disable this feature and always do extrapolation, as
-if src was not a ROI, use borderType | #BORDER_ISOLATED.
-
-@param src Source image.
-@param dst Destination image of the same type as src and the size Size(src.cols+left+right,
-src.rows+top+bottom) .
-@param top the top pixels
-@param bottom the bottom pixels
-@param left the left pixels
-@param right Parameter specifying how many pixels in each direction from the source image rectangle
-to extrapolate. For example, top=1, bottom=1, left=1, right=1 mean that 1 pixel-wide border needs
-to be built.
-@param borderType Border type. See borderInterpolate for details.
-@param value Border value if borderType==BORDER_CONSTANT .
-
-@sa borderInterpolate
-*/
-CV_EXPORTS_W void copyMakeBorder(InputArray src, OutputArray dst,
- int top, int bottom, int left, int right,
- int borderType, const Scalar& value = Scalar() );
-
-/** @brief Calculates the per-element sum of two arrays or an array and a scalar.
-
-The function add calculates:
-- Sum of two arrays when both input arrays have the same size and the same number of channels:
-\f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1}(I) + \texttt{src2}(I)) \quad \texttt{if mask}(I) \ne0\f]
-- Sum of an array and a scalar when src2 is constructed from Scalar or has the same number of
-elements as `src1.channels()`:
-\f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1}(I) + \texttt{src2} ) \quad \texttt{if mask}(I) \ne0\f]
-- Sum of a scalar and an array when src1 is constructed from Scalar or has the same number of
-elements as `src2.channels()`:
-\f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1} + \texttt{src2}(I) ) \quad \texttt{if mask}(I) \ne0\f]
-where `I` is a multi-dimensional index of array elements. In case of multi-channel arrays, each
-channel is processed independently.
-
-The first function in the list above can be replaced with matrix expressions:
-@code{.cpp}
- dst = src1 + src2;
- dst += src1; // equivalent to add(dst, src1, dst);
-@endcode
-The input arrays and the output array can all have the same or different depths. For example, you
-can add a 16-bit unsigned array to an 8-bit signed array and store the sum as a 32-bit
-floating-point array. Depth of the output array is determined by the dtype parameter. In the second
-and third cases above, as well as in the first case, when src1.depth() == src2.depth(), dtype can
-be set to the default -1. In this case, the output array will have the same depth as the input
-array, be it src1, src2 or both.
-@note Saturation is not applied when the output array has the depth CV_32S. You may even get
-result of an incorrect sign in the case of overflow.
-@note (Python) Be careful about the different behaviour when src1/src2 is a single number and when it is a tuple/array:
-`add(src,X)` means `add(src,(X,X,X,X))`, while
-`add(src,(X,))` means `add(src,(X,0,0,0))`.
-@param src1 first input array or a scalar.
-@param src2 second input array or a scalar.
-@param dst output array that has the same size and number of channels as the input array(s); the
-depth is defined by dtype or src1/src2.
-@param mask optional operation mask - 8-bit single channel array, that specifies elements of the
-output array to be changed.
-@param dtype optional depth of the output array (see the discussion below).
-@sa subtract, addWeighted, scaleAdd, Mat::convertTo
-*/
-CV_EXPORTS_W void add(InputArray src1, InputArray src2, OutputArray dst,
- InputArray mask = noArray(), int dtype = -1);
-
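A short sketch of how the dtype parameter prevents saturation (editorial example; assumes `<opencv2/core.hpp>` is included):

@code{.cpp}
    cv::Mat a(2, 2, CV_8UC1, cv::Scalar(200));
    cv::Mat b(2, 2, CV_8UC1, cv::Scalar(100));
    cv::Mat sum8, sum16;
    cv::add(a, b, sum8);                          // CV_8U output saturates: 255
    cv::add(a, b, sum16, cv::noArray(), CV_16S);  // CV_16S output keeps 300
@endcode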
-/** @brief Calculates the per-element difference between two arrays or array and a scalar.
-
-The function subtract calculates:
-- Difference between two arrays, when both input arrays have the same size and the same number of
-channels:
- \f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1}(I) - \texttt{src2}(I)) \quad \texttt{if mask}(I) \ne0\f]
-- Difference between an array and a scalar, when src2 is constructed from Scalar or has the same
-number of elements as `src1.channels()`:
- \f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1}(I) - \texttt{src2} ) \quad \texttt{if mask}(I) \ne0\f]
-- Difference between a scalar and an array, when src1 is constructed from Scalar or has the same
-number of elements as `src2.channels()`:
- \f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1} - \texttt{src2}(I) ) \quad \texttt{if mask}(I) \ne0\f]
-- The reverse difference between a scalar and an array in the case of `SubRS`:
- \f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src2} - \texttt{src1}(I) ) \quad \texttt{if mask}(I) \ne0\f]
-where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each
-channel is processed independently.
-
-The first function in the list above can be replaced with matrix expressions:
-@code{.cpp}
- dst = src1 - src2;
- dst -= src1; // equivalent to subtract(dst, src1, dst);
-@endcode
-The input arrays and the output array can all have the same or different depths. For example, you
-can subtract two 8-bit unsigned arrays and store the difference in a 16-bit signed array. Depth of
-the output array is determined by dtype parameter. In the second and third cases above, as well as
-in the first case, when src1.depth() == src2.depth(), dtype can be set to the default -1. In this
-case the output array will have the same depth as the input array, be it src1, src2 or both.
-@note Saturation is not applied when the output array has the depth CV_32S. You may even get
-result of an incorrect sign in the case of overflow.
-@note (Python) Be careful about the different behaviour when src1/src2 is a single number and when it is a tuple/array:
-`subtract(src,X)` means `subtract(src,(X,X,X,X))`, while
-`subtract(src,(X,))` means `subtract(src,(X,0,0,0))`.
-@param src1 first input array or a scalar.
-@param src2 second input array or a scalar.
-@param dst output array of the same size and the same number of channels as the input array.
-@param mask optional operation mask; this is an 8-bit single channel array that specifies elements
-of the output array to be changed.
-@param dtype optional depth of the output array
-@sa add, addWeighted, scaleAdd, Mat::convertTo
- */
-CV_EXPORTS_W void subtract(InputArray src1, InputArray src2, OutputArray dst,
- InputArray mask = noArray(), int dtype = -1);
-
-
-/** @brief Calculates the per-element scaled product of two arrays.
-
-The function multiply calculates the per-element product of two arrays:
-
-\f[\texttt{dst} (I)= \texttt{saturate} ( \texttt{scale} \cdot \texttt{src1} (I) \cdot \texttt{src2} (I))\f]
-
-There is also a @ref MatrixExpressions -friendly variant of the first function. See Mat::mul .
-
-For a not-per-element matrix product, see gemm .
-
-@note Saturation is not applied when the output array has the depth
-CV_32S. You may even get result of an incorrect sign in the case of
-overflow.
-@note (Python) Be careful about the different behaviour when src1/src2 is a single number and when it is a tuple/array:
-`multiply(src,X)` means `multiply(src,(X,X,X,X))`, while
-`multiply(src,(X,))` means `multiply(src,(X,0,0,0))`.
-@param src1 first input array.
-@param src2 second input array of the same size and the same type as src1.
-@param dst output array of the same size and type as src1.
-@param scale optional scale factor.
-@param dtype optional depth of the output array
-@sa add, subtract, divide, scaleAdd, addWeighted, accumulate, accumulateProduct, accumulateSquare,
-Mat::convertTo
-*/
-CV_EXPORTS_W void multiply(InputArray src1, InputArray src2,
- OutputArray dst, double scale = 1, int dtype = -1);
-
-/** @brief Performs per-element division of two arrays or a scalar by an array.
-
-The function cv::divide divides one array by another:
-\f[\texttt{dst(I) = saturate(src1(I)*scale/src2(I))}\f]
-or a scalar by an array when there is no src1 :
-\f[\texttt{dst(I) = saturate(scale/src2(I))}\f]
-
-Different channels of multi-channel arrays are processed independently.
-
-For integer types when src2(I) is zero, dst(I) will also be zero.
-
-@note In case of floating point data there is no special defined behavior for zero src2(I) values.
-Regular floating-point division is used.
-Expect correct IEEE-754 behaviour for floating-point data (with NaN, Inf result values).
-
-@note Saturation is not applied when the output array has the depth CV_32S. You may even get
-result of an incorrect sign in the case of overflow.
-@note (Python) Be careful about the different behaviour when src1/src2 is a single number and when it is a tuple/array:
-`divide(src,X)` means `divide(src,(X,X,X,X))`, while
-`divide(src,(X,))` means `divide(src,(X,0,0,0))`.
-@param src1 first input array.
-@param src2 second input array of the same size and type as src1.
-@param scale scalar factor.
-@param dst output array of the same size and type as src2.
-@param dtype optional depth of the output array; if -1, dst will have depth src2.depth(), but in
-case of an array-by-array division, you can only pass -1 when src1.depth()==src2.depth().
-@sa multiply, add, subtract
-*/
-CV_EXPORTS_W void divide(InputArray src1, InputArray src2, OutputArray dst,
- double scale = 1, int dtype = -1);
-
-/** @overload */
-CV_EXPORTS_W void divide(double scale, InputArray src2,
- OutputArray dst, int dtype = -1);
-
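A small editorial sketch of the scale factor and the documented integer zero-division behaviour (assumes `<opencv2/core.hpp>` is included):

@code{.cpp}
    cv::Mat num(1, 3, CV_32SC1, cv::Scalar(10));
    cv::Mat den = (cv::Mat_<int>(1, 3) << 1, 2, 0);
    cv::Mat q;
    cv::divide(num, den, q, 2.0);  // per-element 10*2/den -> [20, 10, 0]
    // the last element is 0 because integer division by zero yields 0
@endcode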
-/** @brief Calculates the sum of a scaled array and another array.
-
-The function scaleAdd is one of the classical primitive linear algebra operations, known as DAXPY
-or SAXPY in [BLAS](http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms). It calculates
-the sum of a scaled array and another array:
-\f[\texttt{dst} (I)= \texttt{scale} \cdot \texttt{src1} (I) + \texttt{src2} (I)\f]
-The function can also be emulated with a matrix expression, for example:
-@code{.cpp}
- Mat A(3, 3, CV_64F);
- ...
- A.row(0) = A.row(1)*2 + A.row(2);
-@endcode
-@param src1 first input array.
-@param alpha scale factor for the first array.
-@param src2 second input array of the same size and type as src1.
-@param dst output array of the same size and type as src1.
-@sa add, addWeighted, subtract, Mat::dot, Mat::convertTo
-*/
-CV_EXPORTS_W void scaleAdd(InputArray src1, double alpha, InputArray src2, OutputArray dst);
-
-/** @example samples/cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp
-Check @ref tutorial_trackbar "the corresponding tutorial" for more details
-*/
-
-/** @brief Calculates the weighted sum of two arrays.
-
-The function addWeighted calculates the weighted sum of two arrays as follows:
-\f[\texttt{dst} (I)= \texttt{saturate} ( \texttt{src1} (I)* \texttt{alpha} + \texttt{src2} (I)* \texttt{beta} + \texttt{gamma} )\f]
-where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each
-channel is processed independently.
-The function can be replaced with a matrix expression:
-@code{.cpp}
- dst = src1*alpha + src2*beta + gamma;
-@endcode
-@note Saturation is not applied when the output array has the depth CV_32S. You may even get
-result of an incorrect sign in the case of overflow.
-@param src1 first input array.
-@param alpha weight of the first array elements.
-@param src2 second input array of the same size and channel number as src1.
-@param beta weight of the second array elements.
-@param gamma scalar added to each sum.
-@param dst output array that has the same size and number of channels as the input arrays.
-@param dtype optional depth of the output array; when both input arrays have the same depth, dtype
-can be set to -1, which will be equivalent to src1.depth().
-@sa add, subtract, scaleAdd, Mat::convertTo
-*/
-CV_EXPORTS_W void addWeighted(InputArray src1, double alpha, InputArray src2,
- double beta, double gamma, OutputArray dst, int dtype = -1);
-
-/** @brief Scales, calculates absolute values, and converts the result to 8-bit.
-
-On each element of the input array, the function convertScaleAbs
-performs three operations sequentially: scaling, taking an absolute
-value, conversion to an unsigned 8-bit type:
-\f[\texttt{dst} (I)= \texttt{saturate\_cast} (| \texttt{src} (I)* \texttt{alpha} + \texttt{beta} |)\f]
-In case of multi-channel arrays, the function processes each channel
-independently. When the output is not 8-bit, the operation can be
-emulated by calling the Mat::convertTo method (or by using matrix
-expressions) and then by calculating an absolute value of the result.
-For example:
-@code{.cpp}
- Mat_<float> A(30,30);
- randu(A, Scalar(-100), Scalar(100));
- Mat_<float> B = A*5 + 3;
- B = abs(B);
- // Mat_<float> B = abs(A*5+3) will also do the job,
- // but it will allocate a temporary matrix
-@endcode
-@param src input array.
-@param dst output array.
-@param alpha optional scale factor.
-@param beta optional delta added to the scaled values.
-@sa Mat::convertTo, cv::abs(const Mat&)
-*/
-CV_EXPORTS_W void convertScaleAbs(InputArray src, OutputArray dst,
- double alpha = 1, double beta = 0);
-
-/** @brief Converts an array to half precision floating number.
-
-This function converts FP32 (single precision floating point) from/to FP16 (half precision floating point). The CV_16S format is used to represent FP16 data.
-There are two use modes (src -> dst): CV_32F -> CV_16S and CV_16S -> CV_32F. The input array must have type CV_32F or
-CV_16S; if it has neither, the function raises an error.
-The format of half precision floating point is defined in IEEE 754-2008.
-
-@param src input array.
-@param dst output array.
-*/
-CV_EXPORTS_W void convertFp16(InputArray src, OutputArray dst);
-
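A minimal round-trip sketch (editorial example; note that values come back with FP16 precision):

@code{.cpp}
    cv::Mat f32(1, 4, CV_32F, cv::Scalar(3.14f));
    cv::Mat f16, back;
    cv::convertFp16(f32, f16);   // CV_32F -> CV_16S holding FP16 bit patterns
    cv::convertFp16(f16, back);  // CV_16S (FP16) -> CV_32F, e.g. 3.14 -> ~3.1406
@endcode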
-/** @brief Performs a look-up table transform of an array.
-
-The function LUT fills the output array with values from the look-up table. Indices of the entries
-are taken from the input array. That is, the function processes each element of src as follows:
-\f[\texttt{dst} (I) \leftarrow \texttt{lut(src(I) + d)}\f]
-where
-\f[d = \fork{0}{if \(\texttt{src}\) has depth \(\texttt{CV_8U}\)}{128}{if \(\texttt{src}\) has depth \(\texttt{CV_8S}\)}\f]
-@param src input array of 8-bit elements.
-@param lut look-up table of 256 elements; in case of multi-channel input array, the table should
-either have a single channel (in this case the same table is used for all channels) or the same
-number of channels as in the input array.
-@param dst output array of the same size and number of channels as src, and the same depth as lut.
-@sa convertScaleAbs, Mat::convertTo
-*/
-CV_EXPORTS_W void LUT(InputArray src, InputArray lut, OutputArray dst);
-
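For example, a 256-entry table that inverts an 8-bit image (editorial sketch):

@code{.cpp}
    cv::Mat lut(1, 256, CV_8UC1);
    for (int i = 0; i < 256; i++)
        lut.at<uchar>(i) = static_cast<uchar>(255 - i);  // negative transform
    cv::Mat img(64, 64, CV_8UC1, cv::Scalar(10)), inverted;
    cv::LUT(img, lut, inverted);  // every pixel 10 becomes 245
@endcode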
-/** @brief Calculates the sum of array elements.
-
-The function cv::sum calculates and returns the sum of array elements,
-independently for each channel.
-@param src input array that must have from 1 to 4 channels.
-@sa countNonZero, mean, meanStdDev, norm, minMaxLoc, reduce
-*/
-CV_EXPORTS_AS(sumElems) Scalar sum(InputArray src);
-
-/** @brief Checks for the presence of at least one non-zero array element.
-
-The function returns whether there are non-zero elements in src
-@param src single-channel array.
-@sa mean, meanStdDev, norm, minMaxLoc, calcCovarMatrix
-*/
-CV_EXPORTS_W bool hasNonZero( InputArray src );
-
-/** @brief Counts non-zero array elements.
-
-The function returns the number of non-zero elements in src :
-\f[\sum _{I: \; \texttt{src} (I) \ne0 } 1\f]
-@param src single-channel array.
-@sa mean, meanStdDev, norm, minMaxLoc, calcCovarMatrix
-*/
-CV_EXPORTS_W int countNonZero( InputArray src );
-
-/** @brief Returns the list of locations of non-zero pixels
-
-Given a binary matrix (likely returned from an operation such
-as threshold(), compare(), >, ==, etc.), returns all of
-the non-zero indices as a cv::Mat or std::vector<cv::Point> (x,y).
-For example:
-@code{.cpp}
- cv::Mat binaryImage; // input, binary image
- cv::Mat locations; // output, locations of non-zero pixels
- cv::findNonZero(binaryImage, locations);
-
- // access pixel coordinates
- Point pnt = locations.at<Point>(i);
-@endcode
-or
-@code{.cpp}
- cv::Mat binaryImage; // input, binary image
- vector<Point> locations;   // output, locations of non-zero pixels
- cv::findNonZero(binaryImage, locations);
-
- // access pixel coordinates
- Point pnt = locations[i];
-@endcode
-@param src single-channel array
-@param idx the output array, of type cv::Mat or std::vector<Point>, corresponding to non-zero indices in the input
-*/
-CV_EXPORTS_W void findNonZero( InputArray src, OutputArray idx );
-
-/** @brief Calculates an average (mean) of array elements.
-
-The function cv::mean calculates the mean value M of array elements,
-independently for each channel, and returns it:
-\f[\begin{array}{l} N = \sum _{I: \; \texttt{mask} (I) \ne 0} 1 \\ M_c = \left ( \sum _{I: \; \texttt{mask} (I) \ne 0}{ \texttt{mtx} (I)_c} \right )/N \end{array}\f]
-When all the mask elements are 0's, the function returns Scalar::all(0)
-@param src input array that should have from 1 to 4 channels so that the result can be stored in
-Scalar_ .
-@param mask optional operation mask.
-@sa countNonZero, meanStdDev, norm, minMaxLoc
-*/
-CV_EXPORTS_W Scalar mean(InputArray src, InputArray mask = noArray());
-
-/** Calculates a mean and standard deviation of array elements.
-
-The function cv::meanStdDev calculates the mean and the standard deviation M
-of array elements independently for each channel and returns it via the
-output parameters:
-\f[\begin{array}{l} N = \sum _{I, \texttt{mask} (I) \ne 0} 1 \\ \texttt{mean} _c = \frac{\sum_{ I: \; \texttt{mask}(I) \ne 0} \texttt{src} (I)_c}{N} \\ \texttt{stddev} _c = \sqrt{\frac{\sum_{ I: \; \texttt{mask}(I) \ne 0} \left ( \texttt{src} (I)_c - \texttt{mean} _c \right )^2}{N}} \end{array}\f]
-When all the mask elements are 0's, the function returns
-mean=stddev=Scalar::all(0).
-@note The calculated standard deviation is only the diagonal of the
-complete normalized covariance matrix. If the full matrix is needed, you
-can reshape the multi-channel array M x N to the single-channel array
-M\*N x mtx.channels() (only possible when the matrix is continuous) and
-then pass the matrix to calcCovarMatrix .
-@param src input array that should have from 1 to 4 channels so that the results can be stored in
-Scalar_ 's.
-@param mean output parameter: calculated mean value.
-@param stddev output parameter: calculated standard deviation.
-@param mask optional operation mask.
-@sa countNonZero, mean, norm, minMaxLoc, calcCovarMatrix
-*/
-CV_EXPORTS_W void meanStdDev(InputArray src, OutputArray mean, OutputArray stddev,
- InputArray mask=noArray());
-
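A per-channel sketch (editorial example) on a constant image, where the standard deviation is zero:

@code{.cpp}
    cv::Mat img(100, 100, CV_8UC3, cv::Scalar(10, 20, 30));
    cv::Mat m, s;
    cv::meanStdDev(img, m, s);  // m = [10; 20; 30], s = [0; 0; 0], both 3x1 CV_64F
@endcode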
-/** @brief Calculates the absolute norm of an array.
-
-This version of #norm calculates the absolute norm of src1. The type of norm to calculate is specified using #NormTypes.
-
-As example for one array consider the function \f$r(x)= \begin{pmatrix} x \\ 1-x \end{pmatrix}, x \in [-1;1]\f$.
-The \f$ L_{1}, L_{2} \f$ and \f$ L_{\infty} \f$ norm for the sample value \f$r(-1) = \begin{pmatrix} -1 \\ 2 \end{pmatrix}\f$
-is calculated as follows
-\f{align*}
- \| r(-1) \|_{L_1} &= |-1| + |2| = 3 \\
- \| r(-1) \|_{L_2} &= \sqrt{(-1)^{2} + (2)^{2}} = \sqrt{5} \\
- \| r(-1) \|_{L_\infty} &= \max(|-1|,|2|) = 2
-\f}
-and for \f$r(0.5) = \begin{pmatrix} 0.5 \\ 0.5 \end{pmatrix}\f$ the calculation is
-\f{align*}
- \| r(0.5) \|_{L_1} &= |0.5| + |0.5| = 1 \\
- \| r(0.5) \|_{L_2} &= \sqrt{(0.5)^{2} + (0.5)^{2}} = \sqrt{0.5} \\
- \| r(0.5) \|_{L_\infty} &= \max(|0.5|,|0.5|) = 0.5.
-\f}
-The following graphic shows all values for the three norm functions \f$\| r(x) \|_{L_1}, \| r(x) \|_{L_2}\f$ and \f$\| r(x) \|_{L_\infty}\f$.
-It is notable that the \f$ L_{1} \f$ norm forms the upper and the \f$ L_{\infty} \f$ norm forms the lower border for the example function \f$ r(x) \f$.
-![Graphs for the different norm functions from the above example](pics/NormTypes_OneArray_1-2-INF.png)
-
-If normType is not specified, #NORM_L2 is used.
-
-When the mask parameter is specified and it is not empty, the norm is
-calculated only over the region specified by the mask.
-
-Multi-channel input arrays are treated as single-channel arrays, that is,
-the results for all channels are combined.
-
-Hamming norms can only be calculated with CV_8U depth arrays.
-
-@param src1 first input array.
-@param normType type of the norm (see #NormTypes).
-@param mask optional operation mask; it must have the same size as src1 and CV_8UC1 type.
-*/
-CV_EXPORTS_W double norm(InputArray src1, int normType = NORM_L2, InputArray mask = noArray());
-
-/** @brief Calculates an absolute difference norm or a relative difference norm.
-
-This version of cv::norm calculates the absolute difference norm
-or the relative difference norm of arrays src1 and src2.
-The type of norm to calculate is specified using #NormTypes.
-
-@param src1 first input array.
-@param src2 second input array of the same size and the same type as src1.
-@param normType type of the norm (see #NormTypes).
-@param mask optional operation mask; it must have the same size as src1 and CV_8UC1 type.
-*/
-CV_EXPORTS_W double norm(InputArray src1, InputArray src2,
- int normType = NORM_L2, InputArray mask = noArray());
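As an editorial sketch, the absolute and relative L2 difference of two small arrays (#NORM_RELATIVE divides by the norm of src2):

@code{.cpp}
    cv::Mat ref    = (cv::Mat_<float>(1, 3) << 1, 2, 3);
    cv::Mat approx = (cv::Mat_<float>(1, 3) << 1, 2, 4);
    double absErr = cv::norm(ref, approx, cv::NORM_L2);                     // = 1
    double relErr = cv::norm(ref, approx, cv::NORM_L2 | cv::NORM_RELATIVE); // = 1/sqrt(21)
@endcode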
-/** @overload
-@param src first input array.
-@param normType type of the norm (see #NormTypes).
-*/
-CV_EXPORTS double norm( const SparseMat& src, int normType );
-
-/** @brief Computes the Peak Signal-to-Noise Ratio (PSNR) image quality metric.
-
-This function calculates the Peak Signal-to-Noise Ratio (PSNR) image quality metric in decibels (dB),
-between two input arrays src1 and src2. The arrays must have the same type.
-
-The PSNR is calculated as follows:
-
-\f[
-\texttt{PSNR} = 10 \cdot \log_{10}{\left( \frac{R^2}{MSE} \right) }
-\f]
-
-where R is the maximum integer value of depth (e.g. 255 in the case of CV_8U data)
-and MSE is the mean squared error between the two arrays.
-
-@param src1 first input array.
-@param src2 second input array of the same size as src1.
-@param R the maximum pixel value (255 by default)
-
- */
-CV_EXPORTS_W double PSNR(InputArray src1, InputArray src2, double R=255.);
-
-/** @brief naive nearest neighbor finder
-
-see http://en.wikipedia.org/wiki/Nearest_neighbor_search
-@todo document
- */
-CV_EXPORTS_W void batchDistance(InputArray src1, InputArray src2,
- OutputArray dist, int dtype, OutputArray nidx,
- int normType = NORM_L2, int K = 0,
- InputArray mask = noArray(), int update = 0,
- bool crosscheck = false);
-
-/** @brief Normalizes the norm or value range of an array.
-
-The function cv::normalize scales and shifts the input array elements so that
-\f[\| \texttt{dst} \| _{L_p}= \texttt{alpha}\f]
-(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, or NORM_L2, respectively; or so that
-\f[\min _I \texttt{dst} (I)= \texttt{alpha} , \, \, \max _I \texttt{dst} (I)= \texttt{beta}\f]
-
-when normType=NORM_MINMAX (for dense arrays only). The optional mask specifies a sub-array to be
-normalized. This means that the norm or min/max values are calculated over the sub-array, and then this
-sub-array is modified to be normalized. If you want to only use the mask to calculate the norm or
-min-max but modify the whole array, you can use norm and Mat::convertTo.
-
-In case of sparse matrices, only the non-zero values are analyzed and transformed. Because of this,
-the range transformation for sparse matrices is not allowed since it can shift the zero level.
-
-Possible usage with some positive example data:
-@code{.cpp}
- vector<double> positiveData = { 2.0, 8.0, 10.0 };
- vector<double> normalizedData_l1, normalizedData_l2, normalizedData_inf, normalizedData_minmax;
-
- // Norm to probability (total count)
- // sum(numbers) = 20.0
- // 2.0 0.1 (2.0/20.0)
- // 8.0 0.4 (8.0/20.0)
- // 10.0 0.5 (10.0/20.0)
- normalize(positiveData, normalizedData_l1, 1.0, 0.0, NORM_L1);
-
- // Norm to unit vector: ||positiveData|| = 1.0
- // 2.0 0.15
- // 8.0 0.62
- // 10.0 0.77
- normalize(positiveData, normalizedData_l2, 1.0, 0.0, NORM_L2);
-
- // Norm to max element
- // 2.0 0.2 (2.0/10.0)
- // 8.0 0.8 (8.0/10.0)
- // 10.0 1.0 (10.0/10.0)
- normalize(positiveData, normalizedData_inf, 1.0, 0.0, NORM_INF);
-
- // Norm to range [0.0;1.0]
- // 2.0 0.0 (shift to left border)
- // 8.0 0.75 (6.0/8.0)
- // 10.0 1.0 (shift to right border)
- normalize(positiveData, normalizedData_minmax, 1.0, 0.0, NORM_MINMAX);
-@endcode
-
-@param src input array.
-@param dst output array of the same size as src .
-@param alpha norm value to normalize to or the lower range boundary in case of the range
-normalization.
-@param beta upper range boundary in case of the range normalization; it is not used for the norm
-normalization.
-@param norm_type normalization type (see cv::NormTypes).
-@param dtype when negative, the output array has the same type as src; otherwise, it has the same
-number of channels as src and the depth =CV_MAT_DEPTH(dtype).
-@param mask optional operation mask.
-@sa norm, Mat::convertTo, SparseMat::convertTo
-*/
-CV_EXPORTS_W void normalize( InputArray src, InputOutputArray dst, double alpha = 1, double beta = 0,
- int norm_type = NORM_L2, int dtype = -1, InputArray mask = noArray());
-
-/** @overload
-@param src input array.
-@param dst output array of the same size as src .
-@param alpha norm value to normalize to or the lower range boundary in case of the range
-normalization.
-@param normType normalization type (see cv::NormTypes).
-*/
-CV_EXPORTS void normalize( const SparseMat& src, SparseMat& dst, double alpha, int normType );
-
-/** @brief Finds the global minimum and maximum in an array.
-
-The function cv::minMaxLoc finds the minimum and maximum element values and their positions. The
-extrema are searched across the whole array or, if mask is not an empty array, in the specified
-array region.
-
-The function does not work with multi-channel arrays. If you need to find the minimum or maximum
-elements across all the channels, use Mat::reshape first to reinterpret the array as
-single-channel. Alternatively, you may extract the particular channel using extractImageCOI,
-mixChannels, or split.
-@param src input single-channel array.
-@param minVal pointer to the returned minimum value; NULL is used if not required.
-@param maxVal pointer to the returned maximum value; NULL is used if not required.
-@param minLoc pointer to the returned minimum location (in 2D case); NULL is used if not required.
-@param maxLoc pointer to the returned maximum location (in 2D case); NULL is used if not required.
-@param mask optional mask used to select a sub-array.
-@sa max, min, reduceArgMin, reduceArgMax, compare, inRange, extractImageCOI, mixChannels, split, Mat::reshape
-*/
-CV_EXPORTS_W void minMaxLoc(InputArray src, CV_OUT double* minVal,
- CV_OUT double* maxVal = 0, CV_OUT Point* minLoc = 0,
- CV_OUT Point* maxLoc = 0, InputArray mask = noArray());
-
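A compact sketch (editorial example; remember that cv::Point is (x, y), i.e. (column, row)):

@code{.cpp}
    cv::Mat m = (cv::Mat_<float>(2, 2) << 3, 1,
                                          4, 2);
    double minVal, maxVal;
    cv::Point minLoc, maxLoc;
    cv::minMaxLoc(m, &minVal, &maxVal, &minLoc, &maxLoc);
    // minVal = 1 at (1, 0), maxVal = 4 at (0, 1)
@endcode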
-/**
- * @brief Finds indices of min elements along provided axis
- *
- * @note
- * - If the input or output array is not continuous, this function creates an internal copy.
- * - NaN handling is left unspecified, see patchNaNs().
- * - The returned index is always in bounds of input matrix.
- *
- * @param src input single-channel array.
- * @param dst output array of type CV_32SC1 with the same dimensionality as src,
- * except for axis being reduced - it should be set to 1.
- * @param lastIndex whether to get the index of first or last occurrence of min.
- * @param axis axis to reduce along.
- * @sa reduceArgMax, minMaxLoc, min, max, compare, reduce
- */
-CV_EXPORTS_W void reduceArgMin(InputArray src, OutputArray dst, int axis, bool lastIndex = false);
-
-/**
- * @brief Finds indices of max elements along provided axis
- *
- * @note
- * - If the input or output array is not continuous, this function creates an internal copy.
- * - NaN handling is left unspecified, see patchNaNs().
- * - The returned index is always in bounds of input matrix.
- *
- * @param src input single-channel array.
- * @param dst output array of type CV_32SC1 with the same dimensionality as src,
- * except for axis being reduced - it should be set to 1.
- * @param lastIndex whether to get the index of first or last occurrence of max.
- * @param axis axis to reduce along.
- * @sa reduceArgMin, minMaxLoc, min, max, compare, reduce
- */
-CV_EXPORTS_W void reduceArgMax(InputArray src, OutputArray dst, int axis, bool lastIndex = false);
-
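For instance (editorial sketch), a per-row argmax, i.e. reducing along axis 1:

@code{.cpp}
    cv::Mat m = (cv::Mat_<float>(2, 3) << 1, 5, 3,
                                          9, 2, 8);
    cv::Mat idx;
    cv::reduceArgMax(m, idx, 1);  // idx is 2x1 CV_32SC1: [1; 0]
@endcode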
-/** @brief Finds the global minimum and maximum in an array
-
-The function cv::minMaxIdx finds the minimum and maximum element values and their positions. The
-extrema are searched across the whole array or, if mask is not an empty array, in the specified
-array region. The function does not work with multi-channel arrays. If you need to find minimum or
-maximum elements across all the channels, use Mat::reshape first to reinterpret the array as
-single-channel. Alternatively, you may extract the particular channel using extractImageCOI,
-mixChannels, or split. In case of a sparse matrix, the minimum is found among non-zero elements
-only.
-@note When minIdx is not NULL, it must have at least 2 elements (as well as maxIdx), even if src is
-a single-row or single-column matrix. In OpenCV (following MATLAB) each array has at least 2
-dimensions, i.e. single-column matrix is Mx1 matrix (and therefore minIdx/maxIdx will be
-(i1,0)/(i2,0)) and single-row matrix is 1xN matrix (and therefore minIdx/maxIdx will be
-(0,j1)/(0,j2)).
-@param src input single-channel array.
-@param minVal pointer to the returned minimum value; NULL is used if not required.
-@param maxVal pointer to the returned maximum value; NULL is used if not required.
-@param minIdx pointer to the returned minimum location (in nD case); NULL is used if not required;
-Otherwise, it must point to an array of src.dims elements, the coordinates of the minimum element
-in each dimension are stored there sequentially.
-@param maxIdx pointer to the returned maximum location (in nD case). NULL is used if not required.
-@param mask specified array region
-*/
-CV_EXPORTS void minMaxIdx(InputArray src, double* minVal, double* maxVal = 0,
- int* minIdx = 0, int* maxIdx = 0, InputArray mask = noArray());
-
-/** @overload
-@param a input single-channel array.
-@param minVal pointer to the returned minimum value; NULL is used if not required.
-@param maxVal pointer to the returned maximum value; NULL is used if not required.
-@param minIdx pointer to the returned minimum location (in nD case); NULL is used if not required;
-Otherwise, it must point to an array of src.dims elements, the coordinates of the minimum element
-in each dimension are stored there sequentially.
-@param maxIdx pointer to the returned maximum location (in nD case). NULL is used if not required.
-*/
-CV_EXPORTS void minMaxLoc(const SparseMat& a, double* minVal,
- double* maxVal, int* minIdx = 0, int* maxIdx = 0);
-
-/** @brief Reduces a matrix to a vector.
-
-The function #reduce reduces the matrix to a vector by treating the matrix rows/columns as a set of
-1D vectors and performing the specified operation on the vectors until a single row/column is
-obtained. For example, the function can be used to compute horizontal and vertical projections of a
-raster image. In case of #REDUCE_MAX and #REDUCE_MIN , the output image should have the same type as the source one.
-In case of #REDUCE_SUM, #REDUCE_SUM2 and #REDUCE_AVG , the output may have a larger element bit-depth to preserve accuracy.
-Multi-channel arrays are also supported in these reduction modes.
-
-The following code demonstrates its usage for a single channel matrix.
-@snippet snippets/core_reduce.cpp example
-
-And the following code demonstrates its usage for a two-channel matrix.
-@snippet snippets/core_reduce.cpp example2
-
-@param src input 2D matrix.
-@param dst output vector. Its size and type is defined by dim and dtype parameters.
-@param dim dimension index along which the matrix is reduced. 0 means that the matrix is reduced to
-a single row. 1 means that the matrix is reduced to a single column.
-@param rtype reduction operation that could be one of #ReduceTypes
-@param dtype when negative, the output vector will have the same type as the input matrix,
-otherwise, its type will be CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src.channels()).
-@sa repeat, reduceArgMin, reduceArgMax
-*/
-CV_EXPORTS_W void reduce(InputArray src, OutputArray dst, int dim, int rtype, int dtype = -1);
-
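A column-sum sketch (editorial example; dim=0 collapses the matrix to a single row, and a wider dtype avoids 8-bit overflow):

@code{.cpp}
    cv::Mat m = (cv::Mat_<uchar>(2, 3) << 1, 2, 3,
                                          4, 5, 6);
    cv::Mat colSums;
    cv::reduce(m, colSums, 0, cv::REDUCE_SUM, CV_32S);  // 1x3 CV_32S: [5, 7, 9]
@endcode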
-/** @brief Creates one multi-channel array out of several single-channel ones.
-
-The function cv::merge merges several arrays to make a single multi-channel array. That is, each
-element of the output array will be a concatenation of the elements of the input arrays, where
-elements of i-th input array are treated as mv[i].channels()-element vectors.
-
-The function cv::split does the reverse operation. If you need to shuffle channels in some other
-advanced way, use cv::mixChannels.
-
-The following example shows how to merge 3 single channel matrices into a single 3-channel matrix.
-@snippet snippets/core_merge.cpp example
-
-@param mv input array of matrices to be merged; all the matrices in mv must have the same
-size and the same depth.
-@param count number of input matrices when mv is a plain C array; it must be greater than zero.
-@param dst output array of the same size and the same depth as mv[0]; The number of channels will
-be equal to the parameter count.
-@sa mixChannels, split, Mat::reshape
-*/
-CV_EXPORTS void merge(const Mat* mv, size_t count, OutputArray dst);
-
-/** @overload
-@param mv input vector of matrices to be merged; all the matrices in mv must have the same
-size and the same depth.
-@param dst output array of the same size and the same depth as mv[0]; The number of channels will
-be the total number of channels in the matrix array.
- */
-CV_EXPORTS_W void merge(InputArrayOfArrays mv, OutputArray dst);
-
-/** @brief Divides a multi-channel array into several single-channel arrays.
-
-The function cv::split splits a multi-channel array into separate single-channel arrays:
-\f[\texttt{mv} [c](I) = \texttt{src} (I)_c\f]
-If you need to extract a single channel or do some other sophisticated channel permutation, use
-mixChannels .
-
-The following example demonstrates how to split a 3-channel matrix into 3 single channel matrices.
-@snippet snippets/core_split.cpp example
-
-@param src input multi-channel array.
-@param mvbegin output array; the number of arrays must match src.channels(); the arrays themselves are
-reallocated, if needed.
-@sa merge, mixChannels, cvtColor
-*/
-CV_EXPORTS void split(const Mat& src, Mat* mvbegin);
-
-/** @overload
-@param m input multi-channel array.
-@param mv output vector of arrays; the arrays themselves are reallocated, if needed.
-*/
-CV_EXPORTS_W void split(InputArray m, OutputArrayOfArrays mv);
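-
-/* A minimal round-trip sketch (illustrative values): split a 3-channel image into planes,
-   modify one plane, and merge the planes back into a multi-channel image.
-@code{.cpp}
-    #include <opencv2/core.hpp>
-    #include <vector>
-
-    cv::Mat bgr(2, 2, CV_8UC3, cv::Scalar(10, 20, 30));
-    std::vector<cv::Mat> planes;
-    cv::split(bgr, planes);    // planes[0]=B, planes[1]=G, planes[2]=R
-    planes[2].setTo(0);        // zero out the third channel
-    cv::Mat merged;
-    cv::merge(planes, merged); // merged is CV_8UC3 again
-@endcode
-*/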
-
-/** @brief Copies specified channels from input arrays to the specified channels of
-output arrays.
-
-The function cv::mixChannels provides an advanced mechanism for shuffling image channels.
-
-cv::split, cv::merge, cv::extractChannel, cv::insertChannel and some forms of cv::cvtColor are partial cases of cv::mixChannels.
-
-In the example below, the code splits a 4-channel BGRA image into a 3-channel BGR (with B and R
-channels swapped) and a separate alpha-channel image:
-@code{.cpp}
- Mat bgra( 100, 100, CV_8UC4, Scalar(255,0,0,255) );
- Mat bgr( bgra.rows, bgra.cols, CV_8UC3 );
- Mat alpha( bgra.rows, bgra.cols, CV_8UC1 );
-
- // forming an array of matrices is a quite efficient operation,
- // because the matrix data is not copied, only the headers
- Mat out[] = { bgr, alpha };
- // bgra[0] -> bgr[2], bgra[1] -> bgr[1],
- // bgra[2] -> bgr[0], bgra[3] -> alpha[0]
- int from_to[] = { 0,2, 1,1, 2,0, 3,3 };
- mixChannels( &bgra, 1, out, 2, from_to, 4 );
-@endcode
-@note Unlike many other new-style C++ functions in OpenCV (see the introduction section and
-Mat::create ), cv::mixChannels requires the output arrays to be pre-allocated before calling the
-function.
-@param src input array or vector of matrices; all of the matrices must have the same size and the
-same depth.
-@param nsrcs number of matrices in `src`.
-@param dst output array or vector of matrices; all the matrices **must be allocated**; their size and
-depth must be the same as in `src[0]`.
-@param ndsts number of matrices in `dst`.
-@param fromTo array of index pairs specifying which channels are copied and where; fromTo[k\*2] is
-a 0-based index of the input channel in src, fromTo[k\*2+1] is an index of the output channel in
-dst; the continuous channel numbering is used: the first input image channels are indexed from 0 to
-src[0].channels()-1, the second input image channels are indexed from src[0].channels() to
-src[0].channels() + src[1].channels()-1, and so on, the same scheme is used for the output image
-channels; as a special case, when fromTo[k\*2] is negative, the corresponding output channel is
-filled with zero.
-@param npairs number of index pairs in `fromTo`.
-@sa split, merge, extractChannel, insertChannel, cvtColor
-*/
-CV_EXPORTS void mixChannels(const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts,
- const int* fromTo, size_t npairs);
-
-/** @overload
-@param src input array or vector of matrices; all of the matrices must have the same size and the
-same depth.
-@param dst output array or vector of matrices; all the matrices **must be allocated**; their size and
-depth must be the same as in src[0].
-@param fromTo array of index pairs specifying which channels are copied and where; fromTo[k\*2] is
-a 0-based index of the input channel in src, fromTo[k\*2+1] is an index of the output channel in
-dst; the continuous channel numbering is used: the first input image channels are indexed from 0 to
-src[0].channels()-1, the second input image channels are indexed from src[0].channels() to
-src[0].channels() + src[1].channels()-1, and so on, the same scheme is used for the output image
-channels; as a special case, when fromTo[k\*2] is negative, the corresponding output channel is
-filled with zero.
-@param npairs number of index pairs in fromTo.
-*/
-CV_EXPORTS void mixChannels(InputArrayOfArrays src, InputOutputArrayOfArrays dst,
- const int* fromTo, size_t npairs);
-
-/** @overload
-@param src input array or vector of matrices; all of the matrices must have the same size and the
-same depth.
-@param dst output array or vector of matrices; all the matrices **must be allocated**; their size and
-depth must be the same as in src[0].
-@param fromTo array of index pairs specifying which channels are copied and where; fromTo[k\*2] is
-a 0-based index of the input channel in src, fromTo[k\*2+1] is an index of the output channel in
-dst; the continuous channel numbering is used: the first input image channels are indexed from 0 to
-src[0].channels()-1, the second input image channels are indexed from src[0].channels() to
-src[0].channels() + src[1].channels()-1, and so on, the same scheme is used for the output image
-channels; as a special case, when fromTo[k\*2] is negative, the corresponding output channel is
-filled with zero.
-*/
-CV_EXPORTS_W void mixChannels(InputArrayOfArrays src, InputOutputArrayOfArrays dst,
-                              const std::vector<int>& fromTo);
-
-/** @brief Extracts a single channel from src (coi is 0-based index)
-@param src input array
-@param dst output array
-@param coi index of channel to extract
-@sa mixChannels, split
-*/
-CV_EXPORTS_W void extractChannel(InputArray src, OutputArray dst, int coi);
-
-/** @brief Inserts a single channel to dst (coi is 0-based index)
-@param src input array
-@param dst output array
-@param coi index of channel for insertion
-@sa mixChannels, merge
-*/
-CV_EXPORTS_W void insertChannel(InputArray src, InputOutputArray dst, int coi);
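-
-/* A short sketch (illustrative values) combining both functions: pull one channel out,
-   then write it into another channel of the same image.
-@code{.cpp}
-    #include <opencv2/core.hpp>
-
-    cv::Mat bgr(4, 4, CV_8UC3, cv::Scalar(1, 2, 3));
-    cv::Mat green;
-    cv::extractChannel(bgr, green, 1); // green is 4x4 CV_8UC1, all 2s
-    cv::insertChannel(green, bgr, 0);  // overwrite channel 0 with it
-@endcode
-*/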
-
-/** @brief Flips a 2D array around vertical, horizontal, or both axes.
-
-The function cv::flip flips the array in one of three different ways (row
-and column indices are 0-based):
-\f[\texttt{dst} _{ij} =
-\left\{
-\begin{array}{l l}
-\texttt{src} _{\texttt{src.rows}-i-1,j} & if\; \texttt{flipCode} = 0 \\
-\texttt{src} _{i, \texttt{src.cols} -j-1} & if\; \texttt{flipCode} > 0 \\
-\texttt{src} _{ \texttt{src.rows} -i-1, \texttt{src.cols} -j-1} & if\; \texttt{flipCode} < 0 \\
-\end{array}
-\right.\f]
-The example scenarios of using the function are the following:
-* Vertical flipping of the image (flipCode == 0) to switch between
- top-left and bottom-left image origin. This is a typical operation
- in video processing on Microsoft Windows\* OS.
-* Horizontal flipping of the image with the subsequent horizontal
- shift and absolute difference calculation to check for a
- vertical-axis symmetry (flipCode \> 0).
-* Simultaneous horizontal and vertical flipping of the image with
- the subsequent shift and absolute difference calculation to check
- for a central symmetry (flipCode \< 0).
-* Reversing the order of point arrays (flipCode \> 0 or
- flipCode == 0).
-@param src input array.
-@param dst output array of the same size and type as src.
-@param flipCode a flag to specify how to flip the array; 0 means
-flipping around the x-axis and positive value (for example, 1) means
-flipping around y-axis. Negative value (for example, -1) means flipping
-around both axes.
-@sa transpose , repeat , completeSymm
-*/
-CV_EXPORTS_W void flip(InputArray src, OutputArray dst, int flipCode);
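-
-/* A minimal sketch (illustrative values) of the three flipCode cases on a 2x2 matrix:
-@code{.cpp}
-    #include <opencv2/core.hpp>
-
-    cv::Mat m = (cv::Mat_<uchar>(2, 2) << 1, 2,
-                                          3, 4);
-    cv::Mat v, h, b;
-    cv::flip(m, v, 0);  // around x-axis: [3, 4; 1, 2]
-    cv::flip(m, h, 1);  // around y-axis: [2, 1; 4, 3]
-    cv::flip(m, b, -1); // both axes:     [4, 3; 2, 1]
-@endcode
-*/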
-
-/** @brief Flips an n-dimensional array around the given axis
- * @param src input array
- * @param dst output array that has the same shape as src
- * @param axis axis to flip around; 0 <= axis < src.dims.
- */
-CV_EXPORTS_W void flipND(InputArray src, OutputArray dst, int axis);
-
-/** @brief Broadcast the given Mat to the given shape.
- * @param src input array
- * @param shape target shape. Should be a list of CV_32S numbers. Note that negative values are not supported.
- * @param dst output array that has the given shape
- */
-CV_EXPORTS_W void broadcast(InputArray src, InputArray shape, OutputArray dst);
-
-enum RotateFlags {
-    ROTATE_90_CLOCKWISE = 0, //!<Rotate 90 degrees clockwise
-    ROTATE_180 = 1, //!<Rotate 180 degrees clockwise
-    ROTATE_90_COUNTERCLOCKWISE = 2, //!<Rotate 270 degrees clockwise
-};
-/** @brief Rotates a 2D array in multiples of 90 degrees.
-The function cv::rotate rotates the array in one of three different ways:
-*   Rotate by 90 degrees clockwise (rotateCode = ROTATE_90_CLOCKWISE).
-*   Rotate by 180 degrees clockwise (rotateCode = ROTATE_180).
-*   Rotate by 270 degrees clockwise (rotateCode = ROTATE_90_COUNTERCLOCKWISE).
-@param src input array.
-@param dst output array of the same type as src. The size is the same with ROTATE_180,
-and the rows and cols are switched for ROTATE_90_CLOCKWISE and ROTATE_90_COUNTERCLOCKWISE.
-@param rotateCode an enum to specify how to rotate the array; see the enum #RotateFlags
-@sa transpose, repeat, completeSymm, flip, RotateFlags
-*/
-CV_EXPORTS_W void rotate(InputArray src, OutputArray dst, int rotateCode);
-
-/** @brief Applies horizontal concatenation to given matrices.
-
-The function horizontally concatenates two or more cv::Mat matrices (with the same number of rows).
-@code{.cpp}
-    cv::Mat matArray[] = { cv::Mat(4, 1, CV_8UC1, cv::Scalar(1)),
-                           cv::Mat(4, 1, CV_8UC1, cv::Scalar(2)),
-                           cv::Mat(4, 1, CV_8UC1, cv::Scalar(3)),};
-
-    cv::Mat out;
-    cv::hconcat( matArray, 3, out );
-    //out:
-    //[1, 2, 3;
-    // 1, 2, 3;
-    // 1, 2, 3;
-    // 1, 2, 3]
-@endcode
-@param src input array or vector of matrices. All of the matrices must have the same number of rows and the same depth.
-@param nsrc number of matrices in src.
-@param dst output array. It has the same number of rows and depth as the src, and the sum of cols of the src.
-@sa cv::vconcat(const Mat*, size_t, OutputArray), cv::vconcat(InputArrayOfArrays, OutputArray) and cv::vconcat(InputArray, InputArray, OutputArray)
-*/
-CV_EXPORTS void hconcat(const Mat* src, size_t nsrc, OutputArray dst);
-/** @overload
- @code{.cpp}
-    cv::Mat_<float> A = (cv::Mat_<float>(3, 2) << 1, 4,
-                                                  2, 5,
-                                                  3, 6);
-    cv::Mat_<float> B = (cv::Mat_<float>(3, 2) << 7, 10,
-                                                  8, 11,
-                                                  9, 12);
-
-    cv::Mat C;
-    cv::hconcat(A, B, C);
-    //C:
-    //[1, 4, 7, 10;
-    // 2, 5, 8, 11;
-    // 3, 6, 9, 12]
- @endcode
- @param src1 first input array to be considered for horizontal concatenation.
- @param src2 second input array to be considered for horizontal concatenation.
- @param dst output array. It has the same number of rows and depth as the src1 and src2, and the sum of cols of the src1 and src2.
- */
-CV_EXPORTS void hconcat(InputArray src1, InputArray src2, OutputArray dst);
-/** @overload
- @code{.cpp}
-    std::vector<cv::Mat> matrices = { cv::Mat(4, 1, CV_8UC1, cv::Scalar(1)),
-                                      cv::Mat(4, 1, CV_8UC1, cv::Scalar(2)),
-                                      cv::Mat(4, 1, CV_8UC1, cv::Scalar(3)),};
-
- cv::Mat out;
- cv::hconcat( matrices, out );
- //out:
- //[1, 2, 3;
- // 1, 2, 3;
- // 1, 2, 3;
- // 1, 2, 3]
- @endcode
- @param src input array or vector of matrices. All of the matrices must have the same number of rows and the same depth.
- @param dst output array. It has the same number of rows and depth as the src, and the sum of cols of the src.
- */
-CV_EXPORTS_W void hconcat(InputArrayOfArrays src, OutputArray dst);
-
-/** @brief Applies vertical concatenation to given matrices.
-
-The function vertically concatenates two or more cv::Mat matrices (with the same number of cols).
-@code{.cpp}
- cv::Mat matArray[] = { cv::Mat(1, 4, CV_8UC1, cv::Scalar(1)),
- cv::Mat(1, 4, CV_8UC1, cv::Scalar(2)),
- cv::Mat(1, 4, CV_8UC1, cv::Scalar(3)),};
-
- cv::Mat out;
- cv::vconcat( matArray, 3, out );
- //out:
- //[1, 1, 1, 1;
- // 2, 2, 2, 2;
- // 3, 3, 3, 3]
-@endcode
-@param src input array or vector of matrices. All of the matrices must have the same number of cols and the same depth.
-@param nsrc number of matrices in src.
-@param dst output array. It has the same number of cols and depth as the src, and the sum of rows of the src.
-@sa cv::hconcat(const Mat*, size_t, OutputArray), cv::hconcat(InputArrayOfArrays, OutputArray) and cv::hconcat(InputArray, InputArray, OutputArray)
-*/
-CV_EXPORTS void vconcat(const Mat* src, size_t nsrc, OutputArray dst);
-/** @overload
- @code{.cpp}
-    cv::Mat_<float> A = (cv::Mat_<float>(3, 2) << 1, 7,
-                                                  2, 8,
-                                                  3, 9);
-    cv::Mat_<float> B = (cv::Mat_<float>(3, 2) << 4, 10,
-                                                  5, 11,
-                                                  6, 12);
-
- cv::Mat C;
- cv::vconcat(A, B, C);
- //C:
- //[1, 7;
- // 2, 8;
- // 3, 9;
- // 4, 10;
- // 5, 11;
- // 6, 12]
- @endcode
- @param src1 first input array to be considered for vertical concatenation.
- @param src2 second input array to be considered for vertical concatenation.
- @param dst output array. It has the same number of cols and depth as the src1 and src2, and the sum of rows of the src1 and src2.
- */
-CV_EXPORTS void vconcat(InputArray src1, InputArray src2, OutputArray dst);
-/** @overload
- @code{.cpp}
-    std::vector<cv::Mat> matrices = { cv::Mat(1, 4, CV_8UC1, cv::Scalar(1)),
-                                      cv::Mat(1, 4, CV_8UC1, cv::Scalar(2)),
-                                      cv::Mat(1, 4, CV_8UC1, cv::Scalar(3)),};
-
- cv::Mat out;
- cv::vconcat( matrices, out );
- //out:
- //[1, 1, 1, 1;
- // 2, 2, 2, 2;
- // 3, 3, 3, 3]
- @endcode
- @param src input array or vector of matrices. All of the matrices must have the same number of cols and the same depth.
- @param dst output array. It has the same number of cols and depth as the src, and the sum of rows of the src.
- */
-CV_EXPORTS_W void vconcat(InputArrayOfArrays src, OutputArray dst);
-
-/** @brief computes bitwise conjunction of the two arrays (dst = src1 & src2)
-Calculates the per-element bit-wise conjunction of two arrays or an
-array and a scalar.
-
-The function cv::bitwise_and calculates the per-element bit-wise logical conjunction for:
-* Two arrays when src1 and src2 have the same size:
- \f[\texttt{dst} (I) = \texttt{src1} (I) \wedge \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
-* An array and a scalar when src2 is constructed from Scalar or has
- the same number of elements as `src1.channels()`:
- \f[\texttt{dst} (I) = \texttt{src1} (I) \wedge \texttt{src2} \quad \texttt{if mask} (I) \ne0\f]
-* A scalar and an array when src1 is constructed from Scalar or has
- the same number of elements as `src2.channels()`:
- \f[\texttt{dst} (I) = \texttt{src1} \wedge \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
-In case of floating-point arrays, their machine-specific bit
-representations (usually IEEE754-compliant) are used for the operation.
-In case of multi-channel arrays, each channel is processed
-independently. In the second and third cases above, the scalar is first
-converted to the array type.
-@param src1 first input array or a scalar.
-@param src2 second input array or a scalar.
-@param dst output array that has the same size and type as the input
-arrays.
-@param mask optional operation mask, 8-bit single channel array, that
-specifies elements of the output array to be changed.
-*/
-CV_EXPORTS_W void bitwise_and(InputArray src1, InputArray src2,
- OutputArray dst, InputArray mask = noArray());
-
-/** @brief Calculates the per-element bit-wise disjunction of two arrays or an
-array and a scalar.
-
-The function cv::bitwise_or calculates the per-element bit-wise logical disjunction for:
-* Two arrays when src1 and src2 have the same size:
- \f[\texttt{dst} (I) = \texttt{src1} (I) \vee \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
-* An array and a scalar when src2 is constructed from Scalar or has
- the same number of elements as `src1.channels()`:
- \f[\texttt{dst} (I) = \texttt{src1} (I) \vee \texttt{src2} \quad \texttt{if mask} (I) \ne0\f]
-* A scalar and an array when src1 is constructed from Scalar or has
- the same number of elements as `src2.channels()`:
- \f[\texttt{dst} (I) = \texttt{src1} \vee \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
-In case of floating-point arrays, their machine-specific bit
-representations (usually IEEE754-compliant) are used for the operation.
-In case of multi-channel arrays, each channel is processed
-independently. In the second and third cases above, the scalar is first
-converted to the array type.
-@param src1 first input array or a scalar.
-@param src2 second input array or a scalar.
-@param dst output array that has the same size and type as the input
-arrays.
-@param mask optional operation mask, 8-bit single channel array, that
-specifies elements of the output array to be changed.
-*/
-CV_EXPORTS_W void bitwise_or(InputArray src1, InputArray src2,
- OutputArray dst, InputArray mask = noArray());
-
-/** @brief Calculates the per-element bit-wise "exclusive or" operation on two
-arrays or an array and a scalar.
-
-The function cv::bitwise_xor calculates the per-element bit-wise logical "exclusive-or"
-operation for:
-* Two arrays when src1 and src2 have the same size:
- \f[\texttt{dst} (I) = \texttt{src1} (I) \oplus \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
-* An array and a scalar when src2 is constructed from Scalar or has
- the same number of elements as `src1.channels()`:
- \f[\texttt{dst} (I) = \texttt{src1} (I) \oplus \texttt{src2} \quad \texttt{if mask} (I) \ne0\f]
-* A scalar and an array when src1 is constructed from Scalar or has
- the same number of elements as `src2.channels()`:
- \f[\texttt{dst} (I) = \texttt{src1} \oplus \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
-In case of floating-point arrays, their machine-specific bit
-representations (usually IEEE754-compliant) are used for the operation.
-In case of multi-channel arrays, each channel is processed
-independently. In the 2nd and 3rd cases above, the scalar is first
-converted to the array type.
-@param src1 first input array or a scalar.
-@param src2 second input array or a scalar.
-@param dst output array that has the same size and type as the input
-arrays.
-@param mask optional operation mask, 8-bit single channel array, that
-specifies elements of the output array to be changed.
-*/
-CV_EXPORTS_W void bitwise_xor(InputArray src1, InputArray src2,
- OutputArray dst, InputArray mask = noArray());
-
-/** @brief Inverts every bit of an array.
-
-The function cv::bitwise_not calculates per-element bit-wise inversion of the input
-array:
-\f[\texttt{dst} (I) = \neg \texttt{src} (I)\f]
-In case of a floating-point input array, its machine-specific bit
-representation (usually IEEE754-compliant) is used for the operation. In
-case of multi-channel arrays, each channel is processed independently.
-@param src input array.
-@param dst output array that has the same size and type as the input
-array.
-@param mask optional operation mask, 8-bit single channel array, that
-specifies elements of the output array to be changed.
-*/
-CV_EXPORTS_W void bitwise_not(InputArray src, OutputArray dst,
- InputArray mask = noArray());
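-
-/* A common masking idiom built on these functions (illustrative sketch): AND an image
-   with itself under a mask, so only the masked pixels survive. The destination is
-   pre-zeroed because masked operations leave unselected elements unchanged.
-@code{.cpp}
-    #include <opencv2/core.hpp>
-
-    cv::Mat img(3, 3, CV_8UC1, cv::Scalar(200));
-    cv::Mat mask = cv::Mat::zeros(3, 3, CV_8UC1);
-    mask.at<uchar>(1, 1) = 255;            // select only the centre pixel
-    cv::Mat kept = cv::Mat::zeros(3, 3, CV_8UC1);
-    cv::bitwise_and(img, img, kept, mask); // kept: 200 at (1,1), 0 elsewhere
-@endcode
-*/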
-
-/** @brief Calculates the per-element absolute difference between two arrays or between an array and a scalar.
-
-The function cv::absdiff calculates:
-* Absolute difference between two arrays when they have the same
- size and type:
- \f[\texttt{dst}(I) = \texttt{saturate} (| \texttt{src1}(I) - \texttt{src2}(I)|)\f]
-* Absolute difference between an array and a scalar when the second
- array is constructed from Scalar or has as many elements as the
- number of channels in `src1`:
- \f[\texttt{dst}(I) = \texttt{saturate} (| \texttt{src1}(I) - \texttt{src2} |)\f]
-* Absolute difference between a scalar and an array when the first
- array is constructed from Scalar or has as many elements as the
- number of channels in `src2`:
- \f[\texttt{dst}(I) = \texttt{saturate} (| \texttt{src1} - \texttt{src2}(I) |)\f]
- where I is a multi-dimensional index of array elements. In case of
- multi-channel arrays, each channel is processed independently.
-@note Saturation is not applied when the arrays have the depth CV_32S.
-You may even get a negative value in the case of overflow.
-@note (Python) Be careful about the difference in behaviour when src1/src2 is a single number and when it is a tuple/array:
-`absdiff(src,X)` means `absdiff(src,(X,X,X,X))`.
-`absdiff(src,(X,))` means `absdiff(src,(X,0,0,0))`.
-@param src1 first input array or a scalar.
-@param src2 second input array or a scalar.
-@param dst output array that has the same size and type as input arrays.
-@sa cv::abs(const Mat&)
-*/
-CV_EXPORTS_W void absdiff(InputArray src1, InputArray src2, OutputArray dst);
-
-/** @brief This is an overloaded member function, provided for convenience (python).
-Copies the matrix to another one.
-When the operation mask is specified, if the internal Mat::create call reallocates the destination matrix, the newly allocated matrix is initialized with all zeros before copying the data.
-@param src source matrix.
-@param dst Destination matrix. If it does not have a proper size or type before the operation, it is
-reallocated.
-@param mask Operation mask of the same size as \*this. Its non-zero elements indicate which matrix
-elements need to be copied. The mask has to be of type CV_8U and can have 1 or multiple channels.
-*/
-CV_EXPORTS_W void copyTo(InputArray src, OutputArray dst, InputArray mask);
-/** @brief Checks if array elements lie between the elements of two other arrays.
-
-The function checks the range as follows:
-- For every element of a single-channel input array:
- \f[\texttt{dst} (I)= \texttt{lowerb} (I)_0 \leq \texttt{src} (I)_0 \leq \texttt{upperb} (I)_0\f]
-- For two-channel arrays:
- \f[\texttt{dst} (I)= \texttt{lowerb} (I)_0 \leq \texttt{src} (I)_0 \leq \texttt{upperb} (I)_0 \land \texttt{lowerb} (I)_1 \leq \texttt{src} (I)_1 \leq \texttt{upperb} (I)_1\f]
-- and so forth.
-
-That is, dst (I) is set to 255 (all 1 -bits) if src (I) is within the
-specified 1D, 2D, 3D, ... box and 0 otherwise.
-
-When the lower and/or upper boundary parameters are scalars, the indexes
-(I) at lowerb and upperb in the above formulas should be omitted.
-@param src first input array.
-@param lowerb inclusive lower boundary array or a scalar.
-@param upperb inclusive upper boundary array or a scalar.
-@param dst output array of the same size as src and CV_8U type.
-*/
-CV_EXPORTS_W void inRange(InputArray src, InputArray lowerb,
- InputArray upperb, OutputArray dst);
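-
-/* A minimal thresholding sketch (illustrative BGR bounds): build a mask of pixels
-   that lie inside a box around pure blue.
-@code{.cpp}
-    #include <opencv2/core.hpp>
-
-    cv::Mat img(2, 2, CV_8UC3, cv::Scalar(250, 10, 10)); // bluish pixels
-    cv::Mat mask;
-    cv::inRange(img, cv::Scalar(200, 0, 0), cv::Scalar(255, 50, 50), mask);
-    // mask is CV_8UC1 and 255 wherever all three channels are in range
-@endcode
-*/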
-
-/** @brief Performs the per-element comparison of two arrays or an array and scalar value.
-
-The function compares:
-* Elements of two arrays when src1 and src2 have the same size:
- \f[\texttt{dst} (I) = \texttt{src1} (I) \,\texttt{cmpop}\, \texttt{src2} (I)\f]
-* Elements of src1 with a scalar src2 when src2 is constructed from
- Scalar or has a single element:
- \f[\texttt{dst} (I) = \texttt{src1}(I) \,\texttt{cmpop}\, \texttt{src2}\f]
-* src1 with elements of src2 when src1 is constructed from Scalar or
- has a single element:
- \f[\texttt{dst} (I) = \texttt{src1} \,\texttt{cmpop}\, \texttt{src2} (I)\f]
-When the comparison result is true, the corresponding element of output
-array is set to 255. The comparison operations can be replaced with the
-equivalent matrix expressions:
-@code{.cpp}
- Mat dst1 = src1 >= src2;
- Mat dst2 = src1 < 8;
- ...
-@endcode
-@param src1 first input array or a scalar; when it is an array, it must have a single channel.
-@param src2 second input array or a scalar; when it is an array, it must have a single channel.
-@param dst output array of type \ref CV_8U that has the same size and the same number of channels as
- the input arrays.
-@param cmpop a flag, that specifies correspondence between the arrays (cv::CmpTypes)
-@sa checkRange, min, max, threshold
-*/
-CV_EXPORTS_W void compare(InputArray src1, InputArray src2, OutputArray dst, int cmpop);
-
-/** @brief Calculates per-element minimum of two arrays or an array and a scalar.
-
-The function cv::min calculates the per-element minimum of two arrays:
-\f[\texttt{dst} (I)= \min ( \texttt{src1} (I), \texttt{src2} (I))\f]
-or array and a scalar:
-\f[\texttt{dst} (I)= \min ( \texttt{src1} (I), \texttt{value} )\f]
-@param src1 first input array.
-@param src2 second input array of the same size and type as src1.
-@param dst output array of the same size and type as src1.
-@sa max, compare, inRange, minMaxLoc
-*/
-CV_EXPORTS_W void min(InputArray src1, InputArray src2, OutputArray dst);
-/** @overload
-needed to avoid conflicts with const _Tp& std::min(const _Tp&, const _Tp&, _Compare)
-*/
-CV_EXPORTS void min(const Mat& src1, const Mat& src2, Mat& dst);
-/** @overload
-needed to avoid conflicts with const _Tp& std::min(const _Tp&, const _Tp&, _Compare)
-*/
-CV_EXPORTS void min(const UMat& src1, const UMat& src2, UMat& dst);
-
-/** @brief Calculates per-element maximum of two arrays or an array and a scalar.
-
-The function cv::max calculates the per-element maximum of two arrays:
-\f[\texttt{dst} (I)= \max ( \texttt{src1} (I), \texttt{src2} (I))\f]
-or array and a scalar:
-\f[\texttt{dst} (I)= \max ( \texttt{src1} (I), \texttt{value} )\f]
-@param src1 first input array.
-@param src2 second input array of the same size and type as src1 .
-@param dst output array of the same size and type as src1.
-@sa min, compare, inRange, minMaxLoc, @ref MatrixExpressions
-*/
-CV_EXPORTS_W void max(InputArray src1, InputArray src2, OutputArray dst);
-/** @overload
-needed to avoid conflicts with const _Tp& std::max(const _Tp&, const _Tp&, _Compare)
-*/
-CV_EXPORTS void max(const Mat& src1, const Mat& src2, Mat& dst);
-/** @overload
-needed to avoid conflicts with const _Tp& std::max(const _Tp&, const _Tp&, _Compare)
-*/
-CV_EXPORTS void max(const UMat& src1, const UMat& src2, UMat& dst);
-
-/** @brief Calculates a square root of array elements.
-
-The function cv::sqrt calculates a square root of each input array element.
-In case of multi-channel arrays, each channel is processed
-independently. The accuracy is approximately the same as of the built-in
-std::sqrt .
-@param src input floating-point array.
-@param dst output array of the same size and type as src.
-*/
-CV_EXPORTS_W void sqrt(InputArray src, OutputArray dst);
-
-/** @brief Raises every array element to a power.
-
-The function cv::pow raises every element of the input array to power :
-\f[\texttt{dst} (I) = \fork{\texttt{src}(I)^{power}}{if \(\texttt{power}\) is integer}{|\texttt{src}(I)|^{power}}{otherwise}\f]
-
-So, for a non-integer power exponent, the absolute values of input array
-elements are used. However, it is possible to get true values for
-negative values using some extra operations. In the example below,
-computing the 5th root of array src shows:
-@code{.cpp}
- Mat mask = src < 0;
- pow(src, 1./5, dst);
- subtract(Scalar::all(0), dst, dst, mask);
-@endcode
-For some values of power, such as integer values, 0.5 and -0.5,
-specialized faster algorithms are used.
-
-Special values (NaN, Inf) are not handled.
-@param src input array.
-@param power exponent of power.
-@param dst output array of the same size and type as src.
-@sa sqrt, exp, log, cartToPolar, polarToCart
-*/
-CV_EXPORTS_W void pow(InputArray src, double power, OutputArray dst);
-
-/** @brief Calculates the exponent of every array element.
-
-The function cv::exp calculates the exponent of every element of the input
-array:
-\f[\texttt{dst} (I) = e^{ \texttt{src} (I) }\f]
-
-The maximum relative error is about 7e-6 for single-precision input and
-less than 1e-10 for double-precision input. Currently, the function
-converts denormalized values to zeros on output. Special values (NaN,
-Inf) are not handled.
-@param src input array.
-@param dst output array of the same size and type as src.
-@sa log , cartToPolar , polarToCart , phase , pow , sqrt , magnitude
-*/
-CV_EXPORTS_W void exp(InputArray src, OutputArray dst);
-
-/** @brief Calculates the natural logarithm of every array element.
-
-The function cv::log calculates the natural logarithm of every element of the input array:
-\f[\texttt{dst} (I) = \log (\texttt{src}(I)) \f]
-
-Output on zero, negative and special (NaN, Inf) values is undefined.
-
-@param src input array.
-@param dst output array of the same size and type as src .
-@sa exp, cartToPolar, polarToCart, phase, pow, sqrt, magnitude
-*/
-CV_EXPORTS_W void log(InputArray src, OutputArray dst);
-
-/** @brief Calculates x and y coordinates of 2D vectors from their magnitude and angle.
-
-The function cv::polarToCart calculates the Cartesian coordinates of each 2D
-vector represented by the corresponding elements of magnitude and angle:
-\f[\begin{array}{l} \texttt{x} (I) = \texttt{magnitude} (I) \cos ( \texttt{angle} (I)) \\ \texttt{y} (I) = \texttt{magnitude} (I) \sin ( \texttt{angle} (I)) \\ \end{array}\f]
-
-The relative accuracy of the estimated coordinates is about 1e-6.
-@param magnitude input floating-point array of magnitudes of 2D vectors;
-it can be an empty matrix (=Mat()), in this case, the function assumes
-that all the magnitudes are =1; if it is not empty, it must have the
-same size and type as angle.
-@param angle input floating-point array of angles of 2D vectors.
-@param x output array of x-coordinates of 2D vectors; it has the same
-size and type as angle.
-@param y output array of y-coordinates of 2D vectors; it has the same
-size and type as angle.
-@param angleInDegrees when true, the input angles are measured in
-degrees, otherwise, they are measured in radians.
-@sa cartToPolar, magnitude, phase, exp, log, pow, sqrt
-*/
-CV_EXPORTS_W void polarToCart(InputArray magnitude, InputArray angle,
- OutputArray x, OutputArray y, bool angleInDegrees = false);
-
-/** @brief Calculates the magnitude and angle of 2D vectors.
-
-The function cv::cartToPolar calculates either the magnitude, angle, or both
-for every 2D vector (x(I),y(I)):
-\f[\begin{array}{l} \texttt{magnitude} (I)= \sqrt{\texttt{x}(I)^2+\texttt{y}(I)^2} , \\ \texttt{angle} (I)= \texttt{atan2} ( \texttt{y} (I), \texttt{x} (I))[ \cdot180 / \pi ] \end{array}\f]
-
-The angles are calculated with accuracy about 0.3 degrees. For the point
-(0,0), the angle is set to 0.
-@param x array of x-coordinates; this must be a single-precision or
-double-precision floating-point array.
-@param y array of y-coordinates, that must have the same size and same type as x.
-@param magnitude output array of magnitudes of the same size and type as x.
-@param angle output array of angles that has the same size and type as
-x; the angles are measured in radians (from 0 to 2\*Pi) or in degrees (0 to 360 degrees).
-@param angleInDegrees a flag, indicating whether the angles are measured
-in radians (which is by default), or in degrees.
-@sa Sobel, Scharr
-*/
-CV_EXPORTS_W void cartToPolar(InputArray x, InputArray y,
- OutputArray magnitude, OutputArray angle,
- bool angleInDegrees = false);
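-
-/* A minimal sketch (illustrative values): convert two unit vectors to magnitude and
-   angle in degrees.
-@code{.cpp}
-    #include <opencv2/core.hpp>
-    #include <vector>
-
-    std::vector<float> xs = {1.f, 0.f}, ys = {0.f, 1.f};
-    cv::Mat mag, ang;
-    cv::cartToPolar(cv::Mat(xs), cv::Mat(ys), mag, ang, true);
-    // mag = [1; 1], ang is about [0; 90]
-@endcode
-*/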
-
-/** @brief Calculates the rotation angle of 2D vectors.
-
-The function cv::phase calculates the rotation angle of each 2D vector that
-is formed from the corresponding elements of x and y :
-\f[\texttt{angle} (I) = \texttt{atan2} ( \texttt{y} (I), \texttt{x} (I))\f]
-
-The angle estimation accuracy is about 0.3 degrees. When x(I)=y(I)=0 ,
-the corresponding angle(I) is set to 0.
-@param x input floating-point array of x-coordinates of 2D vectors.
-@param y input array of y-coordinates of 2D vectors; it must have the
-same size and the same type as x.
-@param angle output array of vector angles; it has the same size and
-same type as x .
-@param angleInDegrees when true, the function calculates the angle in
-degrees, otherwise, they are measured in radians.
-*/
-CV_EXPORTS_W void phase(InputArray x, InputArray y, OutputArray angle,
- bool angleInDegrees = false);
-
-/** @brief Calculates the magnitude of 2D vectors.
-
-The function cv::magnitude calculates the magnitude of 2D vectors formed
-from the corresponding elements of x and y arrays:
-\f[\texttt{dst} (I) = \sqrt{\texttt{x}(I)^2 + \texttt{y}(I)^2}\f]
-@param x floating-point array of x-coordinates of the vectors.
-@param y floating-point array of y-coordinates of the vectors; it must
-have the same size as x.
-@param magnitude output array of the same size and type as x.
-@sa cartToPolar, polarToCart, phase, sqrt
-*/
-CV_EXPORTS_W void magnitude(InputArray x, InputArray y, OutputArray magnitude);
-
-/** @brief Checks every element of an input array for invalid values.
-
-The function cv::checkRange checks that every array element is neither NaN nor infinite. When minVal \>
--DBL_MAX and maxVal \< DBL_MAX, the function also checks that each value is between minVal and
-maxVal. In case of multi-channel arrays, each channel is processed independently. If some values
-are out of range, position of the first outlier is stored in pos (when pos != NULL). Then, the
-function either returns false (when quiet=true) or throws an exception.
-@param a input array.
-@param quiet a flag, indicating whether the functions quietly return false when the array elements
-are out of range or they throw an exception.
-@param pos optional output parameter, when not NULL, must be a pointer to array of src.dims
-elements.
-@param minVal inclusive lower boundary of valid values range.
-@param maxVal exclusive upper boundary of valid values range.
-*/
-CV_EXPORTS_W bool checkRange(InputArray a, bool quiet = true, CV_OUT Point* pos = 0,
- double minVal = -DBL_MAX, double maxVal = DBL_MAX);
-
-/** @brief Replaces NaNs by given number
-@param a input/output matrix (CV_32F type).
-@param val value to convert the NaNs to
-*/
-CV_EXPORTS_W void patchNaNs(InputOutputArray a, double val = 0);
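-
-/* A sketch (illustrative values) of the two functions together: detect an invalid
-   value without throwing, then overwrite NaNs in place.
-@code{.cpp}
-    #include <opencv2/core.hpp>
-    #include <limits>
-
-    cv::Mat m = (cv::Mat_<float>(1, 3) << 1.f, std::numeric_limits<float>::quiet_NaN(), 3.f);
-    cv::Point bad;
-    if (!cv::checkRange(m, true, &bad)) // quiet=true: return false instead of throwing
-        cv::patchNaNs(m, 0.0);          // m becomes [1, 0, 3]
-@endcode
-*/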
-
-/** @brief Performs generalized matrix multiplication.
-
-The function cv::gemm performs generalized matrix multiplication similar to the
-gemm functions in BLAS level 3. For example,
-`gemm(src1, src2, alpha, src3, beta, dst, GEMM_1_T + GEMM_3_T)`
-corresponds to
-\f[\texttt{dst} = \texttt{alpha} \cdot \texttt{src1} ^T \cdot \texttt{src2} + \texttt{beta} \cdot \texttt{src3} ^T\f]
-
-In case of complex (two-channel) data, a complex matrix
-multiplication is performed.
-
-The function can be replaced with a matrix expression. For example, the
-above call can be replaced with:
-@code{.cpp}
- dst = alpha*src1.t()*src2 + beta*src3.t();
-@endcode
-@param src1 first multiplied input matrix that could be real(CV_32FC1,
-CV_64FC1) or complex(CV_32FC2, CV_64FC2).
-@param src2 second multiplied input matrix of the same type as src1.
-@param alpha weight of the matrix product.
-@param src3 third optional delta matrix added to the matrix product; it
-should have the same type as src1 and src2.
-@param beta weight of src3.
-@param dst output matrix; it has the proper size and the same type as
-input matrices.
-@param flags operation flags (cv::GemmFlags)
-@sa mulTransposed , transform
-*/
-CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha,
- InputArray src3, double beta, OutputArray dst, int flags = 0);
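-
-/* A minimal sketch (illustrative matrices): D = 2*A^T*B + C via the #GEMM_1_T flag.
-@code{.cpp}
-    #include <opencv2/core.hpp>
-
-    cv::Mat A = (cv::Mat_<float>(2, 2) << 1, 2,
-                                          3, 4);
-    cv::Mat B = cv::Mat::eye(2, 2, CV_32F);
-    cv::Mat C = cv::Mat::ones(2, 2, CV_32F);
-    cv::Mat D;
-    cv::gemm(A, B, 2.0, C, 1.0, D, cv::GEMM_1_T); // same as D = 2*A.t()*B + C
-@endcode
-*/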
-
-/** @brief Calculates the product of a matrix and its transposition.
-
-The function cv::mulTransposed calculates the product of src and its
-transposition:
-\f[\texttt{dst} = \texttt{scale} ( \texttt{src} - \texttt{delta} )^T ( \texttt{src} - \texttt{delta} )\f]
-if aTa=true , and
-\f[\texttt{dst} = \texttt{scale} ( \texttt{src} - \texttt{delta} ) ( \texttt{src} - \texttt{delta} )^T\f]
-otherwise. The function is used to calculate the covariance matrix. With
-zero delta, it can be used as a faster substitute for general matrix
-product A\*B when B=A'.
-@param src input single-channel matrix. Note that unlike gemm, the
-function can multiply not only floating-point matrices.
-@param dst output square matrix.
-@param aTa Flag specifying the multiplication ordering. See the
-description below.
-@param delta Optional delta matrix subtracted from src before the
-multiplication. When the matrix is empty ( delta=noArray() ), it is
-assumed to be zero, that is, nothing is subtracted. If it has the same
-size as src , it is simply subtracted. Otherwise, it is "repeated" (see
-repeat ) to cover the full src and then subtracted. Type of the delta
-matrix, when it is not empty, must be the same as the type of created
-output matrix. See the dtype parameter description below.
-@param scale Optional scale factor for the matrix product.
-@param dtype Optional type of the output matrix. When it is negative,
-the output matrix will have the same type as src . Otherwise, it will be
-type=CV_MAT_DEPTH(dtype) that should be either CV_32F or CV_64F .
-@sa calcCovarMatrix, gemm, repeat, reduce
-*/
-CV_EXPORTS_W void mulTransposed( InputArray src, OutputArray dst, bool aTa,
- InputArray delta = noArray(),
- double scale = 1, int dtype = -1 );
-
-/** @brief Transposes a matrix.
-
-The function cv::transpose transposes the matrix src :
-\f[\texttt{dst} (i,j) = \texttt{src} (j,i)\f]
-@note No complex conjugation is done in case of a complex matrix. It
-should be done separately if needed.
-@param src input array.
-@param dst output array of the same type as src.
-*/
-CV_EXPORTS_W void transpose(InputArray src, OutputArray dst);
-
-/** @brief Transpose for n-dimensional matrices.
- *
- * @note Input should be continuous single-channel matrix.
- * @param src input array.
- * @param order a permutation of [0,1,..,N-1] where N is the number of axes of src.
- * The i’th axis of dst will correspond to the axis numbered order[i] of the input.
- * @param dst output array of the same type as src.
- */
-CV_EXPORTS_W void transposeND(InputArray src, const std::vector<int>& order, OutputArray dst);
-
-/** @brief Performs the matrix transformation of every array element.
-
-The function cv::transform performs the matrix transformation of every
-element of the array src and stores the results in dst :
-\f[\texttt{dst} (I) = \texttt{m} \cdot \texttt{src} (I)\f]
-(when m.cols=src.channels() ), or
-\f[\texttt{dst} (I) = \texttt{m} \cdot [ \texttt{src} (I); 1]\f]
-(when m.cols=src.channels()+1 )
-
-Every element of the N -channel array src is interpreted as N -element
-vector that is transformed using the M x N or M x (N+1) matrix m to
-M-element vector - the corresponding element of the output array dst .
-
-The function may be used for geometrical transformation of
-N -dimensional points, arbitrary linear color space transformation (such
-as various kinds of RGB to YUV transforms), shuffling the image
-channels, and so forth.
-@param src input array that must have as many channels (1 to 4) as
-m.cols or m.cols-1.
-@param dst output array of the same size and depth as src; it has as
-many channels as m.rows.
-@param m transformation M x N or M x (N+1) floating-point matrix (for example, 2x2 or 2x3).
-@sa perspectiveTransform, getAffineTransform, estimateAffine2D, warpAffine, warpPerspective
-*/
-CV_EXPORTS_W void transform(InputArray src, OutputArray dst, InputArray m );
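-
-/* A minimal sketch (illustrative weights): a 1x3 matrix applied per element turns a
-   3-channel image into a single-channel weighted sum, i.e. an RGB-to-gray transform.
-@code{.cpp}
-    #include <opencv2/core.hpp>
-
-    cv::Mat rgb(2, 2, CV_32FC3, cv::Scalar(1.f, 1.f, 1.f));
-    cv::Mat w = (cv::Mat_<float>(1, 3) << 0.299f, 0.587f, 0.114f);
-    cv::Mat gray;
-    cv::transform(rgb, gray, w); // gray is 2x2 CV_32FC1, every element 1.0
-@endcode
-*/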
-
-/** @brief Performs the perspective matrix transformation of vectors.
-
-The function cv::perspectiveTransform transforms every element of src by
-treating it as a 2D or 3D vector, in the following way:
-\f[(x, y, z) \rightarrow (x'/w, y'/w, z'/w)\f]
-where
-\f[(x', y', z', w') = \texttt{mat} \cdot \begin{bmatrix} x & y & z & 1 \end{bmatrix}\f]
-and
-\f[w = \fork{w'}{if \(w' \ne 0\)}{\infty}{otherwise}\f]
-
-Here a 3D vector transformation is shown. In case of a 2D vector
-transformation, the z component is omitted.
-
-@note The function transforms a sparse set of 2D or 3D vectors. If you
-want to transform an image using perspective transformation, use
-warpPerspective . If you have an inverse problem, that is, you want to
-compute the most probable perspective transformation out of several
-pairs of corresponding points, you can use getPerspectiveTransform or
-findHomography .
-@param src input two-channel or three-channel floating-point array; each
-element is a 2D/3D vector to be transformed.
-@param dst output array of the same size and type as src.
-@param m 3x3 or 4x4 floating-point transformation matrix.
-@sa transform, warpPerspective, getPerspectiveTransform, findHomography
-*/
-CV_EXPORTS_W void perspectiveTransform(InputArray src, OutputArray dst, InputArray m );
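-
-/* A minimal sketch (illustrative homography): a 3x3 matrix that is a pure translation
-   shifts every 2D point by (5, 0).
-@code{.cpp}
-    #include <opencv2/core.hpp>
-    #include <vector>
-
-    std::vector<cv::Point2f> src = {{0, 0}, {1, 0}, {0, 1}}, dst;
-    cv::Mat H = cv::Mat::eye(3, 3, CV_32F);
-    H.at<float>(0, 2) = 5.f;
-    cv::perspectiveTransform(src, dst, H); // dst = {(5,0), (6,0), (5,1)}
-@endcode
-*/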
-
-/** @brief Copies the lower or the upper half of a square matrix to its another half.
-
-The function cv::completeSymm copies the lower or the upper half of a square matrix to
-its another half. The matrix diagonal remains unchanged:
- - \f$\texttt{m}_{ij}=\texttt{m}_{ji}\f$ for \f$i > j\f$ if
- lowerToUpper=false
- - \f$\texttt{m}_{ij}=\texttt{m}_{ji}\f$ for \f$i < j\f$ if
- lowerToUpper=true
-
-@param m input-output floating-point square matrix.
-@param lowerToUpper operation flag; if true, the lower half is copied to
-the upper half. Otherwise, the upper half is copied to the lower half.
-@sa flip, transpose
-*/
-CV_EXPORTS_W void completeSymm(InputOutputArray m, bool lowerToUpper = false);
-
-/** @brief Initializes a scaled identity matrix.
-
-The function cv::setIdentity initializes a scaled identity matrix:
-\f[\texttt{mtx} (i,j)= \fork{\texttt{value}}{ if \(i=j\)}{0}{otherwise}\f]
-
-The function can also be emulated using the matrix initializers and the
-matrix expressions:
-@code
- Mat A = Mat::eye(4, 3, CV_32F)*5;
- // A will be set to [[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0]]
-@endcode
-@param mtx matrix to initialize (not necessarily square).
-@param s value to assign to diagonal elements.
-@sa Mat::zeros, Mat::ones, Mat::setTo, Mat::operator=
-*/
-CV_EXPORTS_W void setIdentity(InputOutputArray mtx, const Scalar& s = Scalar(1));
-
-/** @brief Returns the determinant of a square floating-point matrix.
-
-The function cv::determinant calculates and returns the determinant of the
-specified matrix. For small matrices ( mtx.cols=mtx.rows\<=3 ), the
-direct method is used. For larger matrices, the function uses LU
-factorization with partial pivoting.
-
-For symmetric positive-definite matrices, it is also possible to use
-eigen decomposition to calculate the determinant.
-@param mtx input matrix that must have CV_32FC1 or CV_64FC1 type and
-square size.
-@sa trace, invert, solve, eigen, @ref MatrixExpressions
-*/
-CV_EXPORTS_W double determinant(InputArray mtx);
-
-/** @brief Returns the trace of a matrix.
-
-The function cv::trace returns the sum of the diagonal elements of the
-matrix mtx .
-\f[\mathrm{tr} ( \texttt{mtx} ) = \sum _i \texttt{mtx} (i,i)\f]
-@param mtx input matrix.
-*/
-CV_EXPORTS_W Scalar trace(InputArray mtx);
-
-/** @brief Finds the inverse or pseudo-inverse of a matrix.
-
-The function cv::invert inverts the matrix src and stores the result in dst
-. When the matrix src is singular or non-square, the function calculates
-the pseudo-inverse matrix (the dst matrix) so that norm(src\*dst - I) is
-minimal, where I is an identity matrix.
-
-In case of the #DECOMP_LU method, the function returns non-zero value if
-the inverse has been successfully calculated and 0 if src is singular.
-
-In case of the #DECOMP_SVD method, the function returns the inverse
-condition number of src (the ratio of the smallest singular value to the
-largest singular value) and 0 if src is singular. The SVD method
-calculates a pseudo-inverse matrix if src is singular.
-
-Similarly to #DECOMP_LU, the method #DECOMP_CHOLESKY works only with
-non-singular square matrices that should also be symmetric and
-positive definite. In this case, the function stores the inverted
-matrix in dst and returns non-zero. Otherwise, it returns 0.
-
-@param src input floating-point M x N matrix.
-@param dst output matrix of N x M size and the same type as src.
-@param flags inversion method (cv::DecompTypes)
-@sa solve, SVD
-*/
-CV_EXPORTS_W double invert(InputArray src, OutputArray dst, int flags = DECOMP_LU);
-
-/** @brief Solves one or more linear systems or least-squares problems.
-
-The function cv::solve solves a linear system or least-squares problem (the
-latter is possible with SVD or QR methods, or by specifying the flag
-#DECOMP_NORMAL ):
-\f[\texttt{dst} = \arg \min _X \| \texttt{src1} \cdot \texttt{X} - \texttt{src2} \|\f]
-
-If #DECOMP_LU or #DECOMP_CHOLESKY method is used, the function returns 1
-if src1 (or \f$\texttt{src1}^T\texttt{src1}\f$ ) is non-singular. Otherwise,
-it returns 0. In the latter case, dst is not valid. Other methods find a
-pseudo-solution in case of a singular left-hand side part.
-
-@note If you want to find a unit-norm solution of an under-determined
-singular system \f$\texttt{src1}\cdot\texttt{dst}=0\f$ , the function solve
-will not do the work. Use SVD::solveZ instead.
-
-@param src1 input matrix on the left-hand side of the system.
-@param src2 input matrix on the right-hand side of the system.
-@param dst output solution.
-@param flags solution (matrix inversion) method (#DecompTypes)
-@sa invert, SVD, eigen
-*/
-CV_EXPORTS_W bool solve(InputArray src1, InputArray src2,
- OutputArray dst, int flags = DECOMP_LU);
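-
-/* A least-squares sketch (illustrative data): fit y = a*x + b to three points by
-   solving the overdetermined system [x 1]*[a; b] = y with #DECOMP_SVD.
-@code{.cpp}
-    #include <opencv2/core.hpp>
-
-    cv::Mat A = (cv::Mat_<float>(3, 2) << 0, 1,
-                                          1, 1,
-                                          2, 1);
-    cv::Mat y = (cv::Mat_<float>(3, 1) << 1, 3, 5);
-    cv::Mat ab;
-    cv::solve(A, y, ab, cv::DECOMP_SVD); // ab = [2; 1], i.e. y = 2x + 1 (exact fit here)
-@endcode
-*/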
-
-/** @brief Sorts each row or each column of a matrix.
-
-The function cv::sort sorts each matrix row or each matrix column in
-ascending or descending order. So you should pass two operation flags to
-get desired behaviour. If you want to sort matrix rows or columns
-lexicographically, you can use STL std::sort generic function with the
-proper comparison predicate.
-
-@param src input single-channel array.
-@param dst output array of the same size and type as src.
-@param flags operation flags, a combination of #SortFlags
-@sa sortIdx, randShuffle
-*/
-CV_EXPORTS_W void sort(InputArray src, OutputArray dst, int flags);
-
-/** @brief Sorts each row or each column of a matrix.
-
-The function cv::sortIdx sorts each matrix row or each matrix column in the
-ascending or descending order. So you should pass two operation flags to
-get desired behaviour. Instead of reordering the elements themselves, it
-stores the indices of sorted elements in the output array. For example:
-@code
- Mat A = Mat::eye(3,3,CV_32F), B;
- sortIdx(A, B, SORT_EVERY_ROW + SORT_ASCENDING);
- // B will probably contain
- // (because of equal elements in A some permutations are possible):
- // [[1, 2, 0], [0, 2, 1], [0, 1, 2]]
-@endcode
-@param src input single-channel array.
-@param dst output integer array of the same size as src.
-@param flags operation flags that could be a combination of cv::SortFlags
-@sa sort, randShuffle
-*/
-CV_EXPORTS_W void sortIdx(InputArray src, OutputArray dst, int flags);
-
-/** @brief Finds the real roots of a cubic equation.
-
-The function solveCubic finds the real roots of a cubic equation:
-- if coeffs is a 4-element vector:
-\f[\texttt{coeffs} [0] x^3 + \texttt{coeffs} [1] x^2 + \texttt{coeffs} [2] x + \texttt{coeffs} [3] = 0\f]
-- if coeffs is a 3-element vector:
-\f[x^3 + \texttt{coeffs} [0] x^2 + \texttt{coeffs} [1] x + \texttt{coeffs} [2] = 0\f]
-
-The roots are stored in the roots array.
-@param coeffs equation coefficients, an array of 3 or 4 elements.
-@param roots output array of real roots that has 1 or 3 elements.
-@return number of real roots. It can be 0, 1 or 2.
-*/
-CV_EXPORTS_W int solveCubic(InputArray coeffs, OutputArray roots);
-
-/** @brief Finds the real or complex roots of a polynomial equation.
-
-The function cv::solvePoly finds real and complex roots of a polynomial equation:
-\f[\texttt{coeffs} [n] x^{n} + \texttt{coeffs} [n-1] x^{n-1} + ... + \texttt{coeffs} [1] x + \texttt{coeffs} [0] = 0\f]
-@param coeffs array of polynomial coefficients.
-@param roots output (complex) array of roots.
-@param maxIters maximum number of iterations the algorithm does.
-*/
-CV_EXPORTS_W double solvePoly(InputArray coeffs, OutputArray roots, int maxIters = 300);
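-
-/* A minimal sketch (illustrative polynomial): note that solvePoly expects coefficients
-   in ascending order of powers, so x^2 - 3x + 2 is passed as {2, -3, 1}.
-@code{.cpp}
-    #include <opencv2/core.hpp>
-    #include <vector>
-
-    std::vector<double> c = {2, -3, 1}; // 2 - 3x + x^2 = 0
-    cv::Mat roots;                      // two-channel output: (re, im) pairs
-    cv::solvePoly(c, roots);            // roots are approximately (1, 0) and (2, 0)
-@endcode
-*/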
-
-/** @brief Calculates eigenvalues and eigenvectors of a symmetric matrix.
-
-The function cv::eigen calculates just eigenvalues, or eigenvalues and eigenvectors of the symmetric
-matrix src:
-@code
-    src*eigenvectors.row(i).t() = eigenvalues.at<srcType>(i)*eigenvectors.row(i).t()
-@endcode
-
-@note Use cv::eigenNonSymmetric for calculation of real eigenvalues and eigenvectors of non-symmetric matrix.
-
-@param src input matrix that must have CV_32FC1 or CV_64FC1 type, square size and be symmetrical
-(src ^T^ == src).
-@param eigenvalues output vector of eigenvalues of the same type as src; the eigenvalues are stored
-in the descending order.
-@param eigenvectors output matrix of eigenvectors; it has the same size and type as src; the
-eigenvectors are stored as subsequent matrix rows, in the same order as the corresponding
-eigenvalues.
-@sa eigenNonSymmetric, completeSymm , PCA
-*/
-CV_EXPORTS_W bool eigen(InputArray src, OutputArray eigenvalues,
- OutputArray eigenvectors = noArray());
-
-/** @brief Calculates eigenvalues and eigenvectors of a non-symmetric matrix (real eigenvalues only).
-
-@note Assumes real eigenvalues.
-
-The function calculates eigenvalues and eigenvectors (optional) of the square matrix src:
-@code
-    src*eigenvectors.row(i).t() = eigenvalues.at<srcType>(i)*eigenvectors.row(i).t()
-@endcode
-
-@param src input matrix (CV_32FC1 or CV_64FC1 type).
-@param eigenvalues output vector of eigenvalues (type is the same type as src).
-@param eigenvectors output matrix of eigenvectors (type is the same type as src). The eigenvectors are stored as subsequent matrix rows, in the same order as the corresponding eigenvalues.
-@sa eigen
-*/
-CV_EXPORTS_W void eigenNonSymmetric(InputArray src, OutputArray eigenvalues,
- OutputArray eigenvectors);
-
-/** @brief Calculates the covariance matrix of a set of vectors.
-
-The function cv::calcCovarMatrix calculates the covariance matrix and, optionally, the mean vector of
-the set of input vectors.
-@param samples samples stored as separate matrices
-@param nsamples number of samples
-@param covar output covariance matrix of the type ctype and square size.
-@param mean input or output (depending on the flags) array as the average value of the input vectors.
-@param flags operation flags as a combination of #CovarFlags
-@param ctype type of the matrix; it equals 'CV_64F' by default.
-@sa PCA, mulTransposed, Mahalanobis
-@todo InputArrayOfArrays
-*/
-CV_EXPORTS void calcCovarMatrix( const Mat* samples, int nsamples, Mat& covar, Mat& mean,
- int flags, int ctype = CV_64F);
-
-/** @overload
-@note use #COVAR_ROWS or #COVAR_COLS flag
-@param samples samples stored as rows/columns of a single matrix.
-@param covar output covariance matrix of the type ctype and square size.
-@param mean input or output (depending on the flags) array as the average value of the input vectors.
-@param flags operation flags as a combination of #CovarFlags
-@param ctype type of the matrix; it equals 'CV_64F' by default.
-*/
-CV_EXPORTS_W void calcCovarMatrix( InputArray samples, OutputArray covar,
- InputOutputArray mean, int flags, int ctype = CV_64F);
-
-/** wrap PCA::operator() */
-CV_EXPORTS_W void PCACompute(InputArray data, InputOutputArray mean,
- OutputArray eigenvectors, int maxComponents = 0);
-
-/** wrap PCA::operator() and add eigenvalues output parameter */
-CV_EXPORTS_AS(PCACompute2) void PCACompute(InputArray data, InputOutputArray mean,
- OutputArray eigenvectors, OutputArray eigenvalues,
- int maxComponents = 0);
-
-/** wrap PCA::operator() */
-CV_EXPORTS_W void PCACompute(InputArray data, InputOutputArray mean,
- OutputArray eigenvectors, double retainedVariance);
-
-/** wrap PCA::operator() and add eigenvalues output parameter */
-CV_EXPORTS_AS(PCACompute2) void PCACompute(InputArray data, InputOutputArray mean,
- OutputArray eigenvectors, OutputArray eigenvalues,
- double retainedVariance);
-
-/** wrap PCA::project */
-CV_EXPORTS_W void PCAProject(InputArray data, InputArray mean,
- InputArray eigenvectors, OutputArray result);
-
-/** wrap PCA::backProject */
-CV_EXPORTS_W void PCABackProject(InputArray data, InputArray mean,
- InputArray eigenvectors, OutputArray result);
-
-/** wrap SVD::compute */
-CV_EXPORTS_W void SVDecomp( InputArray src, OutputArray w, OutputArray u, OutputArray vt, int flags = 0 );
-
-/** wrap SVD::backSubst */
-CV_EXPORTS_W void SVBackSubst( InputArray w, InputArray u, InputArray vt,
- InputArray rhs, OutputArray dst );
-
-/** @brief Calculates the Mahalanobis distance between two vectors.
-
-The function cv::Mahalanobis calculates and returns the weighted distance between two vectors:
-\f[d( \texttt{vec1} , \texttt{vec2} )= \sqrt{\sum_{i,j}{\texttt{icovar}(i,j)\cdot(\texttt{vec1}(i)-\texttt{vec2}(i))\cdot(\texttt{vec1}(j)-\texttt{vec2}(j))} }\f]
-The covariance matrix may be calculated using the #calcCovarMatrix function and then inverted using
-the invert function (preferably using the #DECOMP_SVD method, as the most accurate).
-@param v1 first 1D input vector.
-@param v2 second 1D input vector.
-@param icovar inverse covariance matrix.
-*/
-CV_EXPORTS_W double Mahalanobis(InputArray v1, InputArray v2, InputArray icovar);
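-
-/* A sanity-check sketch (illustrative values): with an identity inverse covariance the
-   Mahalanobis distance reduces to the Euclidean distance.
-@code{.cpp}
-    #include <opencv2/core.hpp>
-
-    cv::Mat v1 = (cv::Mat_<double>(1, 2) << 0, 0);
-    cv::Mat v2 = (cv::Mat_<double>(1, 2) << 3, 4);
-    cv::Mat icovar = cv::Mat::eye(2, 2, CV_64F);
-    double d = cv::Mahalanobis(v1, v2, icovar); // d == 5
-@endcode
-*/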
-
-/** @brief Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating-point array.
-
-The function cv::dft performs one of the following:
-- Forward the Fourier transform of a 1D vector of N elements:
- \f[Y = F^{(N)} \cdot X,\f]
- where \f$F^{(N)}_{jk}=\exp(-2\pi i j k/N)\f$ and \f$i=\sqrt{-1}\f$
-- Inverse the Fourier transform of a 1D vector of N elements:
-  \f[\begin{array}{l} X'= \left (F^{(N)} \right )^{-1} \cdot Y = \left (F^{(N)} \right )^* \cdot Y \\ X = (1/N) \cdot X', \end{array}\f]
- where \f$F^*=\left(\textrm{Re}(F^{(N)})-\textrm{Im}(F^{(N)})\right)^T\f$
-- Forward the 2D Fourier transform of a M x N matrix:
- \f[Y = F^{(M)} \cdot X \cdot F^{(N)}\f]
-- Inverse the 2D Fourier transform of a M x N matrix:
- \f[\begin{array}{l} X'= \left (F^{(M)} \right )^* \cdot Y \cdot \left (F^{(N)} \right )^* \\ X = \frac{1}{M \cdot N} \cdot X' \end{array}\f]
-
-In case of real (single-channel) data, the output spectrum of the forward Fourier transform or input
-spectrum of the inverse Fourier transform can be represented in a packed format called *CCS*
-(complex-conjugate-symmetrical). It was borrowed from IPL (Intel\* Image Processing Library). Here
-is how 2D *CCS* spectrum looks:
-\f[\begin{bmatrix} Re Y_{0,0} & Re Y_{0,1} & Im Y_{0,1} & Re Y_{0,2} & Im Y_{0,2} & \cdots & Re Y_{0,N/2-1} & Im Y_{0,N/2-1} & Re Y_{0,N/2} \\ Re Y_{1,0} & Re Y_{1,1} & Im Y_{1,1} & Re Y_{1,2} & Im Y_{1,2} & \cdots & Re Y_{1,N/2-1} & Im Y_{1,N/2-1} & Re Y_{1,N/2} \\ Im Y_{1,0} & Re Y_{2,1} & Im Y_{2,1} & Re Y_{2,2} & Im Y_{2,2} & \cdots & Re Y_{2,N/2-1} & Im Y_{2,N/2-1} & Im Y_{1,N/2} \\ \hdotsfor{9} \\ Re Y_{M/2-1,0} & Re Y_{M-3,1} & Im Y_{M-3,1} & \hdotsfor{3} & Re Y_{M-3,N/2-1} & Im Y_{M-3,N/2-1}& Re Y_{M/2-1,N/2} \\ Im Y_{M/2-1,0} & Re Y_{M-2,1} & Im Y_{M-2,1} & \hdotsfor{3} & Re Y_{M-2,N/2-1} & Im Y_{M-2,N/2-1}& Im Y_{M/2-1,N/2} \\ Re Y_{M/2,0} & Re Y_{M-1,1} & Im Y_{M-1,1} & \hdotsfor{3} & Re Y_{M-1,N/2-1} & Im Y_{M-1,N/2-1}& Re Y_{M/2,N/2} \end{bmatrix}\f]
-
-In case of 1D transform of a real vector, the output looks like the first row of the matrix above.
-
-So, the function chooses an operation mode depending on the flags and size of the input array:
-- If #DFT_ROWS is set or the input array has a single row or single column, the function
- performs a 1D forward or inverse transform of each row of a matrix when #DFT_ROWS is set.
- Otherwise, it performs a 2D transform.
-- If the input array is real and #DFT_INVERSE is not set, the function performs a forward 1D or
- 2D transform:
- - When #DFT_COMPLEX_OUTPUT is set, the output is a complex matrix of the same size as
- input.
- - When #DFT_COMPLEX_OUTPUT is not set, the output is a real matrix of the same size as
- input. In case of 2D transform, it uses the packed format as shown above. In case of a
- single 1D transform, it looks like the first row of the matrix above. In case of
- multiple 1D transforms (when using the #DFT_ROWS flag), each row of the output matrix
- looks like the first row of the matrix above.
-- If the input array is complex and neither #DFT_INVERSE nor #DFT_REAL_OUTPUT is set, the
- output is a complex array of the same size as input. The function performs a forward or
- inverse 1D or 2D transform of the whole input array or each row of the input array
- independently, depending on the flags DFT_INVERSE and DFT_ROWS.
-- When #DFT_INVERSE is set and the input array is real, or it is complex but #DFT_REAL_OUTPUT
- is set, the output is a real array of the same size as input. The function performs a 1D or 2D
- inverse transformation of the whole input array or each individual row, depending on the flags
- #DFT_INVERSE and #DFT_ROWS.
-
-If #DFT_SCALE is set, the scaling is done after the transformation.
-
-Unlike dct , the function supports arrays of arbitrary size. But only those arrays are processed
-efficiently, whose sizes can be factorized in a product of small prime numbers (2, 3, and 5 in the
-current implementation). Such an efficient DFT size can be calculated using the getOptimalDFTSize
-method.
-
-The sample below illustrates how to calculate a DFT-based convolution of two 2D real arrays:
-@code
- void convolveDFT(InputArray A, InputArray B, OutputArray C)
- {
- // reallocate the output array if needed
- C.create(abs(A.rows - B.rows)+1, abs(A.cols - B.cols)+1, A.type());
- Size dftSize;
- // calculate the size of DFT transform
- dftSize.width = getOptimalDFTSize(A.cols + B.cols - 1);
- dftSize.height = getOptimalDFTSize(A.rows + B.rows - 1);
-
- // allocate temporary buffers and initialize them with 0's
- Mat tempA(dftSize, A.type(), Scalar::all(0));
- Mat tempB(dftSize, B.type(), Scalar::all(0));
-
- // copy A and B to the top-left corners of tempA and tempB, respectively
- Mat roiA(tempA, Rect(0,0,A.cols,A.rows));
- A.copyTo(roiA);
- Mat roiB(tempB, Rect(0,0,B.cols,B.rows));
- B.copyTo(roiB);
-
- // now transform the padded A & B in-place;
- // use "nonzeroRows" hint for faster processing
- dft(tempA, tempA, 0, A.rows);
- dft(tempB, tempB, 0, B.rows);
-
- // multiply the spectrums;
- // the function handles packed spectrum representations well
-        mulSpectrums(tempA, tempB, tempA, 0);
-
- // transform the product back from the frequency domain.
- // Even though all the result rows will be non-zero,
- // you need only the first C.rows of them, and thus you
- // pass nonzeroRows == C.rows
- dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, C.rows);
-
- // now copy the result back to C.
- tempA(Rect(0, 0, C.cols, C.rows)).copyTo(C);
-
- // all the temporary buffers will be deallocated automatically
- }
-@endcode
-To optimize this sample, consider the following approaches:
-- Since nonzeroRows != 0 is passed to the forward transform calls and since A and B are copied to
- the top-left corners of tempA and tempB, respectively, it is not necessary to clear the whole
- tempA and tempB. It is only necessary to clear the tempA.cols - A.cols ( tempB.cols - B.cols)
- rightmost columns of the matrices.
-- This DFT-based convolution does not have to be applied to the whole big arrays, especially if B
- is significantly smaller than A or vice versa. Instead, you can calculate convolution by parts.
- To do this, you need to split the output array C into multiple tiles. For each tile, estimate
- which parts of A and B are required to calculate convolution in this tile. If the tiles in C are
- too small, the speed will decrease a lot because of repeated work. In the ultimate case, when
- each tile in C is a single pixel, the algorithm becomes equivalent to the naive convolution
- algorithm. If the tiles are too big, the temporary arrays tempA and tempB become too big and
- there is also a slowdown because of bad cache locality. So, there is an optimal tile size
- somewhere in the middle.
-- If different tiles in C can be calculated in parallel and, thus, the convolution is done by
- parts, the loop can be threaded.
-
-All of the above improvements have been implemented in #matchTemplate and #filter2D. Therefore, by
-using them, you can get performance even better than with the above theoretically optimal
-implementation. Note, however, that those two functions actually calculate cross-correlation, not
-convolution, so you need to "flip" the second convolution operand B vertically and horizontally using flip .
-@note
-- An example using the discrete Fourier transform can be found at
- opencv_source_code/samples/cpp/dft.cpp
-- (Python) An example using the dft functionality to perform Wiener deconvolution can be found
- at opencv_source/samples/python/deconvolution.py
-- (Python) An example rearranging the quadrants of a Fourier image can be found at
- opencv_source/samples/python/dft.py
-@param src input array that could be real or complex.
-@param dst output array whose size and type depends on the flags .
-@param flags transformation flags, representing a combination of the #DftFlags
-@param nonzeroRows when the parameter is not zero, the function assumes that only the first
-nonzeroRows rows of the input array (#DFT_INVERSE is not set) or only the first nonzeroRows rows of
-the output array (#DFT_INVERSE is set) contain non-zeros; thus, the function can handle the rest of
-the rows more efficiently and save some time; this technique is very useful for calculating array
-cross-correlation or convolution using DFT.
-@sa dct , getOptimalDFTSize , mulSpectrums, filter2D , matchTemplate , flip , cartToPolar ,
-magnitude , phase
-*/
-CV_EXPORTS_W void dft(InputArray src, OutputArray dst, int flags = 0, int nonzeroRows = 0);
-
-/** @brief Calculates the inverse Discrete Fourier Transform of a 1D or 2D array.
-
-idft(src, dst, flags) is equivalent to dft(src, dst, flags | #DFT_INVERSE) .
-@note None of dft and idft scales the result by default. So, you should pass #DFT_SCALE to one of
-dft or idft explicitly to make these transforms mutually inverse.
-@sa dft, dct, idct, mulSpectrums, getOptimalDFTSize
-@param src input floating-point real or complex array.
-@param dst output array whose size and type depend on the flags.
-@param flags operation flags (see dft and #DftFlags).
-@param nonzeroRows number of dst rows to process; the rest of the rows have undefined content (see
-the convolution sample in the dft description).
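-
-A minimal round-trip sketch: since neither transform scales the result, #DFT_SCALE is passed once,
-on the inverse call, so that the original values are recovered:
-@code
- Mat x = (Mat_<float>(1,4) << 1, 2, 3, 4), f, y;
- dft(x, f); // forward transform (CCS-packed output for real input)
- idft(f, y, DFT_SCALE); // scaled inverse restores x
-@endcode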
-*/
-CV_EXPORTS_W void idft(InputArray src, OutputArray dst, int flags = 0, int nonzeroRows = 0);
-
-/** @brief Performs a forward or inverse discrete Cosine transform of 1D or 2D array.
-
-The function cv::dct performs a forward or inverse discrete Cosine transform (DCT) of a 1D or 2D
-floating-point array:
-- Forward Cosine transform of a 1D vector of N elements:
- \f[Y = C^{(N)} \cdot X\f]
- where
- \f[C^{(N)}_{jk}= \sqrt{\alpha_j/N} \cos \left ( \frac{\pi(2k+1)j}{2N} \right )\f]
- and
- \f$\alpha_0=1\f$, \f$\alpha_j=2\f$ for *j \> 0*.
-- Inverse Cosine transform of a 1D vector of N elements:
- \f[X = \left (C^{(N)} \right )^{-1} \cdot Y = \left (C^{(N)} \right )^T \cdot Y\f]
- (since \f$C^{(N)}\f$ is an orthogonal matrix, \f$C^{(N)} \cdot \left(C^{(N)}\right)^T = I\f$ )
-- Forward 2D Cosine transform of M x N matrix:
- \f[Y = C^{(N)} \cdot X \cdot \left (C^{(N)} \right )^T\f]
-- Inverse 2D Cosine transform of M x N matrix:
- \f[X = \left (C^{(N)} \right )^T \cdot Y \cdot C^{(N)}\f]
-
-The function chooses the mode of operation by looking at the flags and size of the input array:
-- If (flags & #DCT_INVERSE) == 0 , the function does a forward 1D or 2D transform. Otherwise, it
- is an inverse 1D or 2D transform.
-- If (flags & #DCT_ROWS) != 0 , the function performs a 1D transform of each row.
-- If the array is a single column or a single row, the function performs a 1D transform.
-- If none of the above is true, the function performs a 2D transform.
-
-@note Currently dct supports even-size arrays (2, 4, 6 ...). For data analysis and approximation, you
-can pad the array when necessary.
-Also, the function performance depends very much, and not monotonically, on the array size (see
-getOptimalDFTSize ). In the current implementation DCT of a vector of size N is calculated via DFT
-of a vector of size N/2 . Thus, the optimal DCT size N1 \>= N can be calculated as:
-@code
- size_t getOptimalDCTSize(size_t N) { return 2*getOptimalDFTSize((N+1)/2); }
- N1 = getOptimalDCTSize(N);
-@endcode
-@param src input floating-point array.
-@param dst output array of the same size and type as src .
-@param flags transformation flags as a combination of cv::DftFlags (DCT_*)
-@sa dft , getOptimalDFTSize , idct
-*/
-CV_EXPORTS_W void dct(InputArray src, OutputArray dst, int flags = 0);
-
-/** @brief Calculates the inverse Discrete Cosine Transform of a 1D or 2D array.
-
-idct(src, dst, flags) is equivalent to dct(src, dst, flags | DCT_INVERSE).
-@param src input floating-point single-channel array.
-@param dst output array of the same size and type as src.
-@param flags operation flags.
-@sa dct, dft, idft, getOptimalDFTSize
-*/
-CV_EXPORTS_W void idct(InputArray src, OutputArray dst, int flags = 0);
-
-/** @brief Performs the per-element multiplication of two Fourier spectrums.
-
-The function cv::mulSpectrums performs the per-element multiplication of the two CCS-packed or complex
-matrices that are results of a real or complex Fourier transform.
-
-The function, together with dft and idft , may be used to calculate convolution (pass conjB=false )
-or correlation (pass conjB=true ) of two arrays rapidly. When the arrays are complex, they are
-simply multiplied (per element) with an optional conjugation of the second-array elements. When the
-arrays are real, they are assumed to be CCS-packed (see dft for details).
-@param a first input array.
-@param b second input array of the same size and type as a.
-@param c output array of the same size and type as a.
-@param flags operation flags; currently, the only supported flag is cv::DFT_ROWS, which indicates that
-each row of a and b is an independent 1D Fourier spectrum. If you do not want to use this flag, simply pass `0`.
-@param conjB optional flag that conjugates the second input array before the multiplication (true)
-or not (false).
-*/
-CV_EXPORTS_W void mulSpectrums(InputArray a, InputArray b, OutputArray c,
- int flags, bool conjB = false);
-
-/** @brief Returns the optimal DFT size for a given vector size.
-
-DFT performance is not a monotonic function of a vector size. Therefore, when you calculate
-convolution of two arrays or perform the spectral analysis of an array, it usually makes sense to
-pad the input data with zeros to get a bit larger array that can be transformed much faster than the
-original one. Arrays whose size is a power of two (2, 4, 8, 16, 32, ...) are the fastest to
-process, though arrays whose size is a product of 2's, 3's, and 5's (for example, 300 = 5\*5\*3\*2\*2)
-are also processed quite efficiently.
-
-The function cv::getOptimalDFTSize returns the minimum number N that is greater than or equal to vecsize
-so that the DFT of a vector of size N can be processed efficiently. In the current implementation N
-= 2 ^p^ \* 3 ^q^ \* 5 ^r^ for some integer p, q, r.
-
-The function returns a negative number if vecsize is too large (very close to INT_MAX ).
-
-While the function cannot be used directly to estimate the optimal vector size for DCT transform
-(since the current DCT implementation supports only even-size vectors), it can be easily computed
-as getOptimalDFTSize((vecsize+1)/2)\*2.
-@param vecsize vector size.
-@sa dft , dct , idft , idct , mulSpectrums
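-
-A typical padding sketch before a forward transform (img is assumed to be any 2D single-channel matrix):
-@code
- Mat padded;
- int m = getOptimalDFTSize(img.rows);
- int n = getOptimalDFTSize(img.cols);
- copyMakeBorder(img, padded, 0, m - img.rows, 0, n - img.cols,
- BORDER_CONSTANT, Scalar::all(0));
-@endcode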
-*/
-CV_EXPORTS_W int getOptimalDFTSize(int vecsize);
-
-/** @brief Returns the default random number generator.
-
-The function cv::theRNG returns the default random number generator. For each thread, there is a
-separate random number generator, so you can use the function safely in multi-thread environments.
-If you just need to get a single random number using this generator or initialize an array, you can
-use randu or randn instead. But if you are going to generate many random numbers inside a loop, it
-is much faster to use this function to retrieve the generator and then use RNG::operator _Tp() .
-@sa RNG, randu, randn
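-
-A short sketch of the loop case described above:
-@code
- std::vector<float> values(100);
- RNG& rng = theRNG();
- for( size_t i = 0; i < values.size(); i++ )
- values[i] = (float)rng; // cheaper than calling randu for each element
-@endcode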
-*/
-CV_EXPORTS RNG& theRNG();
-
-/** @brief Sets state of default random number generator.
-
-The function cv::setRNGSeed sets the state of the default random number generator to the custom value.
-@param seed new state for the default random number generator
-@sa RNG, randu, randn
-*/
-CV_EXPORTS_W void setRNGSeed(int seed);
-
-/** @brief Generates a single uniformly-distributed random number or an array of random numbers.
-
-Non-template variant of the function fills the matrix dst with uniformly-distributed
-random numbers from the specified range:
-\f[\texttt{low} _c \leq \texttt{dst} (I)_c < \texttt{high} _c\f]
-@param dst output array of random numbers; the array must be pre-allocated.
-@param low inclusive lower boundary of the generated random numbers.
-@param high exclusive upper boundary of the generated random numbers.
-@sa RNG, randn, theRNG
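-
-For example, to fill an 8-bit color image with uniform noise covering the full value range:
-@code
- Mat img(240, 320, CV_8UC3);
- randu(img, Scalar::all(0), Scalar::all(256)); // the upper boundary is exclusive
-@endcode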
-*/
-CV_EXPORTS_W void randu(InputOutputArray dst, InputArray low, InputArray high);
-
-/** @brief Fills the array with normally distributed random numbers.
-
-The function cv::randn fills the matrix dst with normally distributed random numbers with the specified
-mean vector and the standard deviation matrix. The generated random numbers are clipped to fit the
-value range of the output array data type.
-@param dst output array of random numbers; the array must be pre-allocated and have 1 to 4 channels.
-@param mean mean value (expectation) of the generated random numbers.
-@param stddev standard deviation of the generated random numbers; it can be either a vector (in
-which case a diagonal standard deviation matrix is assumed) or a square matrix.
-@sa RNG, randu
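-
-For example, to generate 8-bit Gaussian noise with mean 128 and standard deviation 20:
-@code
- Mat noise(240, 320, CV_8U);
- randn(noise, Scalar::all(128), Scalar::all(20)); // values are clipped to [0, 255]
-@endcode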
-*/
-CV_EXPORTS_W void randn(InputOutputArray dst, InputArray mean, InputArray stddev);
-
-/** @brief Shuffles the array elements randomly.
-
-The function cv::randShuffle shuffles the specified 1D array by randomly choosing pairs of elements and
-swapping them. The number of such swap operations will be dst.rows\*dst.cols\*iterFactor .
-@param dst input/output numerical 1D array.
-@param iterFactor scale factor that determines the number of random swap operations (see the
-description above).
-@param rng optional random number generator used for shuffling; if it is zero, theRNG () is used
-instead.
-@sa RNG, sort
-*/
-CV_EXPORTS_W void randShuffle(InputOutputArray dst, double iterFactor = 1., RNG* rng = 0);
-
-/** @brief Principal Component Analysis
-
-The class is used to calculate a special basis for a set of vectors. The
-basis will consist of eigenvectors of the covariance matrix calculated
-from the input set of vectors. The class %PCA can also transform
-vectors to/from the new coordinate space defined by the basis. Usually,
-in this new coordinate system, each vector from the original set (and
-any linear combination of such vectors) can be quite accurately
-approximated by taking its first few components, corresponding to the
-eigenvectors of the largest eigenvalues of the covariance matrix.
-Geometrically it means that you calculate a projection of the vector to
-a subspace formed by a few eigenvectors corresponding to the dominant
-eigenvalues of the covariance matrix. And usually such a projection is
-very close to the original vector. So, you can represent the original
-vector from a high-dimensional space with a much shorter vector
-consisting of the projected vector's coordinates in the subspace. Such a
-transformation is also known as Karhunen-Loeve Transform, or KLT.
-See http://en.wikipedia.org/wiki/Principal_component_analysis
-
-The sample below shows a function that takes two matrices. The first
-matrix stores a set of vectors (a row per vector) that is used to
-calculate PCA. The second matrix stores another "test" set of vectors
-(a row per vector). First, these vectors are compressed with PCA, then
-reconstructed back, and then the reconstruction error norm is computed
-and printed for each vector:
-
-@code{.cpp}
-using namespace cv;
-
-PCA compressPCA(const Mat& pcaset, int maxComponents,
- const Mat& testset, Mat& compressed)
-{
- PCA pca(pcaset, // pass the data
- Mat(), // we do not have a pre-computed mean vector,
- // so let the PCA engine compute it
- PCA::DATA_AS_ROW, // indicate that the vectors
- // are stored as matrix rows
- // (use PCA::DATA_AS_COL if the vectors are
- // the matrix columns)
- maxComponents // specify how many principal components to retain
- );
- // if there is no test data, just return the computed basis, ready-to-use
- if( !testset.data )
- return pca;
- CV_Assert( testset.cols == pcaset.cols );
-
- compressed.create(testset.rows, maxComponents, testset.type());
-
- for( int i = 0; i < testset.rows; i++ )
- {
- Mat vec = testset.row(i), coeffs = compressed.row(i), reconstructed;
- // compress the vector, the result will be stored
- // in the i-th row of the output matrix
- pca.project(vec, coeffs);
- // and then reconstruct it
- pca.backProject(coeffs, reconstructed);
- // and measure the error
- printf("%d. diff = %g\n", i, norm(vec, reconstructed, NORM_L2));
- }
- return pca;
-}
-@endcode
-@sa calcCovarMatrix, mulTransposed, SVD, dft, dct
-*/
-class CV_EXPORTS PCA
-{
-public:
- enum Flags { DATA_AS_ROW = 0, //!< indicates that the input samples are stored as matrix rows
- DATA_AS_COL = 1, //!< indicates that the input samples are stored as matrix columns
- USE_AVG = 2 //!< indicates that a user-supplied mean vector is used instead of one computed from the data
- };
-
- /** @brief default constructor
-
- The default constructor initializes an empty %PCA structure. The other
- constructors initialize the structure and call PCA::operator()().
- */
- PCA();
-
- /** @overload
- @param data input samples stored as matrix rows or matrix columns.
- @param mean optional mean value; if the matrix is empty (@c noArray()),
- the mean is computed from the data.
- @param flags operation flags; currently the parameter is only used to
- specify the data layout (PCA::Flags)
- @param maxComponents maximum number of components that %PCA should
- retain; by default, all the components are retained.
- */
- PCA(InputArray data, InputArray mean, int flags, int maxComponents = 0);
-
- /** @overload
- @param data input samples stored as matrix rows or matrix columns.
- @param mean optional mean value; if the matrix is empty (noArray()),
- the mean is computed from the data.
- @param flags operation flags; currently the parameter is only used to
- specify the data layout (PCA::Flags)
- @param retainedVariance Percentage of variance that PCA should retain.
- Using this parameter will let the PCA decide how many components to
- retain, but it will always keep at least 2.
- */
- PCA(InputArray data, InputArray mean, int flags, double retainedVariance);
-
- /** @brief performs %PCA
-
- The operator performs %PCA of the supplied dataset. It is safe to reuse
- the same PCA structure for multiple datasets. That is, if the structure
- has been previously used with another dataset, the existing internal
- data is reclaimed and the new @ref eigenvalues, @ref eigenvectors and @ref
- mean are allocated and computed.
-
- The computed @ref eigenvalues are sorted from the largest to the smallest and
- the corresponding @ref eigenvectors are stored as eigenvectors rows.
-
- @param data input samples stored as the matrix rows or as the matrix
- columns.
- @param mean optional mean value; if the matrix is empty (noArray()),
- the mean is computed from the data.
- @param flags operation flags; currently the parameter is only used to
- specify the data layout. (Flags)
- @param maxComponents maximum number of components that PCA should
- retain; by default, all the components are retained.
- */
- PCA& operator()(InputArray data, InputArray mean, int flags, int maxComponents = 0);
-
- /** @overload
- @param data input samples stored as the matrix rows or as the matrix
- columns.
- @param mean optional mean value; if the matrix is empty (noArray()),
- the mean is computed from the data.
- @param flags operation flags; currently the parameter is only used to
- specify the data layout. (PCA::Flags)
- @param retainedVariance Percentage of variance that %PCA should retain.
- Using this parameter will let the %PCA decide how many components to
- retain, but it will always keep at least 2.
- */
- PCA& operator()(InputArray data, InputArray mean, int flags, double retainedVariance);
-
- /** @brief Projects vector(s) to the principal component subspace.
-
- The methods project one or more vectors to the principal component
- subspace, where each vector projection is represented by coefficients in
- the principal component basis. The first form of the method returns the
- matrix that the second form writes to the result argument. So the first form
- can be used as a part of an expression while the second form can be more
- efficient in a processing loop.
- @param vec input vector(s); must have the same dimensionality and the
- same layout as the input data used at the %PCA phase, that is, if
- PCA::DATA_AS_ROW is specified, then `vec.cols==data.cols`
- (vector dimensionality) and `vec.rows` is the number of vectors to
- project, and the same is true for the PCA::DATA_AS_COL case.
- */
- Mat project(InputArray vec) const;
-
- /** @overload
- @param vec input vector(s); must have the same dimensionality and the
- same layout as the input data used at the PCA phase, that is, if
- PCA::DATA_AS_ROW is specified, then `vec.cols==data.cols`
- (vector dimensionality) and `vec.rows` is the number of vectors to
- project, and the same is true for the PCA::DATA_AS_COL case.
- @param result output vectors; in case of PCA::DATA_AS_COL, the
- output matrix has as many columns as the number of input vectors; this
- means that `result.cols==vec.cols` and the number of rows matches the
- number of principal components (for example, `maxComponents` parameter
- passed to the constructor).
- */
- void project(InputArray vec, OutputArray result) const;
-
- /** @brief Reconstructs vectors from their PC projections.
-
- The methods are inverse operations to PCA::project. They take PC
- coordinates of projected vectors and reconstruct the original vectors.
- Unless all the principal components have been retained, the
- reconstructed vectors are different from the originals. But typically,
- the difference is small if the number of components is large enough (but
- still much smaller than the original vector dimensionality), which is
- why %PCA is commonly used for dimensionality reduction and compression.
- @param vec coordinates of the vectors in the principal component
- subspace, the layout and size are the same as of PCA::project output
- vectors.
- */
- Mat backProject(InputArray vec) const;
-
- /** @overload
- @param vec coordinates of the vectors in the principal component
- subspace, the layout and size are the same as of PCA::project output
- vectors.
- @param result reconstructed vectors; the layout and size are the same as
- of PCA::project input vectors.
- */
- void backProject(InputArray vec, OutputArray result) const;
-
- /** @brief write PCA objects
-
- Writes @ref eigenvalues, @ref eigenvectors and @ref mean to the specified FileStorage
- */
- void write(FileStorage& fs) const;
-
- /** @brief load PCA objects
-
- Loads @ref eigenvalues, @ref eigenvectors and @ref mean from the specified FileNode
- */
- void read(const FileNode& fn);
-
- Mat eigenvectors; //!< eigenvectors of the covariance matrix
- Mat eigenvalues; //!< eigenvalues of the covariance matrix
- Mat mean; //!< mean value subtracted before the projection and added after the back projection
-};
-
-/** @example samples/cpp/pca.cpp
-An example using %PCA for dimensionality reduction while retaining a given amount of variance
-*/
-
-/** @example samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp
-Check @ref tutorial_introduction_to_pca "the corresponding tutorial" for more details
-*/
-
-/**
-@brief Linear Discriminant Analysis
-@todo document this class
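-
-A minimal usage sketch, assuming samples and labels have been filled beforehand:
-@code
- Mat samples; // one sample per row, e.g. CV_64F
- Mat labels; // one integer class label per sample
- LDA lda(samples, labels); // compute the discriminants
- Mat projected = lda.project(samples); // into the LDA subspace
- Mat rebuilt = lda.reconstruct(projected); // back to the original space
-@endcode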
-*/
-class CV_EXPORTS LDA
-{
-public:
- /** @brief constructor
- Initializes an LDA with num_components (default 0).
- */
- explicit LDA(int num_components = 0);
-
- /** Initializes and performs a Discriminant Analysis with Fisher's
- Optimization Criterion on the given data in src and the corresponding labels
- in labels. If the number of components given is 0 (or less), it is
- determined automatically from the given data during computation.
- */
- LDA(InputArrayOfArrays src, InputArray labels, int num_components = 0);
-
- /** Serializes this object to a given filename.
- */
- void save(const String& filename) const;
-
- /** Deserializes this object from a given filename.
- */
- void load(const String& filename);
-
- /** Serializes this object to a given cv::FileStorage.
- */
- void save(FileStorage& fs) const;
-
- /** Deserializes this object from a given cv::FileStorage.
- */
- void load(const FileStorage& node);
-
- /** destructor
- */
- ~LDA();
-
- /** Compute the discriminants for data in src (row aligned) and labels.
- */
- void compute(InputArrayOfArrays src, InputArray labels);
-
- /** Projects samples into the LDA subspace.
- src may be one or more row aligned samples.
- */
- Mat project(InputArray src);
-
- /** Reconstructs projections from the LDA subspace.
- src may be one or more row aligned projections.
- */
- Mat reconstruct(InputArray src);
-
- /** Returns the eigenvectors of this LDA.
- */
- Mat eigenvectors() const { return _eigenvectors; }
-
- /** Returns the eigenvalues of this LDA.
- */
- Mat eigenvalues() const { return _eigenvalues; }
-
- static Mat subspaceProject(InputArray W, InputArray mean, InputArray src);
- static Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src);
-
-protected:
- int _num_components;
- Mat _eigenvectors;
- Mat _eigenvalues;
- void lda(InputArrayOfArrays src, InputArray labels);
-};
-
-/** @brief Singular Value Decomposition
-
-Class for computing the Singular Value Decomposition of a floating-point
-matrix. The Singular Value Decomposition is used to solve least-squares
-problems, under-determined linear systems, invert matrices, compute
-condition numbers, and so on.
-
-If you want to compute a condition number of a matrix or an absolute value of
-its determinant, you do not need `u` and `vt`. You can pass
-flags=SVD::NO_UV|... . Another flag SVD::FULL_UV indicates that full-size u
-and vt must be computed, which is not necessary most of the time.
-
-@sa invert, solve, eigen, determinant
-*/
-class CV_EXPORTS SVD
-{
-public:
- enum Flags {
- /** allow the algorithm to modify the decomposed matrix; it can save space and speed up
- processing. Currently ignored. */
- MODIFY_A = 1,
- /** indicates that only a vector of singular values `w` is to be processed, while u and vt
- will be set to empty matrices */
- NO_UV = 2,
- /** when the matrix is not square, by default the algorithm produces u and vt matrices of
- sufficiently large size for the further A reconstruction; if, however, FULL_UV flag is
- specified, u and vt will be full-size square orthogonal matrices.*/
- FULL_UV = 4
- };
-
- /** @brief the default constructor
-
- initializes an empty SVD structure
- */
- SVD();
-
- /** @overload
- initializes an empty SVD structure and then calls SVD::operator()
- @param src decomposed matrix. The depth has to be CV_32F or CV_64F.
- @param flags operation flags (SVD::Flags)
- */
- SVD( InputArray src, int flags = 0 );
-
- /** @brief the operator that performs SVD. The previously allocated u, w and vt are released.
-
- The operator performs the singular value decomposition of the supplied
- matrix. The `u`, `vt`, and the vector of singular values `w` are stored in
- the structure. The same SVD structure can be reused many times with
- different matrices. Each time, if needed, the previous `u`, `vt` and `w`
- are reclaimed and the new matrices are created, which is all handled by
- Mat::create.
- @param src decomposed matrix. The depth has to be CV_32F or CV_64F.
- @param flags operation flags (SVD::Flags)
- */
- SVD& operator ()( InputArray src, int flags = 0 );
-
- /** @brief decomposes matrix and stores the results to user-provided matrices
-
- The methods/functions perform SVD of matrix. Unlike SVD::SVD constructor
- and SVD::operator(), they store the results to the user-provided
- matrices:
-
- @code{.cpp}
- Mat A, w, u, vt;
- SVD::compute(A, w, u, vt);
- @endcode
-
- @param src decomposed matrix. The depth has to be CV_32F or CV_64F.
- @param w calculated singular values
- @param u calculated left singular vectors
- @param vt transposed matrix of right singular vectors
- @param flags operation flags - see SVD::Flags.
- */
- static void compute( InputArray src, OutputArray w,
- OutputArray u, OutputArray vt, int flags = 0 );
-
- /** @overload
- computes singular values of a matrix
- @param src decomposed matrix. The depth has to be CV_32F or CV_64F.
- @param w calculated singular values
- @param flags operation flags - see SVD::Flags.
- */
- static void compute( InputArray src, OutputArray w, int flags = 0 );
-
- /** @brief performs back substitution
- */
- static void backSubst( InputArray w, InputArray u,
- InputArray vt, InputArray rhs,
- OutputArray dst );
-
- /** @brief solves an under-determined singular linear system
-
- The method finds a unit-length solution x of a singular linear system
- A\*x = 0. Depending on the rank of A, there can be no solutions, a
- single solution or an infinite number of solutions. In general, the
- algorithm solves the following problem:
- \f[dst = \arg \min _{x: \| x \| =1} \| src \cdot x \|\f]
- @param src left-hand-side matrix.
- @param dst found solution.
- */
- static void solveZ( InputArray src, OutputArray dst );
-
- /** @brief performs a singular value back substitution.
-
- The method calculates a back substitution for the specified right-hand
- side:
-
- \f[\texttt{x} = \texttt{vt} ^T \cdot diag( \texttt{w} )^{-1} \cdot \texttt{u} ^T \cdot \texttt{rhs} \sim \texttt{A} ^{-1} \cdot \texttt{rhs}\f]
-
- Using this technique you can either get a very accurate solution of the
- consistent linear system, or the best (in the least-squares terms)
- pseudo-solution of an overdetermined linear system.
-
- @param rhs right-hand side of a linear system (u\*w\*v')\*dst = rhs to
- be solved, where A has been previously decomposed.
-
- @param dst found solution of the system.
-
- @note Explicit SVD with the further back substitution only makes sense
- if you need to solve many linear systems with the same left-hand side
- (for example, src ). If all you need is to solve a single system
- (possibly with multiple rhs immediately available), simply call solve
- and pass #DECOMP_SVD there. It does absolutely the same thing.
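-
- For example, a sketch reusing one decomposition for several right-hand sides:
- @code{.cpp}
- Mat A = (Mat_<double>(2,2) << 2, 1, 1, 3);
- Mat b1 = (Mat_<double>(2,1) << 3, 5), b2 = (Mat_<double>(2,1) << 1, 0);
- SVD svd(A);
- Mat x1, x2;
- svd.backSubst(b1, x1); // solves A*x1 = b1
- svd.backSubst(b2, x2); // reuses the same decomposition for b2
- @endcode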
- */
- void backSubst( InputArray rhs, OutputArray dst ) const;
-
- /** @todo document */
- template<typename _Tp, int m, int n, int nm> static
- void compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt );
-
- /** @todo document */
- template<typename _Tp, int m, int n, int nm> static
- void compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w );
-
- /** @todo document */
- template<typename _Tp, int m, int n, int nm, int nb> static
- void backSubst( const Matx<_Tp, nm, 1>& w, const Matx<_Tp, m, nm>& u, const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, Matx<_Tp, n, nb>& dst );
-
- Mat u, w, vt;
-};
-
-/** @brief Random Number Generator
-
-Random number generator. It encapsulates the state (currently, a 64-bit
-integer) and has methods to return scalar random values and to fill
-arrays with random values. Currently it supports uniform and Gaussian
-(normal) distributions. The generator uses the Multiply-With-Carry
-algorithm, introduced by G. Marsaglia
-(<http://en.wikipedia.org/wiki/Multiply-with-carry>).
-Gaussian-distribution random numbers are generated using the Ziggurat
-algorithm (<http://en.wikipedia.org/wiki/Ziggurat_algorithm>),
-introduced by G. Marsaglia and W. W. Tsang.
-*/
-class CV_EXPORTS RNG
-{
-public:
- enum { UNIFORM = 0,
- NORMAL = 1
- };
-
- /** @brief constructor
-
- These are the RNG constructors. The first form sets the state to some
- pre-defined value, equal to 2\*\*32-1 in the current implementation. The
- second form sets the state to the specified value. If you passed state=0
- , the constructor uses the above default value instead to avoid the
- singular random number sequence, consisting of all zeros.
- */
- RNG();
- /** @overload
- @param state 64-bit value used to initialize the RNG.
- */
- RNG(uint64 state);
- /**The method updates the state using the MWC algorithm and returns the
- next 32-bit random number.*/
- unsigned next();
-
- /**Each of the methods updates the state using the MWC algorithm and
- returns the next random number of the specified type. In case of integer
- types, the returned number is from the available value range for the
- specified type. In case of floating-point types, the returned value is
- from [0,1) range.
- */
- operator uchar();
- /** @overload */
- operator schar();
- /** @overload */
- operator ushort();
- /** @overload */
- operator short();
- /** @overload */
- operator unsigned();
- /** @overload */
- operator int();
- /** @overload */
- operator float();
- /** @overload */
- operator double();
-
- /** @brief returns a random integer sampled uniformly from [0, N).
-
- The methods transform the state using the MWC algorithm and return the
- next random number. The first form is equivalent to RNG::next . The
- second form returns the random number modulo N , which means that the
- result is in the range [0, N) .
- */
- unsigned operator ()();
- /** @overload
- @param N upper non-inclusive boundary of the returned random number.
- */
- unsigned operator ()(unsigned N);
-
- /** @brief returns uniformly distributed integer random number from [a,b) range
-
- The methods transform the state using the MWC algorithm and return the
- next uniformly-distributed random number of the specified type, deduced
- from the input parameter type, from the range [a, b) . There is a nuance
- illustrated by the following sample:
-
- @code{.cpp}
- RNG rng;
-
- // always produces 0
- double a = rng.uniform(0, 1);
-
- // produces double from [0, 1)
- double a1 = rng.uniform((double)0, (double)1);
-
- // produces float from [0, 1)
- float b = rng.uniform(0.f, 1.f);
-
- // produces double from [0, 1)
- double c = rng.uniform(0., 1.);
-
- // may cause compiler error because of ambiguity:
- // RNG::uniform(0, (int)0.999999)? or RNG::uniform((double)0, 0.99999)?
- double d = rng.uniform(0, 0.999999);
- @endcode
-
- The compiler does not take into account the type of the variable to
- which you assign the result of RNG::uniform . The only thing that
- matters to the compiler is the type of a and b parameters. So, if you
- want a floating-point random number, but the range boundaries are
- integer numbers, either put dots in the end, if they are constants, or
- use explicit type cast operators, as in the a1 initialization above.
- @param a lower inclusive boundary of the returned random number.
- @param b upper non-inclusive boundary of the returned random number.
- */
- int uniform(int a, int b);
- /** @overload */
- float uniform(float a, float b);
- /** @overload */
- double uniform(double a, double b);
-
- /** @brief Fills arrays with random numbers.
-
- @param mat 2D or N-dimensional matrix; currently matrices with more than
- 4 channels are not supported by the methods, use Mat::reshape as a
- possible workaround.
- @param distType distribution type, RNG::UNIFORM or RNG::NORMAL.
- @param a first distribution parameter; in case of the uniform
- distribution, this is an inclusive lower boundary, in case of the normal
- distribution, this is a mean value.
- @param b second distribution parameter; in case of the uniform
- distribution, this is a non-inclusive upper boundary, in case of the
- normal distribution, this is a standard deviation (diagonal of the
- standard deviation matrix or the full standard deviation matrix).
- @param saturateRange pre-saturation flag; for uniform distribution only;
- if true, the method will first convert a and b to the acceptable value
- range (according to the mat datatype) and then will generate uniformly
- distributed random numbers within the range [saturate(a), saturate(b)),
- if saturateRange=false, the method will generate uniformly distributed
- random numbers in the original range [a, b) and then will saturate them;
- this means, for example, that
- theRNG().fill(mat_8u, RNG::UNIFORM, -DBL_MAX, DBL_MAX) will likely
- produce an array mostly filled with 0's and 255's, since the range [0, 255]
- is significantly smaller than [-DBL_MAX, DBL_MAX).
-
- Each of the methods fills the matrix with the random values from the
- specified distribution. As the new numbers are generated, the RNG state
- is updated accordingly. In case of multiple-channel images, every
- channel is filled independently, which means that RNG cannot generate
- samples from the multi-dimensional Gaussian distribution with
- non-diagonal covariance matrix directly. To do that, generate samples
- from the multi-dimensional standard Gaussian distribution with zero mean
- and an identity covariance matrix, and then transform them using
- cv::transform to get samples from the specified Gaussian distribution.
- */
- void fill( InputOutputArray mat, int distType, InputArray a, InputArray b, bool saturateRange = false );
-
- /** @brief Returns the next random number sampled from the Gaussian distribution
- @param sigma standard deviation of the distribution.
-
- The method transforms the state using the MWC algorithm and returns the
- next random number from the Gaussian distribution N(0,sigma) . That is,
- the mean value of the returned random numbers is zero and the standard
- deviation is the specified sigma .
- */
- double gaussian(double sigma);
-
- uint64 state;
-
- bool operator ==(const RNG& other) const;
-};
-
-/** @brief Mersenne Twister random number generator
-
-Inspired by http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/mt19937ar.c
-@todo document
-*/
-class CV_EXPORTS RNG_MT19937
-{
-public:
- RNG_MT19937();
- RNG_MT19937(unsigned s);
- void seed(unsigned s);
-
- unsigned next();
-
- operator int();
- operator unsigned();
- operator float();
- operator double();
-
- unsigned operator ()(unsigned N);
- unsigned operator ()();
-
- /** @brief returns uniformly distributed integer random number from [a,b) range*/
- int uniform(int a, int b);
- /** @brief returns uniformly distributed floating-point random number from [a,b) range*/
- float uniform(float a, float b);
- /** @brief returns uniformly distributed double-precision floating-point random number from [a,b) range*/
- double uniform(double a, double b);
-
-private:
- enum PeriodParameters {N = 624, M = 397};
- unsigned state[N];
- int mti;
-};
-
-//! @} core_array
-
-//! @addtogroup core_cluster
-//! @{
-
-/** @example samples/cpp/kmeans.cpp
-An example on K-means clustering
-*/
-
-/** @brief Finds centers of clusters and groups input samples around the clusters.
-
-The function kmeans implements a k-means algorithm that finds the centers of cluster_count clusters
-and groups the input samples around the clusters. As an output, \f$\texttt{bestLabels}_i\f$ contains a
-0-based cluster index for the sample stored in the \f$i^{th}\f$ row of the samples matrix.
-
-@note
-- (Python) An example on K-means clustering can be found at
- opencv_source_code/samples/python/kmeans.py
-@param data Data for clustering. An array of N-Dimensional points with float coordinates is needed.
-Examples of this array can be:
-- Mat points(count, 2, CV_32F);
-- Mat points(count, 1, CV_32FC2);
-- Mat points(1, count, CV_32FC2);
-- std::vector<Point2f> points(sampleCount);
-@param K Number of clusters to split the set by.
-@param bestLabels Input/output integer array that stores the cluster indices for every sample.
-@param criteria The algorithm termination criteria, that is, the maximum number of iterations and/or
-the desired accuracy. The accuracy is specified as criteria.epsilon. As soon as each of the cluster
-centers moves by less than criteria.epsilon on some iteration, the algorithm stops.
-@param attempts Number of times the algorithm is executed using different
-initial labellings. The algorithm returns the labels that yield the best compactness (see the last
-function parameter).
-@param flags Flag that can take values of cv::KmeansFlags
-@param centers Output matrix of the cluster centers, one row per each cluster center.
-@return The function returns the compactness measure that is computed as
-\f[\sum _i \| \texttt{samples} _i - \texttt{centers} _{ \texttt{labels} _i} \| ^2\f]
-after every attempt. The best (minimum) value is chosen and the corresponding labels and the
-compactness value are returned by the function. Basically, you can use only the core of the
-function, set the number of attempts to 1, initialize labels each time using a custom algorithm,
-pass them with the ( flags = #KMEANS_USE_INITIAL_LABELS ) flag, and then choose the best
-(most-compact) clustering.
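-
-A minimal usage sketch clustering random 2D points into three groups:
-@code
- Mat points(100, 1, CV_32FC2), labels, centers;
- randu(points, Scalar(0, 0), Scalar(100, 100));
- double compactness = kmeans(points, 3, labels,
- TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 1.0),
- 3, KMEANS_PP_CENTERS, centers);
-@endcode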
-*/
-CV_EXPORTS_W double kmeans( InputArray data, int K, InputOutputArray bestLabels,
- TermCriteria criteria, int attempts,
- int flags, OutputArray centers = noArray() );
-
-//! @} core_cluster
-
-//! @addtogroup core_basic
-//! @{
-
-/////////////////////////////// Formatted output of cv::Mat ///////////////////////////
-
-/** @todo document */
-class CV_EXPORTS Formatted
-{
-public:
- virtual const char* next() = 0;
- virtual void reset() = 0;
- virtual ~Formatted();
-};
-
-/** @todo document */
-class CV_EXPORTS Formatter
-{
-public:
- enum FormatType {
- FMT_DEFAULT = 0,
- FMT_MATLAB = 1,
- FMT_CSV = 2,
- FMT_PYTHON = 3,
- FMT_NUMPY = 4,
- FMT_C = 5
- };
-
- virtual ~Formatter();
-
- virtual Ptr<Formatted> format(const Mat& mtx) const = 0;
-
- virtual void set16fPrecision(int p = 4) = 0;
- virtual void set32fPrecision(int p = 8) = 0;
- virtual void set64fPrecision(int p = 16) = 0;
- virtual void setMultiline(bool ml = true) = 0;
-
- static Ptr<Formatter> get(Formatter::FormatType fmt = FMT_DEFAULT);
-
-};
-
-static inline
-String& operator << (String& out, Ptr<Formatted> fmtd)
-{
- fmtd->reset();
- for(const char* str = fmtd->next(); str; str = fmtd->next())
- out += cv::String(str);
- return out;
-}
-
-static inline
-String& operator << (String& out, const Mat& mtx)
-{
- return out << Formatter::get()->format(mtx);
-}
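-
-/* With these helpers a matrix can be rendered as a string in any of the supported formats,
- for example:
-
- Mat m = Mat::eye(2, 2, CV_32F);
- String s;
- s << Formatter::get(Formatter::FMT_PYTHON)->format(m);
-*/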
-
-//////////////////////////////////////// Algorithm ////////////////////////////////////
-
-class CV_EXPORTS Algorithm;
-
-template<typename _Tp, typename _EnumTp = void> struct ParamType {};
-
-
-/** @brief This is a base class for all more or less complex algorithms in OpenCV
-
-especially for classes of algorithms for which there can be multiple implementations. Examples
-are stereo correspondence (block matching, semi-global block matching, graph-cut, etc.),
-background subtraction (mixture-of-gaussians models, codebook-based algorithms, etc.), and
-optical flow (block matching, Lucas-Kanade, Horn-Schunck, etc.).
-
-Here is an example of using SimpleBlobDetector via the Algorithm interface in your application:
-@snippet snippets/core_various.cpp Algorithm
-*/
-class CV_EXPORTS_W Algorithm
-{
-public:
- Algorithm();
- virtual ~Algorithm();
-
- /** @brief Clears the algorithm state
- */
- CV_WRAP virtual void clear() {}
-
- /** @brief Stores algorithm parameters in a file storage
- */
- CV_WRAP virtual void write(FileStorage& fs) const { CV_UNUSED(fs); }
-
- /**
- * @overload
- */
- CV_WRAP void write(FileStorage& fs, const String& name) const;
-#if CV_VERSION_MAJOR < 5
- /** @deprecated */
- void write(const Ptr<FileStorage>& fs, const String& name = String()) const;
-#endif
-
- /** @brief Reads algorithm parameters from a file storage
- */
- CV_WRAP virtual void read(const FileNode& fn) { CV_UNUSED(fn); }
-
- /** @brief Returns true if the Algorithm is empty (e.g. in the very beginning or after unsuccessful read)
- */
- CV_WRAP virtual bool empty() const { return false; }
-
- /** @brief Reads algorithm from the file node
-
- This is a static template method of Algorithm. Its usage is as follows (in the case of SVM):
- @code
- cv::FileStorage fsRead("example.xml", FileStorage::READ);
- Ptr<SVM> svm = Algorithm::read<SVM>(fsRead.root());
- @endcode
- In order to make this method work, the derived class must override Algorithm::read(const
- FileNode& fn) and also have a static create() method without parameters
- (or with all the optional parameters)
- */
- template<typename _Tp> static Ptr<_Tp> read(const FileNode& fn)
- {
- Ptr<_Tp> obj = _Tp::create();
- obj->read(fn);
- return !obj->empty() ? obj : Ptr<_Tp>();
- }
-
- /** @brief Loads algorithm from the file
-
- @param filename Name of the file to read.
- @param objname The optional name of the node to read (if empty, the first top-level node will be used)
-
- This is a static template method of Algorithm. Its usage is as follows (in the case of SVM):
- @code
- Ptr<SVM> svm = Algorithm::load<SVM>("my_svm_model.xml");
- @endcode
- In order to make this method work, the derived class must override Algorithm::read(const
- FileNode& fn).
- */
- template<typename _Tp> static Ptr<_Tp> load(const String& filename, const String& objname=String())
- {
- FileStorage fs(filename, FileStorage::READ);
- CV_Assert(fs.isOpened());
- FileNode fn = objname.empty() ? fs.getFirstTopLevelNode() : fs[objname];
- if (fn.empty()) return Ptr<_Tp>();
- Ptr<_Tp> obj = _Tp::create();
- obj->read(fn);
- return !obj->empty() ? obj : Ptr<_Tp>();
- }
-
- /** @brief Loads algorithm from a String
-
- @param strModel The string variable containing the model you want to load.
- @param objname The optional name of the node to read (if empty, the first top-level node will be used)
-
- This is a static template method of Algorithm. Its usage is as follows (in the case of SVM):
- @code
- Ptr<SVM> svm = Algorithm::loadFromString<SVM>(myStringModel);
- @endcode
- */
- template<typename _Tp> static Ptr<_Tp> loadFromString(const String& strModel, const String& objname=String())
- {
- FileStorage fs(strModel, FileStorage::READ + FileStorage::MEMORY);
- FileNode fn = objname.empty() ? fs.getFirstTopLevelNode() : fs[objname];
- Ptr<_Tp> obj = _Tp::create();
- obj->read(fn);
- return !obj->empty() ? obj : Ptr<_Tp>();
- }
-
- /** Saves the algorithm to a file.
- In order to make this method work, the derived class must implement Algorithm::write(FileStorage& fs). */
- CV_WRAP virtual void save(const String& filename) const;
-
- /** Returns the algorithm string identifier.
- This string is used as top level xml/yml node tag when the object is saved to a file or string. */
- CV_WRAP virtual String getDefaultName() const;
-
-protected:
- void writeFormat(FileStorage& fs) const;
-};
-
-enum struct Param {
- INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7,
- UNSIGNED_INT=8, UINT64=9, UCHAR=11, SCALAR=12
-};
-
-
-
-template<> struct ParamType<bool>
-{
- typedef bool const_param_type;
- typedef bool member_type;
-
- static const Param type = Param::BOOLEAN;
-};
-
-template<> struct ParamType<int>
-{
- typedef int const_param_type;
- typedef int member_type;
-
- static const Param type = Param::INT;
-};
-
-template<> struct ParamType<double>
-{
- typedef double const_param_type;
- typedef double member_type;
-
- static const Param type = Param::REAL;
-};
-
-template<> struct ParamType<String>
-{
- typedef const String& const_param_type;
- typedef String member_type;
-
- static const Param type = Param::STRING;
-};
-
-template<> struct ParamType<Mat>
-{
- typedef const Mat& const_param_type;
- typedef Mat member_type;
-
- static const Param type = Param::MAT;
-};
-
-template<> struct ParamType<std::vector<Mat> >
-{
- typedef const std::vector<Mat>& const_param_type;
- typedef std::vector<Mat> member_type;
-
- static const Param type = Param::MAT_VECTOR;
-};
-
-template<> struct ParamType<Algorithm>
-{
- typedef const Ptr<Algorithm>& const_param_type;
- typedef Ptr<Algorithm> member_type;
-
- static const Param type = Param::ALGORITHM;
-};
-
-template<> struct ParamType<float>
-{
- typedef float const_param_type;
- typedef float member_type;
-
- static const Param type = Param::FLOAT;
-};
-
-template<> struct ParamType<unsigned>
-{
- typedef unsigned const_param_type;
- typedef unsigned member_type;
-
- static const Param type = Param::UNSIGNED_INT;
-};
-
-template<> struct ParamType<uint64>
-{
- typedef uint64 const_param_type;
- typedef uint64 member_type;
-
- static const Param type = Param::UINT64;
-};
-
-template<> struct ParamType<uchar>
-{
- typedef uchar const_param_type;
- typedef uchar member_type;
-
- static const Param type = Param::UCHAR;
-};
-
-template<> struct ParamType<Scalar>
-{
- typedef const Scalar& const_param_type;
- typedef Scalar member_type;
-
- static const Param type = Param::SCALAR;
-};
-
-template<typename _Tp>
-struct ParamType<_Tp, typename std::enable_if< std::is_enum<_Tp>::value >::type>
-{
- typedef typename std::underlying_type<_Tp>::type const_param_type;
- typedef typename std::underlying_type<_Tp>::type member_type;
-
- static const Param type = Param::INT;
-};
-
-//! @} core_basic
-
-} //namespace cv
-
-#include "opencv2/core/operations.hpp"
-#include "opencv2/core/cvstd.inl.hpp"
-#include "opencv2/core/utility.hpp"
-#include "opencv2/core/optim.hpp"
-#include "opencv2/core/ovx.hpp"
-
-#endif /*OPENCV_CORE_HPP*/
diff --git a/opencv/native/jni/include/opencv2/core/affine.hpp b/opencv/native/jni/include/opencv2/core/affine.hpp
deleted file mode 100644
index 1806382..0000000
--- a/opencv/native/jni/include/opencv2/core/affine.hpp
+++ /dev/null
@@ -1,678 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of the copyright holders may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef OPENCV_CORE_AFFINE3_HPP
-#define OPENCV_CORE_AFFINE3_HPP
-
-#ifdef __cplusplus
-
-#include <opencv2/core.hpp>
-
-namespace cv
-{
-
-//! @addtogroup core
-//! @{
-
- /** @brief Affine transform
- *
- * It represents a 4x4 homogeneous transformation matrix \f$T\f$
- *
- * \f[T =
- * \begin{bmatrix}
- * R & t\\
- * 0 & 1\\
- * \end{bmatrix}
- * \f]
- *
- * where \f$R\f$ is a 3x3 rotation matrix and \f$t\f$ is a 3x1 translation vector.
- *
- * You can specify \f$R\f$ either by a 3x3 rotation matrix or by a 3x1 rotation vector,
- * which is converted to a 3x3 rotation matrix by the Rodrigues formula.
- *
- * To construct a matrix \f$T\f$ representing first rotation around the axis \f$r\f$ with rotation
- * angle \f$|r|\f$ in radian (right hand rule) and then translation by the vector \f$t\f$, you can use
- *
- * @code
- * cv::Vec3f r, t;
- * cv::Affine3f T(r, t);
- * @endcode
- *
- * If you already have the rotation matrix \f$R\f$, then you can use
- *
- * @code
- * cv::Matx33f R;
- * cv::Affine3f T(R, t);
- * @endcode
- *
- * To extract the rotation matrix \f$R\f$ from \f$T\f$, use
- *
- * @code
- * cv::Matx33f R = T.rotation();
- * @endcode
- *
- * To extract the translation vector \f$t\f$ from \f$T\f$, use
- *
- * @code
- * cv::Vec3f t = T.translation();
- * @endcode
- *
- * To extract the rotation vector \f$r\f$ from \f$T\f$, use
- *
- * @code
- * cv::Vec3f r = T.rvec();
- * @endcode
- *
- * Note that since the mapping from rotation vectors to rotation matrices
- * is many to one, the returned rotation vector is not necessarily the one
- * you used before to set the matrix.
- *
- * If you have two transformations \f$T = T_1 * T_2\f$, use
- *
- * @code
- * cv::Affine3f T, T1, T2;
- * T = T2.concatenate(T1);
- * @endcode
- *
- * To get the inverse transform of \f$T\f$, use
- *
- * @code
- * cv::Affine3f T, T_inv;
- * T_inv = T.inv();
- * @endcode
- *
- */
- template<typename T>
- class Affine3
- {
- public:
- typedef T float_type;
- typedef Matx<float_type, 3, 3> Mat3;
- typedef Matx<float_type, 4, 4> Mat4;
- typedef Vec<float_type, 3> Vec3;
-
- //! Default constructor. It represents a 4x4 identity matrix.
- Affine3();
-
- //! Augmented affine matrix
- Affine3(const Mat4& affine);
-
- /**
- * The resulting 4x4 matrix is
- *
- * \f[
- * \begin{bmatrix}
- * R & t\\
- * 0 & 1\\
- * \end{bmatrix}
- * \f]
- *
- * @param R 3x3 rotation matrix.
- * @param t 3x1 translation vector.
- */
- Affine3(const Mat3& R, const Vec3& t = Vec3::all(0));
-
- /**
- * Rodrigues vector.
- *
- * The last row of the current matrix is set to [0,0,0,1].
- *
- * @param rvec 3x1 rotation vector. Its direction indicates the rotation axis and its length
- * indicates the rotation angle in radian (using right hand rule).
- * @param t 3x1 translation vector.
- */
- Affine3(const Vec3& rvec, const Vec3& t = Vec3::all(0));
-
- /**
- * Combines all constructors above. Supports 4x4, 3x4, 3x3, 1x3, 3x1 sizes of data matrix.
- *
- * The last row of the current matrix is set to [0,0,0,1] when data is not 4x4.
- *
- * @param data 1-channel matrix.
- * when it is 4x4, it is copied to the current matrix and t is not used.
- * When it is 3x4, it is copied to the upper part 3x4 of the current matrix and t is not used.
- * When it is 3x3, it is copied to the upper left 3x3 part of the current matrix.
- * When it is 3x1 or 1x3, it is treated as a rotation vector and the Rodrigues formula is used
- * to compute a 3x3 rotation matrix.
- * @param t 3x1 translation vector. It is used only when data is neither 4x4 nor 3x4.
- */
- explicit Affine3(const Mat& data, const Vec3& t = Vec3::all(0));
-
- //! From 16-element array
- explicit Affine3(const float_type* vals);
-
- //! Create a 4x4 identity transform
- static Affine3 Identity();
-
- /**
- * Rotation matrix.
- *
- * Copy the rotation matrix to the upper left 3x3 part of the current matrix.
- * The remaining elements of the current matrix are not changed.
- *
- * @param R 3x3 rotation matrix.
- *
- */
- void rotation(const Mat3& R);
-
- /**
- * Rodrigues vector.
- *
- * It sets the upper left 3x3 part of the matrix. The remaining part is unaffected.
- *
- * @param rvec 3x1 rotation vector. The direction indicates the rotation axis and
- * its length indicates the rotation angle in radian (using the right thumb convention).
- */
- void rotation(const Vec3& rvec);
-
- /**
- * Combines rotation methods above. Supports 3x3, 1x3, 3x1 sizes of data matrix.
- *
- * It sets the upper left 3x3 part of the matrix. The remaining part is unaffected.
- *
- * @param data 1-channel matrix.
- * When it is a 3x3 matrix, it sets the upper left 3x3 part of the current matrix.
- * When it is a 1x3 or 3x1 matrix, it is used as a rotation vector. The Rodrigues formula
- * is used to compute the rotation matrix and sets the upper left 3x3 part of the current matrix.
- */
- void rotation(const Mat& data);
-
- /**
- * Copy the 3x3 matrix L to the upper left part of the current matrix
- *
- * It sets the upper left 3x3 part of the matrix. The remaining part is unaffected.
- *
- * @param L 3x3 matrix.
- */
- void linear(const Mat3& L);
-
- /**
- * Copy t to the first three elements of the last column of the current matrix
- *
- * It sets the upper right 3x1 part of the matrix. The remaining part is unaffected.
- *
- * @param t 3x1 translation vector.
- */
- void translation(const Vec3& t);
-
- //! @return the upper left 3x3 part
- Mat3 rotation() const;
-
- //! @return the upper left 3x3 part
- Mat3 linear() const;
-
- //! @return the upper right 3x1 part
- Vec3 translation() const;
-
- //! Rodrigues vector.
- //! @return a vector representing the upper left 3x3 rotation matrix of the current matrix.
- //! @warning Since the mapping between rotation vectors and rotation matrices is many to one,
- //! this function returns only one rotation vector that represents the current rotation matrix,
- //! which is not necessarily the same one set by `rotation(const Vec3& rvec)`.
- Vec3 rvec() const;
-
- //! @return the inverse of the current matrix.
- Affine3 inv(int method = cv::DECOMP_SVD) const;
-
- //! a.rotate(R) is equivalent to Affine(R, 0) * a;
- Affine3 rotate(const Mat3& R) const;
-
- //! a.rotate(rvec) is equivalent to Affine(rvec, 0) * a;
- Affine3 rotate(const Vec3& rvec) const;
-
- //! a.translate(t) is equivalent to Affine(E, t) * a, where E is an identity matrix
- Affine3 translate(const Vec3& t) const;
-
- //! a.concatenate(affine) is equivalent to affine * a;
- Affine3 concatenate(const Affine3& affine) const;
-
- template<typename Y> operator Affine3<Y>() const;
-
- template<typename Y> Affine3<Y> cast() const;
-
- Mat4 matrix;
-
-#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H
- Affine3(const Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>& affine);
- Affine3(const Eigen::Transform<T, 3, Eigen::Affine>& affine);
- operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>() const;
- operator Eigen::Transform<T, 3, Eigen::Affine>() const;
-#endif
- };
-
- template<typename T> static
- Affine3<T> operator*(const Affine3<T>& affine1, const Affine3<T>& affine2);
-
- //! V is a 3-element vector with member fields x, y and z
- template<typename T, typename V> static
- V operator*(const Affine3<T>& affine, const V& vector);
-
- typedef Affine3<float> Affine3f;
- typedef Affine3<double> Affine3d;
-
- static Vec3f operator*(const Affine3f& affine, const Vec3f& vector);
- static Vec3d operator*(const Affine3d& affine, const Vec3d& vector);
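-
- /* A short sketch: rotate a point 90 degrees around the z-axis, then translate by (1, 0, 0):
-
- cv::Affine3d T(cv::Vec3d(0, 0, CV_PI/2), cv::Vec3d(1, 0, 0));
- cv::Vec3d p = T * cv::Vec3d(1, 0, 0); // p is approximately (1, 1, 0)
- */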
-
- template<typename _Tp> class DataType< Affine3<_Tp> >
- {
- public:
- typedef Affine3<_Tp> value_type;
- typedef Affine3<typename DataType<_Tp>::work_type> work_type;
- typedef _Tp channel_type;
-
- enum { generic_type = 0,
- channels = 16,
- fmt = traits::SafeFmt<channel_type>::fmt + ((channels - 1) << 8)
-#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED
- ,depth = DataType<channel_type>::depth
- ,type = CV_MAKETYPE(depth, channels)
-#endif
- };
-
- typedef Vec<channel_type, channels> vec_type;
- };
-
- namespace traits {
- template<typename _Tp>
- struct Depth< Affine3<_Tp> > { enum { value = Depth<_Tp>::value }; };
- template<typename _Tp>
- struct Type< Affine3<_Tp> > { enum { value = CV_MAKETYPE(Depth<_Tp>::value, 16) }; };
- } // namespace
-
-//! @} core
-
-}
-
-//! @cond IGNORED
-
-///////////////////////////////////////////////////////////////////////////////////
-// Implementation
-
-template<typename T> inline
-cv::Affine3<T>::Affine3()
- : matrix(Mat4::eye())
-{}
-
-template<typename T> inline
-cv::Affine3<T>::Affine3(const Mat4& affine)
- : matrix(affine)
-{}
-
-template<typename T> inline
-cv::Affine3<T>::Affine3(const Mat3& R, const Vec3& t)
-{
- rotation(R);
- translation(t);
- matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
- matrix.val[15] = 1;
-}
-
-template<typename T> inline
-cv::Affine3<T>::Affine3(const Vec3& _rvec, const Vec3& t)
-{
- rotation(_rvec);
- translation(t);
- matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
- matrix.val[15] = 1;
-}
-
-template<typename T> inline
-cv::Affine3<T>::Affine3(const cv::Mat& data, const Vec3& t)
-{
- CV_Assert(data.type() == cv::traits::Type<T>::value);
- CV_Assert(data.channels() == 1);
-
- if (data.cols == 4 && data.rows == 4)
- {
- data.copyTo(matrix);
- return;
- }
- else if (data.cols == 4 && data.rows == 3)
- {
- rotation(data(Rect(0, 0, 3, 3)));
- translation(data(Rect(3, 0, 1, 3)));
- }
- else
- {
- rotation(data);
- translation(t);
- }
-
- matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
- matrix.val[15] = 1;
-}
-
-template<typename T> inline
-cv::Affine3<T>::Affine3(const float_type* vals) : matrix(vals)
-{}
-
-template<typename T> inline
-cv::Affine3<T> cv::Affine3<T>::Identity()
-{
- return Affine3<T>(cv::Affine3<T>::Mat4::eye());
-}
-
-template<typename T> inline
-void cv::Affine3<T>::rotation(const Mat3& R)
-{
- linear(R);
-}
-
-template<typename T> inline
-void cv::Affine3<T>::rotation(const Vec3& _rvec)
-{
- double theta = norm(_rvec);
-
- if (theta < DBL_EPSILON)
- rotation(Mat3::eye());
- else
- {
- double c = std::cos(theta);
- double s = std::sin(theta);
- double c1 = 1. - c;
- double itheta = (theta != 0) ? 1./theta : 0.;
-
- Point3_<double> r = _rvec*itheta;
-
- Mat3 rrt( r.x*r.x, r.x*r.y, r.x*r.z, r.x*r.y, r.y*r.y, r.y*r.z, r.x*r.z, r.y*r.z, r.z*r.z );
- Mat3 r_x( 0, -r.z, r.y, r.z, 0, -r.x, -r.y, r.x, 0 );
-
- // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x]
- // where [r_x] is [0 -rz ry; rz 0 -rx; -ry rx 0]
- Mat3 R = c*Mat3::eye() + c1*rrt + s*r_x;
-
- rotation(R);
- }
-}
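The else-branch above is the standard Rodrigues formula spelled out in the comment. As a quick self-check (hypothetical test code, not from the header), it can be compared against the closed form for a rotation about the z-axis:

```cpp
#include <opencv2/core/affine.hpp>
#include <cassert>
#include <cmath>

int main()
{
    const double theta = CV_PI / 3;
    cv::Affine3d a(cv::Vec3d(0, 0, theta), cv::Vec3d::all(0));
    cv::Matx33d R = a.rotation();

    // Expected closed form for a z-rotation: [cos -sin 0; sin cos 0; 0 0 1]
    assert(std::abs(R(0, 0) - std::cos(theta)) < 1e-12);
    assert(std::abs(R(0, 1) + std::sin(theta)) < 1e-12);
    assert(std::abs(R(1, 0) - std::sin(theta)) < 1e-12);
    assert(std::abs(R(2, 2) - 1.0)             < 1e-12);
}
```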
-
-// Combines the rotation methods above. Supports 3x3, 1x3 and 3x1 sizes of the data matrix.
-template<typename T> inline
-void cv::Affine3<T>::rotation(const cv::Mat& data)
-{
- CV_Assert(data.type() == cv::traits::Type<T>::value);
- CV_Assert(data.channels() == 1);
-
- if (data.cols == 3 && data.rows == 3)
- {
- Mat3 R;
- data.copyTo(R);
- rotation(R);
- }
- else if ((data.cols == 3 && data.rows == 1) || (data.cols == 1 && data.rows == 3))
- {
- Vec3 _rvec;
- data.reshape(1, 3).copyTo(_rvec);
- rotation(_rvec);
- }
- else
- CV_Error(Error::StsError, "Input matrix can only be 3x3, 1x3 or 3x1");
-}
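The dispatcher above lets callers pass either rotation representation through a single entry point; a small sketch of both paths (function name is mine):

```cpp
#include <opencv2/core/affine.hpp>

void rotation_overloads_sketch()
{
    cv::Affine3d a;

    // 3x1 or 1x3 CV_64F data is reshaped and treated as a Rodrigues vector...
    cv::Mat rvec = (cv::Mat_<double>(3, 1) << 0.1, 0.2, 0.3);
    a.rotation(rvec);

    // ...while 3x3 data is copied in directly as a rotation matrix.
    a.rotation(cv::Mat::eye(3, 3, CV_64F));

    // Any other shape reaches the CV_Error branch above.
}
```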
-
-template<typename T> inline
-void cv::Affine3<T>::linear(const Mat3& L)
-{
- matrix.val[0] = L.val[0]; matrix.val[1] = L.val[1]; matrix.val[ 2] = L.val[2];
- matrix.val[4] = L.val[3]; matrix.val[5] = L.val[4]; matrix.val[ 6] = L.val[5];
- matrix.val[8] = L.val[6]; matrix.val[9] = L.val[7]; matrix.val[10] = L.val[8];
-}
-
-template<typename T> inline
-void cv::Affine3<T>::translation(const Vec3& t)
-{
- matrix.val[3] = t[0]; matrix.val[7] = t[1]; matrix.val[11] = t[2];
-}
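Both setters write straight into `matrix.val`, which stores the 4x4 matrix in row-major order; the index arithmetic above corresponds to this layout (illustration only):

```cpp
// matrix.val, row-major:
//
//   [ val[0]  val[1]  val[2]  val[3]  ]     [ L00 L01 L02 tx ]
//   [ val[4]  val[5]  val[6]  val[7]  ]  =  [ L10 L11 L12 ty ]
//   [ val[8]  val[9]  val[10] val[11] ]     [ L20 L21 L22 tz ]
//   [ val[12] val[13] val[14] val[15] ]     [ 0   0   0   1  ]
```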
-
-template<typename T> inline
-typename cv::Affine3<T>::Mat3 cv::Affine3<T>::rotation() const
-{
- return linear();
-}
-
-template<typename T> inline
-typename cv::Affine3<T>::Mat3 cv::Affine3<T>::linear() const
-{
- typename cv::Affine3<T>::Mat3 R;
- R.val[0] = matrix.val[0]; R.val[1] = matrix.val[1]; R.val[2] = matrix.val[ 2];
- R.val[3] = matrix.val[4]; R.val[4] = matrix.val[5]; R.val[5] = matrix.val[ 6];
- R.val[6] = matrix.val[8]; R.val[7] = matrix.val[9]; R.val[8] = matrix.val[10];
- return R;
-}
-
-template<typename T> inline
-typename cv::Affine3<T>::Vec3 cv::Affine3<T>::translation() const
-{
- return Vec3(matrix.val[3], matrix.val[7], matrix.val[11]);
-}
-
-template<typename T> inline
-typename cv::Affine3<T>::Vec3 cv::Affine3<T>::rvec() const
-{
- cv::Vec3d w;
- cv::Matx33d u, vt, R = rotation();
- cv::SVD::compute(R, w, u, vt, cv::SVD::FULL_UV + cv::SVD::MODIFY_A);
- R = u * vt;
-
- double rx = R.val[7] - R.val[5];
- double ry = R.val[2] - R.val[6];
- double rz = R.val[3] - R.val[1];
-
- double s = std::sqrt((rx*rx + ry*ry + rz*rz)*0.25);
- double c = (R.val[0] + R.val[4] + R.val[8] - 1) * 0.5;
- c = c > 1.0 ? 1.0 : c < -1.0 ? -1.0 : c;
- double theta = std::acos(c);
-
- if( s < 1e-5 )
- {
- if( c > 0 )
- rx = ry = rz = 0;
- else
- {
- double t;
- t = (R.val[0] + 1) * 0.5;
- rx = std::sqrt(std::max(t, 0.0));
- t = (R.val[4] + 1) * 0.5;
- ry = std::sqrt(std::max(t, 0.0)) * (R.val[1] < 0 ? -1.0 : 1.0);
- t = (R.val[8] + 1) * 0.5;
- rz = std::sqrt(std::max(t, 0.0)) * (R.val[2] < 0 ? -1.0 : 1.0);
-
- if( fabs(rx) < fabs(ry) && fabs(rx) < fabs(rz) && (R.val[5] > 0) != (ry*rz > 0) )
- rz = -rz;
- theta /= std::sqrt(rx*rx + ry*ry + rz*rz);
- rx *= theta;
- ry *= theta;
- rz *= theta;
- }
- }
- else
- {
- double vth = 1/(2*s);
- vth *= theta;
- rx *= vth; ry *= vth; rz *= vth;
- }
-
- return cv::Vec3d(rx, ry, rz);
-}
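As the declaration's warning notes, the map from rotation matrices back to vectors is many-to-one, so only the reconstructed rotation matrix, not the vector itself, is guaranteed to round-trip. A sketch of that property (my own helper, not part of the header):

```cpp
#include <opencv2/core/affine.hpp>

// True if rotation -> rvec -> rotation reproduces the matrix to tolerance.
bool rvec_roundtrip(const cv::Vec3d& rvec)
{
    cv::Affine3d a(rvec, cv::Vec3d::all(0));
    cv::Affine3d b(a.rvec(), cv::Vec3d::all(0));
    return cv::norm(a.rotation() - b.rotation()) < 1e-9;
}
```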
-
-template<typename T> inline
-cv::Affine3<T> cv::Affine3<T>::inv(int method) const
-{
- return matrix.inv(method);
-}
-
-template<typename T> inline
-cv::Affine3<T> cv::Affine3<T>::rotate(const Mat3& R) const
-{
- Mat3 Lc = linear();
- Vec3 tc = translation();
- Mat4 result;
- result.val[12] = result.val[13] = result.val[14] = 0;
- result.val[15] = 1;
-
- for(int j = 0; j < 3; ++j)
- {
- for(int i = 0; i < 3; ++i)
- {
- float_type value = 0;
- for(int k = 0; k < 3; ++k)
- value += R(j, k) * Lc(k, i);
- result(j, i) = value;
- }
-
- result(j, 3) = R.row(j).dot(tc.t());
- }
- return result;
-}
-
-template<typename T> inline
-cv::Affine3<T> cv::Affine3<T>::rotate(const Vec3& _rvec) const
-{
- return rotate(Affine3f(_rvec).rotation());
-}
-
-template<typename T> inline
-cv::Affine3<T> cv::Affine3<T>::translate(const Vec3& t) const
-{
- Mat4 m = matrix;
- m.val[ 3] += t[0];
- m.val[ 7] += t[1];
- m.val[11] += t[2];
- return m;
-}
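This matches the doc comment's identity, a.translate(t) == Affine3(E, t) * a: only the translation column changes while the linear part stays untouched. A quick check of that identity (hypothetical test code, using only members defined above):

```cpp
#include <opencv2/core/affine.hpp>
#include <cassert>

int main()
{
    cv::Affine3d a(cv::Vec3d(0.3, -0.1, 0.2), cv::Vec3d(1, 2, 3));
    cv::Vec3d t(0.5, 0.0, -0.5);

    cv::Affine3d lhs = a.translate(t);
    cv::Affine3d rhs = cv::Affine3d(cv::Matx33d::eye(), t) * a;

    assert(cv::norm(lhs.matrix - rhs.matrix) < 1e-12);
}
```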
-
-template<typename T> inline
-cv::Affine3<T> cv::Affine3<T>::concatenate(const Affine3& affine) const
-{
- return (*this).rotate(affine.rotation()).translate(affine.translation());
-}
-
-template<typename T> template <typename Y> inline
-cv::Affine3<T>::operator Affine3<Y>() const
-{
- return Affine3<Y>(matrix);
-}
-
-template<typename T> template <typename Y> inline
-cv::Affine3<Y> cv::Affine3<T>::cast() const
-{
- return Affine3<Y>(matrix);
-}
-
-template<typename T> inline
-cv::Affine3<T> cv::operator*(const cv::Affine3<T>& affine1, const cv::Affine3<T>& affine2)
-{
- return affine2.concatenate(affine1);
-}
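Note the argument swap: affine1 * affine2 delegates to affine2.concatenate(affine1), so the product follows the usual matrix convention in which affine2 is applied to a point first. A sketch of the compatibility this buys, composing first versus transforming step by step (my own example):

```cpp
#include <opencv2/core/affine.hpp>
#include <cassert>

int main()
{
    cv::Affine3d a1(cv::Vec3d(0, 0, CV_PI / 2), cv::Vec3d(1, 0, 0));
    cv::Affine3d a2(cv::Vec3d(0, 0, 0),         cv::Vec3d(0, 1, 0));

    cv::Vec3d p(1, 0, 0);
    cv::Vec3d lhs = (a1 * a2) * p;   // compose, then transform
    cv::Vec3d rhs = a1 * (a2 * p);   // transform by a2 first, then a1

    assert(cv::norm(lhs - rhs) < 1e-12);
}
```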
-
-template<typename T, typename V> inline
-V cv::operator*(const cv::Affine3<T>& affine, const V& v)
-{
- const typename Affine3<T>::Mat4& m = affine.matrix;
-
- V r;
- r.x = m.val[0] * v.x + m.val[1] * v.y + m.val[ 2] * v.z + m.val[ 3];
- r.y = m.val[4] * v.x + m.val[5] * v.y + m.val[ 6] * v.z + m.val[ 7];
- r.z = m.val[8] * v.x + m.val[9] * v.y + m.val[10] * v.z + m.val[11];
- return r;
-}
-
-static inline
-cv::Vec3f cv::operator*(const cv::Affine3f& affine, const cv::Vec3f& v)
-{
- const cv::Matx44f& m = affine.matrix;
- cv::Vec3f r;
- r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3];
- r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7];
- r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11];
- return r;
-}
-
-static inline
-cv::Vec3d cv::operator*(const cv::Affine3d& affine, const cv::Vec3d& v)
-{
- const cv::Matx44d& m = affine.matrix;
- cv::Vec3d r;
- r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3];
- r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7];
- r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11];
- return r;
-}
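All three overloads apply the full transform, linear part plus translation, to a point; the generic one only requires public x, y and z members, so for instance cv::Point3d works too (sketch):

```cpp
#include <opencv2/core/affine.hpp>
#include <iostream>

int main()
{
    cv::Affine3d shift(cv::Matx33d::eye(), cv::Vec3d(0, 0, 5));
    cv::Point3d p = shift * cv::Point3d(1, 2, 3);   // -> (1, 2, 8)
    std::cout << p << std::endl;
}
```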
-
-
-#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H
-
-template<typename T> inline
-cv::Affine3<T>::Affine3(const Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>& affine)
-{
- cv::Mat(4, 4, cv::traits::Type<T>::value, affine.matrix().data()).copyTo(matrix);
-}
-
-template<typename T> inline
-cv::Affine3<T>::Affine3(const Eigen::Transform<T, 3, Eigen::Affine>& affine)
-{
- Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)> a = affine;
- cv::Mat(4, 4, cv::traits::Type<T>::value, a.matrix().data()).copyTo(matrix);
-}
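These conversions are only compiled when Eigen's Geometry module is included before this header; since Eigen is column-major by default, the second constructor copies through a row-major temporary before the byte-wise copy. Usage sketch (my own example, assuming Eigen is available):

```cpp
#include <Eigen/Geometry>              // must precede the OpenCV header
#include <opencv2/core/affine.hpp>

int main()
{
    Eigen::Affine3d e = Eigen::Translation3d(1, 2, 3)
                      * Eigen::AngleAxisd(CV_PI / 2, Eigen::Vector3d::UnitZ());

    cv::Affine3d c(e);                 // Eigen -> OpenCV
    Eigen::Affine3d back = c;          // OpenCV -> Eigen via the cast operator
    (void)back;
}
```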
-
-template<typename T> inline
-cv::Affine3