From f1a684f50be5babe63d9707f9e783901d689978d Mon Sep 17 00:00:00 2001
From: scottsievert
Date: Sun, 13 Sep 2015 00:40:03 -0500
Subject: [PATCH] adds an example and simple iOS app

---
 .../swix_ios_app.xcodeproj/project.pbxproj | 798 +++
 .../contents.xcworkspacedata | 7 +
 swix_ios_app/swix_ios_app/AppDelegate.swift | 46 +
 .../AppIcon.appiconset/Contents.json | 38 +
 .../Base.lproj/LaunchScreen.storyboard | 27 +
 .../swix_ios_app/Base.lproj/Main.storyboard | 56 +
 swix_ios_app/swix_ios_app/Info.plist | 40 +
 .../swix_ios_app/ViewController.swift | 38 +
 swix_ios_app/swix_ios_app/space_shuttle.png | Bin 0 -> 416850 bytes
 .../swix/ScalarArithmetic-bleed.swift | 205 +
 swix_ios_app/swix_ios_app/swix/imshow.py | 30 +
 swix_ios_app/swix_ios_app/swix/io.swift | 119 +
 .../machine_learning/machine_learning.swift | 82 +
 .../swix/matrix/m-complex-math.swift | 119 +
 .../swix/matrix/m-helper-functions.swift | 147 +
 .../swix_ios_app/swix/matrix/m-image.swift | 165 +
 .../swix_ios_app/swix/matrix/m-initing.swift | 113 +
 .../swix_ios_app/swix/matrix/m-matrix.swift | 247 +
 .../swix/matrix/m-operators.swift | 152 +
 .../swix/matrix/m-simple-math.swift | 110 +
 .../swix/ndarray/complex-math.swift | 101 +
 .../swix/ndarray/helper-functions.swift | 190 +
 .../swix_ios_app/swix/ndarray/initing.swift | 135 +
 .../swix_ios_app/swix/ndarray/ndarray.swift | 163 +
 .../swix_ios_app/swix/ndarray/operators.swift | 244 +
 .../swix/ndarray/simple-math.swift | 251 +
 swix_ios_app/swix_ios_app/swix/numbers.swift | 129 +
 swix_ios_app/swix_ios_app/swix/objc/OpenCV.h | 48 +
 .../swix_ios_app/swix/objc/conversion.swift | 27 +
 .../swix/objc/machine_learning.mm | 116 +
 swix_ios_app/swix_ios_app/swix/objc/math.m | 66 +
 swix_ios_app/swix_ios_app/swix/objc/opencv.mm | 140 +
 .../swix/objc/opencv2.framework/Headers | 1 +
 .../swix/objc/opencv2.framework/Resources | 1 +
 .../Versions/A/Headers/calib3d/calib3d.hpp | 751 +++
 .../Versions/A/Headers/contrib/contrib.hpp | 985 ++++
 .../contrib/detection_based_tracker.hpp | 106 +
 .../A/Headers/contrib/hybridtracker.hpp | 220 +
 .../Versions/A/Headers/contrib/openfabmap.hpp | 405 ++
 .../Versions/A/Headers/contrib/retina.hpp | 355 ++
 .../Versions/A/Headers/core/affine.hpp | 509 ++
 .../Versions/A/Headers/core/core.hpp | 4858 +++++++++++++++++
 .../Versions/A/Headers/core/core_c.h | 1885 +++++++
 .../Versions/A/Headers/core/cuda_devptrs.hpp | 199 +
 .../Versions/A/Headers/core/devmem2d.hpp | 43 +
 .../Versions/A/Headers/core/eigen.hpp | 280 +
 .../Versions/A/Headers/core/gpumat.hpp | 562 ++
 .../Versions/A/Headers/core/internal.hpp | 781 +++
 .../Versions/A/Headers/core/mat.hpp | 2619 +++++++++
 .../A/Headers/core/opengl_interop.hpp | 284 +
 .../core/opengl_interop_deprecated.hpp | 330 ++
 .../Versions/A/Headers/core/operations.hpp | 4046 ++++++++++++++
 .../Versions/A/Headers/core/types_c.h | 1896 +++++++
 .../Versions/A/Headers/core/version.hpp | 72 +
 .../Versions/A/Headers/core/wimage.hpp | 621 +++
 .../A/Headers/features2d/features2d.hpp | 1611 ++++++
 .../Versions/A/Headers/flann/all_indices.h | 155 +
 .../Versions/A/Headers/flann/allocator.h | 188 +
 .../Versions/A/Headers/flann/any.h | 304 ++
 .../A/Headers/flann/autotuned_index.h | 583 ++
 .../A/Headers/flann/composite_index.h | 194 +
 .../Versions/A/Headers/flann/config.h | 38 +
 .../Versions/A/Headers/flann/defines.h | 176 +
 .../Versions/A/Headers/flann/dist.h | 817 +++
 .../Versions/A/Headers/flann/dummy.h | 16 +
 .../Versions/A/Headers/flann/dynamic_bitset.h | 159 +
 .../Versions/A/Headers/flann/flann.hpp | 427 ++
 .../Versions/A/Headers/flann/flann_base.hpp | 291 +
 .../Versions/A/Headers/flann/general.h | 52 +
 .../Versions/A/Headers/flann/ground_truth.h | 94 +
 .../Versions/A/Headers/flann/hdf5.h | 231 +
 .../Versions/A/Headers/flann/heap.h | 165 +
 .../flann/hierarchical_clustering_index.h | 759 +++
 .../Versions/A/Headers/flann/index_testing.h | 318 ++
 .../Versions/A/Headers/flann/kdtree_index.h | 621 +++
 .../A/Headers/flann/kdtree_single_index.h | 634 +++
 .../Versions/A/Headers/flann/kmeans_index.h | 1117 ++++
 .../Versions/A/Headers/flann/linear_index.h | 132 +
 .../Versions/A/Headers/flann/logger.h | 130 +
 .../Versions/A/Headers/flann/lsh_index.h | 392 ++
 .../Versions/A/Headers/flann/lsh_table.h | 492 ++
 .../Versions/A/Headers/flann/matrix.h | 116 +
 .../Versions/A/Headers/flann/miniflann.hpp | 162 +
 .../Versions/A/Headers/flann/nn_index.h | 179 +
 .../Versions/A/Headers/flann/object_factory.h | 91 +
 .../Versions/A/Headers/flann/params.h | 96 +
 .../Versions/A/Headers/flann/random.h | 133 +
 .../Versions/A/Headers/flann/result_set.h | 542 ++
 .../Versions/A/Headers/flann/sampling.h | 81 +
 .../Versions/A/Headers/flann/saving.h | 187 +
 .../A/Headers/flann/simplex_downhill.h | 186 +
 .../Versions/A/Headers/flann/timer.h | 93 +
 .../Versions/A/Headers/highgui/cap_ios.h | 169 +
 .../Versions/A/Headers/highgui/highgui.hpp | 255 +
 .../Versions/A/Headers/highgui/highgui_c.h | 650 +++
 .../Versions/A/Headers/highgui/ios.h | 49 +
 .../Versions/A/Headers/imgproc/imgproc.hpp | 1303 +++++
 .../Versions/A/Headers/imgproc/imgproc_c.h | 623 +++
 .../Versions/A/Headers/imgproc/types_c.h | 640 +++
 .../Versions/A/Headers/legacy/blobtrack.hpp | 948 ++++
 .../Versions/A/Headers/legacy/compat.hpp | 740 +++
 .../Versions/A/Headers/legacy/legacy.hpp | 3436 ++++++++++++
 .../Versions/A/Headers/legacy/streams.hpp | 92 +
 .../Versions/A/Headers/ml/ml.hpp | 2147 ++++++++
 .../Versions/A/Headers/nonfree/features2d.hpp | 155 +
 .../Versions/A/Headers/nonfree/gpu.hpp | 128 +
 .../Versions/A/Headers/nonfree/nonfree.hpp | 57 +
 .../Versions/A/Headers/nonfree/ocl.hpp | 140 +
 .../A/Headers/objdetect/objdetect.hpp | 1073 ++++
 .../Versions/A/Headers/opencv.hpp | 61 +
 .../Versions/A/Headers/opencv_modules.hpp | 26 +
 .../Versions/A/Headers/photo/photo.hpp | 91 +
 .../Versions/A/Headers/photo/photo_c.h | 69 +
 .../A/Headers/stitching/detail/autocalib.hpp | 65 +
 .../A/Headers/stitching/detail/blenders.hpp | 137 +
 .../A/Headers/stitching/detail/camera.hpp | 69 +
 .../stitching/detail/exposure_compensate.hpp | 106 +
 .../A/Headers/stitching/detail/matchers.hpp | 190 +
 .../stitching/detail/motion_estimators.hpp | 205 +
 .../Headers/stitching/detail/seam_finders.hpp | 257 +
 .../A/Headers/stitching/detail/util.hpp | 162 +
 .../A/Headers/stitching/detail/util_inl.hpp | 127 +
 .../A/Headers/stitching/detail/warpers.hpp | 510 ++
 .../Headers/stitching/detail/warpers_inl.hpp | 765 +++
 .../Versions/A/Headers/stitching/stitcher.hpp | 174 +
 .../Versions/A/Headers/stitching/warpers.hpp | 170 +
 .../A/Headers/video/background_segm.hpp | 262 +
 .../Versions/A/Headers/video/tracking.hpp | 373 ++
 .../Versions/A/Headers/video/video.hpp | 58 +
 .../A/Headers/videostab/deblurring.hpp | 110 +
 .../A/Headers/videostab/fast_marching.hpp | 103 +
 .../A/Headers/videostab/fast_marching_inl.hpp | 166 +
 .../A/Headers/videostab/frame_source.hpp | 91 +
 .../A/Headers/videostab/global_motion.hpp | 141 +
 .../A/Headers/videostab/inpainting.hpp | 200 +
 .../Versions/A/Headers/videostab/log.hpp | 75 +
 .../Headers/videostab/motion_stabilizing.hpp | 106 +
 .../A/Headers/videostab/optical_flow.hpp | 120 +
 .../A/Headers/videostab/stabilizer.hpp | 187 +
 .../A/Headers/videostab/videostab.hpp | 48 +
 .../Versions/A/Headers/world/world.hpp | 58 +
 .../Versions/A/Resources/Info.plist | 18 +
 .../objc/opencv2.framework/Versions/A/opencv2 | Bin 0 -> 65971260 bytes
 .../objc/opencv2.framework/Versions/Current | 1 +
 .../swix/objc/opencv2.framework/opencv2 | 1 +
 .../swix/objc/operations-and-indexing.m | 72 +
 .../swix/objc/swix-Bridging-Header.h | 17 +
 .../swix_ios_app/swix/tests/speed.swift | 77 +
 .../swix_ios_app/swix/tests/tests.swift | 517 ++
 swix_ios_app/swix_ios_appTests/Info.plist | 24 +
 .../swix_ios_appTests/swix_ios_appTests.swift | 36 +
 swix_ios_app/swix_ios_appUITests/Info.plist | 24 +
 .../swix_ios_appUITests.swift | 36 +
 153 files changed, 56630 insertions(+)
 create mode 100644 swix_ios_app/swix_ios_app.xcodeproj/project.pbxproj
 create mode 100644 swix_ios_app/swix_ios_app.xcodeproj/project.xcworkspace/contents.xcworkspacedata
 create mode 100644 swix_ios_app/swix_ios_app/AppDelegate.swift
 create mode 100644 swix_ios_app/swix_ios_app/Assets.xcassets/AppIcon.appiconset/Contents.json
 create mode 100644 swix_ios_app/swix_ios_app/Base.lproj/LaunchScreen.storyboard
 create mode 100644 swix_ios_app/swix_ios_app/Base.lproj/Main.storyboard
 create mode 100644 swix_ios_app/swix_ios_app/Info.plist
 create mode 100644 swix_ios_app/swix_ios_app/ViewController.swift
 create mode 100644 swix_ios_app/swix_ios_app/space_shuttle.png
 create mode 100755 swix_ios_app/swix_ios_app/swix/ScalarArithmetic-bleed.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/imshow.py
 create mode 100644 swix_ios_app/swix_ios_app/swix/io.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/machine_learning/machine_learning.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/matrix/m-complex-math.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/matrix/m-helper-functions.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/matrix/m-image.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/matrix/m-initing.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/matrix/m-matrix.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/matrix/m-operators.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/matrix/m-simple-math.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/ndarray/complex-math.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/ndarray/helper-functions.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/ndarray/initing.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/ndarray/ndarray.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/ndarray/operators.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/ndarray/simple-math.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/numbers.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/OpenCV.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/conversion.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/machine_learning.mm
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/math.m
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv.mm
 create mode 120000 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Headers
 create mode 120000 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Resources
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/calib3d/calib3d.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/contrib.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/detection_based_tracker.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/hybridtracker.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/openfabmap.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/retina.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/affine.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/core.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/core_c.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/cuda_devptrs.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/devmem2d.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/eigen.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/gpumat.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/internal.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/mat.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/opengl_interop.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/opengl_interop_deprecated.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/operations.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/types_c.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/version.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/wimage.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/features2d/features2d.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/all_indices.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/allocator.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/any.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/autotuned_index.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/composite_index.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/config.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/defines.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/dist.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/dummy.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/dynamic_bitset.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/flann.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/flann_base.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/general.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/ground_truth.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/hdf5.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/heap.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/hierarchical_clustering_index.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/index_testing.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/kdtree_index.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/kdtree_single_index.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/kmeans_index.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/linear_index.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/logger.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/lsh_index.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/lsh_table.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/matrix.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/miniflann.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/nn_index.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/object_factory.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/params.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/random.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/result_set.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/sampling.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/saving.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/simplex_downhill.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/timer.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/highgui/cap_ios.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/highgui/highgui.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/highgui/highgui_c.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/highgui/ios.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/imgproc/imgproc.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/imgproc/imgproc_c.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/imgproc/types_c.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/legacy/blobtrack.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/legacy/compat.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/legacy/legacy.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/legacy/streams.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/ml/ml.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/nonfree/features2d.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/nonfree/gpu.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/nonfree/nonfree.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/nonfree/ocl.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/objdetect/objdetect.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/opencv.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/opencv_modules.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/photo/photo.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/photo/photo_c.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/stitching/detail/autocalib.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/stitching/detail/blenders.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/stitching/detail/camera.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/stitching/detail/exposure_compensate.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/stitching/detail/matchers.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/stitching/detail/motion_estimators.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/stitching/detail/seam_finders.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/stitching/detail/util.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/stitching/detail/util_inl.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/stitching/detail/warpers.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/stitching/detail/warpers_inl.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/stitching/stitcher.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/stitching/warpers.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/video/background_segm.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/video/tracking.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/video/video.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/videostab/deblurring.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/videostab/fast_marching.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/videostab/fast_marching_inl.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/videostab/frame_source.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/videostab/global_motion.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/videostab/inpainting.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/videostab/log.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/videostab/motion_stabilizing.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/videostab/optical_flow.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/videostab/stabilizer.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/videostab/videostab.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/world/world.hpp
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Resources/Info.plist
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/opencv2
 create mode 120000 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/Current
 create mode 120000 swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/opencv2
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/operations-and-indexing.m
 create mode 100644 swix_ios_app/swix_ios_app/swix/objc/swix-Bridging-Header.h
 create mode 100644 swix_ios_app/swix_ios_app/swix/tests/speed.swift
 create mode 100644 swix_ios_app/swix_ios_app/swix/tests/tests.swift
 create mode 100644 swix_ios_app/swix_ios_appTests/Info.plist
 create mode 100644 swix_ios_app/swix_ios_appTests/swix_ios_appTests.swift
 create mode 100644 swix_ios_app/swix_ios_appUITests/Info.plist
 create mode 100644 swix_ios_app/swix_ios_appUITests/swix_ios_appUITests.swift

diff --git a/swix_ios_app/swix_ios_app.xcodeproj/project.pbxproj b/swix_ios_app/swix_ios_app.xcodeproj/project.pbxproj
new file mode 100644
index 0000000..bc8cd84
--- /dev/null
+++ b/swix_ios_app/swix_ios_app.xcodeproj/project.pbxproj
@@ -0,0 +1,798 @@
+// !$*UTF8*$!
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 46; + objects = { + +/* Begin PBXBuildFile section */ + D259E2521BA53D5B0096A116 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2511BA53D5B0096A116 /* AppDelegate.swift */; }; + D259E2541BA53D5B0096A116 /* ViewController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2531BA53D5B0096A116 /* ViewController.swift */; }; + D259E2571BA53D5B0096A116 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = D259E2551BA53D5B0096A116 /* Main.storyboard */; }; + D259E2591BA53D5B0096A116 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = D259E2581BA53D5B0096A116 /* Assets.xcassets */; }; + D259E25C1BA53D5B0096A116 /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = D259E25A1BA53D5B0096A116 /* LaunchScreen.storyboard */; }; + D259E2671BA53D5B0096A116 /* swix_ios_appTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2661BA53D5B0096A116 /* swix_ios_appTests.swift */; }; + D259E2721BA53D5B0096A116 /* swix_ios_appUITests.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2711BA53D5B0096A116 /* swix_ios_appUITests.swift */; }; + D259E2A11BA53DCE0096A116 /* imshow.py in Resources */ = {isa = PBXBuildFile; fileRef = D259E2801BA53DCE0096A116 /* imshow.py */; settings = {ASSET_TAGS = (); }; }; + D259E2A21BA53DCE0096A116 /* imshow.py in Resources */ = {isa = PBXBuildFile; fileRef = D259E2801BA53DCE0096A116 /* imshow.py */; settings = {ASSET_TAGS = (); }; }; + D259E2A31BA53DCE0096A116 /* imshow.py in Resources */ = {isa = PBXBuildFile; fileRef = D259E2801BA53DCE0096A116 /* imshow.py */; settings = {ASSET_TAGS = (); }; }; + D259E2A41BA53DCE0096A116 /* io.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2811BA53DCE0096A116 /* io.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2A51BA53DCE0096A116 /* io.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2811BA53DCE0096A116 /* io.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2A61BA53DCE0096A116 /* io.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2811BA53DCE0096A116 /* io.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2A71BA53DCE0096A116 /* machine_learning.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2831BA53DCE0096A116 /* machine_learning.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2A81BA53DCE0096A116 /* machine_learning.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2831BA53DCE0096A116 /* machine_learning.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2A91BA53DCE0096A116 /* machine_learning.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2831BA53DCE0096A116 /* machine_learning.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2AA1BA53DCE0096A116 /* m-complex-math.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2851BA53DCE0096A116 /* m-complex-math.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2AB1BA53DCE0096A116 /* m-complex-math.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2851BA53DCE0096A116 /* m-complex-math.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2AC1BA53DCE0096A116 /* m-complex-math.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2851BA53DCE0096A116 /* m-complex-math.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2AD1BA53DCE0096A116 /* m-helper-functions.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2861BA53DCE0096A116 /* m-helper-functions.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2AE1BA53DCE0096A116 /* 
m-helper-functions.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2861BA53DCE0096A116 /* m-helper-functions.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2AF1BA53DCE0096A116 /* m-helper-functions.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2861BA53DCE0096A116 /* m-helper-functions.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2B01BA53DCE0096A116 /* m-image.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2871BA53DCE0096A116 /* m-image.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2B11BA53DCE0096A116 /* m-image.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2871BA53DCE0096A116 /* m-image.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2B21BA53DCE0096A116 /* m-image.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2871BA53DCE0096A116 /* m-image.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2B31BA53DCE0096A116 /* m-initing.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2881BA53DCE0096A116 /* m-initing.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2B41BA53DCE0096A116 /* m-initing.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2881BA53DCE0096A116 /* m-initing.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2B51BA53DCE0096A116 /* m-initing.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2881BA53DCE0096A116 /* m-initing.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2B61BA53DCE0096A116 /* m-matrix.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2891BA53DCE0096A116 /* m-matrix.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2B71BA53DCE0096A116 /* m-matrix.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2891BA53DCE0096A116 /* m-matrix.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2B81BA53DCE0096A116 /* m-matrix.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2891BA53DCE0096A116 /* m-matrix.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2B91BA53DCE0096A116 /* m-operators.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E28A1BA53DCE0096A116 /* m-operators.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2BA1BA53DCE0096A116 /* m-operators.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E28A1BA53DCE0096A116 /* m-operators.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2BB1BA53DCE0096A116 /* m-operators.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E28A1BA53DCE0096A116 /* m-operators.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2BC1BA53DCE0096A116 /* m-simple-math.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E28B1BA53DCE0096A116 /* m-simple-math.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2BD1BA53DCE0096A116 /* m-simple-math.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E28B1BA53DCE0096A116 /* m-simple-math.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2BE1BA53DCE0096A116 /* m-simple-math.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E28B1BA53DCE0096A116 /* m-simple-math.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2BF1BA53DCE0096A116 /* complex-math.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E28D1BA53DCE0096A116 /* complex-math.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2C01BA53DCE0096A116 /* complex-math.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E28D1BA53DCE0096A116 /* complex-math.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2C11BA53DCE0096A116 /* complex-math.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E28D1BA53DCE0096A116 /* complex-math.swift */; settings = {ASSET_TAGS = 
(); }; }; + D259E2C21BA53DCE0096A116 /* helper-functions.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E28E1BA53DCE0096A116 /* helper-functions.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2C31BA53DCE0096A116 /* helper-functions.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E28E1BA53DCE0096A116 /* helper-functions.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2C41BA53DCE0096A116 /* helper-functions.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E28E1BA53DCE0096A116 /* helper-functions.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2C51BA53DCE0096A116 /* initing.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E28F1BA53DCE0096A116 /* initing.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2C61BA53DCE0096A116 /* initing.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E28F1BA53DCE0096A116 /* initing.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2C71BA53DCE0096A116 /* initing.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E28F1BA53DCE0096A116 /* initing.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2C81BA53DCE0096A116 /* ndarray.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2901BA53DCE0096A116 /* ndarray.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2C91BA53DCE0096A116 /* ndarray.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2901BA53DCE0096A116 /* ndarray.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2CA1BA53DCE0096A116 /* ndarray.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2901BA53DCE0096A116 /* ndarray.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2CB1BA53DCE0096A116 /* operators.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2911BA53DCE0096A116 /* operators.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2CC1BA53DCE0096A116 /* operators.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2911BA53DCE0096A116 /* operators.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2CD1BA53DCE0096A116 /* operators.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2911BA53DCE0096A116 /* operators.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2CE1BA53DCE0096A116 /* simple-math.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2921BA53DCE0096A116 /* simple-math.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2CF1BA53DCE0096A116 /* simple-math.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2921BA53DCE0096A116 /* simple-math.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2D01BA53DCE0096A116 /* simple-math.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2921BA53DCE0096A116 /* simple-math.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2D11BA53DCE0096A116 /* numbers.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2931BA53DCE0096A116 /* numbers.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2D21BA53DCE0096A116 /* numbers.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2931BA53DCE0096A116 /* numbers.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2D31BA53DCE0096A116 /* numbers.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2931BA53DCE0096A116 /* numbers.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2D41BA53DCE0096A116 /* conversion.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2951BA53DCE0096A116 /* conversion.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2D51BA53DCE0096A116 /* conversion.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2951BA53DCE0096A116 /* conversion.swift */; settings = {ASSET_TAGS = (); }; }; + 
D259E2D61BA53DCE0096A116 /* conversion.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2951BA53DCE0096A116 /* conversion.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2D71BA53DCE0096A116 /* machine_learning.mm in Sources */ = {isa = PBXBuildFile; fileRef = D259E2961BA53DCE0096A116 /* machine_learning.mm */; settings = {ASSET_TAGS = (); }; }; + D259E2D81BA53DCE0096A116 /* machine_learning.mm in Sources */ = {isa = PBXBuildFile; fileRef = D259E2961BA53DCE0096A116 /* machine_learning.mm */; settings = {ASSET_TAGS = (); }; }; + D259E2D91BA53DCE0096A116 /* machine_learning.mm in Sources */ = {isa = PBXBuildFile; fileRef = D259E2961BA53DCE0096A116 /* machine_learning.mm */; settings = {ASSET_TAGS = (); }; }; + D259E2DA1BA53DCE0096A116 /* math.m in Sources */ = {isa = PBXBuildFile; fileRef = D259E2971BA53DCE0096A116 /* math.m */; settings = {ASSET_TAGS = (); }; }; + D259E2DB1BA53DCE0096A116 /* math.m in Sources */ = {isa = PBXBuildFile; fileRef = D259E2971BA53DCE0096A116 /* math.m */; settings = {ASSET_TAGS = (); }; }; + D259E2DC1BA53DCE0096A116 /* math.m in Sources */ = {isa = PBXBuildFile; fileRef = D259E2971BA53DCE0096A116 /* math.m */; settings = {ASSET_TAGS = (); }; }; + D259E2DD1BA53DCE0096A116 /* opencv.mm in Sources */ = {isa = PBXBuildFile; fileRef = D259E2991BA53DCE0096A116 /* opencv.mm */; settings = {ASSET_TAGS = (); }; }; + D259E2DE1BA53DCE0096A116 /* opencv.mm in Sources */ = {isa = PBXBuildFile; fileRef = D259E2991BA53DCE0096A116 /* opencv.mm */; settings = {ASSET_TAGS = (); }; }; + D259E2DF1BA53DCE0096A116 /* opencv.mm in Sources */ = {isa = PBXBuildFile; fileRef = D259E2991BA53DCE0096A116 /* opencv.mm */; settings = {ASSET_TAGS = (); }; }; + D259E2E01BA53DCE0096A116 /* opencv2.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D259E29A1BA53DCE0096A116 /* opencv2.framework */; settings = {ASSET_TAGS = (); }; }; + D259E2E11BA53DCE0096A116 /* opencv2.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D259E29A1BA53DCE0096A116 /* opencv2.framework */; settings = {ASSET_TAGS = (); }; }; + D259E2E21BA53DCE0096A116 /* opencv2.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D259E29A1BA53DCE0096A116 /* opencv2.framework */; settings = {ASSET_TAGS = (); }; }; + D259E2E31BA53DCE0096A116 /* operations-and-indexing.m in Sources */ = {isa = PBXBuildFile; fileRef = D259E29B1BA53DCE0096A116 /* operations-and-indexing.m */; settings = {ASSET_TAGS = (); }; }; + D259E2E41BA53DCE0096A116 /* operations-and-indexing.m in Sources */ = {isa = PBXBuildFile; fileRef = D259E29B1BA53DCE0096A116 /* operations-and-indexing.m */; settings = {ASSET_TAGS = (); }; }; + D259E2E51BA53DCE0096A116 /* operations-and-indexing.m in Sources */ = {isa = PBXBuildFile; fileRef = D259E29B1BA53DCE0096A116 /* operations-and-indexing.m */; settings = {ASSET_TAGS = (); }; }; + D259E2E61BA53DCE0096A116 /* ScalarArithmetic-bleed.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E29D1BA53DCE0096A116 /* ScalarArithmetic-bleed.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2E71BA53DCE0096A116 /* ScalarArithmetic-bleed.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E29D1BA53DCE0096A116 /* ScalarArithmetic-bleed.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2E81BA53DCE0096A116 /* ScalarArithmetic-bleed.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E29D1BA53DCE0096A116 /* ScalarArithmetic-bleed.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2E91BA53DCE0096A116 /* speed.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E29F1BA53DCE0096A116 /* 
speed.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2EA1BA53DCE0096A116 /* speed.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E29F1BA53DCE0096A116 /* speed.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2EB1BA53DCE0096A116 /* speed.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E29F1BA53DCE0096A116 /* speed.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2EC1BA53DCE0096A116 /* tests.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2A01BA53DCE0096A116 /* tests.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2ED1BA53DCE0096A116 /* tests.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2A01BA53DCE0096A116 /* tests.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2EE1BA53DCE0096A116 /* tests.swift in Sources */ = {isa = PBXBuildFile; fileRef = D259E2A01BA53DCE0096A116 /* tests.swift */; settings = {ASSET_TAGS = (); }; }; + D259E2F01BA53EC70096A116 /* space_shuttle.png in Resources */ = {isa = PBXBuildFile; fileRef = D259E2EF1BA53EC70096A116 /* space_shuttle.png */; settings = {ASSET_TAGS = (); }; }; + D259E2F11BA53EC70096A116 /* space_shuttle.png in Resources */ = {isa = PBXBuildFile; fileRef = D259E2EF1BA53EC70096A116 /* space_shuttle.png */; settings = {ASSET_TAGS = (); }; }; + D259E2F21BA53EC70096A116 /* space_shuttle.png in Resources */ = {isa = PBXBuildFile; fileRef = D259E2EF1BA53EC70096A116 /* space_shuttle.png */; settings = {ASSET_TAGS = (); }; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + D259E2631BA53D5B0096A116 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = D259E2461BA53D5B0096A116 /* Project object */; + proxyType = 1; + remoteGlobalIDString = D259E24D1BA53D5B0096A116; + remoteInfo = swix_ios_app; + }; + D259E26E1BA53D5B0096A116 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = D259E2461BA53D5B0096A116 /* Project object */; + proxyType = 1; + remoteGlobalIDString = D259E24D1BA53D5B0096A116; + remoteInfo = swix_ios_app; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXFileReference section */ + D259E24E1BA53D5B0096A116 /* swix_ios_app.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = swix_ios_app.app; sourceTree = BUILT_PRODUCTS_DIR; }; + D259E2511BA53D5B0096A116 /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = ""; }; + D259E2531BA53D5B0096A116 /* ViewController.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ViewController.swift; sourceTree = ""; }; + D259E2561BA53D5B0096A116 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = ""; }; + D259E2581BA53D5B0096A116 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; + D259E25B1BA53D5B0096A116 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = ""; }; + D259E25D1BA53D5B0096A116 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + D259E2621BA53D5B0096A116 /* swix_ios_appTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = swix_ios_appTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + D259E2661BA53D5B0096A116 /* 
swix_ios_appTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = swix_ios_appTests.swift; sourceTree = ""; }; + D259E2681BA53D5B0096A116 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + D259E26D1BA53D5B0096A116 /* swix_ios_appUITests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = swix_ios_appUITests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + D259E2711BA53D5B0096A116 /* swix_ios_appUITests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = swix_ios_appUITests.swift; sourceTree = ""; }; + D259E2731BA53D5B0096A116 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + D259E2801BA53DCE0096A116 /* imshow.py */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; path = imshow.py; sourceTree = ""; }; + D259E2811BA53DCE0096A116 /* io.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = io.swift; sourceTree = ""; }; + D259E2831BA53DCE0096A116 /* machine_learning.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = machine_learning.swift; sourceTree = ""; }; + D259E2851BA53DCE0096A116 /* m-complex-math.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "m-complex-math.swift"; sourceTree = ""; }; + D259E2861BA53DCE0096A116 /* m-helper-functions.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "m-helper-functions.swift"; sourceTree = ""; }; + D259E2871BA53DCE0096A116 /* m-image.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "m-image.swift"; sourceTree = ""; }; + D259E2881BA53DCE0096A116 /* m-initing.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "m-initing.swift"; sourceTree = ""; }; + D259E2891BA53DCE0096A116 /* m-matrix.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "m-matrix.swift"; sourceTree = ""; }; + D259E28A1BA53DCE0096A116 /* m-operators.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "m-operators.swift"; sourceTree = ""; }; + D259E28B1BA53DCE0096A116 /* m-simple-math.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "m-simple-math.swift"; sourceTree = ""; }; + D259E28D1BA53DCE0096A116 /* complex-math.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "complex-math.swift"; sourceTree = ""; }; + D259E28E1BA53DCE0096A116 /* helper-functions.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "helper-functions.swift"; sourceTree = ""; }; + D259E28F1BA53DCE0096A116 /* initing.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = initing.swift; sourceTree = ""; }; + D259E2901BA53DCE0096A116 /* ndarray.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ndarray.swift; sourceTree = ""; }; + D259E2911BA53DCE0096A116 /* operators.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = operators.swift; sourceTree = ""; }; + D259E2921BA53DCE0096A116 /* 
simple-math.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "simple-math.swift"; sourceTree = ""; }; + D259E2931BA53DCE0096A116 /* numbers.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = numbers.swift; sourceTree = ""; }; + D259E2951BA53DCE0096A116 /* conversion.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = conversion.swift; sourceTree = ""; }; + D259E2961BA53DCE0096A116 /* machine_learning.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = machine_learning.mm; sourceTree = ""; }; + D259E2971BA53DCE0096A116 /* math.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = math.m; sourceTree = ""; }; + D259E2981BA53DCE0096A116 /* OpenCV.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = OpenCV.h; sourceTree = ""; }; + D259E2991BA53DCE0096A116 /* opencv.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = opencv.mm; sourceTree = ""; }; + D259E29A1BA53DCE0096A116 /* opencv2.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; path = opencv2.framework; sourceTree = ""; }; + D259E29B1BA53DCE0096A116 /* operations-and-indexing.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "operations-and-indexing.m"; sourceTree = ""; }; + D259E29C1BA53DCE0096A116 /* swix-Bridging-Header.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "swix-Bridging-Header.h"; sourceTree = ""; }; + D259E29D1BA53DCE0096A116 /* ScalarArithmetic-bleed.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "ScalarArithmetic-bleed.swift"; sourceTree = ""; }; + D259E29F1BA53DCE0096A116 /* speed.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = speed.swift; sourceTree = ""; }; + D259E2A01BA53DCE0096A116 /* tests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = tests.swift; sourceTree = ""; }; + D259E2EF1BA53EC70096A116 /* space_shuttle.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = space_shuttle.png; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + D259E24B1BA53D5B0096A116 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + D259E2E01BA53DCE0096A116 /* opencv2.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + D259E25F1BA53D5B0096A116 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + D259E2E11BA53DCE0096A116 /* opencv2.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + D259E26A1BA53D5B0096A116 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + D259E2E21BA53DCE0096A116 /* opencv2.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + D259E2451BA53D5B0096A116 = { + isa = PBXGroup; + children = ( + D259E2501BA53D5B0096A116 /* swix_ios_app */, + D259E2651BA53D5B0096A116 /* swix_ios_appTests */, + D259E2701BA53D5B0096A116 /* swix_ios_appUITests */, + 
D259E24F1BA53D5B0096A116 /* Products */, + ); + sourceTree = ""; + }; + D259E24F1BA53D5B0096A116 /* Products */ = { + isa = PBXGroup; + children = ( + D259E24E1BA53D5B0096A116 /* swix_ios_app.app */, + D259E2621BA53D5B0096A116 /* swix_ios_appTests.xctest */, + D259E26D1BA53D5B0096A116 /* swix_ios_appUITests.xctest */, + ); + name = Products; + sourceTree = ""; + }; + D259E2501BA53D5B0096A116 /* swix_ios_app */ = { + isa = PBXGroup; + children = ( + D259E2511BA53D5B0096A116 /* AppDelegate.swift */, + D259E2531BA53D5B0096A116 /* ViewController.swift */, + D259E2EF1BA53EC70096A116 /* space_shuttle.png */, + D259E27F1BA53DCE0096A116 /* swix */, + D259E2551BA53D5B0096A116 /* Main.storyboard */, + D259E2581BA53D5B0096A116 /* Assets.xcassets */, + D259E25A1BA53D5B0096A116 /* LaunchScreen.storyboard */, + D259E25D1BA53D5B0096A116 /* Info.plist */, + ); + path = swix_ios_app; + sourceTree = ""; + }; + D259E2651BA53D5B0096A116 /* swix_ios_appTests */ = { + isa = PBXGroup; + children = ( + D259E2661BA53D5B0096A116 /* swix_ios_appTests.swift */, + D259E2681BA53D5B0096A116 /* Info.plist */, + ); + path = swix_ios_appTests; + sourceTree = ""; + }; + D259E2701BA53D5B0096A116 /* swix_ios_appUITests */ = { + isa = PBXGroup; + children = ( + D259E2711BA53D5B0096A116 /* swix_ios_appUITests.swift */, + D259E2731BA53D5B0096A116 /* Info.plist */, + ); + path = swix_ios_appUITests; + sourceTree = ""; + }; + D259E27F1BA53DCE0096A116 /* swix */ = { + isa = PBXGroup; + children = ( + D259E2801BA53DCE0096A116 /* imshow.py */, + D259E2811BA53DCE0096A116 /* io.swift */, + D259E2821BA53DCE0096A116 /* machine_learning */, + D259E2841BA53DCE0096A116 /* matrix */, + D259E28C1BA53DCE0096A116 /* ndarray */, + D259E2931BA53DCE0096A116 /* numbers.swift */, + D259E2941BA53DCE0096A116 /* objc */, + D259E29D1BA53DCE0096A116 /* ScalarArithmetic-bleed.swift */, + D259E29E1BA53DCE0096A116 /* tests */, + ); + path = swix; + sourceTree = ""; + }; + D259E2821BA53DCE0096A116 /* machine_learning */ = { + isa = PBXGroup; + children = ( + D259E2831BA53DCE0096A116 /* machine_learning.swift */, + ); + path = machine_learning; + sourceTree = ""; + }; + D259E2841BA53DCE0096A116 /* matrix */ = { + isa = PBXGroup; + children = ( + D259E2851BA53DCE0096A116 /* m-complex-math.swift */, + D259E2861BA53DCE0096A116 /* m-helper-functions.swift */, + D259E2871BA53DCE0096A116 /* m-image.swift */, + D259E2881BA53DCE0096A116 /* m-initing.swift */, + D259E2891BA53DCE0096A116 /* m-matrix.swift */, + D259E28A1BA53DCE0096A116 /* m-operators.swift */, + D259E28B1BA53DCE0096A116 /* m-simple-math.swift */, + ); + path = matrix; + sourceTree = ""; + }; + D259E28C1BA53DCE0096A116 /* ndarray */ = { + isa = PBXGroup; + children = ( + D259E28D1BA53DCE0096A116 /* complex-math.swift */, + D259E28E1BA53DCE0096A116 /* helper-functions.swift */, + D259E28F1BA53DCE0096A116 /* initing.swift */, + D259E2901BA53DCE0096A116 /* ndarray.swift */, + D259E2911BA53DCE0096A116 /* operators.swift */, + D259E2921BA53DCE0096A116 /* simple-math.swift */, + ); + path = ndarray; + sourceTree = ""; + }; + D259E2941BA53DCE0096A116 /* objc */ = { + isa = PBXGroup; + children = ( + D259E2951BA53DCE0096A116 /* conversion.swift */, + D259E2961BA53DCE0096A116 /* machine_learning.mm */, + D259E2971BA53DCE0096A116 /* math.m */, + D259E2981BA53DCE0096A116 /* OpenCV.h */, + D259E2991BA53DCE0096A116 /* opencv.mm */, + D259E29A1BA53DCE0096A116 /* opencv2.framework */, + D259E29B1BA53DCE0096A116 /* operations-and-indexing.m */, + D259E29C1BA53DCE0096A116 /* swix-Bridging-Header.h */, + ); + path = 
objc; + sourceTree = ""; + }; + D259E29E1BA53DCE0096A116 /* tests */ = { + isa = PBXGroup; + children = ( + D259E29F1BA53DCE0096A116 /* speed.swift */, + D259E2A01BA53DCE0096A116 /* tests.swift */, + ); + path = tests; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + D259E24D1BA53D5B0096A116 /* swix_ios_app */ = { + isa = PBXNativeTarget; + buildConfigurationList = D259E2761BA53D5B0096A116 /* Build configuration list for PBXNativeTarget "swix_ios_app" */; + buildPhases = ( + D259E24A1BA53D5B0096A116 /* Sources */, + D259E24B1BA53D5B0096A116 /* Frameworks */, + D259E24C1BA53D5B0096A116 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = swix_ios_app; + productName = swix_ios_app; + productReference = D259E24E1BA53D5B0096A116 /* swix_ios_app.app */; + productType = "com.apple.product-type.application"; + }; + D259E2611BA53D5B0096A116 /* swix_ios_appTests */ = { + isa = PBXNativeTarget; + buildConfigurationList = D259E2791BA53D5B0096A116 /* Build configuration list for PBXNativeTarget "swix_ios_appTests" */; + buildPhases = ( + D259E25E1BA53D5B0096A116 /* Sources */, + D259E25F1BA53D5B0096A116 /* Frameworks */, + D259E2601BA53D5B0096A116 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + D259E2641BA53D5B0096A116 /* PBXTargetDependency */, + ); + name = swix_ios_appTests; + productName = swix_ios_appTests; + productReference = D259E2621BA53D5B0096A116 /* swix_ios_appTests.xctest */; + productType = "com.apple.product-type.bundle.unit-test"; + }; + D259E26C1BA53D5B0096A116 /* swix_ios_appUITests */ = { + isa = PBXNativeTarget; + buildConfigurationList = D259E27C1BA53D5B0096A116 /* Build configuration list for PBXNativeTarget "swix_ios_appUITests" */; + buildPhases = ( + D259E2691BA53D5B0096A116 /* Sources */, + D259E26A1BA53D5B0096A116 /* Frameworks */, + D259E26B1BA53D5B0096A116 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + D259E26F1BA53D5B0096A116 /* PBXTargetDependency */, + ); + name = swix_ios_appUITests; + productName = swix_ios_appUITests; + productReference = D259E26D1BA53D5B0096A116 /* swix_ios_appUITests.xctest */; + productType = "com.apple.product-type.bundle.ui-testing"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + D259E2461BA53D5B0096A116 /* Project object */ = { + isa = PBXProject; + attributes = { + LastUpgradeCheck = 0700; + ORGANIZATIONNAME = com.scott; + TargetAttributes = { + D259E24D1BA53D5B0096A116 = { + CreatedOnToolsVersion = 7.0; + }; + D259E2611BA53D5B0096A116 = { + CreatedOnToolsVersion = 7.0; + TestTargetID = D259E24D1BA53D5B0096A116; + }; + D259E26C1BA53D5B0096A116 = { + CreatedOnToolsVersion = 7.0; + TestTargetID = D259E24D1BA53D5B0096A116; + }; + }; + }; + buildConfigurationList = D259E2491BA53D5B0096A116 /* Build configuration list for PBXProject "swix_ios_app" */; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 0; + knownRegions = ( + en, + Base, + ); + mainGroup = D259E2451BA53D5B0096A116; + productRefGroup = D259E24F1BA53D5B0096A116 /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + D259E24D1BA53D5B0096A116 /* swix_ios_app */, + D259E2611BA53D5B0096A116 /* swix_ios_appTests */, + D259E26C1BA53D5B0096A116 /* swix_ios_appUITests */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + D259E24C1BA53D5B0096A116 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 
D259E2F01BA53EC70096A116 /* space_shuttle.png in Resources */, + D259E25C1BA53D5B0096A116 /* LaunchScreen.storyboard in Resources */, + D259E2591BA53D5B0096A116 /* Assets.xcassets in Resources */, + D259E2571BA53D5B0096A116 /* Main.storyboard in Resources */, + D259E2A11BA53DCE0096A116 /* imshow.py in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + D259E2601BA53D5B0096A116 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + D259E2F11BA53EC70096A116 /* space_shuttle.png in Resources */, + D259E2A21BA53DCE0096A116 /* imshow.py in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + D259E26B1BA53D5B0096A116 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + D259E2F21BA53EC70096A116 /* space_shuttle.png in Resources */, + D259E2A31BA53DCE0096A116 /* imshow.py in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + D259E24A1BA53D5B0096A116 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + D259E2CE1BA53DCE0096A116 /* simple-math.swift in Sources */, + D259E2C51BA53DCE0096A116 /* initing.swift in Sources */, + D259E2541BA53D5B0096A116 /* ViewController.swift in Sources */, + D259E2BC1BA53DCE0096A116 /* m-simple-math.swift in Sources */, + D259E2B61BA53DCE0096A116 /* m-matrix.swift in Sources */, + D259E2AD1BA53DCE0096A116 /* m-helper-functions.swift in Sources */, + D259E2E31BA53DCE0096A116 /* operations-and-indexing.m in Sources */, + D259E2B01BA53DCE0096A116 /* m-image.swift in Sources */, + D259E2E91BA53DCE0096A116 /* speed.swift in Sources */, + D259E2E61BA53DCE0096A116 /* ScalarArithmetic-bleed.swift in Sources */, + D259E2D71BA53DCE0096A116 /* machine_learning.mm in Sources */, + D259E2C81BA53DCE0096A116 /* ndarray.swift in Sources */, + D259E2A71BA53DCE0096A116 /* machine_learning.swift in Sources */, + D259E2C21BA53DCE0096A116 /* helper-functions.swift in Sources */, + D259E2CB1BA53DCE0096A116 /* operators.swift in Sources */, + D259E2DA1BA53DCE0096A116 /* math.m in Sources */, + D259E2B91BA53DCE0096A116 /* m-operators.swift in Sources */, + D259E2D11BA53DCE0096A116 /* numbers.swift in Sources */, + D259E2A41BA53DCE0096A116 /* io.swift in Sources */, + D259E2DD1BA53DCE0096A116 /* opencv.mm in Sources */, + D259E2BF1BA53DCE0096A116 /* complex-math.swift in Sources */, + D259E2521BA53D5B0096A116 /* AppDelegate.swift in Sources */, + D259E2B31BA53DCE0096A116 /* m-initing.swift in Sources */, + D259E2D41BA53DCE0096A116 /* conversion.swift in Sources */, + D259E2AA1BA53DCE0096A116 /* m-complex-math.swift in Sources */, + D259E2EC1BA53DCE0096A116 /* tests.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + D259E25E1BA53D5B0096A116 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + D259E2BA1BA53DCE0096A116 /* m-operators.swift in Sources */, + D259E2E41BA53DCE0096A116 /* operations-and-indexing.m in Sources */, + D259E2C01BA53DCE0096A116 /* complex-math.swift in Sources */, + D259E2B71BA53DCE0096A116 /* m-matrix.swift in Sources */, + D259E2D21BA53DCE0096A116 /* numbers.swift in Sources */, + D259E2E71BA53DCE0096A116 /* ScalarArithmetic-bleed.swift in Sources */, + D259E2C31BA53DCE0096A116 /* helper-functions.swift in Sources */, + D259E2EA1BA53DCE0096A116 /* speed.swift in Sources */, + D259E2AB1BA53DCE0096A116 /* m-complex-math.swift in Sources */, + 
D259E2B11BA53DCE0096A116 /* m-image.swift in Sources */, + D259E2D51BA53DCE0096A116 /* conversion.swift in Sources */, + D259E2AE1BA53DCE0096A116 /* m-helper-functions.swift in Sources */, + D259E2671BA53D5B0096A116 /* swix_ios_appTests.swift in Sources */, + D259E2DE1BA53DCE0096A116 /* opencv.mm in Sources */, + D259E2B41BA53DCE0096A116 /* m-initing.swift in Sources */, + D259E2D81BA53DCE0096A116 /* machine_learning.mm in Sources */, + D259E2A51BA53DCE0096A116 /* io.swift in Sources */, + D259E2C61BA53DCE0096A116 /* initing.swift in Sources */, + D259E2A81BA53DCE0096A116 /* machine_learning.swift in Sources */, + D259E2DB1BA53DCE0096A116 /* math.m in Sources */, + D259E2BD1BA53DCE0096A116 /* m-simple-math.swift in Sources */, + D259E2ED1BA53DCE0096A116 /* tests.swift in Sources */, + D259E2CF1BA53DCE0096A116 /* simple-math.swift in Sources */, + D259E2C91BA53DCE0096A116 /* ndarray.swift in Sources */, + D259E2CC1BA53DCE0096A116 /* operators.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + D259E2691BA53D5B0096A116 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + D259E2BB1BA53DCE0096A116 /* m-operators.swift in Sources */, + D259E2E51BA53DCE0096A116 /* operations-and-indexing.m in Sources */, + D259E2C11BA53DCE0096A116 /* complex-math.swift in Sources */, + D259E2B81BA53DCE0096A116 /* m-matrix.swift in Sources */, + D259E2D31BA53DCE0096A116 /* numbers.swift in Sources */, + D259E2E81BA53DCE0096A116 /* ScalarArithmetic-bleed.swift in Sources */, + D259E2C41BA53DCE0096A116 /* helper-functions.swift in Sources */, + D259E2EB1BA53DCE0096A116 /* speed.swift in Sources */, + D259E2AC1BA53DCE0096A116 /* m-complex-math.swift in Sources */, + D259E2B21BA53DCE0096A116 /* m-image.swift in Sources */, + D259E2D61BA53DCE0096A116 /* conversion.swift in Sources */, + D259E2AF1BA53DCE0096A116 /* m-helper-functions.swift in Sources */, + D259E2721BA53D5B0096A116 /* swix_ios_appUITests.swift in Sources */, + D259E2DF1BA53DCE0096A116 /* opencv.mm in Sources */, + D259E2B51BA53DCE0096A116 /* m-initing.swift in Sources */, + D259E2D91BA53DCE0096A116 /* machine_learning.mm in Sources */, + D259E2A61BA53DCE0096A116 /* io.swift in Sources */, + D259E2C71BA53DCE0096A116 /* initing.swift in Sources */, + D259E2A91BA53DCE0096A116 /* machine_learning.swift in Sources */, + D259E2DC1BA53DCE0096A116 /* math.m in Sources */, + D259E2BE1BA53DCE0096A116 /* m-simple-math.swift in Sources */, + D259E2EE1BA53DCE0096A116 /* tests.swift in Sources */, + D259E2D01BA53DCE0096A116 /* simple-math.swift in Sources */, + D259E2CA1BA53DCE0096A116 /* ndarray.swift in Sources */, + D259E2CD1BA53DCE0096A116 /* operators.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXTargetDependency section */ + D259E2641BA53D5B0096A116 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = D259E24D1BA53D5B0096A116 /* swix_ios_app */; + targetProxy = D259E2631BA53D5B0096A116 /* PBXContainerItemProxy */; + }; + D259E26F1BA53D5B0096A116 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = D259E24D1BA53D5B0096A116 /* swix_ios_app */; + targetProxy = D259E26E1BA53D5B0096A116 /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + +/* Begin PBXVariantGroup section */ + D259E2551BA53D5B0096A116 /* Main.storyboard */ = { + isa = PBXVariantGroup; + children = ( + D259E2561BA53D5B0096A116 /* Base */, + ); + name = Main.storyboard; + sourceTree = ""; + }; + 
D259E25A1BA53D5B0096A116 /* LaunchScreen.storyboard */ = { + isa = PBXVariantGroup; + children = ( + D259E25B1BA53D5B0096A116 /* Base */, + ); + name = LaunchScreen.storyboard; + sourceTree = ""; + }; +/* End PBXVariantGroup section */ + +/* Begin XCBuildConfiguration section */ + D259E2741BA53D5B0096A116 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 9.0; + MTL_ENABLE_DEBUG_INFO = YES; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = iphoneos; + SWIFT_OBJC_BRIDGING_HEADER = "swix_ios_app/swix/objc/swix-Bridging-Header.h"; + SWIFT_OPTIMIZATION_LEVEL = "-Onone"; + }; + name = Debug; + }; + D259E2751BA53D5B0096A116 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 9.0; + MTL_ENABLE_DEBUG_INFO = NO; + SDKROOT = iphoneos; + SWIFT_OBJC_BRIDGING_HEADER = "swix_ios_app/swix/objc/swix-Bridging-Header.h"; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; + D259E2771BA53D5B0096A116 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + FRAMEWORK_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)/swix_ios_app/swix/objc", + ); + INFOPLIST_FILE = swix_ios_app/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = "stss.swix-ios-app"; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name 
= Debug; + }; + D259E2781BA53D5B0096A116 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + FRAMEWORK_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)/swix_ios_app/swix/objc", + ); + INFOPLIST_FILE = swix_ios_app/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = "stss.swix-ios-app"; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + D259E27A1BA53D5B0096A116 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + BUNDLE_LOADER = "$(TEST_HOST)"; + FRAMEWORK_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)/swix_ios_app/swix/objc", + ); + INFOPLIST_FILE = swix_ios_appTests/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = "stss.swix-ios-appTests"; + PRODUCT_NAME = "$(TARGET_NAME)"; + TEST_HOST = "$(BUILT_PRODUCTS_DIR)/swix_ios_app.app/swix_ios_app"; + }; + name = Debug; + }; + D259E27B1BA53D5B0096A116 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + BUNDLE_LOADER = "$(TEST_HOST)"; + FRAMEWORK_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)/swix_ios_app/swix/objc", + ); + INFOPLIST_FILE = swix_ios_appTests/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = "stss.swix-ios-appTests"; + PRODUCT_NAME = "$(TARGET_NAME)"; + TEST_HOST = "$(BUILT_PRODUCTS_DIR)/swix_ios_app.app/swix_ios_app"; + }; + name = Release; + }; + D259E27D1BA53D5B0096A116 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + FRAMEWORK_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)/swix_ios_app/swix/objc", + ); + INFOPLIST_FILE = swix_ios_appUITests/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = "stss.swix-ios-appUITests"; + PRODUCT_NAME = "$(TARGET_NAME)"; + TEST_TARGET_NAME = swix_ios_app; + USES_XCTRUNNER = YES; + }; + name = Debug; + }; + D259E27E1BA53D5B0096A116 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + FRAMEWORK_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)/swix_ios_app/swix/objc", + ); + INFOPLIST_FILE = swix_ios_appUITests/Info.plist; + LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + PRODUCT_BUNDLE_IDENTIFIER = "stss.swix-ios-appUITests"; + PRODUCT_NAME = "$(TARGET_NAME)"; + TEST_TARGET_NAME = swix_ios_app; + USES_XCTRUNNER = YES; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + D259E2491BA53D5B0096A116 /* Build configuration list for PBXProject "swix_ios_app" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + D259E2741BA53D5B0096A116 /* Debug */, + D259E2751BA53D5B0096A116 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + D259E2761BA53D5B0096A116 /* Build configuration list for PBXNativeTarget "swix_ios_app" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + D259E2771BA53D5B0096A116 /* Debug */, + D259E2781BA53D5B0096A116 /* Release */, + ); + defaultConfigurationIsVisible = 0; + }; + D259E2791BA53D5B0096A116 /* Build configuration list for PBXNativeTarget "swix_ios_appTests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + D259E27A1BA53D5B0096A116 /* Debug */, + D259E27B1BA53D5B0096A116 /* Release */, + ); + 
defaultConfigurationIsVisible = 0; + }; + D259E27C1BA53D5B0096A116 /* Build configuration list for PBXNativeTarget "swix_ios_appUITests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + D259E27D1BA53D5B0096A116 /* Debug */, + D259E27E1BA53D5B0096A116 /* Release */, + ); + defaultConfigurationIsVisible = 0; + }; +/* End XCConfigurationList section */ + }; + rootObject = D259E2461BA53D5B0096A116 /* Project object */; +} diff --git a/swix_ios_app/swix_ios_app.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/swix_ios_app/swix_ios_app.xcodeproj/project.xcworkspace/contents.xcworkspacedata new file mode 100644 index 0000000..dd9ff12 --- /dev/null +++ b/swix_ios_app/swix_ios_app.xcodeproj/project.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,7 @@ + + + + + diff --git a/swix_ios_app/swix_ios_app/AppDelegate.swift b/swix_ios_app/swix_ios_app/AppDelegate.swift new file mode 100644 index 0000000..8610d44 --- /dev/null +++ b/swix_ios_app/swix_ios_app/AppDelegate.swift @@ -0,0 +1,46 @@ +// +// AppDelegate.swift +// swix_ios_app +// +// Created by Scott Sievert on 9/13/15. +// Copyright © 2015 com.scott. All rights reserved. +// + +import UIKit + +@UIApplicationMain +class AppDelegate: UIResponder, UIApplicationDelegate { + + var window: UIWindow? + + + func application(application: UIApplication, didFinishLaunchingWithOptions launchOptions: [NSObject: AnyObject]?) -> Bool { + // Override point for customization after application launch. + return true + } + + func applicationWillResignActive(application: UIApplication) { + // Sent when the application is about to move from active to inactive state. This can occur for certain types of temporary interruptions (such as an incoming phone call or SMS message) or when the user quits the application and it begins the transition to the background state. + // Use this method to pause ongoing tasks, disable timers, and throttle down OpenGL ES frame rates. Games should use this method to pause the game. + } + + func applicationDidEnterBackground(application: UIApplication) { + // Use this method to release shared resources, save user data, invalidate timers, and store enough application state information to restore your application to its current state in case it is terminated later. + // If your application supports background execution, this method is called instead of applicationWillTerminate: when the user quits. + } + + func applicationWillEnterForeground(application: UIApplication) { + // Called as part of the transition from the background to the inactive state; here you can undo many of the changes made on entering the background. + } + + func applicationDidBecomeActive(application: UIApplication) { + // Restart any tasks that were paused (or not yet started) while the application was inactive. If the application was previously in the background, optionally refresh the user interface. + } + + func applicationWillTerminate(application: UIApplication) { + // Called when the application is about to terminate. Save data if appropriate. See also applicationDidEnterBackground:. 
+ } + + +} + diff --git a/swix_ios_app/swix_ios_app/Assets.xcassets/AppIcon.appiconset/Contents.json b/swix_ios_app/swix_ios_app/Assets.xcassets/AppIcon.appiconset/Contents.json new file mode 100644 index 0000000..118c98f --- /dev/null +++ b/swix_ios_app/swix_ios_app/Assets.xcassets/AppIcon.appiconset/Contents.json @@ -0,0 +1,38 @@ +{ + "images" : [ + { + "idiom" : "iphone", + "size" : "29x29", + "scale" : "2x" + }, + { + "idiom" : "iphone", + "size" : "29x29", + "scale" : "3x" + }, + { + "idiom" : "iphone", + "size" : "40x40", + "scale" : "2x" + }, + { + "idiom" : "iphone", + "size" : "40x40", + "scale" : "3x" + }, + { + "idiom" : "iphone", + "size" : "60x60", + "scale" : "2x" + }, + { + "idiom" : "iphone", + "size" : "60x60", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/swix_ios_app/swix_ios_app/Base.lproj/LaunchScreen.storyboard b/swix_ios_app/swix_ios_app/Base.lproj/LaunchScreen.storyboard new file mode 100644 index 0000000..2e721e1 --- /dev/null +++ b/swix_ios_app/swix_ios_app/Base.lproj/LaunchScreen.storyboard @@ -0,0 +1,27 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/swix_ios_app/swix_ios_app/Base.lproj/Main.storyboard b/swix_ios_app/swix_ios_app/Base.lproj/Main.storyboard new file mode 100644 index 0000000..a89dab5 --- /dev/null +++ b/swix_ios_app/swix_ios_app/Base.lproj/Main.storyboard @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/swix_ios_app/swix_ios_app/Info.plist b/swix_ios_app/swix_ios_app/Info.plist new file mode 100644 index 0000000..6905cc6 --- /dev/null +++ b/swix_ios_app/swix_ios_app/Info.plist @@ -0,0 +1,40 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + $(PRODUCT_BUNDLE_IDENTIFIER) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + APPL + CFBundleShortVersionString + 1.0 + CFBundleSignature + ???? + CFBundleVersion + 1 + LSRequiresIPhoneOS + + UILaunchStoryboardName + LaunchScreen + UIMainStoryboardFile + Main + UIRequiredDeviceCapabilities + + armv7 + + UISupportedInterfaceOrientations + + UIInterfaceOrientationPortrait + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + + + diff --git a/swix_ios_app/swix_ios_app/ViewController.swift b/swix_ios_app/swix_ios_app/ViewController.swift new file mode 100644 index 0000000..67b8b0b --- /dev/null +++ b/swix_ios_app/swix_ios_app/ViewController.swift @@ -0,0 +1,38 @@ +// +// ViewController.swift +// swix_ios_app +// +// Created by Scott Sievert on 9/13/15. +// Copyright © 2015 com.scott. All rights reserved. +// + +import UIKit + +class ViewController: UIViewController { + + @IBOutlet weak var imageView: UIImageView! + @IBOutlet weak var button: UIButton! + override func viewDidLoad() { + super.viewDidLoad() + // Do any additional setup after loading the view, typically from a nib. + var img = UIImage(named: "space_shuttle.png")! + self.imageView.image = img + + + } + @IBAction func changeImage(sender: AnyObject) { + var N = 500 + var x = 255*rand((N, N)) + var img2 = RGBAToUIImage(x, g: x, b: x, a: 255*ones(x.shape)) + + self.imageView.image = img2 + } + + override func didReceiveMemoryWarning() { + super.didReceiveMemoryWarning() + // Dispose of any resources that can be recreated. 
+ } + + +} + diff --git a/swix_ios_app/swix_ios_app/space_shuttle.png b/swix_ios_app/swix_ios_app/space_shuttle.png new file mode 100644 index 0000000000000000000000000000000000000000..926f63dd1c0b8872b8d27c9fd03edef13deccadb GIT binary patch literal 416850 [binary image data for space_shuttle.png (416850 bytes, base85-encoded) omitted]
zk9154xO<`&^`W(Eq)J9Lh^~0ZkUbYVfI+3P@FZxYGn^$qHQ8%BYsdbM9+K@g?gVNH z5>riVwJ~aWX+HxWBoOTFU;hP$2;s~$G`XsmfgL9f_k@X89W$HYIGuKmK9cZrWOVqL z3|Wqy^SC6x$%{ae?QqaXtqZC!`tfU|;{ZJ)mdw*hO)75PKKf@oW~*`;;V_Ku9c;CtTz`|$!DdBHZf%n$|tsz*N^nym#;NcYpk&%lmI%UEaL$ z;BvE>jGLA1Nop~9rpMt6VGrmrBHkJ#p`;YH{jx!)d$`LLF7?5ef6?72kLZ|u`sr}= zA}SDvKfc=!8B?e?rao(Q{?#{|0KWa=@~fZ!^m6x&*GH$P&%JRkAfNZ1b7gr+LxfuA zf6Xv%=7UrP+RCrkR(hB}IuHJtkqZ~OM4xL*raW`KGG@ADJN=uD5*u`=bm!t=C@fXHjsQm+jK@>^Zt27&J_P&pSRKjbo?F45g1>mTno-wv!0_l6JoeSUO0r2E z(&AS(qO{m8X++xgM4^Aa5P|xcWVbyyWEoX>IGf4n@W^$tX;u=Qp3>`KTU+J;fAR3x zE;&dB#Rb&S&yjm@ECuotMu(rid5{j>tONS-L8eD;VnVb!*IzZTJkL!CeC>~a<3&^O zf~Fh~U@dM3ckwoP@+*QM{i%rzVTHr7<(ef^$wQoJFxy(!jc?w&bM?RY8y{ZYd-shS z4@wXo|D;jSoKj=<2Q4Wh5DPaGpN<>gDOvCztzWt<)eg0(ee|4w5vW+%(o40QODcN}WX>MHA{K$JGdVH@j7np<|E`88;C{$z zpRR1sO89k1T-0N93nGMq!(>(c;ZCo8t7VP#cAfP@JofXnN95!8PQBDdig6*Cg5|Im zjhr!B&6o%Thebg`KoH0fTY1Jaf(4M2ae_NI1s9|krN={^e-#PQ&H*nE6*eoF%Bz+v z%wR1s6$EONHgHUf@hgKkHngcfb=pL@+YQl41rv+F-#T2!e$Nyk=_M;spWVe9KSV2? zJb3c_#vlFgYgZq=_ul2n{a-e6e{&6fABp7!LoM-Q`$g6zU# zGiG;s8sDv3b*;^<-|4A*k6VX-TCe`#d9#EWQcGX4{_^r>y|_2B_3Bz_=hfYbUl%a- zG5vMuZpQiAyUnchMz3eVc>U(P!M{@<{^r#O@4bDw^~*2f%@g40Gud9Z$IBp7$c~dh zdSSU6DW`Mx)XRh^#kw?FP){`(EW}+K#SQiAByO}e9F%Yiu8fEySHd;|68`kg z_TtM0iRJ1KQdYcjx2shk0gKN~jL*Q)NAGDhxH@ByTPg}-lHo(~7zmA1TLwD*$)(vP zvJ7A{MG=2i5`iUGt~M*UwfOXpGuqKfwxos)u*S_g+jyTB^kE~Evo4#NDuSTfSL%t9N<3lTzLp&DoQV&znwZ-kRzaS+&|I`|;cGr zj-jkAKf}OisqXrKu2?YfUWm0Ke?EAOGcX;F*lBBRPrP2NO9z;0zt{ivM_;*m_k(vX zAAaSn%Zs~ruNu>ayWpJSy%ni<=N7wl%K2Z!8(o=#ZyvHt{Ef>;@7%k3_l@6N{@K6y z;_^kKb~eksmfd16cnLgw48Z0b>zq+p02Juhn;^D&8ztyiWkK6|SG_upygvE$Nz6Ku z^6IAh<1r>N&b0|KWuOo9^zOkiLGK`QNf-iTSxqoFe~Cj=+ypH~okWAN7ZP7oC%B~P z#RM5>sLO5$7pa`IcDva>e0wLh^;^pkaG^=X8S;)fS?!YJHX?ECp{h(si~8E7l8VuG z=%;(mBp`38Bh{yD9drK{R61vlpe5tJN}5yrwLBuYaE|(Tr*MZx+=UN4*V+Mid*r&E zX)i-yO95Nw7Oy%qvS0s1!37Ns?0`oQxftbwi<6B;>L1OLm;uV=Dib_{2;KObNuz1} zt}E4goen2pb4Ieg5U^9p#BhNVTKE?%)1_M}>1#ptx{da}>bdf0VFy+b%2a`) zR>W6PAQaftM;1yG1Rm@Rt?~<-y}DWYc0iwo??E?C^pK(|&LXSdhp56G^&Ql{;|0ou z#14&=BR$#|M$*}#TZ=P_CatPO$o1`EQ9_?mf@BdiQ|@5Yfiv@cf?Rng+4V_^_)v>I zdWL8&esS zSAX<_-|L?7o@nv>#?{+zy?c4*=3iZY`{JujeIMCA{yhEn%z_WTe&_P$SKhn4{`$?! 
zy&fKesT+6Nl|as&us!!&KWSO-&wh4w`NdCrUR*r<@b{W7zkBoYDEuFP_T=i*Pn&Ha z349|Joc^vweEG?bzIFNfH@+IJHnUOcxVgOf=H1}@?sC5w7cUoLiN|)xqZmdPWQOPL z+D-zkm0LQ~DJBzx=Gh?IPUyqIzEsu1pP3uLFcZXzV24jUi4SpyE|Z(v&(;?u=ryh!C$pC7*V)$S z^kY57Ho9^sn^@Prnti|GPEswVvd2AbGJn7IM?d=B<<=Wzs_jL%YI^-iLiT#d7ZH(< z)i@!=kf(WFdJLq293r1QdDtlB(yb+LUVi_(1++Gy{L{afvF5#wW}Hb_K$(i zzws&R#V4M1Ae*inPiQ0PXpe?H#37oYS|E?#B?-yVkKWC+F8asOO+(T;faVL)pX?K#&SvsLW&KbB8h%ri1mdefJr zl5(?nGTxfhsLimkPgu1_$LcM79eJ5nd#!tg>7dqKk02j*v&1J|#IHRTo-06BoBjc=KzGKk^R$dMi=IqRlj`nR=Ud{ z?%6R{9JC4c{F} z^E#@#J@VR!0dx7yW`C-)WM`evu_m)}KFlFx!$@h|6jk?$|1O|(nB^Jf>v}h zUU|XvQJX#Kk*|XiE@L+XKSa_rENGj#SLejQiusbS{_kZ*`XqA2dj3W}CDzr)dxh1n zXrIMjv#1rILx(Q@Pj0Idt>FY+Bki>%m&0vv(HjgYycxH?s+-Qzxnx9NS@qZekc84~ z*I$T>!M^qOSKjUAIR(LEut}=Gc1#cI2VFMQ47$mQ8(nza4?r}Bi2-{-Iqw> zJQJR0CQk_*r_shzK!$wyHVR`#TQkBiL!xl&odWE%X9UC0gBa|qo+7oW>}a#on^0_= zISEAfi!PHs3X&KQVAL%*3jn9{ley+(lti#PYLc@#BtB;FoFVu+V@e%8v|R>HJg+`F zW7#k{#7TeP4BE;%QLK6F1fJPeP{P=Uf?Cx!?k*T*$jYWrK4xZCdhmSl2FhM^@ z-ROLf7O&UGDX@^kH+rF-o&d2%N|dFpk)CvhcTPvDa!&&k81?V&%LO4pMS^7^?Yuz5c+ zgd6chPe^nm_;kk!nQXrTn&^e?kTPN>m)xElk`E20miX2wp_QE2Uu6+@_f~I&3)jL( zy4Z|R*Nw;pIPkX#CKT|-(yqZt4`=@eY~iongU639fBPrjYRT^H%j5g^ zFVCKQQTwN@=RLgKdh*%j=3DnJfBPT)u-nJ(v`qK%^7CKZzx?#4pIv_Un@5-LfA`*A zk@~3h^8eRA`t9W}|GJ>$neEx{~}9OpENwbU7)yA%(~a3 zFJG^te>c6qmC(xxF1PRX?)519yFdQ^<-ND=7OS3Ko_ucVKON=Qx9;@3i(o&0nh*DM z%;zuOIGxLEk>8{xc0s4r*qqV=d9thI%wkOtor_G(;3NZy|M8z}@c`(X?UrU1P{>Lfz^J>PFje2jrr-3d8*9Klm`Dg(>T4!5$I1M_;PIU6o6WI{NtBBC0S9G7V z;}^=sxM+P*{MbOLCVYV0&a+M=b;*t&l(0N6NmPHc1j)@!l}jerIo`lbUl@$NzB*og zd*!H zv=>js=>GT>h4-7(0#%GUpSP&~=*fc~kLe+yEqs|^5kMxqFoy#T!JVwPdldDEW1Qj5 zcqb1KNY~nJMpX?4+U}WXr8=S!$gJtPue)>ukO{tb#U_72bFQgwIwW?}aJ^?-1UItp z>~>Qy9(0l+hlMb2wfj4w!X5X77S2dYn4FT385VTj>3&B2M@gnsYYA_(QcOok&>~+up_yTCIxJ{&a2+t4TrY$lh!_+ZG+{V7Ei`mI z=2Y%Bv6dFlo_^A4t*rS4sQW2EX{s&V%=xO<3QrjWwz`P=9MV`-0;M}rNGg(HyRd`1 zDJ}5RRar^Gm>iCgH)9#SrmfiCctl(LUER!?JKDRqh09S>+huSFrmwyy3&B#47Qu;& zJb;0}4luFB6LZllxuJI#V?^?`gs3(Jj}Z36teV#}-U`+IIl0$6x$o2&)E8yOW#AHo zXz=pgnt?>0DCqob%1lU0fX~%*E1)OgGlETjAjdi#C7S+YuOfj)Le?=Yss)8SFq=r5 z*+InS+O%5x^hQU>4?Y{9)G8)9%Tv20$Cii%45KX%qCl&pSU^6blOz@bc6i{m-fcuk z`0>qZ>4}Xja3!PYEhd=y&1A-EPI_`JA z@?rPJKe{}ASo}zmci*^s`S9x>UOxWQ-v;}wI(iSqBK!15;Pja+qXO3?jc(` ztQw`gXc@-TeHFS+PHrblsg|MYfLQXslo8*Nd}>XHE{3~Vg+?T+2ym=O~I9Q zRG~^}Nwd*4+(31Skt~B_powubSnDi%t&^1JuALhlC=iUACf=MgqzjZeBg5D_JBnVj zu_ap4EA()z(@u5UfL5!qmF!ANs%r-~sAszQRVu-Zwh>|dZ~ST(_cwdYua1k37wCz^q|5*byekb1@?n)7RIYD?e=l6Uk!gB!dQA7XU$HF+B% z1#Q!Ojexb+V80+m-S7{^3_^1*#LUa)9^v;P3 zR3&Fg=ejfAv@e*+KOJ}#=Gg=(ObU(cF#8V-^Wu?wj}C6UW)w&kWG1=kdkF;IQwl$l z_w-|$(q0?UvQ1*0$7}MeWH3f*z_Wuet+3AKjQ0poXT~0+By7?2__sfN{qp^9e(mbX z!_RBbsIhtT#}B`_y!YV;m$zU0>&uG=_pcs&@!;~u|NW;|fBu&@FCTsX&gH!l{DG} zqsupb?;G)s)?$@i^a}zcQ%95R-+J?n%Nxb{$345k^9xS5muev*8G@|0?gg4vqFE~b8O}X=a`d^F{EP}pmU}=@wb)XSwKR_b3O|Hm@=Hd>U zmuq$xt?Hd*BcBr|haLZcT+%=}W^OlNP*sP8 ziz0Qn9UY!M9-2j!?5>O-@Nn?9Sr40C5&K6E9>}Nm1)Zv4gxxj5t_>T;%iu3gAJSm< zv&*%mW3iaOPEWN6ps+N$=>Kq5Y zuxRs}?x^Vu)4AavaE?Xhrrw{n-S=tJVPAal**cTg-+JrD*S_-p>-qYY3nOY|?DovEv0!3%{Ql@7Kj1WUsCu<`C zQzMlmGf#;l5yZ)!Ct%8%g0%`VB{iEsVWxi>;xaRiYE+K}xYvva< zvkSL)yjFrUsu(Ub$uW`XktS36Vo)=MqBne;rQhh#bXX#OZtjlY8kv$qI|1K+pa(%>ZM&;B@QmLcq(` zz;hTn@jFKd-(0~XFWtL&q5xTz@y{Myj?Y^74tjr_v|~1r!gS7Tj3fe6&2(hybTUq| zIU%?>^4i_4Jr^*l)L+@uhAGugcJy^dD>yFjKhCyVhjr)o?klf}>B`;tuqZ_r(GWg? 
z2F=2Rwzb%QdZ|_DN(x6eTa52H`f+Nc?g1aK_>3lU9uwp!vl|EMn?Z4237t+`!%&AP zV6jgR4N5n&W&EFYUhrJkdOM0osW^bI!AL6fmi=DKLefp90SR86Yy3QJo%?Tn_ro^G z-n()4c5Bn`e{gyI%{Om+_|>l`<9nB{yz}z%%Rm3C8-M<%pElk8{Knf~%TK$V?RmG* zNOp~0i2Y$}(5^_rJhVPJzY^2yHqNcacz599s1qN z|F`dd){u~3f zWs7W~Q$aO}37}P!8;tfWrndRpVx&lNM%iB6ArT|LO$le$I+46ni{WDv{@4rd`Bp(a zv1rqIdJC~DHH4>cU-@n`g}HUh2qk|y*eDN^YtNPzZbtMqu_sHvhLwp{3H|*|(WZ-& z4_VQ3P$Z~g6;93v(xgl@`Is0e250|Cq4RLxluLA6ca%9xAjmJ0-G}J7&ul#c@n4ht z4GugaJ72CnwzmX_SDhxG?T?F)4fVW=6;-V*+8Y4aOjBn*A}xtV-yZ84kF}~D?$X`b zC^Ms)++VhFTF%o*9J~#*jL_48BzU1K)LX)16|r(u6(I<}*$&X!f!#TN;;XxWs&x7- zSFe-!S!z5bNF6=ihtF|5<{8Rkio9bOySfDBWI34HIaXF1@uL#g7vo<(Z4}srtESj* z-@cn#3-! zi;UAKX<1st^b|VU3ZZDhSQE1>T%C?5EQ;321PIU6<07HmY}$D1T`MiCti`P&byimg zQJdyy86C_z?~T^7KK|2kh5zBk1Odt_RX3$9Fx0AF>#Q84)>&?H;NZ*ls->tYotVf- zYa?)k3on8WW%>!=IGUms07yAl+rb{>x%lP>!c{P(0EUuoY2uafAt7kzH)1?Dot|0? zEOR{cYB0jdEg3~iav?ilYeNofYGBO1G&!B#nDNGx3t(vJ)4#m}v9x0V7*KR=3Nk&H zCSvPsSL!{b3lW}0yb{LePVM@UL)tNU8X<*|3x zk^~ybotI*0C}?EUn+9xjrzvofig3nyFo;K8iA%=?I5x#Lpi@r*RHD+B{?INjNwaXiL;G<7*vr5^p37^qWg5*1Q1wIuibbntFeRm`Q`IZK4~|3 z_3l4xy8MI7!!Q2q>h3MUx7FJEkw-1Z{WpL7`Q^cb=aS?nd*L_s=^J{#-4QQN=$ul$`Yzuj-S3FC zpSVB1tl^zT>A(G?$G`@qZYMA>Cy$uoTg=^PXpcHI0%sp}po04>#*Va!w;QSF2Stu>_>-z{>96nnC_dWJED#1W2+^|{m5O<@WX52Hp67x^4tB1# zo%k(}%~oFCdKSiXL3fLW=u?rXl(C&?iqB;wUE?Qa2Eq5IqdXh2a7r$4S3fY4=1m(T zLIJQRa60`mZf_>G*%j?!Z^mRXRh%uB6gu$7&To%y_9r@9=HmnG%V-tt^OqnIPVEJ@ zek+f(Gt-M3e#5W(kU9NSfkq8n;;;+qkU{mL+pL@l>iKM{dn^U6q#_w^Wn=oJJ~; z=#!iod~NRB?cP!^jehO3b$Z|W+MD6;7L?yT=;1WoW4dW{9SiF{Mnj315ul?p;!@Q* zpNuO5+j=(guZ3mOp5y80juQw5m7xQ({ZKN;@dtm41ZmH8f=ZkTj5FT6B{)&IHfMwg zjuv3WcJ&gxen*`dv4@2&lf9y&@yO0>YWJ*be{Mvwf)RjV;Ik&7K zc!41$tYkpX{s45urw)Im#!NOUg}ni&<3the{`G4*cO9$_rQ|BHrC#gJ9YkLl<@i1U#z-}X-Wqs~ zIftprsv0qDH}B{~Gu&}nGQcT!YhFckfdc{Rc*22$z-vuauASrnpZ$x5Gg3;=%91`N z&_v^wnj}sF%e_W@Z@t<5%+Ut(*hspMTQ|LWCQKyB>KzTi>L}dg)b|pdXgh(Wjy+pD z0tAfxlL1}17SH(;n?cW)2`xP-W2*>8MAwbv`PyRXb@_?#Va^Z9jE!rsjgFS4%nn@j zP?hMssc|r#w-hEJl2~mG60C?2*v*11e)ip-i{z|*iA+C_x)=YuAG~q-)>l7hefm*0 zYxW5r*>~{nfAPiT&9`2^eD;^Wy8Qf4?q6Osi}$$wa(8=x)oV?UKdxi_=YR5O6D-f! 
z{cuK76yNInlXjtZ3wb&)DdscHOE;DXfBv(dU;gL+`rqkozK<^d{Ga@z z9!&FD8-DL}&-@>x6UZQy;}-;?gjxq0jD%Z+xvZ|Xn8IB<1{ zX|gqVc{!fWh>|$z;FOtB>UW9GY&81PYH~y$o0)Y^F8mXZ>!iVOO@f|(Z8*qg=*+Y) zD&PgVuXg|2zuic=#^bcgX=gKi zi~W)T`pqvxky67Rtm*kWu7*Bv#g=P%c6MxK@eo}SmTXDAXftb)hN}NOe>r7rnaYr_g4zRpM`nOaEh zV{`|`GSA785mx{n3L}O?YbR5m0}Eskv#HH=9J!-q{De=(1>1r_F!xagZg9394n@Y@ z(}OyfkVp*JYk}-qz5wqI1QtLjLDk`dA*mY(qtWy_8%(!n%`EEk2{L)nEk@5;ADv#q zKbs52q2-*@rwOqtMTG<{do@~*W@oO!b`nW+=q!&f9=g~JqN9uR_9)ekb}Wr!C$p^y zYLm{hIr`ih+iG`lJlU5*h7#0x^*7vn&CG$|AHL*j>UiPgRY1%(M^-$0{xlvXrOz5^ z{86ta{owt#n?~=Zd~u{}Mpin9kG}TdI$KZBZ0~c`ySM9I6eEsX)QkE|N@)r-=*OP3C5<9CQK1h7&Fn0PbB3(Z0 zp)pr4A71Xf_u=J(4_imiCm%ejeH-KMefII?#&18qJgMV<@Pz>MS9KEvE3xk$WHiMG+jH%Zd;T{6{iL9+goZCu#cW3=C>tN`RJyeJ2&cUhv94+R63O` z;)HI^#@Xnj-|*=)UbAnEg`-_&<4=8wPx4EbbHR>9 zd`Zaccd_TlOO)f^=Y?JDD*s8x>u5FppSF?M@(an&%Vux+AT<1QGB&GF(w;s+mjIv9 ze|~kntZ7fq;s|{=a1mqJPbOD6nbne@n$@|PuAfa0@y<8tySn5cM(|X>i4y7RSu9fYeFzJVl-ZruA3%o1-hw8Q*ig18hiHit;^@1f7)o~t(-k3 zy8g0JzMZnK-|fjvF?sJsj&-YzGCg(fllxCEzwW^hzb;!Qct%ffNHh)QL-A8kUksDt z2#K*Na=hso$`h@P98#1W>|W3kgaoB2<4`-s3-XMVWlb1so53UuP9#8RL&3x*a9-4fX0cVN)IRanZQT>RWw2- zr|4m5Dvlop-Rbbvo)LFtC^juMxLT1@-vdD>WRtT!4qU;*iKUR>>F~(S<63pAyF-D#;^Ip?FiIXD=oZG4M?|8( zL1g~H*-w2*8~jfzf2K{>23G=;o*eg2@nBtRC0U+8PA;wprrH zXlkPaomo6uV4UpeB>mL-wQUv)Hd!W{GlGq-;GSes=Mwz=fbDq=)eDY*wI8(cziHnE z@}*@s`q7~*C>JQxpHW2jM-)7sU4HQ0Z*?9I*@aGNolOj!fK&+Wwa8{g-uV0f+U0wH z@5h&i4@;8X`tb6*KmOy()q~G355udkn=ZmQrlRY151jeU$B!0-v=`{^&td?%f-w^SsjOX*-v%MB#EIwckG8x(*HX9V9^Teb0l%$Ykb!3ew*Z-OBy}1p$o|wF?4cl z){-H+lvEqgomnt(sJ6A+QdN*~GJDyvPK8WPY(2iI02wGczBZi(7ktO_N%->;SD$2FYpyU&xMxCv>`=*_xKS?kO;`iUuHQtL|hx_9(J4`2TD^H1u)n^NN`ue!hYwf8Q+`Q4`x_eq;x9$kJ@=YBKCKWgSs1&HV%N5;0%UraBHDQF0r z)&ker%9g65i^t!L_QvCcq|Q-R6>3BF3>8lV=r2cz$SJHkWF(yhjf?Du=9L6X(0^%4 z6k)-aq077_5lc!^4kc(YTpgTSgU;^UfK=WwgMhDgb0kXA4lYI*=An(RvzaNCRm?mF zJDLU9GlM|!QHmCJH`uFO+v>%*bv^CQ3k5n(xghm~^8Uk{QDlg()-PvZ(I+DZd!x5N z9s_eAiAfd#Cn=xP=15lH(P2S~el}J?IFFvR7kCz!Tk9E61dYClv$}t((o^Oghi#6FBJl)q3$C-Vd?_ znl1aRv)5^;a-NR>|7JMw#2$t>!s-Z2eG5WtpB=DgNq}1k-6KvLxl*uo9=r!!zjP!4 zkla-v8Mx~Drc*0rCzjZL(5+wJ_*$FKv=YE-^IFsTw{CZ%YPXQxX}0d(JFj1!)q#HX z_kVQx>W3d*{_p?S|J&v1?YG-u-t;~{iXnb2$SoO3ppPH*%GBNy@s01ieR(^b{-(|I zZ0!(nU3Y>Yr|2THeULo#xmtiN0ap^s)Vd#dq9^Ago1z~rqXCc5aNc7%Hdhf)6|$#z!3HS(?! 
z8^_0&ics1Q5vi6fv3++KEkUjR*58Y-mQ2|5vDY&qoPSjsImy+sN<5JvY}hJ!p!daA zxRr;zyWBx?&G6S&Z)DUJ=b_lhIXe zHo~nMc`rId0}8w!V1q*NHich|7y^8f7g=~UYOk$TFho3?G^s6 zD{x=^Vt{5k`Y%6xRQ-5QMzaYvNbkdemSzChsh_|van7%G@CGfno;4w2+;|drAT?AO!!6up{!$tvB$V5_)b@ig@1i=5Aqoe!E-MzUVp3m5X_Ct_*by4c{vtn|jHGNv)?XP$``?wy z1e<0Nu%&oG-pIR_3!(|lC}O*NtL6T5_0Gt*>DPo@Q7}oVPw-Ec?3|{8I|Z**B?agb zhutQpa(`&uDBTFjd8NJ&E#N?q5E3%FDS^;y)G@jbQ&UL$7z779QTNJMG7lmrkTD)- zq%<9A*Dye~)e&lDx||~+G2$6sRj#ag_H#NI1v=8ccyxbsymjetaFSyb$!eYF8O8A- z{F2El=Z1HQ(Tonjx|lF`SiJ^oZ6YenL{Rkb#_(H+G+=8!Ce#js$=&y zt(#2^Z&VxElAPUI7fhY)?M9E!a_}=9SWw4^Uk1wt$?_WhMtWV`Y*I*;rDt1`Mbmg` zx2xl-AeJA-2loT6U*MxZdpJxR;$`xQCiZCg?T`NEcQ0>skN$&C+QI(C$L(ByP-pz4wd+Ui zX#c!Xa3ksb_MDc27#%5|ZV;xx{sBhR( zSbXSs4h8%)TJzH-36o=&!a;_cRSfI=EIG0f$rfK{*YoH3a(D(Y9h%T03 z&fX4Ai6P(LAca1)2|qg&llY0a<+JnjLe|JK5IV6@9k~6*Bc}mG-!q$L*N3ka>umXg z_Sq+Y<3o~Wrn_4fyS{44mR(dSb@Gj)C$!K3cUO0|c&uGin9_G7d6fvw zmky}>M2tE5UG3T|zSd$(jTLU1KH2C;P8ogx(4zmrbj~06y>B6s%JM_caMFBP{#4R zCqzAJL?q*G=X@Qij*W3W-_F*GFYolY+3$br>z9A_v)^3ayxXH3%N(ZUH1Oo9phAFw ztetzLPev`P3{=d1*#X6oIvm&9a130TFbFy2$Co*nN+w@yBJakOL}faA0Q;-CU(7&wG-A3JK#^fIT*7^9i;jVw8t z%2f%LcEP2#U@-<&cE0DRBhu~(D@Q=5k%_&O^ z$e~i3^q4D|EpZYYA`_Q%Jz3Hn@5K*DlhJ8&?Csh)uVr(1uA;ih+J-!v_^e(?vd$kx zU1z+(Wy81X(AlG`dAr>+omU+txd^uWrT0e0cUob)bXEDy+UP9tMQjUNG0zfC!0RYo z5QtzY*x>W6-inS!;3%k6p5t=y5Ao-#&I% z8;Oj++O1x(U;)}_1)?ZFNhsM?R`7cBZXM<0PcFasv;X#Tzo)~!Z06!&Gj#Vq{l(>X zzxw3z>tB9$`S@qQy|l0G@`wNM-?)6|d*8kM{4f60Dq{O;-ne}6S{caNM{FS{| z#Rke3t&@A*vz^->Hj`1tQ`7Fv96n061}-n%H%EWFf1Q0-wrO}s$f|m^K`Wb9<27=u z4Ij)3A_XmCXK?ED7j!8FP4rx=SGGjau`yjUj`gMJl^pP?3Q6DWM374|(%U*rx;ycS zA8>)$8*KT;nf4DxrDCBDfZo{UOf8_`UAcWXo1=4|#rfdt6!{suff=2tpriN*8Jpkq zcJTOvc<6Pv+q5kCs~umnF>OnJwf7{$vn<+i@?w@}ra}LLu6!)`-C|sFm#&p19JV(3 zq610Eg$y>UmVC9{^2n}DW7YJNFGTZV6iJNn=r_oUg8{=fJbvcGbgGbJrQ7JNLRCR= z6kmOx4_oPs4)I53`3hRRzI!;*#WX)zXdQiHBmC2`^I(88YHQD~*ps+Z-4i>rw9X}d zn{^W>#Dj#0LJ59sRJWn-%hoCc-Dlmyed*p@uZeWuCPR4q^uZz0`d$q8xc4MvH)uf0 zGPYZ>?aMZSy!raQ%fk>#fH~L8I(eD0mn^*5J)h5?_SB}!{Vq1@xq9>RRVm%wC4p*?z}j0$CQ* zHJj`LOq8|jKH`pb@^b`%d&&)F$1BhI2d{eM%&3-)CnK9&oM)=ZUsPuR6u@~B3iGKQ zAuqu1I;Ew6oRM>!4v<1m;64F9ngpG2Y{OED5kU7zCJdT!*@fKAw!wDGVAxbaC*$Zu zCpG$PnDeH&W!br`WFWjZBwlKur#u z5#Zy*RV0Sd#VHnOq8D8@FEy;zlh8 z4Guj{Gs%KO&bbB=q7yVcYrPBoRfqfqx%g*S+Zl9Zvf9T{h*~#k2kiBq1iPW)M@OBGf6*)M)(#ESL9kk=0T$UJ2uE|KiAzL&j ztBscOg>g;?P3O%%;!SgKHbq%&JDb(Svjk|W4!7i6Cj-BGc>ABOOH`*4$xKbUo>^9Y z>_vNi@K!HLdGCX}m)GC_%H__xUtgm4`lHV-cRzXWa^tPvUT(kl>)yb2|MI{4xBm~_ zM)p7J@zC#G?*HzWm!E~>^?ds(1Fn0$Jcy>ZlM&YJ$NBK1o(JDuH-Gu_ZaHgRoULs` z899v!vO-Je8uej+FM9ygw>p0Bqjs!c%Y4BxIo#dMW1V-m(BALuYxco35g${P6@#9X zW>}wOkdl3PB%W;kRs)jxY&f>BCV!Z%CuHO-&0tf6USed=kn%&Dj{W%BI@6XqsAdnL zL3@97gGnKr{qTSG&qkNX&o<+q-Vim|eRn-8$wY!t@*&3I*5P2Jwx*3{Q=Q{`|HDZ?XNJk~ z@e%Tz?u;C-{q!V=;&{pR_!dk1aJFpjB$z>-(X!omNX~3?JXU$}qAHT%-5N-=pa7KK z*7cy=P&4yGO(yXaq<$giW z1imVBeR@@=+bw73mZSuoQ*mlj3i6HHbxe1AIKwt66qH`J>tj!ki-0?Kd*eaVywB>@ zM_iryv+gy$dU^kH_qKJv)MhpyhrMCrUG;vibvU?m+^>x1<6e9C_HRGC{PZ{XFTee~ zsr7=H83B(rv9mUv=2HStW*u#vt)sAcMkX^UbnV#;sbx_+my5L~+SXG_h&Z8%zU1wgsnIyxE5P`u#At0JNtesBO+PC+I zpPd&BG8|P1Qq>Q1Z~hYSHuAQC>za=7UCS@m!ZGC|Sk10+1$l7#dX?Fp4zS|%vyEkk zn$bB&WH0y+&!K^l^ryqE>~_Kb^?U6EF2QY=!LDt_Ak;LD3x=`$J9&`254%CgdCoJ+ zuH^wdNe-Wr+Q-Z2%eTnPuIBB!tz1XYzxavY?9eQS?*$z6=!lc`mTC$r^g##U@Nogr zgNW4GhBY0}ti&k1&7%SjhpNSFJnHBbKb|X3zV4wukIJMkHsL<`&@cFPA_6lmGGZzxiMNtIbf|xcu2a{gdF{X{oIZbI~Kv z3w(FpxY;QG^UEi{?m6&p-z%xVwrMhroLD*sJ3cxmqgy2yzv|NccGasyY|ndWh}g!z z&%TxL?6%PSGBF6~1|Bb4Hehe`uM^_4v+E_oWCv-w?z`}ur23v-#JTv#dwMrZB6;J( zk}oLet0 z!0a~C>Z0w?qE3}UWE~qFcD@HWJoFvhCFoOmXju@jVr;^i@U^QDvi>%zt)7@?J$(Ck 
zials&d%N?ac3X>QtI2N5Jkj6@ouiH1l20ur+tHH!um>(gCW`@*@^GV9#K5#;Nf7;1 zJ)FUauUhnzTtNi`f8<-Mqhs;L37qv4{1;}W}Q>5;`{$`!DUF#ud2z7p&suonLlQ%38c+zcZ z?nTGk$s{9$krhnH>T%Qh;2Bju=^+nKnx1o81@i5}GzECG4S1$`y+QE}_pmnufXRn# zbM=x2!o%#87Vg`g{8R^Vvdyk!;Fbtglbm4gtQDOA&ogv%yEidmroJjaiNu5(DuM3= zJw_0|m@#5sXP?j*$E%J^Q`9hytvjs)kJp+pZw9>@6bgQ@80;&@+DOYtVArc!8}+S& zF|19)6J~7|ptYak(Ey}T6lM?KXd6wM#vQVb5`^qm0w4 zL+a3M;|gBE585`_)n@4oV;#KOCF3P7Dsm7(VlvVe9pGLkCrBoPFq5&)25ip1&0FgC zw~ucyzm(%SJ)?AV2u$PwFD&srnMLd56(@n%tVj6IMmO+|PS&)xM<1LoX(R+Yx8HvK z?&a_Ot>4eKuDaFf(_R|($>nar>*0$#m%py>7!Bi}tRyRRQu*-nlbr+`U362O$qqeg zqXVyCk%uDs=y;0g_?rFG@e;$)_hDB9Gk2k?OSB@A7s zhkyXl-=TXVF9~-|OKr zpI+{~*=thY>h;V8?tkOo{I@QD@Gt+pw(wqv!}>udiguF3$)wx+)iQZuY!@Kl|*9%fJ5j|L*16-~Uej-^RQK7`;lgH?%b) z5PVAm?n(drzGX%*j(tD9{XzSO%(w`?NggT!lN?SDzQ+0ZHzhq;?W~TmOXM%Wif2gm z_bS<6$F95|dJXcm^5YeJ`eL)Q{rsVU0sF%z8-*DXKDwp8>d)#0*RGO5FQ~1~@n^O_ zKKfqV==wF*5%#4YWM^i$bW}cU;q1?m*o1BE=p(t<@qf+1NlMlv+;Da!n!pvG`2>C# zE*aC$cviOZzT@Y1@q2cAjfWV-pZ;I}fh%^yqsk{ zm-%-x3BieJ>Fiop+MjEh9a`{FVZX2Rc8$Z=_&&BP=1u0q7yVst$~`6IP&U9l?KZQE z`w{`_edk=*wtXTb3}otvpSEjM@X>);mwMJFIBO`%WFj(IBfIDAG~H&oMp};_>nP5~ zG9B@QHsak$@%Or=ji9^?#^~!su)Ck-@@_9s_~r*au;xK+TmQ0A&+PlAe5SmR$N0?x zfT(M+ru0zmHy1|Jz(n)Usi`TlV3AAsLUB{(kqlXe!_UpE%wuwfQ zHw@NMK!=`WYTimJLdWdgEW}37W{V{A^9Q;}W-Zj-XaOZU#fA{k1U;L_< zt=zi2{mwg=Z+z>AJr(ZZ<-K_N!yo^_rCZP&MD!%LzI!V9I`dD93pYN==X=cSqXctz zwudqnjSjl>cRsW8n3>&IbhAW5DoWR0xBBplPkJ`Sdza6C`>VtA?QT|U#^q)Mi(V{s zd9#5;ubH?!XtZHb(6jpM-Gju2;7AYI5dR$8?74U{yI)*rs+cg@YO)p)*12ofkK~9c zNzxd~{LnH&WusbCw#XmJHe8FV3+lKIUhwH1L6SM{DLGp(4`)Dwk(~td#3nY=jHr5o zPP7~N94WD(E=WGYd6;sgdg*vJ0$x9hV%a~~8{}Xk`yPRg#}^SrO-XP9; z1T3NT0^+88PEZges$FN^4fH(+I=kDtxv4*IxZ;$bw`2AMKb22XugmNE3jTcHNV8|W=n&1hwatf|40Zr7%ldN!Fkp<(cd6Z}o5KX}|T$ilFWmoa5mY4XThbEfSy>^Ik(k0gB>4Sm;7(&{CaXo6@h^%^SJvw*$ zTlRf6E*^WhC2+P&{(f%=d(nOd53TfB0OPch$BLe_k#KF6C_YZWBh^06tFvH)WL;{w zE1j>fN4fxc8y;;XTf;hYI?LB~i=U;H$%-!7NeKlw+EYe)la%NP36vke$&R*rUGPor zujM=2y*)kG;ZMF%6P~pdg!#JmfkZr;X0>J@eN3NwcKge#Pn(MW+U2X?`qt&Y_<#OC zF8}p^`d?hW@%ul#{OBitc=_><->IIC;zeufZ(e@zlOJFH#XtX(%k8({EB-v)tei!M zue@>V@^P<8{Y@RRPJl<6vVYjV5ENnLTI?j2e&Q##BnEc!@2!i`{P2G7mna!~R_u|8 zd9tOrL3U4iaLx1nKJ7_y&%SCOP%ms`&~1BG7sR9ZVche)hI$833Y zMT3fz)8~wSJC8SZ%O*~Yt_;uYzW;n?v8fYr<-{B@Ih}b`x2hTV{ItUYi$)Gc`x$Mc zg9tfB{3M&jzfKJf&YlDIgY62*tK%y5Pg(6Je-f$QWPJGPFj~^NYzJ@1=hN^3l|LL? 
zQA9!|r7s(GP}HW&jMNJRPaAQYLVIBiv(b_)%EK?7<`j4DzENggr)&)*!+FuhIrV{K z7>v%k`SV6tFB-8td7MBCSZ_7rwq5@yB0aIwuSTa|d8bEJ zzI^}kyN@4S{yaQJ-6xBRIl(7rn_BH#=6D0WDcjV((KQ1PnP$r`3ozCUB^PoBS7$2ZI?2StAfqQc=7iNb zJa*`~x*MHT^K7up0F~ph`le{(EE?25ya}k&c6!_>e{zU-`iM8lMi&-q1lE1hE5E90 z^mjSMnBCf~EhPt@DZxMR%a+;Qi)?ruQ+^%sRmKPEj=jug*#jp_3eUhfB|t zaP7ly{>{tJ|LU*mpq^a*&42gbxxDqkyO-bn`s2%g^I!er@OsY02a(A4|W?xcC#z4XTO0XTdmFj`=v?9tEOju&xo zOfT@Q&b95%X4Mn7{AD8%wl#J($iDjKqB%^m>N~zFuAa^#yPB<499%f~7<`LWv)#(0 zVSznb`yCD18F0;y1B+i?!56kXxv*=EJ+G6SE=e-^)dcRUCW`*0Go$#$mSC-Z-+X-5M^+uX z4VQG*FnZbZHp}65^}!AJd_?SKlb!b=v)#TNI-zNuJ>ieZl;W@-64BIve0H% z(HI@^wE-Esl5~v!V^{jRTgRSy?h|~~OgYK7Em_^3)GHjXdKKa*G-YF2-8%Gwj7@DB z%#+8xZS7eu8N=Qs9uYTMx2j#{lQX^O!rePPOYM0teu?fo0#GC-^k%5+glo*y_sce8 z*@S%OUcu;TqrE(#HOZ@Q^wzeY{P5A`cy>dwW>4;us zsc1oswtEU))vKC;pXY)zZCmiR`2$neGE1*s<{*i=79)<%C&XK4D74`3+{0Eb@VtF< z^jEA7;~|srWoGpTHb#xwP;(e|i|Q^VdW^*O?Ots6!R}&97L*kor*jtQkTkf6p@+^hMq+Ih zfQ5t^{6Lw3xP9l1`u;~v4cpY|ne;YiK5nFcmZS(X&VxnhXFC0e5iYY3e5OBm`+sF> z8g*>7EOVozAh3av67Ne|!^2)mN+KmTMqo{(AQ1{=O~T&&3ED(C9Dw>Oc9<+9&g%^>zHlLGUxPWM5#75eZf9bK^-HeIy*@fKQ<+f+tq3aPQGnAOEgb zIX9rVeXpgw&TroBPMQ4TRxf3F_S$DR9y~2GXgdFOn|WV*{qppS-=*m6W3q{bz>ZE6(u{Mw(MUKmE+88`a`o%JLoEn4pg}=U<2gZu1ctZ}N8lYn zl?5$3H+WKl4%7rcnO9Oh%Bp=4j2^^pD1%Yy;7mvOs%{1%hLC<03SiAnHNB0-0l$5_ z3A-sUO1`{m+MXgZ^)5H{362uWsxVzUfIS6=Ni*=n4?~pmAG-9ai^?XI=dIYRF zt29C&Qq!-MBvy8%gG-fAq={G4J35V%{TZEQ5WIL~MxQqhMc#3q({A=3eRLrhZOZwT zg(8uDYkxW&J*A+*Ok~)TP;~c==ZHx^oT(}&JKeezL(!X>?h2mj>p&u`q%2uxSn)?1 zxD<@6ZI|5C(L1lOU&d6ps*+l#P^XiS`V9s_EkXo$+xCg;TqkqXooVol(+0=NggJ70 zWvmQ$_`}7>!47c74y}^xs;7NX0wTyImOHFfxWe3pj-%_@i)UBv1)W{!J*WTLRQuRY zJDb6%Ww$=B}d*B{D41s{Apyr`Tr;CPF{Ck*Zi(O zYpvhv*WUZ=(^jg|R9PN_6)HguAt*S>KtVwe1aYVcIuo4fOa%V|bzo*3#>#C$4zo+S?NKoA$$a(|J&$RdDV* zB(wSW{_xh~+p}h50reVl$R1w$m~*48r2oX8v&WppzNTJcDzhc2WSpWIQ?5ScUBHcMNiduyB(srn%zjd};BbE9051!{u8S zszaRA&fteP+`T;N;?cR-dSqDqkB(wTw6QO>yv;5-@FSEEUL^V9^JJ5t1-tx_%%Hg1 zQWi`P7FvThKZ0vCrtc2d&eVw>RbWN8_zaDaMDkE|_C{cA!auaZPgff*hdF`WvmK-P zhM2u+PV{A~L0^2`{>Sa?@VdFE8>`bVPiw?~9+DK^aF|EWdM#l|Zud%_^vW^!rnE_B zFC%&Dc{=2YYdQ3z#~)oD$II`rXq#J=FTMu=`X@sLqYcU53R zxi#gDN;I8&;4y3X5_2?Byu=`$x36!H7*$}|+Z;6G6M(CLUvkSd5L?>DAo}F!l7VfK zVIB`~H{t<3@Not~bHEf!pSDBXtIHQZ`^lb)`S{ac z+GDM+S|~@q_wN@QGUPYKidQ9b8i|+1kvGkcqxf-;riJnHs@UuS_9~i|GwVvuSs;QJPrdP4?&|6R{w=lr&lfO94U)%~`P%Cllgp8FJ<5xc1O6!;* z3$II}x^^VX-q8=jku=`64y!U?O}5d;zF6N$K%ygEg1y+8Zf-x5#`s~UwNUw(w|DO3 zK|S;L>wzm?Biwrq2|kxP&zWXmz0V*av zxt{%_R=K}!?!9@sdibxh_y?_g-E(&l!f3EW5Fc!_${a|;O{&p$zhS}g)A_q)g())F6GpTaVc`2cBz$&y-0u-kKZGY9m4LD;3f?LuJ%bkFgHE|CO1Ycf5== zz8g>~2r!;X7tj`5)|g@d6pncU4r5=h?b=YRVa`wlE5{YAe8***7FcO;WwC1*CwixPIv}stlzcpSp~3X|U|2iWfWIlhW?G0r|`@yK^=1ZhtGPJ z{LSUJ{@z!YZ#}+iD=Jto-)?00JH6D2-oy#Mysc0JDH(WSTdh8x3_<^VNrKz^S1rCY zVsqpWCGJ;Nesjc?HpwJ8o?ac=_+NL;@lKYmRd0<8%p*pN&6hlT4Bku=f*R5- zsFMsJF91BJZog8=+a^&RCkwq?yP~;&Z19YJISZMqd<;-|*v;0js3SI@5U*+)MZJc)q@-kt;^K{kjL7LTtr z&J|rEi(qn3cIt=2=$_tfRo3b0ai;1R{h)_K<3^jJO~>K%(jf>&g-9+Yhzhtq(X`|K z6(n|_9Pyt_(`Mficyi_i*A|xzezSEv8&x~_e=m`wN9~ePitty|AaP7}4UBM)entc%ZS_~uWEsop^^-J&bJt^XhdgL<_`*k3Fu&*u6HD?O46O6?`dO zPu-lI&RsJ&At~Fe$8KvuPVw4v>OHphvPVYlN9Fzd?^~GCi;)wgfUO6V8pm9l$#>k! 
z)9$_Fh~(2EynDUq;rrkIRz3CCm!JInN0;Z(LUDwDj|ZLR0Z zkh6q?FSS!h40-%c#&cFZ5**5B7P~&BGSEgY&YWyuXBeC#J{n%DiJQZ0+Gyr5dkIH8 zB^HSayq$K}QD&LkIMP;C_dihwH^r~#t&$6R9C(NqhF3Wd#xEK=PeYZ7XMv3~;d>a; zU-U^O&j^8lNN!g+DZt`7@kv&?XE&=ln%Tk{^Gwm`hC<1mqmm6qHav;wt0R()WPllr zP7Wy~<0B(QGiO1uBFpB(cj!rfehLoWhYj`isJ7zOkZIRXt@Jgz$Q>Vpy7De}H~P@q zJ#y^(7=1|s2)nLaT8L2cHUfh;yCrV1Sn|>fe0;w_V-1c(v+wP3Aeh92Uf70aD;t0S z>V?HC>Eo&Tk=(sg&ZA><=5F$(JHcKV6z8DtOM*8f87%rEZ;kfU9v>4w{@h1*%izbUg%M&o{!0c6IeqhqIEi;dClN z`{tYWZjUFUGZyUKZ^5jF&bt|IYV2OM5be?9$IWLS`FY|(;s&a@S@z2J??3I0XZ7Ac z`?|UBr%x|m{OBLoRd zjhFeRDN&JGI}JS?i0Mq+e}4lfv<^~ zj;mLlgt7W`=NR6zP>gW-OxMJc@GoJTg4s9!XS=~qPPgHGo~llMM0Fac^&kVD$49$( zMpIXN?nnc$dhxNl*#kG#NdbWmpW$VEX8%h>w15)Ll;3oL%zM~#4E;4hlML| z8BHu#+4ZoK$84(3&}MqO!`k4Nz;v@eSHvIsJlM9gCHUdP7tYGrDieP!A-RhM>pU%v z@wwn8$BlfX>wD}g*zEB(VP+0e5opO5jX~-sxpqwor=390;)s!m-4H*!OJ~aAYXv%q z$R!cMPzHzovx}|EvVLo}B$*sQ@D9mFl_t9hTxw+_YUOd+tTHm-h9N_WZU;O^zA4^S#yGFy=(5vu*Ulu zO0++WnI~;@^M}6s^zwJV{pj-dfAXTB|8NGq-ON%TW705l8VmQx$g_J}2^^4Z6ce8U zt{^~K9z*i#$u;cpwIG*B2-H@G0ZZAFcCZ=AX)Fdjg8^iQ7pzQ-zE)@D;7EI#3$@m$ zts=vyG=)X0Dv}Ki$Y9|={I+DA;nag%q6KsxWql+vQ+{+TSs5?ndJ+*vuIQ+r@dh7( z>B)*6@}5&ir%{GOi0gjHrQRJmruQ}G3S{7?_l|<#F9TuVx3qwzZZKLrHpgbjIWWf( zg5G=1SDBoE&21I`j6T{cdT8z-BTh(N04p5hGv4}^tO*9>cpL^joRq`FbU-G%XzZll z0#x5Y4A#*InQgYS@?^1qIDDavPa{C=!)|e}G@eGAYB1jQVv$y4Hj?5w=^9XEvSqPlj+x1}vm9vV~qQD0~a<3m`|ml_4WRr*hg3_L6hsg-vg`P9qbH zB`lKGYoK#&(+iUwfaDpU#1`$D-C%EMIvvMt{(H5lEA`5g*Vb1>JBWjk-j)9fc8E<>kNmAO7FV51K=N{O#VG{&PdnC)Ks|;;XN-$Bf>BHs6nG z*q`>ARW@`#CHx>-J>E9vqEV7#r^|*L!+mQBqWAc_I4sWDc+ zyukHI`(2WR*!)NN(cCrIC(GF)n`P&>4LpS8erzACW9Q*kKAcN1lObBCJ9Ll(yxFvU zLpDOR5uaf3v*^%nVIp4btN;(b;6t~i|8pJWjur}4k~ zZg>ZHgjAkn7e}J6X=w3<1|`tza7`meo*ThoEbik2jccRGykl(}I?~`7T{E@QHF98R z`7xHxhpYk;Jn=$O?dCiq!Lt?Zh*W||0`ze}>XA35{=9{7&ug69-{JD&MZN4KrUxf# zalyTypltqS4gDT@3wEXuPz`1cUhtN|G;STyhLbSj{Md&cqraFgT2SJ^7v2H|cv0vGQrP z`)ITNf&*D0O+aV93U26<*fVL!J zJ;oU#Wq*QQB3tl@k9fhClFDx57=f;1x)j7V(i06!hEyJZz&K9+A*y3Gd654Sgs%BH zK`6Q#Ik*iv^||L0vtb*w)FVfu7qjR9yl(II|M|cFvzI^rU;pXLH|@jv*P_nn5BB%3 z{Ja0w<&Xc#KXLi?4_f?IlKwJX8FJp7WBX7T?$j+_p*}zLFfH!M&8NcaP~z$;E>-&-r|Fv#UUL;qjUVuDBJuRG;yl(_M2FPo!xzC@$T@% z+ZNyDi_RrvKGp7jc!+MJE7^Cv2nTq7mmVcgD;YiWG5inC$wWFG6oQ+5;iGFek3acN zBcab~#9y{$<2OA*()NfwSN>)1B!1okHA5+T#76Lq2JCVr8$k$k_fpJ)ZcgesU=Jf` zF!g%9!A(%PiP*Qzp{M@dOB`bi;G=}f*=pnoO)mQ|WlI3==Cq-h0X=AgnRhMdc=P6_ zVYL^RXHP%9eE->_%MZTwq((gfze-5q+?)~mCUD}bG3eNH^%!NKbI!^vqweA{>I78) z_A-VIGnv$t5YvC>KvDIJlI4m8fLHJErIGbtA0C?7_Y8VL%hh;K<|PM=Np| z*)-b8oXW+@%93TH>y1@LC=;^b-SBgz&f+w3M#q7Zl!DV0bS=1Cht?XdKG|K3OyxJP z9=uQxd$7<5&j4n|v zwCo?o4kjY^)YAZS-1UkJzTnvF0IxQN*iMAqKfT4060p%U+8l7w0%QEx(_bJpXDm>3 zHMy>zi@ES{Ia^v#Z|OHSBJ8G{nb`!aF++?PPKS-bOc~ zw}tVukAG0D2TkYQl$6yyNG~ta+1JyT&92_P{c`Ja?i9No-{hljYAn;u=0KxgTwBtc z{j3+%mDBiUt2HD{1x@tprB+co9{4I*JGl+6)$Cd&;!(WgD-v5c)>{Q8+QrG33uNWv zgWU)nVx^CTPWY@u_k&TAqA{|_&sD!c3g*#`sNmOoFTjk@%BS!7K%I;lC-wQ8Ui7Ay z0s{UT=I)aOdvjGsjeig5%HRua%J-jtvNM7SPj|bADAW~KwM=+&wSVHCRB*V^^1O#_USNd2(toYzmXZ7)oUUW&0yJ!R=xMJyITla1o zXZTGm^5+U>(tQJj=CTqoo*$gpc*>CDk&%SUHU69osod zc%bLLgl8aPrdY6emQK1^&Gd&ZT;{J8o{nd89pm-p$)~@sU9Vc?mT_+$Jj3q^lp%r0 zUdU9`!zQ$Gc~!4{J00~LuHp2TB?6oGsRw>~Pngdj5?ap=rGy&!Eku)JrmW3VCAb#F z-IEy>&`)nU!>eI@qYihB;5y^2lzG%SAy>8_$2gw$Z0WCjr{{pntbc})a)O>x&In^D zgP*7=0gIF(Xr;`}19xphVCchG=X)DY^^@rw%=hNUgT3Gx-wXyQ+2(;6{5((ARq?J9 z?nJFJK(oiYUTS}>bDx7%+R!@WGsY<@I2^Cro8wDTyn0T?paOtY28b^7Z}gyByOSX# zppKJ!c*$-kIIUKj!zBZA*eA=2&I03UWV&C?!t%;!P){ZuJ{Byx#G6&=hFQDNCGNq& z;HiGaoEui({ZNt79M6?t$~@N)#FdRr^!)Wo)?jySkMh)T-s2tk2`SSqx`%MM#qI(WbTl9qtqM^U(;c6fB8XgUJTty2#3s4G^&CxA7)t?+9Pz^%Z 
zV=rAky};`zL^KFm?7Lhp`WQ;kS0&AmGj510JQ@b}tS8#Qb#?2RMgy376vvOlfv%`r zPd}Mc!2yz9DrfDDAjU3V^y<`q_MiV#mw(|;{cD$JpL}%rsJ+|2dH!XOn|cTN>&tKb z^}l@is^t5B{BQre%fI>W{X0G0*7NDXW+(R=nG@Nt%in(7cF*R{4VOO;ersM9nx4Ip}i^*d}I3zaLW!p)Igizd;aFZ<$3$OzE0OK>qXftYl}}!qu@UQ z-Q$Eg`BpJcw`5)A1>6WhWi&HEHZOLaSE`1cX=Z~P_3LgfvupT5?HZQ&nMu&a^w3@Q z&_yxo=mBSa{A1IDRpZ#t+dXs*n#;)nz7=NY@b^`RVvSDlHC$}e^*yD-m&aoE00_Ln z2m4$oqQvhSuhE*^@m^7`xkMC>?wzRAN%Dw_t{opi+G+ID>BvVL8TOm8=s>Bfrw`ub z0d_EK-r@)iYoy)u37;QRgA$Ta9PfOn%hc1i>*+zlo<0ZIz5ev*(JTh7s1h+Wd5I0P zbv!7e@jtSJCy{k>z}fB_Kpd}a7{+K0u7-NVphFovcYo_(s^JHX3twFyAMv-TRp;qz zwyl_&(NXcqqT8Y6BQSTrcJRnf3_fxus&e&$K$;~0!NNinJ8BsoTn3kOY*~H{?E+f& z!y6J=v5W%ndh0!_DV#0aLC~Tkq=Hd|C5WaVJ8szSWjjD&BuCruR=_qiv~3#Sw-70M z`g_{~A9M2_>$taF>)>oYxq$oaCpqD#^^&6d^Iqg|CpzEOP{^<+jH~0zc?7VwSfk0*`6WX90Sd2u#$$(D4tJI8<)Jq@)!h}yQDy-*MurSXGo^1{M zXtDR5N;-@8?p3~;!RW$1a9mH&r>7Pyt3(awZHQ@fq9-}cSp{LZd^GqS?>t#Vmw;rI z<+$s3_t1oHe?}#uWr+rR2#|VX9$_^u$CmBGnG z&#qR<#nA8^8GdCsNix6-yz`#2@9we5IZs74yqCNQ3lC?=ob=p0xX;j4^aShdWQSjN zJxlMp#Ay~ex;V@d#cb-P;D35AXy`f^?NOjdVb1nJ!@nOI!LmbBKr3oLIY;!v^bYmcwHOq*F&j zzC=h}JXR}7#c#Hg%+9mKDIp$DLJ=_ZPxeQqMhi61jcA5HdI_hgGZL0~90t>W4Tk9# zfz$QpU$r8=H?Mv4@waN&pV#QTZFk3?HHG)h<+Hwxl70I9-*@@H{^DP{{E0u-yUsle z9^Rk+?vK+&Pq2LY(dFlVwLRZ^^{H0y%SJ%Z1|b^o@>*k;FB=t^;Hnp^#R5LWLP)7j zx1n5#|6Vgmz}6LH|LMux5aV$w#Q zrXSe(Bi)nhp>ZqXqj8IvvUz*`Z&-E-9l07I3ceq)f;_9sCfIjZWg@ zN!S%%Uu_AN50hK6!PR)dUGX8Hr}6ilZA3Sl+P)s+L9KXKyUOTTgPXk3A&GVMPP*6F zfv@9WT4Z(?|EoQH0g^1T!-Wy+J?00>f>s=^T!+Q%%CPxrBj_YU^mnDFLdyMuCb32g zLg})q;N|^T^G(f{GMv;Zl;WLUNX+aBoWR8aLq>Hm4 zj_>M>M0>Vq!Kr;Lz>3+9!=4^DT2mc>Xz}byps{w|Cq8M&J26~b`=PJ`ZU=8MU?;cN z;>&eE>YxiWei%LLG<0pFWp{h7GmGlB=gRwE%kj2GS|CGQ;@`8TH#M3Tu)XfNQ!lJ= zf3HdnkzphbAp5d{X9}tjm+dd~BxH?(2A1&NC+wSs+HUT)>sBxswyexhxAzrm;J3YH z8TI`Zzc7vut+0PhDLK}>laQTn1#e18eE~U!L?N+5!(FNjVJ=954o^2LKKZU#(1ID6eWkR@o1mqweY)Hu$uZFG|4kuyU4Ruq1 zXb~)H6ix#}Ck$2QDj=aCn!~vaQmUS#(65Glfh}D`MYIY~8X|w_ku;Dw8^A|*a=P`* zRUfUvi{B_3op`V>3yRS#Q6m4z8JX$mNTrj|sCOJFRGoz-92T`yZ?w=#A7uAj?;H~h zJnw_QMu2K$Zs$bVBi%UHsLjy}P;k(>|KwP<9HE{0C5&B3RYcCuAv$2hOs?| zzQDi0c{nHn1WAAV%tb4mV#6D4Y@)jYIe;HZmhUuzh3PH6B75)`%qq*C*(kktVqY{MlzE?Ty%l%abjh|7C-ZA3wVMi~s7sbosU4{KJ>0&9y&yT9VYLz~?{yX(N7b zF2DXy|C2qIdeiGwf4ll`E>G@jECs4IifQo?4WNte<81n7MR z$K*U}GHmkO5PN}FPs}?H<`1*ECLm%0SY5{OIVv3Vk+WI;P<1gURSSe1^B5#l>hCol$36h99D@o!;WT zYS|+1h9#qKLzf;gdDGDQo5r`^)GL0SQyT)3s@VYXoX|-}hX&9i@^! 
zjxvN8MpKtQ^c<+h!7bEiK=jUD^?lQPH~}R6`1FRgc5~{T#&~a< z_nc@3p{%(Im1(Yga?1-DRnfGOb=L3JpLX#xa;PysLte#q@=j8b85iuf$9v9@XPl z2@;^bYoKb>Is2X$jgJonJ9psZ_(=Ala9X94D!Y>5MJL)7;M9JSTPZ|E&hZ*)*Jml* zD&6S`Wudzk9eTwY@ipdP1^&##XFhWfK}@5j>u_t7M`uy7OPX(UpDJsBBpHVWO?>cI zQ)5{JQ<*|ZBB-``!Wu-fu)uB(ait|fD;eI4{lg^};q33Qg2*GI1CM-|AQ~*PnuaS= zh7C^VN0c=lYk)i7Mot{atS_r6&aAZ<9T?o+mpnEo* zvUb6ml~2C=X}$Cxw4qEr_#TJ7-`gA?Kl!M4pa0I~VZ-yk|JQ%%@~z7Joxk?CFJJ%e zXT5_y-PW0c-z#Q0il+2z2Z0(4|ibVhjAIxj$c8F?Mb~Rnl6MH*0dn~ED3V@OP&~&Es*u5XL zRRr15P5$>KKFXmXnJy7=du3@`JUF#$iK<5T2F)H>gM+VpRRg%VnSb7nHZ4&u zxi->(rX`4I89f$fXG8Nbe7T;?GcfY9@B13;aG%i&6p%H0AE_OqtFmCgujjJ5%!G`h zHJYcFuCu1alp@4rj-~W`+u>MD#%|@N3I;Yf?BnS6?%Nm7+Y#=o9P5kT7yRP9sx*h2 zrB_+NGyE>YxB5of~jYY(c@FW!r~+q(>WLCxSw#J_Kf=XK6==TTpqO0Mbqpx zj44BGs@gJd8k)fRKcgf3ZYD5+kX)7xQW{~o5l@DQkJBob0mh%ey$mZwP%Mla3dta> z(kE`c6~@RQ?7j#4HufiUmRiG3wWn7kNKh^ZRhQGlmw8-i^&TnD^=~LW-WbA+r~8Jz zLG)qj%j_BGWPzc|#6PF!aC+T?8$LBSpkbpiT<=2g0QYIhQ697;9y=rQ83hN)ZboP-LE zC@moEnjk|{c&TRS-1Xnv4<$S7Ve-hX?)BU@-VTpRIhrKrWUxhj*PCn9HB3RElOwwW zchfKjxF9kY@zOHqm;>HU7N; zjW3!i`}BL?yL|6g|Ip>jFTcrun{Er{&xL^eT}z8l>vzE%4Cza#+682`#)7q$MXxQx~Kjc zp<-e_v7z~>I-`!kS{#A5YsEbs5>y}Ug$J$CJ)5mKeI6dFS|iJH@UmDn%DU1Bha`jl ztTAW*(YjDVBE?m5m4Iv{Dje4N>}R}JhK+}6k3ga(+9XcXI|`A>M^=WEC3?FUoWp@@ zVgP+iR>PYW2`|^OIuagH+92>(iH?o;f#t*4-od*qnLdk zJ302gTa~@}>F@lbJ74_#s}Em(*&B-9yt-2lmmMZE2GmA!rI!(@=oDt@{%ehQ4YPOm z-t;i&owqk1-u8T{sjI>x^9&b

9TA(QHjmz%#mc-rR5QC6N^E+UGnwtvmyWa*xUN zI*4rK!JWsw;Nhc$pqDt|T;62Ak2aOsO1P>$D8_uaeEj&q<(sDuE+0L9)!Uh>Z%8z} z3(idfGRS!oagDh8`z#^4{0e$=7{cvOuOOZa(^aL!xDudiq`R6ZauV|9OpISph+7gK zgh=S-+-b5{b5hjoWWjn1{3umWT^$6f6eJmGWeusfJ6JT>Lgek*m(A63+Kfw1B?|{IG{@fUWGrB8(VZNa;|IC+s%j(?+^ZAM|JhXmV>(`t$RszxCnY{`dcl%Y*OzpbkKhqzvuVs~2}3J$~4`A2fm`McK&x-tFLX zbMtYssmZK&dcQRadSzT~{9rUQkfuy4;UlJQ#$-(lRwFXG^r?g3waq#YWY8!<*+jct ze%0n=&l^R5_2IDvlRVlBrUJ8sT6dl`Bm)v&!q&HA`Y;jIy31~P7;NIR+g0a$#(=?<~@O_UJ3jYrN3*;-l(ZN@$5303`DWVd7q6eW^j-| zSw!eRAv*0H1&oGG7pz)y;eY3?`r&0-$!LoZvu1o~Y@@A#nWQcW2-n%NH(nD9 zZsM)_jdO&W$KVp4XdUlHn@=N-dc~{C06SJ!C_$#FOe419QvfHni0tNRjC)+QYxko= zEUZZSsx+6^W({ZmS|c~Qrh~=f4{r(t*&J~w5dPrDhY_Bue^Y7PPMCdlv%h{TjEl-G z&SuyFkHbO|QyMP=R!mz*E*T5W-6jU0?mqwG^PbE6vI0Gc89suX!No8Gs>Y0o!+19I z-MZt6AtehW?%&^X^r+U7aXUTuoc7G~&g8mP!yZazubyntqey$HOT+!IJQo^#iNdSu zzUq~gU-quzm#w-N*dK^534}gG7VmxbXa>{u{Bdt*Z62;oCuZjrsmz2~aSSYbpkka8 zN_0?oCo4-F8VkXx2?p`C>x?~~l;daJW4vp~a}IDa-|ot+z@l`;7=_!pRlExICt#Cp zO?26zp1{c@%UpquHxOuc?h`Fr^_V=ZL|#@n=nOr0Rr1ULz!M+lI*+?eGkB z?p}d24RVqIP8p7_OH(4Q|76H$Wc>kLAgias0NLGS#!(QO?z;@DwG3n+@an-@qaXw2 zBx^*00%KRAm+aKDe4Fpl2;8yM2+}|L8-MxopZ({5=JNBOeSZ1ucRt%xowYFUvqM{JKB(co zskbiBlQX|~)jQ?MfZvEp5tn|#Rnib^!xp6k+z(`(?I7NwU#}jOctp+2GF_4V&BJHW zP`Ng;dhqyD&*xt5K5FjX8`i2+njh_XPVgR9-=n5%$%4hr2WybPVvDUe8Kz{5D(apM zRlXY06@B^XNtD3ougi-IY^hSGeC5Ckz%#imB;j^e90- z6UqW4p zBO8VTFVkJ=(XLg$JMNA)GH&!wuU5PhtM|t?$Dnc!==jLijWaq2HeT7adXmm;R>M3)kXu2ALC3CB#T<4@y-LP(`f9*y-U6DTNqi<6LTd$MMOuJgdx=|E_ zPapIm9v|tEgJV#85z&JW4_mwS1 zThaQ#D<=Ex+PQB&)0L18O=}{wv6e`vy}~i*KUq&ZFMJzTi`Dbw)HJD|<>Y@rb6!-G5r6{HADRo2i7opm zF|)TdWZFeRC1;*^HYKB96$VMDu$UgZClLatD{L-9&RLV3RPaG!VzdQ_=qCqRui#sy z*x;BEKNZUga*t4GOsBsf2uxE#I?$I8fDN5$U_ute;9q5LzWXLHrk6A}Yb2*8Z(1uf z1Y4l$QnE;k3@#i3M|To!P8mdvuTZi$#8@nqSG{`5ncW45>O0eU-49q z&gxi-{zrC@Anvp`LrK$UzH4jT?)p>tMj(+;R6mx>IXMacO^#$09;?*Jm_+Y3WUiNb zWETQll1LI14C;UVJgG+-)68W%m{yUnEEmgHo zRCZu)ng+z8-sG5=H6jm_SpIVj>}0uKYH&9-7|?r_r7?QiTajl#8pmq@sq7lk%A%Du zN?rTSv#dWr1Nzx&GVMrv+h;Ll``Poq&TX&yw%)z6Ni7)3yq-U~ibVY2R;O%;J-YhA zD}#+*&fdDiFFb0PlGre_A~*lkVJF+Qu%i&1+pc%I-uT>F`RriuJ4~54Frp+0m5{GQ za#({7dL`JwnR2U28Sm_R7#EVd*7#4iO+R9@{vkJhWdfcPT)Bs zGN;~_ZO1e}gF>~)pq zP_i2lztJOfWpZM*_%)t8)BvJ}#Mr;GNsHj9vfi1EQN~vVE9E8U%UokgNbw3(Sw}Df zoH+z^3rQ%jOxQDqbmbmY!i>V6SKe?@#-Jz1kZk0VP0pcgpmDche8ECsd|wZCFIXoH zz5RC?v`0QNGC@?JrKU5CVOOrM1^eUnjDFyS47NWDwGfqL55CSKcTVQoOvnW;ppg;| zVxB?!pkLN?3_BQ{h~dQlv`3%}D*ggd&;Wm~-t~?NJ7b$dQXEuP$Sy0M6LVjR6+FO{ zBsq>YvL_3&O13x2;W&Fx;Ki21;Hk5r-u`!?^98$#;X8`RY5^g+xFl1>qs%#akHIi( znfl>+4OLEk8mcIXw*?dYq~JA-QRwG30B{^6SOB!)+ZxJ=Dp{aIKmu<$LiK28lS5hR zdK?SluE%ND=$HRggUN>fj546jbJN?BwtHveh12s}VZo~F@a*v{IyFQVZm$CBNgUW1uFXEYGCfk3A*(`Z?pR6{JTl!92YZfnTs9Yx>Mq z^Qk$%x@5SZRGIiO`BJtVtE0VN%QRmZbFeKLUz}Y}N>1nc6&z1kt zPye~rzP!0S>V-C@Cp^CP=ux}sm0a9wF#d-4 zr~TZ)w^#e)>}XHBBvaex#>DiJnXSHta&)F+FMeiYk~J@cx!(?SMisqKiXNsbI*I@F zYWtpNRbE0R=x;v>qd3(#&m6~WIP~1L3+4_tr!EmYLW$PyH&q}&a|5GxU=UU`lO+#H%B@;ivHw|2@KX8?I--( z$c1f1=Moy0f=<`Mf2M};FRp7_=q9t;@c4E!w(V*{%T#m-BFmr1UWn_AfKV zP4zmFoNbgdy%rZ1&&UveY9w5A9X)h$b-G@e#sAL54x$^c)nQ{3rFMZHB8dJ*{$QdX zZ%|0c$o2}lYmq8 zr*?g_NsQMV?(Ik#DnAybWW-jmVrmO7ays&u&?C~Kxp!L?-?i$^*(ovZkZNJZdO=CR zBXQd4JLV%>F!#3kE|4%pDa_1Php&RCk;DXteWj5O=fj+I!x7Q21uPk*``$+7#S9PA z^DTV9WX4S0X=egnutfi?fq?X(!c3T5@An8wkk`mAz;PzSaU7>h9HN7P2!B_? 
z;V&88Iv@j8h9UM%HtS(15e2;n`rlA@cFOA2Wjpt3^cDo7ag9OOPQU;Z zUa%9EK&OrcACtui@T4L+TaQCj5XjS!H@yi?svfqlJmr&K{!pRd^oNSRH`nM|A1^av z53>pQgToG|fqI?yYjlmUv2FH651S`#!7TCLuQ$(E^|l|}e|)*u!ns$`FS#eDyY-T- zcvtu1kDp#X`Ti%{PL}k)vBO*NSQWq<+31{Zncn`A{ndnm8C4@Y+pp6=w(987dgMq# zI(G%5>tewgWcEisCHHICh;IIP?D?Grr=C9Dtg-CCqAHwfxV$QQNl8R1!-v~ z6hhPgMp<0J|NJmp4SrM4*~hU5HF0wi`$Mmos{W<{yGQhO8G@JXM&x3LdeK3zqpx$n zqN>m2pWTq{X_(2Rd%@0xE1;y9!+=n|>K*2H63c;c{d9hGhSz3vZynX>o23r^qEpD( z9$8-h(44J|tE1519h;rblj)Mj^fSA3xA-?&lKqCNjO;`Vem3eBoF(a?zU+l5_BDRl z<7ZZ}6Q-UdCp+_@DKUch{8j`o!Wa->0h29w4?5QG^$J!)eO`EQHtXo}6n+Fv3Fgyu z;|cyJK22D-N+8J-K);pxZoT&;4Fl7tAF|o}lqHi*&-hy~= z8CxZ1pj$8%9CJTlRawctuLhG6WN?&XZs=r(tV)^6Qc~zASs5mcMsuYzRt-Y3j6Nj# z)((HUM)!Dt&qZbllGUk|juWv9?UUaBWKL^Z7 z^6=4zZKMU?@Q`%|GTqD&lc5!%3tH63cq`)1yWAPRER$0lIcRj;)R@#@gnKVO3BJXd zY!!X)3fvlMdfK9|a8EYmN*>@w;O6*iL||YqhiH!KidE{@Ri_DFR(v-(d(H1%!^_oT zVdj3{w|0U3P6w4e^`kT-Yy%%|Zf-7GwqgKYPmxvlJZr4j)ed=>+u z$*>{K-s&z&Cr6TGiNq#GtWL6LujtiVJ8!$97kgD4oNEZ`F zaqugXCxmwiE0AO+q0(@Kf4pa_j+KRG4NtN#A}U6mA7#j7#i|$M*~yVfcfg+Pv!?1C zKf`bSO6IdMR)v3lo($PiW+dU>NS_l$vr}}C!8U{H`fOL_5n8>zgWjJDh&ujf;Cg<9 zpg#Oryq|7|-F<#{fZ^(NqUh(^aH2K-&ghne3KrcqwywOov}e+$ctI#iMD*$S;YXVm zpA}VHeC}VL?xDwxKKzM(F+82Jqx7{8{>A1hE=ESfMp=8b_OK@=olf)3%NJiaY8;4! zUG@|;<=%C*W+G{%dG6Y#UP)2Fd{-}?&}^(RIx;wMqG8*uJU6f2b+$+fGg@;bgI5{s z3LZ5R^>i92XC=KuGGZe@kZgr(LN?dLrhAU{!_U6yxWIJhBd=q;f9K)D$I+Kit67!~ zxF)1dv~mTtCc^APW2iZ?hl>>vez-~+G8j7P--2oP-gL}eNAgO`BB_jMWo8TH1eRq@ z++|4eF9B|I+0`ak7_C$syp&br?{ng5^FN+cj`pd_0h{ycZkMa>?Nu8dbHBNz`o(v8 zJh7XBrno)@%R`$CAW3x+Tz3f}Zcq|b`e+cXOMYd>r$N8XP!oNhf=H$7{e;I*YZP*l zu7hG=aUZDdfJo;3bTE2chO4|M(YBVomydk$v!7i4!QXpx`QoR)bNStW*lT-#`bDqq zY%`-8A0r33e}Dyt8Xn#zz6Wrz5#SBH#RV88Q4TI?^( zb^~Bf1y&}Q>+r+%5R(8|KB-InDqFr+gR9Y5f_k+?@8;$scEOtv;i=5bF--Vezz&-o z6%&5G3U7EaP4=3ct0LgD+PW#hL*;tdB?5Q11)nJ|5xD@u9JE!iqL@PxF)H8YlNB zi>`;Sh+0vLkWcX1VlVQ8j~!Nq3(S`-C}u?c7d=F@Zl*mAyf}vPY;4a4cC67ZI=~db zswFkSgIotm@)?DEl|;Mej!1!LX9=Z>$?Zg^&Wnjx9_W-zkx|Dk+_b}{>^l%{gcf_0 z%NxU6WJj_ggMwcjopgz89j3!eh47^sQxMpTQtZxyr^V^QenMqJ$Ozb7^?>SyMn{LHn*T=3vjwQ z#7*Ad{@BTDX7VN{KcyhLY0&Kg=S;45cx*$E04voMwJFWdE@Rn2()Ct z@Z)Ll;UL1OBFu1eUZ2dkfAJ@fHt#R%U1P!GvX5oFMlG_LjUuhM_lCn=pzhqL4{2Q9 zx1mS@3J>U>KSr-L1$MO~Rr0)7*16}CyB;xz?`SwvJpsg5$y&zMu|Vas;5L|%KlzMz zHYbNO5U_us#~dfui*t+ssm zRSj}Nc=W?hn*aIa=#ZYO#}hd-UW{sFBH^a*8kX$L_e^UPx*xH!hj{YC$tUvUbEgGe z(`5;pUR!2y)n)e7Ia@Oa4Ti+fP^cmGZJN{lN{17FY!i>cuD76(xtD+F@vt#+vBG{~ zGQ1K}3ABJB-!sIkDVRU;v%wT2f)jWgN~tv(8?s%-Dp1l-{6zp_K%KugKJ-)2!W#6* z>qtyqPe1x-qaKD%1?>lIQuDAmbfZM`tLW627QlO4Hvcj?3f5H~{FNFdc2a{!Eq3rK z)whP6t8=H_C5_-bXyNFa{PU}*vS&t=sO#+ zBolfCpG@-aZF+?}K@*JbZBrYv%?8Q(>_%6e>4fCiTD}$SvOzlEMo#QGxZ)QbY~(ay z&mWVYN1lSdpMX^-n3D%xH-a#+r^{@sL_Fab5>5}pd+^Vmzz3!K{C~p;gAZQ%C+WUf z@YXgNp__fD12Ug{(AWt%#DjSHp6ns0u0J~8T9>}N)3xXf_>u|wt<2ht=mW>kutt;V zj4thpgi^bpXLCjYjtozBrxr@XPu`uO=IIUJ002M$NklICJTR;Kg(a&j2`uEjrI`w_Rj!g_K? 
zGdU$YBgX3_M(5xx;iqf7!4-x;8?o6dp2M*<2)mUhzJFEktA^K*PYa(u-05W~1#nMy z2E2wSjGP-ZJwRCk#$;A|sY1_!S1<&aPe>K^?4U>bwqNgp0vh66!0FKyo5;NH4KQZ~ zV@++nPKHRq;(_fpNnhaH0-9c}`gy~g1))~dzqov>O=OT|1#;0R!br^GxE4!==aE+~ ze$y)2k$qIM7}q`Kdu1n)$YxSF5o>Iwl)xP$O2pVMV0B$)73<@JLJYqLs{8R3aE32% z%>q`6Jl>RnGMq3nh`!FM&ZFqUNcF0tQKP-PR!CN+fHQw6&lG$^8!32HNF$Dm(>Sa# z)hHwooSb*K)2?~c0$0y~s(V(9V;>yFwaSOy46UmYqJm2sg&ZpsL|qRLTj=RflpxIY z>AbrPt;@yJETA4@jX}KXfrfT6>M)8`U35xAoGGgfUYDYgp5?U?7=aRxYaG@MBYh1K z7-uet{@5a4B6U5xR#hfq(>L9;@B_8|G2zjV$koX0$+%+6vb#F5R#BtiWDRq;ALQ*% zd$Zq%H{We{vo8bLQykWkwD9Xgn=iHNFx;bCu!I`z>>vWNqk0M!jYY@knx7n*Z!{}o zIJOnZIkN`x_{o{e-(pb@Tyzi5Ir?mu#o(PWO$P8*cUyz57fUAYD}=uMWWlDIvr+P= z5OSB8;k&J91m!3gt?5i4^T?L@LH6CP_~9*$S@Ac~P{#;3IxgTYLH+PBmg{ZOZMKtn z72heogD6<26^{@ZpXn=yj$4;VL z-NiDz^B)9fZzT)Ke6=R%(M`8x6Ab)m%#*t+BelCXZ`-}8Cq)Ahf(M8z^RJQQwxcd( z=!JXzpk5g#HIMPGpvL{L6q4{{cZlwK2Bw>21Zzbzv(%xw4`WhV#?yt1+= zR!YFz`ma%fcR7FEpc2RC*Q5AFuekd9>*tpzt@L~JxZZvN-rTQ`XVFo7R>BrKdi;&y z7?fJ0x?^f5UbLRASML!VMR$L)9K2I*ib(hyT0QLzj-#{kf{>q%Z=G}gr33{P+-y)= z)ELf*d`b}*svEug6dD4?!+F2!i=kiIbx8D``-?ZmAoByUa zfm!LTr)W-cG6>TAqAxDSY5Aht^cb??6Wq5QF`GgXdZUv+O+M4D@}TJPqOr1MnB2kC zFp#6`Y+xe-RiSG(%1-NY&A8dUb8@uUX}jh0A5vr-N;%#y9E z+=8t!CoM6c6kui1u;+2fKDnPwKJvj5Elxk#YCzVChbuW-xQ3PcJtbq$iC53RNFMDl z8S%A)^{nAxyNLz>!E0Dw7Z;Y;=2P<ql?HvV$H;Sde!fZZk=7&K;^AFr$tlOaPC*`>Gsg-aAAapzrcT4#FUm)%)$L{T)(F*77AI3O?vwg=EY^G=6I|O~v`a zMm46>j`VoNHa%D2&fjEPnN6K^!J9GZDw4Zj9jF4Z*p!Za&*iT07w;+}7R~D8nLY#2 zc?f>dk(0=>@M2o!=VK8FymPVA)GNR8JvnXd4xRJ|Zf(v+Y%0I@uxm=A_r&L}W2(R5 zir)3)qxr}qY{_{TZ}r?6{-QWY1fpz5=@9)(P+}MkeslA_d2xL&%YiQUKT6=tO{1OS zTZzuNmXWkbPC(m`%7SF3R4aS8p)1dUd-TPSDFrNZugpFONctnfaD9)UnZF=b20G#O zEmL$Xuu;7EjSV4Mlq4Vq+g5-AlAif8-71_0NMk}3dJa~{7bI9qWhD(p3X)2|b1d^% zL2aY{fmLJWw(NO%~_dTSl|(VH+FXHJ%} zQLysMu9A!0)tu5ZWM7@iWV}|TRuEMBni{>u!VZ#+17c!n*$}eaB(zF;ThwEa_C8*f-io+JP|B=?Z+_S zvg)MCwmmX77)mD76QsM8T$gzIu2VzC4<6OyFMxd0v(y%7zN%OLy6bNmw(YgsL9ARl zG-AT~w!jj4jlq-*KBucJTkqaHp>wh(HTOtJK+uqO&|kKHY7XwL(}TIhx2qFHa?*R; z@MA`~U8|y(SFQI*pPjL3$=-(6(G^ly77UWXrdZfSavfFq9)7&L-YAcRV{pie-Nf^Q z2NPuX*kp}Ow2by(k4JK^E{-<(6N`NB?VGP!_@?*H6N(3VNMcJc9`$YpQ^{X^{>A0f zkL~ z@JXAjDncj6F~}pn`JRL_Jky!Lso?0pwISz$_eG(k%QoThQ=`F;C-1PuboYvD)5Wv} zT=$Cy$#_#%J7>Y3$%tor4UlbhB&+LjheyI;^?>?IK0-U(vB7W0t5E?leS8PMvXjV2 z#Ag~ymi9JCxv}D@1u)@6h?suP*{Q8GXMfD1k20q*N1OPEb#&3v2wJ(s-D&t5$)m-^ zFmg)H;N9CuK?i(Cy7EeMx=7Nm^K)Qi5;P}JVGHo7q8 zvazBBKqIo=h;m(4D1e)9>NiRfhF(ZL3C4Rw#iV^=xH4YMfLXE-KugL4L>LhiEuKHu z8<(vyr2F2;)_&*{NOV|4GR68|qh&Zt99-{(a>yr`l*jlJ*`a$hGKg@HO7v{?B%%t6 z=Wz^(VLJ2;XSYuSP56yeOtYzp!OT$9Ck#Rkr=Ca%u0!p?RiFFR6P1x=yk~AQ(@A#h zBNC&la%eL|+s}qi$+=^+kwquxPYDWCzDqt+X8c;2EV*Ln;93zcL3021^mZDo!Hx#{ zQh4knUo@YgVD<16Zr9LNkl?Ue^1<+v`G*fZV;o-^M-tznwFxSEu_1ved7Q?mn?^NOPuZMhHL4pPb&VwG6%>x*A-VLY zKG;;BBq|4AVbel7He9Q3_?h(WVECf<9Kgdizy9Kz=6_}4XULh>r(Xt9&(IK};6^v* zEvb!xn#=EEc<88$dc<_n;d{L+i4;8`vnq=ft3yWW;Kgkt12v$@_0S#d?7Xt9!S0Fo zTImfJdaN}OLpIOGF@hmj9Z?G<`_*OxDU{!Q3xsB`+a1%vN=_q&&0{o${bM0Gu; z+4-&9__@C0^oU#cW@kZ2cBtHAVRSX!Qd>2|qx_bwAz4xvdR$yhPHXV@UKJ?w1qQ@`9U57WhY&}T-r%#}UC`y%4nSQ_t1X=H!?TZ=l->u`z z!R~N0`tNT8$r|9`pq5fk2f!Kr#A$!fU^?G{6D$IvTXq~Sx?}}m4d00&OJdj-Xw{P# zYa==VzWT>T*ts%6OhNEU)Tt@HmBowiY;;>6qbXZ7-N81-XC0nnGh`P|wCyL`sBU1x zacIGvJr35fiIA=3>D%bidccR0;fI^#uAUd`_>R*aaVA?nqb3H@==6WIaqO97C0XNx z9+O?9RK7Sa$>~t17w@yN4#@Jz)AtJd7xZEanUJG%u`vKdLnnI=K<$L-9RNKMhVBVF zKF(vW#SJtk!`DqEN!F#L+pI0U-FS!iTf^f0JpZ!&j(e5q+myxt5y`k~wWGR>Ak4By z+XZ^W=k}4VWfN5}q{x67sG#PxjCu_l;xim_jY@_ykHZx>gU7+((^y~-N{$4#VKwuR zZ8)@Y5{NV0z#$S4;c$wW@>rXO%m{&@I?u9`r=V?Ut^d({3@-BeWK0PLF;f61O|TrC zv7Z&?ooa~H%NUy4>b(2ca$s-cfQD>21m!N#=$NT&QCL0AIZ;T39n#)H8k>W}4;~Fq 
zZrIz9x!@jcRqF2q!qXs!x}QBGow5aALGsLrah8q=(nm0WNnoBqRu9wH`wsPf7Yb^` zHQXHTIA8T=bXh>MS(W63SI{?_vHEzgsD>0AzZ*>O$|D z9?tXj_^qdy;U8Tj#dsiz({q84V^zLGBc>-Pl=yY*@kg|C7MfH~IWEV!I6NCWj>h%` zFbzS&y`O*Hs*3mZ6bo`~NBlcqe7OAh%}*|`zWTCV&EB;|a+{`wp-$!v$ClL z$>|ecAa_3=Eo@uyTb-(N^#yh_eNPI>svce-s@p_^73SI57SV>oRf)(JU^Q09h9A&J zw1Kf?q`GIg6i@V?(C8>UciK*MZy~E*1YUc^5}IU9xYlJ90IeI~6MLL8o!)Qs?b*Gj zO=0L5^oUx+;E#H;;Fo{-`}6S8vl9W`$?Ohj$Hb$ESxy#$-zhC)hi3ZT`aE z<{!~e-H{xu^}IWmAP9_`Izf88ZZ$Q&7O%hu@a$cnGvkp(kFHq+IGZK=YzKL3SPUmK zuK4APznID*=Wyv^uCsG1cLqbw)mg7LdEuEH$BC)C%G&vk46J9+;GUhx$TpaY`WKuX z(m5Xb0{&eiU;KQJwpP?y2fcMRLRR?O-r)JvVjG=jBkH^DZI+5}R3KrKTW9?#AI=JH zq-}O65~!;J8%9fIu$j!xf?EJ;n7U@k4M^yly~3#uWzEms=@0?IJc${3osm{HdOQ6? z7KlF6nX`i6ghV{e4wPM@5F9;Yc3e~(s>y;9hd-L2AD=+PPdpvi`3*ST$F9zcP1)EN zS<*NCq?MG<>284n>b#DX2O?4RG1W>u_f6c!=X z6-PfmX^bmJ(RVY1_?_d-QOZgyZ=Ka~{L>2)NY=AI z5|4Lu<2v{y9vEK(+Ua~sAd81_=RUe+&V8RiNH^74fT>i*y~a8jpqj()I~ZimSn;DF z+i+oZ8Lq5w28>s_=S=;ao_adz+JfE^pet-Nk6wCEQKlVY- z@`Im!-iugzhm1tR+71cblPB$BmW_P%TYsZQ@u!!YAO6Xt{jj~l+oiDU-cNA5tKQ}( z1rAP&*OaqFrDI9%0#u}LN(p^OW1X{Uc0r!lAnRaI8(z~TdGW7}Hef#-%KAG)^c@`$ zj8~&W0)RCJ4-IYRvoIdJ$Ol{0$ila_q6}NFsO7F6ZU~-$&G+;k0=GtsO?MnG<2Qc1 z1#!t872^fZea^au0(K1s-UM-v;RwWU`+H^Oc{ca373yzmJnpxL^W&#>n`>$#{A^02 zyM;tzQrC>24Ek_{F#a1b4y$P>!Jd4$n=l0H6 zM2u%Wd{YV?_Ff~9b<0!k)i9vcV8WCc=k zjCb)zgU`pU@z`SB7~T|GJi6Ccd|vH^9R(XrXg+hi)x}5m;9gRe53`|4bgjSWgmWXR z>BVU2Y0TL{Fb(JCbJi$n1cL;{u5tf3ddRqJgd!y!G&KuaimC< zB`MGpBMK8qN1E1VTjBg$piA`dKw&-BqW5eVXO(#AK*_CcE z;6`6R^aurccTJLUcJhSOXacI(GCio{KEGY1(X?K@x?Q6Sq?@ujYjuk06xmeM62urg zi6&e@>DGA!y7CbwIpo*uV6m4UWykAW=bP*G+dqaRWn6~5G|I2OX}-{qq6{sEc>AjC zuHlV)8$&tHF`9D%!{4VztX+zbjBpuwJHYj7r5PwT_G3H66epvYz%tf7%+l_31Vk|R zGom_2!74cRB8A7j7WHw?eYcmtc*W=AC#_(AbQ&3WyaZ;woz1Cps-!T5%l2&7X*h{e z$tVRCyq6Hn*gDtq#!MlZF-k7>CtFHLNpH&B;}~m{;0|5@&(@4*SZK)r-PAapgoaY$ zQxIgl(Iw;GT-=hG{Jk4?sr%~Ls7=4!RG}1A(0Z-GiJ67l#*v?f)}H|%DM6u z3kseZ>dc^WoJKD?c#I{QW=!GpsD=Q8*9GCqEMTLXQCAlo=I7uM6HZgu{|ssgMRk{X zCu57gZj%uK>5Ou4=Aa8w9167}Kn*oL8fx?St)yhxs9QpZ>v-%BujIH#f$-bkIG${f zS*7GHvEXVWw|g6MCbML*>|TK2JoP61Ea`}*_u22`R#!i&Vdk8#Ubda-cfNIb_{2ia zdzWv2_v1D^`TFwZPk-9lk=HHax_9~b+rNCd^VN?p&pv&6dGeil=Mu4Gvc@|C?x$lz z;0p@r%h3FWH(}=3-P@u}_Q$G{b0l{l2)nPh=h(IUvj%C24_3QGhwIrn=8M@8Nk{Pkm@#^?Xj>$;k&c!@=p@(R^fqfPw25TcK z2;&FcS3-I6I>{fLHh!r_^cW4n1NqeE##1&@Jn$kHor*hM{8kPBlI-fBOVYj7)B$#V zPYZOFgb}cV2=0Nm*U1YGK#XA&Wh#Qj>LhmvtWpZh0A()|z#ylytMA)t7h%yPNKo~wj82nRUP%9Nf{bwje-RdQK-8sejEc>1E zG|ycID5HT)V&uX}4u+t6%}Dzg$>bWARCXqbuw_GJ5KyF4G!{uDqwNkuO|Gf3zN!UQQAn2+ z>eps8nBx|~n9&miyvK--y&R*?&)kfkaGY3+l>Sao+KVR)0r%N3A)6`Lx#@MhAHM#q zYpk@P;Ex|&9zA__`Lss9@qp7~eei45dei17KfnB~zx4lI{^Y;>M=wAB?6W&JEztYH z51%$|T5m5oUwtkz$VekSJM5cSBF6R6v}H_jMP7$~f%n+Pi^|Z&gCpA@1tlYP+W1)> zm0QtIU=5kV0%-iSm{sx|U`heLO^w(b-0aMkfQYurcR3609PLb;xm8aR+KZaNKEwOy zj!vXTvV`7nuB2J)2vS7U8E~a%sft*8%}klLZk^LI5p#I)hbYCmYr%+rS3 zzxt{jC<-m#32LSjiWCYxf~zc|u#l+AowI6EEM9oi8u)tnH^m{Hkq=F0yuJ4#i8kJr zd|owb_n|2-&(q`0&*rZD#A!!qx=JE_Q*X#p13&FLKMrQsO5mcQlD>jmdc)8v-ew%v zn5@ZQZw=LQph}^UcE><4(az?=lgC#v`oyrsh0a6Nc|>MXRnnp97z=?8F`tDc+l=Na zOxL@G{_Yf{`>K9~(*GI=g|lI_fYg_w>7>_>&L1>x_;sVt=);18mPUHyGa9q%RTr~Z zKw!Jl5d|@b(ce+kP|f-ypLtZnd~|I|zAuI{6Qxc}Uso~P4`o%*rWTDJlw=NW?G}xa zXBty-Cs!TfH$3qW7?bQqBS*0CXL!`st~k@jkiG3 zL6JB|%285aMw)@__{CRW_Y_Y7^YK$XmzqtS<_fH(W3(;jCykjKS*4iC^WO-4C+Cb)DX2KJwx z`*YY&Fn7dAw(oES*JBEwZ{@@rjlc+3=7m3yXI)$HnB#Y>%=%~@GqMGi zxaY{3tLkJ`fLLQ$NF0G0_N5(jc7+leqpCu+)iHfQu9K2$=SD1&aFq3>QC31&U z9Us2_{KMt>&)3td#^vEhPq#?R`jNZ!81EG$zxT^e>Z$e?v5!B#eD_y=rTq}@RPF~K zzG`m$>)&q9_gDYe<&_0#TlL*{@Od%%jvSK7?16ToESdJHRCHu#ODrX{YL;YV_5GbN 
zi_QS2V=_T{GAP=oF_JiZ$8Vmmc4a2x3YPRrg5VpjtRTUAVubF`9uVm=xKh0i4w_0B zqE}{uD?TtT=d(kNOPbqTq$K%)E8|6YhE=0odGbehz0K*Cl8v~mVXA1J(feWAJEEQ* zs<&Gk-%XECpWeTGE-&egD+e~vcv5bKgN0ajOphHHREx&<_4eKyaU`f>k?Dm93Q!VD z{wXstGBW8|({f2sqr1lgqj#3dmNjbNmgHBfPga+HG$j!++3&JOE?J3Nr$Hzgu7XWy zP-j>aO=|d%341sqW2dh3bMnnY`I0NjFIJ^xYd5OLwC@c98BYZ zkX%D^^64MF(JAX0O?8O)&$ca2R(Ku_$9K~oyGR)!(Wj9B%*}NIk_im;)Q`!vFA0m4 z8Z31yV^};S?ClY*e!s~N?$cH(bL1DKtG(h~TOh_OrmnIeIr>Wv>g!POOaiM$(Gm9O??Nhy3R6$oi(=v`kS{uP^z?8Q zg`~fZdG7ngLLgCb5{{KofbytDyrE0{WS(xCKbnQ{)C3I{DXgKkIYcEHBNityY zRBBsyo*rrrV2H07DK8IZH+p!C9x=G8QesVtXb<=V0Y>FWGLkU8_kl;C-OB_l+zMVc zkGa$H;dd(g;%2K{7-N_c-WA^6vC!u8#SEAR3=?sygK=y%^RPQ0vG$C8JY zbB(Gwc)Sc(xB5^7?kAYwFd0KlQvkSnv^j*~va_xrlT2ZTMDH{SOb2waq=)7hNezxrCOWC>3J86SX1Z(TGy8FDPj<;8{A@t+ZEfqZ zoz65s0ge1)gK9ZiK?Fv#jg6KxM2q?x_M2T+mfotqziy)7&euP_T)zItZ5Y#Pea`~d zz|_mBcb6Ou`L>S3?r9cezG=52?*jkyX^mo!aXx4_x$pkccQ21C`}QCI%_P`<-0XrJ z^ib2wnp}PccRHNy@Aw2~!S&b~zDF>Qj(>H*c#YtB?fN6pTTmQ5z2Z1|7?xyHlqc5}cDNYVU?5nxEsm_z>Ni3OrPyYsrx#BoJHDE>M%>j+6f6!)+qu0n}I#M$_#5}u3<}|!-*T~PECG3+k&yyHw zMEZ2CUk69l*(Qr|?F@aBTMRBfAjx3^zY!yHUEKf3n6S zxZ*@*$T2*qA`6Mek|1#RiHeTZ;4>B5(UOws6Kt2lL+%}FqsUsN13CJ-cBTstzqkdA zt|c3OPMz>2bBWh<4I0T&iT&-PtufF1uZ%M&&Jd33ru1G@x2aW)4-JO!aO;*D31z`H$c`nv4U!kCYqHIMnSM}~54B)t|W>LqbU&y8BmY{>T{ z7{Sr-BwWt_um7h1#B*1s-nRyb0UjZdr>l&R12K+6(*kW*1>0jV%B5g% z2oAit%@UzU5bOPn z@EPssYrGg;f#u0797lFoIyA(;F05g>>d$qT>S2;C9qj%L9FbK#bS^~7?RNL8(0w|f zhZoZVL2L0eK8=2@wm3Xr=N}!3bDR*ik;UoLCE1P?4(93Ii?Lv|zx!mz8S@GCxS!d= z*u92`4;u2`?_|BFeLk#Xa2W3nnw_kS2|XAxIpIQ z<_mr5t&avWrpNAbc=N>p{An%8>}2NoNw4FRjVd`n+~7cVB+Q@gg{ z<0Dx{wlyN z&rY5NP@z@N4uUwxw&AiwS<9@rwz}z$d~~^2B3t>B&gLYs$||IOe|So_A+m4^Qo096 zI7w#LN4kK|s}}zdY1OPOnI$U))f7mz?Fu=K79__>NcuJbzse_6RJWQXvC0jc!4ExT z7Ik&vv@^bW2n_3Zpq}*yc7?@cB*iC^soL|7x0?9jYWI7HQ$35yK1)Cx>SQA+g!!?9 zM-7LWo{oUktMyJ0Fsu@`Ps)K+g_MqsJ$Uk9jA~`0m8%`9(qWJhWv^*fYxAfc`W}f@ zbB44Ul2^D#9?Z76S!DNYcb>Aw5Kf(XxIN40_j9UeO}(^YUe-*xtYwFWfbL+K(viNW z%$3`XS&o}Ll=d4XAch1Q)wQS1{=nf#hQWO$90pZUaD@was;Z`5Ibj=C=W2)+O+L@7 zIx~tfhO8VT=x#hvN-i`n@TQ7|SdCaHPq>^-ycGy!r3_wB;{+@?o2)ZP?w)u3}giOcqXf-K#X(__acEe29hwFFf(Kc}ts(Wh+x4>*B-R>oev z0MW1YQs(&!kJe`i7QJSKTZchNN=_dNFb@)Cj;$G(!-BxsoeMqF`{H)%!&BRXPdb9M zfOP%E^V_RmYgFjGatx-&fL@FX!8xdBX-;-$=W_a=?ls^<3!&=ia2_8DSJm)+L=sL8 z$C+Lx9=fiF)z9>HIbF5JIWMYLFJiQ<>&w|&c}_QH%>WrzU%E;cCSe^Y@yDg%D_v?` zZHSB=;%~U@RhNEQ-EL%L;0J5);?Tee@fqz%KiGyHCDSvky}m-Dmv-q+0k+_Jx&b`>_x}7pe7xO!{pK9xW16PlISKMyE6~r@rmdnwSvs;_ zwKlYj8-rqXs$^6+U1Q5o(W`V=is>;ciLJ(m=x{Ke&qz>yM}0wGyL81ORW>AuQtL(5 zNnqsjI8`^?8@o64&_Or_W89xat#Z?mai4CEZ_yGy_ZD3<7e9uxWZ=3)lJ+b_dZ#S@ z#y+}N85qy#6#I>k3Bw|t-w zsRK2mz2U!h9fUvH`%`}snb@(0%F#RC4l+}~xAD(>QS!2Ib4D4uEQA@1nFW=@KO7}j zI4rwZ?cj+&`Vjs=)BVtg2!Gy(u49~=;0Wj5f$D!n_#JCJ=!!>mPi#!TqTKDN50vY4 zg57!z|7w3Uf(`Or>C$wy`aXD7%INHzb{%hb4I575AXn5dwkP{=M2y=}it{9(oU1+K z%3ua(&|2VO?qpX93dRVa`UD3^zl|BbYG|pTo0K6F`UcZr5mKB@)x~?7OF2t#t$4i%)_s^&O>3p|C;b zE#|sr8Q27^6iDLb<>=9sSm2LR2`Wc4N>Upfg~W#v;QacFzHnI?KiDxRni=XEo`NSl z`b(+C(S+h836nq|1#hc%qOdY;j(;*w_%{1}IahR2> z(h2~{4=>cj_{>AmF( zqvLEme5nzdCbu%!o>#XU@8|`ZVn?pOI5e=O)f2 zeWwMEBaVmDla^Uz3qE@Z6ngV(>mBlAz&L+2jRsdH`;mCCM>t0tKVW1lwi4XsW2--PlScZi#p!J)&&9sV4bBmW-tcq1mX=+|I9o|!hTicj(Z8lz z<8yS)$VEJ68&pV#V)>f*Sg>*gH@_#mjkm(f;juWN+32ed+NN%h0vraFj-J^$(R7z- zkSP}&y1Vd~7!dBFQ&n|m#B+Sd-0Gs2I#H5sgFWri747gB=;x$rlV12ZwgH`Py8nD? 
zxw|oG>4F>Md&xF=Xz;#VYmbiEG8Z%w_Z;xa!Ueo!aVGa)s`Z zd*6K?9;GLKV9WioXbDS4zcvgu2$kg|Q8(S1G5-X^smzIHy74){UHi%^$mz?@DZIsO z0-2*>o!O2xr`toGJi45~8_Lov#7slDlbL2%oG@b}G%>!f<4m75cXr6yqgG(<0g^Fq zcY9Rt#|rjGk8^#y=OWU=AwdnF%ld2MbZuEwQyMS{h+%#VpSidi*&RG^MqODst|1en zMx>P|_-GFf10Sb13g9|rpa}L4hI9N0yJ5JDa482_beE=&;FDTLFo*t+CpZxp3{DjP z`N1QCp^R=lPdX6r=Rh-Lu<&D^q9oWkBfD!OzHrebyv>n~HH8lv$xHC#Sh(b))*6fh zrHm){Cy59UxdS*Wo&$-updy9Rk8yO-!XO4d&V+%asB~i3Sd-M=k)T#*GPVgywLL?O z0&&J{Me@8T9j7Aej~->=H^ce_KqJQ)@lN4az?-4d=)y?>cr6-1CmLO)b0v}-yTp;s zW;EkiC%Sa_(80q3c-xxJwl#9*aXieX`r4v@JrQiTDGbgKMJ*Ps7uk?}f%Krc_8O_D z+g8sfkGB20U#5O|m#l+n1>qQKIyb|EJxP$Rp=Mr;mf(h0)q``ibBa+Dz4fBH_B@zj z;d1n7)Q3@pGvyJ&HRCblNLR_2?O^Ll_`ItisKi%a!tXSP3I}?d+(-L^sqk^|#&-;! zrp#b!z_TJ+DhD>BXw4Yfo4;??s}nlEtK z(0h(!FFikh?ZWouC-<7dKwJCNv;DapHj;$lRh4J~A9c!eA-X8lGCJ3p5ARaF1;Cujd0C%yEgX8#LY?Ubt2)dsKJutrUrk-|9c6(F0S!bRRE) z65yutz)c5-uW7jZUe&{+v9It{i7#8DEaD4%x|-}}om(^LiXlXios%fFJ(;&ep^Uw8-S1jn(VWP~2;OeP@>@Wf=TqzAa+WlCk9oEcp3 zcb(8=d)J{rQJEP~V9-^Z3o&yxzw5oU0zEI+F#Ls4G=ccA-bekCY3y`CmV!GB=5!hM z8ajk*0zl{psh}HZNF8-}OciQ};!_MmalRVz_1d9<@V?BQ(XEQ--n*w@Ri&6QOph+$;fGOWo#k^}{upYCHIB=7+P^E_M{ zBiyNtQKIM?$I|=rd5r0O4b^V-f073whopo;!*IPgHXw|fKGUgoL451FcYNPtt z#sL%RD0RVl1?O8^Ff&&Vw~LtM{ol)29qA#q^N^Ja=O zdNDizhfyP<>lWq6v^hmi8BKzE_&MVnGomUoM^eS9=DX7yL!K+>aUSH5OY!u+hhG!~ z3T!RTbUwSCMz--eR`U2v<<2*ZS`XOMgB`wfgOem3dY$Po-dotW)Tagqv2DA6zkB(EbJvHLfp2;u zoi8|_t&?!2g>j#~Zy))^!IeL;UB?2~^93(*gxA?R89g(n6uw;wV{6ez&qGg=41R$&`Aikm4qL`2K7RE!!xlaitzQAiNuy(j zmBZWFe7cZH%xH`bHdkq;@1mG=rH(?jUiZVqyS z4+O?(CB8maxZl`N;}+WypocVK@{wL;3yGejczV;0xu;C^|06P9x4q@C%<(XT-#^z= z*B;-koq~3If3v-Y8gjfmLWMmJk&xkbCIulZhPWIgn;mYrID#|0gn47Ab1yt8P1gub z5EurskTF~sRcD-TxJ9EjPBApB!ts7N!?;tOC0qtIE$0MAnE~x@hR8V`EW%sh$GD=m zFBEPJt>Uss2Ck57dr`Aga%HMr>NOapG

)OYY-|g3T~H+~SuEkF_96`wYe5Cs2S} zeWK{$q|wQ+&_da2v(vUHDPHL1DXrezgJXv48a^|2y}0MSOPDiqPuoJFZ*bLdS&`4Z zXdNEcW-u5y87X4>+K8f$0=7{mfrXALOV*4E+zDy`<2OU9DZ$C9ZfDY|armve-cSEL z_=jE|bg<0XdVJ?^UKu<%?%fVJ+Ar`^vvKb6U+}+r_2P^kJ?zZ)YcITBn-X;gi_Yjx*yCe$mE|=0P`jLS`rAG52hPYjX*TBli{V6i zoQ!67OK8lIs>uXptET~p;FD>iAZL0o3;B%P$gYI^($yr;P67|-~@>*icGn`wSl{Q@1lZQ+2dd&)Ni2QRFVjI=sd15etiF zc;6vm;9$wn8lERS#iErk8A+)Oc>PFlRDfO=`{VQCQ~G$SWIK^pS}(bB`62OTRDAeb z{b*hBs;c4RH9Uq!ayDY1UCKuP#=ilo+{tyeL^89MXPnRkf=a}H?L>nP&x~lowBLzI z0faXS*nf@nr;Wd_P;YMQ6q;7ra$?|S!4u~iSR9Qaf- zWmWF+k|jGHRm!MiV3nTr$kju=m>O=cYViS8v8+d0b_nDE0*s621mo1j#3Ua}2{<>n} z7LOS91jE3|S{XhkyeBc5WU6IOUh#`?33(X24weaid`u|Q+a)iPmFUK61oB=v5hpgy z_bxTu6l;SyO5}hVs4Cy3>~FIDh5(x)sHx=`rrAo6fr$?eNrjp6P>;i+hVj0{VaG^{ z<`-^c5s#vkvm6CK!MRpX4vWFD^dXii*Z*KuKfU6*XS>l6$hz1iEjg4?`fw2}_JhkX zPUSf8BGS5^1D^<+#Rdc1Q9(U4rq<=tjrB@cpERw6m&X#@?)G`-()rz;H{ZUw-M)2y zd+^cw+tUL3UOjsEIWV$Z&aUz$(1L*>@_NJ-8SJOrj6$VvhUnoCx6y}0SXFem)4znm zc+*G8?9$(cKSxK?i*D+Hjf*?^2M5X9B+rNoylIVf;$dIuRJy8I}Q`-Bgu!gP3}uXx=~+ISgSkCMsb0{=meSJu;`sG}0eX-x_AJ=EA1 z`cAc8Wp++T{D*nS4@O-(Q^0Z_Mg)Ixvce-{1q@JM|g3x%}|5i1#E(kGd(N z;t9OovSWU1=Huu30O%84Uou+>SNcA@%e&AYJ{i9%Zaoj5H){*3H=}9EnLA)L@nLvd zLN%BXF56O*-x>kTfk@d>{3@Odu7=7Tj-ur395}^~g=;@XC1)rd&W8UJ$c%1eH<29- zx`l_qm~a9SG?6ubyZl5WPF=-*vR~dngCig0_kl_2Sm7rCg^#GkcRCaNz7_QP5ZJ1$ zsgU~Z{1_8WhU;&h2RLm7H`t5sEO}~z6@08gAIil@iyhMDm^s~CIL+^}spvo_+$lU; zt-d5Ze7nlOh+DOFwAUdBAv=nlc*dEWDu?RNis@44qDGm{`AcQZp^@|^TMw{#OLFON zTDQNpAm$7RuH>K4BTFr4`5pKkQhH5&nCRDN&8QaWZL#V84UX4cx@fd85$igG+ zd>S#Gg#%yeUIU!}%3cgJo^A-90QADn#hV_cQGP_E(Nc|uPRTab5#EU|4TA&(iXQAS zRCK@>(>X#6B~HETYBO#WrqNs+TvkS|P{m}+D|GZ;S7KmSN?QtRrw=eO)VZ92?1=7x z;7*|umCPnw{^RO(Oyq3ziVWE-PGwY;%Q4jBTN!j;Q#Z!XxO$7G+N*Z2K5M_9@qo3N zFlJt_s6C5fDq0|p-JBC{1*4`@@`~m;X7N{g*J!T}fKUt1&}J#MP~X^$qzIfsu$oJH{tK4Hg^Zn`N4XK`G@ zABm@l^-=2DAM78$bp70R^^Lb0x^GG<+^tQ4(-?C2844a9a4kA4`0RhV>~1WGsL>aA zVsilx4#A1)yiqQ(>!;hitw&SyV>9VykTKB7TYRC z_9t?ip?gUWr(L~tH!H0Y-cufb@oVrKbyQnR(L(NUT5V@9BQq4Uof3&sn8GGAU;qF> z07*naRD8^-y@c7YwyBed5TpdPn-DALsEAPezZTWV!8O%EFv1^vKdVk0)Kd`Pxp#v$kbyJc%~CG3C~=|k)?B<>7YCumkih{)k>xv&O-lV zQSBxBbP@b%8-!#&T;K@dICfYGLleoOj!?j=qd5x2CWAnB%il;~kxlQwlU%Sv=X+o* zoGTKYlgJI7_!yLMuEF6h^d%Ex!#iZl)7C`;w=%A)0uBE%ak%s){q6v=p}XX+3tqLg zd}iO{5-p}bKBZrhEH@~ya9djwfMs~K!pF?N;Doq>UDw-3kJC2e3i`;2zG@Gf(2eU8 z`Y8U==XAPStiU^&(Yf7VXb)Ze3}3TPFp3Y8poOmMG7fe-2lDXw{ozRv)BE`9{?yCn zvVGmLP`Xj&Jmx6W z1ZM`DV%3wcMxD(ZV!%*rSO9aK{cf=q)1C9FBXGo`$68qkaZ>m_L$NW{o^;-0i0|@J zg5H~#IZBC}8J$(f=CHE*S-4UMz2VK6QrkN%P&ymoj;iF=)`FkvwPN0i{1(?Y3~#6| zkl`oH%xcHDH*?kMg(uggvox`E#JdEYO+*WIOnETF@DWXqUq>mQOfw>TohYAsvuK6O z=~X*yVG3oS?mUu7&GxC@8AL?h*-!m~(mf?w+$SVMJGo9U2wdX{nwEc|+h3I0uT1H-`0>U53x z>)I$~{74h#^BP`q3xDxG`ZG+!%Q!|s#qrUzis65f9iA+Na#HCBqtuHT_ErwOST(vN zubr|rt_L@mobcVq54`Z3kE4(9ukT)(uePf%zp{Pw{ZD)Tz=KArF4T7C zk;f=}<8%#kK0gk160Ycwd=4|C8}j3D`mg6l*YvFAlJK5GI~2m-?vE{{J7Z^EwFY9m z(0JPTn~La_FFm6VY;o~fqGY^T7@y$?yRH}OrCsyo zbX8Di-~QmPJrA}aL%l4Ao7IB=X?xt_*Zqf2+DTxKJ2)I2+31Zm<i^oW zUxDG(;w+ii8tWP)1ZcW?F!b|H)dNNydLA84s>}OlTU{<_ukriv=)R7^Yy(Bhv>%9Y z9NJ3A2BR1^qfKDN$K@O9F+!m_V@o5Ka4RX{!`b8N<|LQVyL6S`!z(hb9yzYp2`wYY zuJwyc;|D84_QUXs5uzR{enw27`)3q0Uiz4cbZ^<-j66+X1|hx%e|fRdo?y|TuIJ;y zlGM?MX?J!>RGvU<(_@T{WL6*~`kbE|--UnGRo`OB`52A#m_5z#Azd2UBs}p(r>9ug zzv^`lt@K9aWQw0~3t~;toi3lWs|@{_PEKtL$PzWWPQNSIzx0AWCG+()-t(W7EgC2G zS9rxsKCZF_Vo8Zs=>*#R)D;N@_t6p`@j??`3VT(aF2Q%~55`^Vz^9yB{PK_JRIzhDZ<7g9}wvHyl z(T1`Wjff$(FQ+P_-t^kK?)19D7T!I}@jh!<^t6O61rv-~X*WZ7XWDC&CqYT}Gfk8D@lt=goRurV91kKBN^xeuoo-taaM@Cx=5o!`2qbNL5)?~v$ z!rIDmxbL*a!LhfLD~N;BBzArCMWzXNPC6+4a9}>i=NuI(eS-@{ADX&Po2t}5N*nI| 
zP2lf+C2f^Wws3QIQJd7#LtE1BK0Mg&-#gwu{i8y8b2)JHwW}lpuB#sT6@0=&E!Da9y!$50W z(42@_{Q#b-2j(!SgCq9Rp)-Rses=0!+tsX)KF1cTcd8*cBPe`5T|e8o78fpG+MeCM z(-3@%6?3BS!z);>AQi;)&JGfjH(%ZEaknK3F|OarH5`B3IRovF>#syBJ3docL&}q& zp$YFNVdQXq7ihBK2^OYSjA-CVZPL?tvCr=a418BO^Pk$p1LY3{*La1?^xn@{ASTux zKPtG)gOK<>3eLHQUj{Qc@T+__-p~iKS9bzwJcbX6nKHtESNIt?@U84aLdkd0C3ZJD zKzZ1zmw*8`xF?(8zj`xr0C)JZHnP;l(z#V$+e_!jTK)`mi)arkZ1Lh+z_)Ow?~*WZ zYI5Xx+*&PuaIiVC*Vvm5Yp13I58bf=yzVCYjULaMsn|aBPMPlEPo*dM;P=AMjuVSo z=R6GwE^w2CM%}a>kGju)z^`xEQcQ}MhWfk0j;x20fcZ2k9v+f(a$SPzI~h@hS=b$r z@U&Y%0yjzgw7uHkFW0n7(jB^zQ#h>A9nBWc!n3ET;C`Rc-?fR?!5Bs^8PGNw*6@9c zo?_aO1x_&xx)W#;FNsbeU8J?R4K0-CxrE7uIxWa(JODkir$t5R@22G@Q7a- zDuN=MQib4AL;0s^!zgW8h9QYy3s!3Z1H~%eNmh(w=<$(yrYn2L%-X5W@ZuMmLnVY7t@fD_ zX(QYwuo=2$a3M#L zI_S}FNIlcY6pFQ6G^!uM}9aOx%^Tt=(d%x0t>K9+y zF2DTJ_Vi3T-Bz|!Iqg$tdLBY}3Lu6sCqQFs)R6(amv4<1@zx6|vVT)2zM$J8Fzv&~ z#nGGIlTK7O$`*V>maQNeInM}B@Y#qmtEZ#A_*#&i;Xv)-C;r$q66EDXvbGcc($|@i zW5>!?WVTl$Cpbz3 z@#cIZY&$JDys~}t(I?yW$4 zyqKgkJ2QpDZbC5H*qx6BmD-ZUS(?W&|wepewN1lVk#4F&BQp!yiC< z%~8l?TI3(;RCZK->Qvly_CD9)4~8`F!$$to9~BiCa2Id@p^WU!DOVxbd(2 zgu*j)4J+61rJp=W4K29Zo{^&(2YcZ<5dZUUF0 zPd6ud@4IcjAeoM!!`)h+ho@<2aG&?~`Gnzfzywk+-?^zOPa-Fl415f$%uEH1UL+TE zu!JdJl^Q0G@x|=6@_hC5*sEjfF7Bo<-3yQSrdc{6b^sv8bl0c;cONs!ZTP@~*?Gma zHRUSJ#`szCt1OQ0avYT9s9q(Os7PRwi4>Fp%#(L#+AZO*n1^FRPwzeiXad6Gc1BzS zfrslbKP&{Wl|%G7jhofqiT${(YiBt1N{$*BvQD5AKqG4L6Y|Z)Rf6)(98&W>)+lKG z=zP=sDTL!s@qs?f>gB$8vJxSU!g5kUFJSNbw*U*e)}RWN4`2p)s1;3f~n(w1I};30mO@t_4bCW+yzEp5G`VdAPrL zOsmG=3nF?Su1W+d)#q?4h?vUQcFxBi=NvG;k)Bqf7* z>SEwH@Q>h;I>?>6V1lWnx; z{{2VWm$z<3N9BXFTar1$fK_!8lLeL?T-J7~uGhPl(O>KVE_-JWw=VHNMt2fSYrMsO!V|2B+-HyCr{E7fbn7E+Wq+A_}KG}0JH=p z-bYWeG88vXh%ye1=DqtI*TS$r_EodtIa5#rZ}GS;aQC7&`nVi6HpAi~F_{Jf&G0QA~Jl3cn2;TCH9RE<*^$=>2(hPlbXj50)9B~U*jfMpx z;#RxOv3pkenG*H0ml}>YiZ?BXBmE9ju4d#tZQxIfV6R@@zWDM^i)O2Pp)GOomJ`A^ zy)^41W&vAUVGr^h+w71j;Dbp7c*qwSO)UAIU|4;2hY~8M<3so73OeX&|GPBwOTTxl z zu6_1pL7-8A^_fueQU7mx2q%BxlT;W9mL4b}Fz>WWi8CnKsl9g27{Avd_=yZS#JRj@EWSZqvu1DkG6&wq3c zY;qAN;jMU{Pf|FshEFm??RxO*sSh9E*R`c%?B3@S-k#t#BZ|?hS01JVVmeoJDt~JF zM68wl)*o=o(u+se)rY&x11PnRAU8^vtkCWk$&g zBo-9O;7@nR#;I0v)C-#-4^N{G!Kdw`_1W}Id5olRJgJNP;E@s>cK40Mu?LIqBmpxg@OexB^yUs zZ{Pa_;Zcqhc6vAzhC+lZN+-xw4J{!y!God_0!4_nhzgd>G6j(Uu#bqyzy8s>4Oxo3dRYk7ym^ z?E0d+Ua?`Y=(BAs<)|)x7eGP=&t^ZfKrt9_4)B3Den(Sy+qY{Qnj#!SMTVwXtkUJD z5AJUdKmTlda_6%i22`*4v)kK)&u-O5H`9%;?(c8^`tyeayPauE2OXqzl1R#cN2e3G z1oYa=7q(ZgpWSZsnAOkkJl<}%uH$OYO5gkVN89Ti8~d}?n>_7V>F3UO4s5*~PbDKG z39-4-nfnuD`iaPpr6XucGT}bE4I*SMChe+df}8Oc&ZDE8$nX)d?5tx>tIw^^X>OzA zBi-(C1mTel*!RlrXN^H^&Wr8^%jip!S|@T^ItV(M8R_uJ_lWA_`lIt0KHKjg&pzsz zPZEN@{@5{jSAAsmizloVTxV2=81$@ybEebzGM>ur_00LJ?P!T#DPX9_M~)UJdeMVE zWwqmTb}yVy&Ud#5ckgaDu3hU1ju$#QnuCoFyo945Ut6Q#$$gG_9X}E0MovQKO19yR zZxScxrH@XI!%q$&z$>je@ZiI@Yo`o7EbMwqZe+@*L^$|#?O8U>=a3Ybv|tkP@XSK6 zXu(hhYrBf>84`xOGGK=nT-n%o>u^{j0@aI2aGVd_hYP~TUV!LY8W4$1QRbUaO>PsQ z0yUIljA~3)nK`5fY)+9ME%MK}NjW_>Ci5 z*RVm?OB}|5c5eYzXaAtDr5Idzj}v+X^T1BNOz>teYjTO-ON2y#~MK zsLXkBSBJy!?#^%cOmDJY#{?cqU%l{VbO2cCNwjmeWcQZCb7k_$=(>ZrH0$__{tm1u9@p z+3v-UEQ|fN; z$RIw_gRUj{!7)Iw2{u56SISM!6c;T5&f+K-wLz)kADqAyxW@sIcXWa?cQU5tZKgez zDZrTOnZze2%0M4{{_*zc!yj(fUb(q__Ti`73pcNAZ@u&4cJ%tS?emYn+`jnytL@B9 zP9XlK2k_uTHDbib8Vl?Uk!HIxFFD`{O_R z@%GxAceit|zPnvM|3+1V8UYLvUDWjX)XCUSjd_pI>6H!N;t4*^f*kibmXjV!Xa$2T z2HRL(Fo~ag*(IKu&sz>(Qd?m>qf6e-C%FOedr$6;^}p#&vtNCMf3eF zRBTv&P81|hB*Y8aFpP>9wFp0qvvz3!^X6;M95RJ>OyOrEPu)e|d>5ayV z=?zXW;Zym%sI9#<8k4;avJ%rO2Y@ZZMLFM3Zz4`1ZVWb_wa393Ok6BDv(WY z`8@IfZARUKGq-IA#4KNv-|FZ@>(rCXbR#n7@FF%czAij#XVDMm#aF&%Qkax@hO_DY 
zzzwI`poi1x1DjgJ8y<6w2-V2o#!nS!wTs^PU%_z0gWBq6>=ey?CS%tps8$awwb|Y3 z6w?@dR$f9*uW(d*K|mY5sRV;qNnKyNgjV;&>t`iXM%pDixvtK#+C9_sRJF-3iNv@G zhY34;c8lL;P7Gr=gqqXnAk9%W3K$Ym)7BdW2vl>`n*}8)2caG|mXJDiHO0~h0ZR$m z8h5rm>eb4A^wcVkIU!c!Q1Lbj7`}>WPIWBkw?DJ+)2i3Icki`dGe~6ym^MsGXmk#Y zgL-z)WKxKUjIw`w{_U8l-YoG6hpvnTF>16BI%bBixA1dy9hQM%4G&lx5Ifx=LUG(< zo|eN{#s&w&B4Mhzw0T3r09%Hrh(Ri{*J|q_HJFej8;ghZ8jm~2;cCOe zFYX;3zgghkEf|>(d*h|O?cUwJ?e6VQw_A^Udf1DvMsd8%*nzN6kr>h1bxcjNdXro; zU5L)HjARxyGly8?nNGYAxthpOD->o;!i+RT`-lll&LKVP{~%0)1$v_-0jT`~{?I}j zlA)l9SCy`H0cCKj_mh0+X|MKg&tkPk9|Rm1X`T0`1wwZ(?UNkvqRTtzF=AUkGlCs! z`${K{wucW|VB5LvM@NqvUcWKDjkfV=DgIjY`f!Hh&t1P@kMMB|+2c=*^;h#j48?em zm`OT_eT~L1zqq}I)ZMOqBRzZez3)5;N*EuiUKp=9 zWE_8ZHXxOp$G@gaIwVprFVlwPS#yg%ddKySn8vcLFM)T^$X>l@7@34?FUes?t2v4DY%&)%#BndSj3SI)teGV1*@<2IQ(zOax9(oq;ClIg^$W zb&8xJcT8~dR>w$I5~2*d>y_us8X@4sLoy|dX-6FkyZ>ar0B7aGQz%`d1kQj=CAr%UGjI<)PU(L1G+y3}+6bRaGDl?ZxLn zkalppzg`fCrf}zUlqGt1H72R>Y-)Xzw7YtSY9wal)Mxq_JY{L%!ogiCCdl?5^D9NO z86t)*=U*Xu2us(yUGdgEHVGwz#LKR#vcM~fH@X+OI6c>Ch^z3u*&4dFg`)YdiQ8+5;P zrv-GL;&--1UG*-H?|k`Sd+CJ>&3EV6(_LHOu3x{n{l<4sZTIdy+CIJgU^}??`F4Ey z#dcoU3rl9X1Wp#a5le^4HU)>t{Ba_hayHR||I$Y;GI?|*ZRB`{XCt#uuANnk7rH&LAl@o5{@|X7kx(OrhRS3XFv2_!8wJq zR@jvP(~})YSP|?3l2Od%%hQ*UTet;_v@6pOPs(?u@7b5>(Gh6>`J~oD45R2mg{SRK zUFdVT>fH!>v-Ux9G14Mo=!Fx%QN2*>R(0rY?TdQdEkm1|1A}1tO@sTd<3<~+pAXd8 zsxtT|uFdDD#fM-;-_#vG&M@Y1Fg)U2dXKA1pA@1rb8M(DFqy6y|1&z+H3`RL zkjfX&M!(aok*QcPx|&TzU!zvjeprq0l!EJ^m+)tpe{`eIMsyfaR8lzn@R=N#3f?nn zBemo68rsK)Fzim(`_U;`TPc%i^(J?!F)?5g&fy(MU@c9Yl;ioxM$oirajOndE!9>w zSnRA*d}e!Gqq^)}?^os)U9D8fccxHl(T-f=pu;%G@|1ny6euKSVMdKo=$b!q3K2-T zBS63-TCsuWuie=a2F3)%ggwthH|!EL3qaQ1Nsc1{#TcDFJqz>O=Xx;4*`AC=%P?)2 z#i3A4Osl@FY?znc&o&jkIOk7KjLqp=Eo(*X!oT(jOWXadgYJjOx*c7sE}CWQ<{u5W z&w{zGb7V6d9v{eZIRbcBfwK$;a2blM+8i~FSwlaQKuzFfIMnia7KUD#W$F)Tb;g+k z6)b#oE&fu>fmrdbli~CGw2}32kYH1&StpP@W;7#yjq`+C<$TM!lQP{7muOh70IxWH zF#4?R+JeSht1LRv1Z{p64wAbrR%42H508S)=(GYq1(NBM_kHMsE!_bRUV8JrM)7iR z(fII-ueSZ$Kiw{Mb~+uN>HiD?N4sE zxUCIk51tg%3p&ZPXZvk8-?_1U`jek*w?F%@Oy+wfus1F}+a7-U zr`y37*S5XOFKkB_UfK@NUEB^@2Qt$L^(Lg9$A-7Fx6z&S7;ABfelqNoI(Fe`yzP}{ zuInEB!l2^?$2SY5=qHV4aPe+>V}esgZ%j--XObRHWD-&Vq4I2QEN=e~f@U`s!W_ z*?Qjmc{C+c=TKDfgZDpb8`@Day%^s+A~r0hO`NIWEHIs@fpj+`E1>Ug5ASqh-~IdB zrHh>bpB~letbY<8bwtonu*Du!>?MOIDQ-a`NZ|C|=jD9Yh{*(6${oRG?A3(B$#6+L z)60y1eAx;A_&N@@OYx^hM;ts1ILWGH8~y^#Jh?HL0+49~xM+#}6FXmEU%QvyzJQ`L zC17Jdn9JXW{shx%lTGcI+nxo+!C`ZqbvPJEnu0Lx;Klf$8Aaf)8y(4qN*rdsJfC6e zW!5&;_KY};{NaG1C-w}l>`9Ll{T}93>3|Ea%B0&F;q%3&TAh)k6JCr@Bg;WP+QSa) zL^m`V!8$4l;Y;w&)ZVOdiuVR4z+F7aFP`{Xu;vs#|@S(ZKS|^!Ok6(-+GSe>#tEmT4ALm8? 
zCXheB6yl?Pm0t1=es$?R-!Mme*Xh-vDd7oV4eeq`jLGjsKa?}#x=vy1_5fL_Gb%+wq%+idi<<1@d?AvW-RA& z`s2vDe6B3xLI;5C_b5y37-g=m!z<1-E^j)Zh&{o3FD`i4!)XD&)?v`#Z6#v13(AJq zFu%~@ce1LvR~a~~gu&4Y;1m4H#h1)3CBy6=mcS2VgO03OMv>aAC)z@ivoNnLX zTT>^mu&J6LG=+ku#v1lCijpBESL+HmSUf$Q&g?&V+JjK8ZSVZ@yW6#E*PCa(T99e@ zHqRaT5NMC{qwP^a^=ZcxKYH|8k2QT%Py22|@9k^uh|q^UIc>jQYPm$tXxd9$E< zVf)5wU+dbF?OuD#@7%jrkp61>*`NOFR;$0f{pRod^7gZjKJPTLui6`ZZoAv+_^VN( zC-!9%=tgoa=)_-#8r`q{-8&B(8939C#Tnnz`0=1ca}Ph;F1I@V_G2+m!;^OC3=2boWa1pNj+Zho*4=*w*(1fK#0JSU~bNs=y*SgF^w!=kBIm z$tyVy^Nb=9({h#qCyentt?3gvc+c?)kd>7vXjp4NSSyi+PFW6gdWQ7GaB+#1-i(K{ zoIc%wM^-~3L!WR@xwe3HC+=}+gL+6kQ=XmnjC!bYpcRSnS z!^2yRbbu3$qw8?$!*d^iJEKYAb?@F6+sz)PGr@RlV&Ok)YqG%=42)VCw+I4TuVG`u z>}A$Zq?hQO>4j*RP6EBx(C`49Y+d~V)f|^s<)D<;HyVY0$a#w&m^bMvw-r1Uu)e29ti~CR7aWl z9Kr!Yy^ab7&q#{89MoXl?tpAeyb$d9PQ6V@$=b({G9|rd#mND8r;*_8w3CW1b~!tR zgVK24Ti5xS36OoJddlgUIOA2t zb=Sw>96jgIiZ5cEobPL)vT;M8*8=|Om)UsjwzC0OJJqg}^G58t9Yyv5z(n}uXCC1wCH z!#DLnWFm&#`wdtNy!w7nU^E0Zi*JmR>R}911TM^uAu(Q09eR*i>f5)Fg8S-0<}m1z z;Amb-$?i`AP)&8wfW{KT?P9~F9q^cPp589lb8bNyrlh!n_VfyC3!ao1&J;E|We5>Y z@dPwHG5=H{Owkjbtd0^GJ(^Y4)zvT z!CRx@)aEDIiWAQqg3>TSc0#v%c6twY`RKdn!B%unQWgt?twz*j>A&B;e0Fu#TOO8tTzmJ0ZSU4!RX@Fd>s#CXb1z|2M8t3Ulg#4M5asL! zTk6I!C399Mc`k*WYof|>ycvG-6%-`CDoe%Jh)&6G3?dqq2e!2foYTy)yEi@fbSnBz zwY71tLqFIhyx8m-6(FM8nI2lO(>E~ah3C$5I7WB4#b?fWMTfS-9WEb>1 z02B1^5?hD%bhSC_wGg+9MvrpZbaHxrQ*hb$*yz{@J_f5I(}R_S>VcJDg;M$uv|4|k_WlD*Hq_jfn*X5sO6y_v+FOzH?Z^-*8ZPFSu(X zg1EFX;y%x)!strZ;*m8(Y@Z)y&r3D}6xGPJs}4vitx;z@c7+G6EjHKR z3_>I z-`NM`+S3RC_wB3OAO5o+HJ^Jvha4ZnizBssWyymj9BrrIcq0IMVb!W3O^x~tfBM*$74VYj z!!PgE!H<7U#pq-l)K!6#kFug^4s;^`1s z$F4pncbdp06ewLg;Ey?~RiLt~r&Ncaw&|NDm#=6+S*i-F!twY)0XT-wRw(eh9o~9* zxRTz+6m3dAad?E{|151P6R8Ts27X1tHLVJtt_7yLIvVITN(!Q)c+%au&sRz4B8JYW zJ9p4Xw}Ugdb}|03f$)i{xVl7JnS3}Y<|Z39uKF5Ln1zXIjc#Z#E{&bV-q96-+VJ=! zrgUCMbf!2g+-$LU>N8tJmybkHI%ISUz5xj++@aac&}z)eP=4Q1mj+r9p;|Ax)jV=a zUxQ(pBGqxNckT#9hD^`_qg=CKBy#4dM*)|8ODCS6vLr>Twf<;T4U$$t8|xaPDqN>P zRjx$JRn`7we;Lxuy)C2)d={ka<2>)}kqe%gUqsv)=}Gma$n?bApI z?-@$UG|`~C&KlsL^3dZX$&(ieCOt}drq|rXNYDUQha?d{%=A8flfU)Wx~d13n}|KoqVebN)v-uu;G+WzPd{%npG zF1b9u|Hb}x@8YRWHA}wXxF3H%Yft-^1@LeEt@mbu+=rihxxLgJ{5QXGW&6WF{CTkN zHfMb1_)*g|XD@fi%@<#6FTeZp_VTMYw!irOf4N<5|24acxyc}IH4L7Unxv@}_FbW4 zg~&!iX1Gm~SAZtjvW4o6E%rIFS68qnQ~$eQAZY68oso&>5{PMYqn0MoO%G?4bvKNH zgiB!ce9|ih+iT6I>YXGYG7nY~>z|?NY`*uZ6}#*C%Jpi$?m}f684TocL+^Th2X!F! 
zMlY+HBAiaItmCh&Hf zRY12k=2?!8zQKWg%=4o=EwD%OEaY7a)Y@GUc#!fLo;l&0M8-89q7}|5WVjgSj(4+F zP^nS;P~snWD|`;0K=)lRtagdXPRYLqaKM>kSI};S(v2auVHZO#|`4 z%9D}b;GDFD5>LLA3mqOl&9~7x!jURX zm8Dn7NwI?Q(tI?r%<9G0NsNnehX*}er8XpydA-U60?%q*Jsj2+P4W#zbg_>m$cRnnE+N^vMoLjw%LzfxK{za3hgyE95}lN0y@{aUNa-Csb|YohA2D4 zG}|=@b4O1jp^t9C8Z$xw@`h^m^vYA>L7oE796=JOmGM`#cTiGiD0MfOB0I;;0a1G- z3FvK}^DX+N4Xscv@?Y$s4rk0qR}!`qi1L87@DQBp;S1zByDb_(H58&WC$-ZA$Q zIcveid@UM{7>r<|MejY*{G(77!8k~OXz^nlOb`hb?3w$ju136R$q<#)WgbW~*Av>3 z_DMUGOPj%r&y&s9*6MgimpBRUa2YnoH@yr$8WMtIO|NLr5!Y1j;pU+&!O@)|l~IuX z<5LCtL|#D%wxKXi#JmvzdLCBv4voPQFs3c~MyWtnH=M_T4K^t+y{II74;vD5s7int z#FQ|E2^;7>$D;z>hdWsDF?z>HD&zVZ4x}vxPRr>zFn{QZu9I1+xPy<-)UFGw%vTOg>ntMzj1wc zyZZ2tw$Fa>&$o|1J+*yYPkis{ zcK+)2$N%y#w+jt${qFC+-JER!{E<-*G_)x0(WC9??2YYP-*{pB_~)N&|KA^ev|Yb; zYWr{g(R#j`M}i^$iqH#FX1-5(b==1P-i;cOrocBVxTjDzE0<~a>R-BP z@f_W5%h2FTE|csnXFV$5;{=E*&zTAWK|fXQe)}%6FQ+eR-@N*Md-+XSoV(bXft;lW z=j@ay%G&z4oogq9H3^c<Au?XDI(jVxA8$a`&8P zlWauq=r7nRN0;GWpH~vaB0^(G$JSd;)iQN;o7GenjEr5 z{EG5Rr`+gI95FJBcUtB<1XjK1nHtEKqMeN&IZpyTl~LX4m8R`xfPu2ouM-#(hvF+g zGrJH3a0<)c^wRNLeR48WDO$$|^<{?RE05M>VMDJGiXp0hU@sqf!Ux3SRqy!42cck& z^p>QR)E(IEP%-!bq@!Ye0)Fo^j~U?kAqiy{ZHe_(1=c>3l*3Hwj^1gR|l8?O_1v9oK<-FJ**ACFt` zWE)#UXXd|qCkTuZj56yQG(0nd8yQ$3tpvzHHnzVtub$3u2zoYQic#LXnxLbrq-C;0 z!s{o(Io#*NPNQ86c_>b}8B=Y+bA%9}S{voBlN>&hgxZj+$>b^Z%t>jJ)ka_Z8GhnL znW}UR{CMPh{4}zK8C?V~(U3v-YAia#XQ~8m?Zz+!R`J@5VSSF?$PuRbRiGV0=ZcSE%k%K|$?x2?}$;xJBaf}2c&QLt_r zg4qVcAG9fa{DNNv!^4O`&2+8Iz2zF}#=EwS86A?K%!y+!ytcjln}26}=dCN-r3>-( z(cPWBa~F2r{LZ(w|NI~Sz3TLIwB}-e`Fp=wBG_Dae7p4G)gH2PZacsK)9qm^=0Cdq z($1|17k73`65hI5hTnr$jvjy2{QAyz?fUhGgfDc|Ys2gZcWeJ}d*?gf+1`5Zm$q{~ zyZW`)Uf%ik*I(V^o7<;9`#gQ#->zJ_xPAKmhXaQOSz)Y4?2UfM_c)}D?lRxF zWSE^S3{y{n-*>uL_1Ia(iY<91Y6(I&~FMBP4X6-ga%K+X!)b znCexfd+5y#5PxO;qr;Lp$`fQmq;xj@1W|9>9O}%~W>B|2{$hJ@=U(`T4ib7!HtFu3 zTl7RSNIS_6ctIy?TXqt8-NG1taNEz*nrGCLx*ts26FK#U5&M%8K`)JL`f!2>v~e@0 z%_{gt_SX6Ll3TvP0#gm7iNbr%d2qqfpv^j25ugu+l1yfMpkwO z_L^&I0zYR2kBJF9(W@P8Pb`2F-gE~)@YUzg@m@;+xYFnJ$&jSSlqTXjYVrOc+4s&L z>(Ely!DE6*i@^BkTI?@gX*(M1ei$xS)~)0NpRnqe&Q``TzVK;;!Xnx=Y|f9w^L)d| zL)Ge|zJh}#-RUZ=)NX(3=yG5nIjS^w^}vu8#lsx8(>c?vibi1LAzpacs+s=KAHO5k zIThq9C3@aYZg22X8b1aFy2;PQqkvb|xf42|l9G7YXECJIq~hp}WAqol+!N8z5IitT zXd5D+?9+wsc}9KC(b8T?LjVkfxKEHv$jDt{g7Bf7qHMCkFE7$JM69@8t-C~=cK_89JB1;K%v`Hhi%6Q%aCS`?w)(CB_W zkD)03pdb)~2NEmXHGArF^x`R{mq1hy>x>f(qdCmJcu5FW=309JYnXG2s5ge;8UTH- z`2a=T@TT#M+8AUQVQosG1_s*vW66W20~)j+(z3A-TaGYJ^s2}Ni?J=vr*;i=(fxW{ z1T%U@7wwEVY8cnfi_1L}$XRw8&8feUb(V;@#$yV{o9{ymzs-x zzeQ?y?zb52T$+5R)5IQq)jaE??c?`vd-~cC{PfO8+YkQ5KizKq{KpM{zr6kAFM1l= z(+;L7nA@^;v6a|2ZoJr>_`BO{Z@sxa`tqm8_a8kyew2zhZR+0r9@0_}xm*&ELT^9* z*`w{<@BOXqz2E+=?ZtxrkAB`za4Wk1?tlM}wwHhHZymq+8~@qyTmRs{-d_5xf7n#P zo$YZ))H)XVtK0WSu7$ODm#eTiE15|$smuhv8G4VG=_9>d+i}vzf;ov$4Skg9aTy+- z;DF7BxT{82s#oP~ii~~lE4?r;my85AP#Ds5$8w-c2b4_OMz_+(dPCt9?COpm^SRg3 zj8P_b)y2Vx{py6bK=AC0wmtaZ<6NXL*DHZj2d?Ve`9|T+RlnCxiM&T(qEKp&5n+rQ z{*P^R5Lpj!dNv~``z2&gAKXq3`K5+CP8~kU_IsF9Nz0jzoK1PCTBJG9g{}=e2t>yE zn{bZ5SOblv_2}vgAJ@I`g^t(pAxE4+z^!W%ar{K@5EpX<-&xb5Ox4!IZotm=?Az5! z|Ht0?V(6AnTFL(epkVS5gHX*?sDBk3pVbK8pf^FI7OIyGm4=6Fe4DbYTqKfeJ0Wv= z;45@>KY1(FeYz)oADu#PdcY0n<%fgl9nSozwGqK?nAjmKeCMmz&TiMv?;Kw^Pj}Lm z{SKUoJkB8WJ5xl^0}G@01w<=3t@k-zq0{?(&L65U(D+fG`LOD$47Ym9D0F4c%I{kV zs=PP2ngB*XxxYA2qc(z3;#0Iw_sMGcK@pLh zOp8TJsjGHE0v=tTHWALx#yhdYln3WF9W1=9Y>C2nQAuf(r)6F5%7ClvkRQA?a$D=! 
zslZ-|ReO4h6U;MCc8ka!L}&}sWIo<=nowWEP~C@Mc0LQsaIP9mHO`H2g*{mxqyz>` z99jfQO$%Pxzf67Z^?91Rb(R1CKmbWZK~x0R-Pr~f1F@Wf2-t??*TOf5Cze^T*Qem0 zIctcf@+>nPA~iumruzx_z~0L6wJ^>Ts>=bwTWHKw3$}%w@KkoS83}!#DuXNMy7njL z0OID#6dQ7{Z#6*$8r4~JX-i0>h!{K!UHCZznE>k<5GJdbB2}hs6F^HsCRO4*b8kV4 z*UCK?q=wMuUhk8%P>3!iLxR;C8;k4ykd*>W;^eaXE+(5XKFS?R?4&WC{@a6vzA zfZ!A^WM7<{rfGp`&K%2Zu|6u zR#hLhr}^ovjwZgaU2WL>;VYN7-}%RXr$pew_SX;FqkiZ7_Se7nd)p7Me!2aP|Ki)* zx4-wj?P0-YrzGKY2jaB#db{`e=Pl4XvwiDte62vYvppi`(Wh*L&zhbA{Yu^=IR>A; z7_&_gX!JO`9Gjl5#Z+W7PtD2>Mjmu3Sx$0giMi3!^AOUK2WpnRQkQX|eLd3< z!QtWgE+iX?>5N2Frv?r=($=Zds_+4-N*_XGTw1@wE+x^cJ2hP>-^#)=-z+M=z) zwkJL+8N1jHgHwACn}hHCf!dz*{$!M4Mn9s5FQ_kRjZKK7n7)mbg>RpuNpR6aw{yny zK=bH>=k(|!G|HlZzhl2v_AlGTLkyXzKxHa9Pe~i!7~g7g^nG}=nm?VN=!DJXTf%vr z&B4|Y&BvUWX3!>3&CtJ%p4s+z{fb40Uia94pM@Sfeg;!W6}W8cwZ&ao_|5_?j7`sI zQy24M(VwiXp9q8eV>BioYldd35=6X#T5#=r`rdnA-@ftg8zsX}w?`$-pLQ_Ntve64 z+js9LljzF79L^|7@A-Isf6vZY9#G0WS#`)lNeu8Q>d=M${&0#fpJ^DfL~muSy)d6`G_-VQ=mmx^Trc_# zMRovLF{T0%h3Z85kj2+)cSdK8+$w{(t__}$KDh>^*}~X&`eg)U@S>aDgIx>?8yWUg z3~`@S9ARa&74X1|rA!!o;+(rK*v~5`u-8x+R4^9|;5i>#%2sxTuu4QA;;KFjLIO$L%-HU@7UD7R{)k_r5n-71#xyPMD$?jouGG z2q#^?PWwsH59Pqqu!F7$V^ zA==y9!}tHRgF{~0zWTxUgL}NaaN&E~55E6zw@?4sA8%j($G^M1)Pqxg_$NQuE?&Q! zquASC|Hj+3)42v`FK%CbRx;35v7dbYWP7J=We<<9Z+Gt8+HSn|^(L@yZ2#s@{(AfF zFTb;W|7Sngnp)p}<#&3_W)6pL(Zh2E^IKm$*gpNM?{D9E=auaz?>9f6bF%(OAaPXa z^%pO;O8#jHz~k*_53g@6n%}O!{Nnbl|LPxZ&szU+x3lHXo_=9_bm~S4NIa+qdG2gO z{hg)o;KAeYxxd{^#w3_Kh*2Z(_=+!m=lqg8$3JrOJzY-I!!>x>NRLt+aaf4Oh0#kI zID+XD$Jc?XBLhh|caYVLJhVsdct+CvO)Q89X<9O2D-sO@l4KuZ=>mOvt~cBveG&xd zZ0&d7{HDhvEAPl;f#6&)X5gkC(ewa=yW4?0Ec9&rc=fHf8b$fzo;iP^pxk3s&$NwA z0zhTQUI&A*jVCSOJ>7}Rr^utJnR|EdY!^CUW;!Fm^)MC~84fmeU{RK}g?h{qv~lLa z+-I*?RawclcYzphB@gDl>GcG%aTM%T`EL0TT{H4oDS$ln(fT3qTgiNacK4Fec#By; zOp@$Hau)UAYC61u_8m&IXe`_XEZ0fBK4|g@v9Vp%gWG3GdfmnOd}ip)-=B0w*xo_9 z=S03qrn;}QEFl^=u*uQzGxGqYTC$VqT02KxY6R+h{`XF&DB1$*X@YkibUbdb{^mDc z-QN4gyK`{R;Xz5*@wM&sS6<#8wtM5F9##C~4?b+`=)v}_ci*a=XX$#oKRO2Z5$r=gwcZUa3UG;aXOEEN42hA3DvU(*C@Ngko}9TO2o$?@xj8R<7QVPiKW!E~FwNt*A9a7c~~f{`#8j_U}}4 zv5i$ke)R={h>6f(@6=EJn!=?EXa}^EY+0(kWD{?bV@C7KICn0*U zhynJvSoU5HAdqy?$z>Kgz{SxXMqRwVU{;Bl)pPT@Js zOqt<6E0Ak@IOjZR%ZZwT2yIjn^r*!QxTpHss+*DUgHxMj46}-|S}hoi+A|H6!Hpu- zH)s`a4gwPvKKRja;5f6m6L8?g{O;!l2(Y?NYt8}0GBTuc$OqLHM4qMM{p(W&RY^6Y ztKc!jP*Zfw%HQffd-!1I$N%b|AAkApf3$Py)z^<-`?cR_&v*|~x$#=7%+GA+zS%0~ zGW@%(NWWb0zWPu8!S>DfzOnPPRn8AP(dAcv_t&=OHesu`c&A zyv9GwjB_n<709KiL?rnm%X*YmPifM4e@5WODN0b2k?(xfEc7(A&xXu*&ZvyKe$;1M zUlkA3@ zBYdBfSUDoPz;I$uXwDJ(%a`F7oje`A_}H5Iv4a%1>s{+zuiSz>rD<>r{KC+u_@VJg zAL9s#Ld{6angF)ez{d94D>u@|!|l=IhufR4zO>!B`=srO54M{(FNfRF_Q~x=9kg37 zNaCB+(g`vQhw2-q6~M>#qf=5B-eb>v277N?@W{Z~=!9O?^q~hrr&CtvJn7J+H^bvs zzWvU2v60OuPaki8_QMahPdmDpi=GZ!vekJ&#jFV`G1^Ti{MLnrKmO*gertQ<<(u1; z79{I+e0l#tvl)8WXLo-5^AC?-ymoEpm%skjvBwPKUJDw9#o|%=b8dI%n{T~-{Km^K zwcvAq=kn!Cb#!M-;m-ZXPmcc_E??Z;dHf{ZsUyjU+7E7_p&bT130V_JYZvDEAM}r% zWJ8HAMNj9ACQ@j+JvLB1@hiQ|H45-SBEw{_W@|uVG@qW5n9|WSRY`;CGT$ZUr5RI` z5Sp;83`OH-0n6sdXDTGCK~j=cRh+2U>_+I0L?c8e0mwRrPct4OuBi{6skP;ep&I>_ zNam9f^qu|=-TZhNr)g|zxC`Yu1+F4)OdOc@3AmxFLqRRF1>=-T7BbrbIP5`{i6-RK1v4-v{z!5mUT;+-fEq!X^KDthYTuP3nC4ie_e0j1*V z3?FM1QQ1Gt`+4rj^l4&!QZY>11KL8RY@c&#A=2r?=5dSj>SCK=`MyLK1r|*E9L$>Z z6Y0Ihe1@fp7%$*hc{$m9?T$?0K|9n@70%`r-S2DEy4B5So)6&{KZR?!XlE)Cyv#9P z@!B)wzZifDfZ|WEC@paCN!Vq&gYPV8N*2>Zc*gf}peK!w@5L>^r)NJ%*DsC;s(a)B zpOO#AG!l}kgXH?!Dn+jBN!f8`;p&Q1XEM~qu;jb6dp*#cAzpr=Gu1mO?u%QW^t82?w%>g7_3a=0 zcmMP8AN~ISyM6ML&H=whOf)y!F_JLy&*lOf*ZZD5I1I_Wpk#|S(j_al<5nW1ht+MAX=K@* z86lx$>D~WN)SEq7nwZX!fTA`^vZSyh9N}>2LKnXF ztw;ZWZ+$0+Zya(sOokSWCt1fwmb%1W4d& 
z0E+vo>@nt!j{dUG^2)*fHh6)r7@&!GCeF#G;XgCrRh0DC!Hy7xJQ`gD!qe3fSl4mZ zcRr&&CWaZVBF-mAAI(9sEk8Dn{D^n9Gjittn=fAy#pTiCC!A3*SgxG2uVW6)^1`dm za`VnT@-3D*sK_m(j9Y9%Gv!R3qa4)(ZF?s=xzuSL@09uJsq%{tz9{c~@t}882k59U}q^qaAef@md+S_SAU)y0Biz|(h`)6EDywA3j9EfCofqIzMt1(tj$d9xw zGlMhHg#iwBQAN}!D~%wOa##7tKbD67;SrrDqdVDBNxuo><5a*$Xh!&kfn-FqqmB$s z_+JJh%nmgDbi2`yuktDp-gv!qq;l4X03I)4Rreq&_%g49XBMQR9mb`YfP%yz0aMO% z!YvTm!z^S0LzM6(Z!r9PM$a$+-v9@rHtpWyWowvSfOroAOsFuirHKIg%o=9(xlO5$ zEkZSs0vKTg!(?3~`_$tNHQ*4KHuPM4*5j<2G3n*Ix0@1p;RqVcg`*N!99kv9FDH4{ni#e!@1ZW|`m` z*4@3MaH$2sIv7YS){PW+24){bFK&pU@j;0fgvG|K1U zV0aTexuz#aB$osXMbYy_+T{!b*LzdKus8IAu3hpnGIYW^I>e+MBw1d7o|Rv%EIp)6 z%opX6!;W2^L-z(pyaJ0PQ?_a-S?3p608(XH4WaAfRgtQ!j#*!J{n4Ee%J4oXhZ7A2XbJOJ!ODNaA`}-`ra7YhZ3UB{E{}*K^n88s2y{nHZJW5

zKCi0;w?t#rbkXN*0yjH&*ecWUpuI6%LF+w5k9wp@RIGH@{v4aRMC3zW1^4kN1aRm zd;$!9fRXw+9vFKg27Oy21bhZI9XCcpoCWac+MQ?a zy7iOlsi?wnT|`-SE!>A(jRq{H=GzZo#3)pwD^^8;gG{z8g#bPYd+vYj-bTTxNVy4bec}8R|%L~k!f-&p*o<7ny{nS~n^<^yErZ**7VlsaF zaIPRuooT2@<1CjDTmrwuQpOn}B;Xf`tgCn4%%{lJ<<>2!V%HYg3G0M@h8lVP|2gb-% z#hc`dd~K0qX))NJK3*^5J%ht5p*3~G z`|vIQ!neqf{dTa|*l3IqKlO!rW1q8bvpvcgGrX<*(y4(|W}Ypav5RHzcenVwgMl0_ zQ|IPNL7vED8qO=&1ghHn@4j@qT`kMiSqRuFO|_@B0D%Z zUel^Fj(k(LZrdZ0hNEMvipZ67(`AzFmAg1NpWS*??s9KH95iG<21(;2ZJ?8ro;mVA z_P`squ3YB+_X`WtM73!siXOd5Iis8|7uMZBRi1=j`Kn`~5esVpCxBZsl(MA2W> zMMg1`Qw4K`-@%{uz`m`cK2|H!jEuu)Crhdl^{DiwB)E_^*$SDy(2MjA85}%y{HyaK zzp48eyW`}$++;DME$ZHSI;r)Y6k3-4XsX%?IfJJ*O?hQDZ8PrZTD~dYnJj91>6425 zWeilZo{`$l>ob$@YScUj*A=>hLlh^!S;Mr$D0_y$JG-QHkfe8xfFKwZWExH9>8H_d{xCD_IRn;QkPzF=^>&(y& zL~(q2!xBxVtLtL+Z}^Hr_D5l8O{|w7${3EVKF@8VUWTpOfr${VQC9W z3Aq2x`l=m-t$dntjzI?{4hj9~jM%me8)xa1mJuxYNYD%`IWQSKu-XgX^~QOS=ky-@ zI`fMmIu$M%RqsBbB^nWp2kgzhcke0dq5Co7+iNRjb9KEuUs*4AZ!fdgd!0ht<>BXF zV1&oZ3-&edvMufWwF`u6`^(zHha}{17z`QD-Gm%RCMIa7hjs23<;u6-DofXv$`V=S zAASC`OmyyYM*Qt^_p3Y1;+M-4>VEu-pEDyLhetch{PnlW_19l1SFT)wN1d(imzQ5z zD3eozC}MaDK0CyS4A0NAHh;B@)Bgc8@8-y8Y4i@32k-r|j4do>@cWzVWt;ZBaqNV) z_m=O%6J6!W{iiG)43%E?rgyb=F=}pOTQBFzz0x^4S9S=edT2{{Gl@{jAwMABzfo4S zr^c1HG>kGq_%pPuvvg&W7+q#3Nsde-@{x6HODd`8pf$$0n;GRmW4~OUpDYWD=T!x~ zr7dLyjp8=NdN7T=rc&hpBvGN?_{)3b`G9rYeJ-xiu|FV^;%#&~D7!mHWo&}|_`vN> z2zk~qn#gs+t`;m{`Pz5B$3;uW<^HXY%h=QcN9E3x#^h*O{pAOx;mv#S>KK=}crZ^l zOBo(M*1l{X7R5caAxlb zmyUO!mmLzUPmc~X4L_6jfli>H@}ry>t?^iHL*Uson}^ebl{YEo~vCwBf+}b4<6-*_%ibgt~>^}E?*=e zk2e}+%YY^>R2pX2$N*-6Ad|1UCV=v&FD=gML4>A@-4cB z87=jUfXLhQL?hxwJOfXxqYrFGn^4<`p;d)d#;^z8*(N_ID!*%do$P5d=qS%@BW>9i zc8BI{7@^=#ga`=aH~W>){-j^Wp$u72+@Tl2sc=}{_I)pdDsjTyd2M?3T|-+MJujMY zJ*2it2ec<{YUqnTY=ht2xI-hpr4^4hG`1aF8;NuxGz+R4e6;EQn8|IBM=UWb|m-2r0Hv~m3MKCQ*A`M99>S_!+ zvjGh{crZ9zLoI345!Dt%CUr%ejFl(_>s%T+$5TftGV9DF-)j@;TP7A4!8N7)aonl| zncbQxL%A@RSQ958m_Na(Gri38RUooEWy1l1Nk|EW*$#{(e8G!KhBpdW#s8_}grX3l z%uG(5hV%vVo%JcsgxPumEh;K7GKATo^aj(>6GJ{mFZOT~=;BDSczy zL*B>jTF=+YI0{|`(P{LC?M;_oJzs`+_vFs=GO{>@As-7rY2-#H24ZLyFVDbV%!>P) ztf{}i*u2OIgsJy*t=zi(sLWirRHoP#*VWo7D-RxUdCM3x#H-wjHdFT39+r)#_sTiK zofoEi%Jf8cdHJnZ%B9y|FXyI5%JrqGvds+f@zZtcO_CVZDEHs{%QCgNL>PZ7>w|6f zOYd&%knFWndI;I;04ku)DsO*0sbLIIZ|K*{ntV@B z6CAO|3!i^Z^J2O^jun;OI`)Xg|ZcKv>cFIp<>h6`YY_}4o05u(G=i8-iM)i zsyu=lWkzSI17ofJL$=0;`pXaAdArO{j`Pl0AO+=pCobULJ>BX@Cp_87!+T|~hEkCT zXlvxr5P2Wr@5gLobdA)y(Mjm&sWC(Mz3i}x4DV+QoidT+I zI8?Hg_e3=9SI~Xx7~l={-O4(L{173qoXf6P&T+=W{B-FDm!N<$8Wby5r_!V^4XO@; zPKqaY#<=p#aXWr@BQ7$e3&{x z_Gw>83$W|_>0kyHcqEZn$5)kW>UO?1HSw_C2MSTLDZUQPzNsh>0))`QTzm;Y zX^?lB!BQk|AgF^=FdRr_Adw?O>c~{algV7ywT&Zd)5- z52{A~gy?~@^d4?zl1SNZ;+W{GBH3{{+f~(JF!Gnk0}8>Jc?`PB1p}>D80O@!W+vm} z|4CM$wxMiCr7-=?b*VCfPa*p0hgy^VW%jh>v|j>K4rmx>#i>U5OwdF|JVP4+$m*R* zG-}b7_^!s4anV*9qfc?`$Q*kBNS{)M=TwWs1Wv2*2X5hjzP1;LWNKd9iuhFc64pyO zA=Y8xG4P!+bKv2bEtqYp0?^Q>Po6oBusLm1XTtZ6i~nqgGT~$Dsg!h4?a3l$4=2n^ zyYf-S2Yf0vXHa*3^x5~uJOC-bf=R~mW9q!6TV@zOd zSGkPE>pb51-q(nWACw1Q-6MIGEO#zlaaP>LEXc6-0U^dQGIsk=!j8p*X!^#d%KpY` z85<o^nBWomATHG9r(e|WP@j`oysE@kO& z@0Rs@H_PUeM;Q9Ca{uPT^1*Lzm*chjrRR`qT_4>pM}(G-b~ZS{&NX!|WV!Q6nVg-= z)xh4&);GWm3q?3Qvw%~xT=v$8P!L`6@S5h-Jp3*9DzWgL0o-z#gh`KW06@yYwABE0@n-Ea#RkqW_&aQi3{J zzgPLPOn2;TizeJ#?9FqA2#>i3{g}&Uyhr|!i4<4XQ^DjbUsT7Y6_huCrL&oHCEk>L*wnFB}PU$LJl0b^wih~|G?w@ z;g6rKajdWjge)$~-z3yckK!nj4|0w>2D+HoSlvmI`1CtNrX42_>JVrc_~ikAg-YG! 
zXJBW1>T1`TyKy2s=;zM;NA!nt>gkG?7U+{(2;~=HrA<2odg!FVb6Ui>be{O0gfMu_ z*?2!u3IFD9dz@7J*WeqmXu_vW>jWp}LSaO~Xlw`gQEM2b(*PeHv%3KN9T54d%K{`i za;Cp_D+W%K@#8Kjb+_8cNQ!IY$qSR``R+D8Cn;42Ya34Wmi8v-zpx}cHfXj()DsXqGcRP&& zpsottc4QzKL&S*$i6v~J*ui>JQsAPTLaL$i5DEA6^7yv@{=iQ(n%jP15!^2 znev@b68NVaVL3e-+Nav-;L*4+RqdpbKyT^@raL5Y=@5f%_IpkKb0~b4nYCfS8-oH9 zRU;2vE<{2!Wxa;k7<-URS-S?IGM%DLFf84g-Rrgwg;Oimc3(1@_DuPDY@jph5Q-W! zPuCJXXidZ*&H_LsW0=P(zYH`CY=5*yzj2yqFAN0^q)|O^1f~jeD-bqJos64y_%6d* z_(<;-xaAo>(4M$6O5@DJhcK><@-p5nec6^tPb!9bjH|q&fwpa5Q-(0Gq9N4Q_jCH- ziOMoL+H_^p?9s_uoFw!D2j9zk_UW%Ptyg~hI~ZHG{b0l!?gK~Zyz^iFh|uG4`H)1Z zP4;XL^j2l0bI{q-D)S`pB&>@OacN_R5d76Qt`UwMilN-bAZ)Vlb>$&$f{Puk?(^J0oqaR}wTIFls{x%`Y zQhxdG|Cp_F^Xy*4V0DwheEuDt6Db&4DXmKz<<{MGu3>#t`lgo3i!VPaN88(__glY< z@xvHm1rPUkn3b__7h~zJgYFad-D}ubk6m8@pE0t^qd4V9dMH;ul1HGO<5y8tIS}){ zlbUBVgK;^W2X?>*P!2JAG9iDx#6|r3lO~~u3YE!K8SL00X{pKb!fRB7N1$6AUOv~E z4iO`am5zm>OKEFU_S}W?R?&Cqq-zA2JY-i4`)T*Mv;O#yyU^)Ghf!IM0}WrQJ8~p3 z51Tk$=FNBpMU(Ar-}}A4UH;S0|Fk^1dk3RFU0!ACmSyuMPSljh-s0$?QImy@| zqkfSngA!Sr7s-->@iLYeSjM7E07c538Nn0whO5=(L9c9;jvB<*uU^8it^;a|=+7hv zBe8_Up+}J};dLRK&bWM%`sf{KANdka$L(vL)gFOg{6dQub9A41E{4H1eog3op2))Y z-?~xW`}k(Lae2Nhke{HQcDh&Fp-IYmFpiqqTm@7(p?*JC9O|@WqG0=iWv9X+D4-^5 z=-3*n(6$0X4mVbonRRaT~1X4xmw`fQy&@H7l|1Do9L zNGsQCb)x?D&6HvacN)bSOKzz{6bj)lvVjt zPgmzd-ucY_eF>+UaVI2t+Mb7gjCOEMPeOpk5SmfgRw|h@F7sEghr|A zfAq(Ig_pi60wk<6Z;`_Q`0Pvuy-x&!@#rCe1hEnrK!C*0l0yX5Z+dW)al{erG6kfi95_-K zfVL8y5{^eYQ75x=aV1kR^Jy3#wCUN+@o&*IP zybGQJnZP*~Mh{#nCdcdSK`;J{@tIfg!X81kWA*>QlI?J!oM$TPGee?M6hD>?oh%_e z#2t7v*{F-`_49AOgFS0-d@EVzw^qt9lT0)G}KPs<%`}bgz?(*}0 z^T%bL!)AsD*~+%dxddzjT%77GdD_iBm-)@!l)@XV7t6wo=!AKD? z;F?g9`G_X5tnl_vzE3jO^U@+5Kdwa`XymlJb`-acF+>7b9W}9=bMwJ}i-T;GAFn!= zWnD4~FT#vNRwc&~oG!`ufvC;3D_(H!`S{dA_$A<;=L~#O`xa+yDlumoaU*om~DDK1!-J zDlhOEn)I-K-{03C={q2^)tTh)eD6o)XFvVF%gV}XnVLUeuDtz5+4!6_c?`EtEShMN{aB03 zR_Kj)B!=-U`VIJ88t|RCtdWb5joG(67iwRaA~w$H2DK;M7t70f+yx)EJ#uBf@#c zSp4U1AKTVhQHG2(xkWV27UPdJ^4N%k?MR{UW$LKswGirAof29OuE4#xtG!dV+FpT* zZ2?fC+A_38No1pr^;}?{Is@Yw`$gT(Qr8?@6T_%cPA@j=-vA4C9;* zPAEiRTn~wANRZk%lQuwi?CWl<8L{H1^DE9zvtE=l&rH`5lJza#<+ouN+CXH zb`)jFdw*c2x-Q~-6&wShWtrL;x@3ui<5=Yo8|A${I(y00i84z%l%If(@o69x76EZ* zK_wg>JUHM53@jaNhcVkO3=mrSgaPSf*}uw3FI(I@15aoRs4RcR_^x-#NExyW6=qZ! 
z(-$-cbcWCK^ke%9h2>8L*aE!wpFdG3fl>bOzy@#>8mT%F=qNt=WR~F`?2Lg~TV{UZ znU;IFK63+){SjxYKYst0rNQdjs4yyWdoys;pLZbhI%+Q_G)H*`>bJiV=y=flMwqxne0cQ z4o!jJ-7-O95Z0W@P?NA9`^0fB2PQ9-9rlZR9q>47oo3^|`JHc_6iAjt7UPnAEQ1~E??{|-?%py&l z>svAQ8xP?J^5}_5bo3%|c<-&z!LljMn7N0qqG3Q4d?2y8sUUb$*h42~j+u>7N4^bp zQ9{YW#m2~PZeManbb_41#$X+uvmPWF8-xa%_sgr?WH&xJ!&1jy63x;!xT>^PAIP)( z4-9%MU!pj^mH#4@v?0&ed@#-qSG*eAj&#hRaW_|@e0ON)p^VR<#{lM;TdKfdzcwItp$&cH2Db7$-`D6 z^T>vd30jCY)q~JduaUBpj*ijk_3BJiYm8S9^j7zDvNqqq2=qb=ClDG3`M0Z+1T~x> z#Vy9q0r6US*O>Xw&!HnSE#&r<$cG@9Ikb(WW<;s>d_p z@RisXmo!HUnH) zDOY+xhNN@4(`f-KBwQ=Qu`VWL-YuLD5LRaBX06X~n2iDghDUG^7C6SEG6hPHsv2zG z(O;MG{)F50l9>}=tP~QB_8_yXA(K(AYmy9+0s66r(-v%)maV2P8nIu*%z<*iSRlmku2>3yNHrWbuM(MfGh3kc3Ins3J+?)&>jzZ z;Uo8f~N@ zn*4;4G2yBY{_>_aw=VXRY&kJKOqTTkeAmUbn>*#}Z!DBouX9@(=Z5#9^!u1WPcV}| zAj6jPJIW4w#hZg1BJ*Gc7(2`4+&m}BO_mwpH6Q-sw`FLQ zP&(s%^6*93dA`YkIWGujj+Zx?v7evsDf9D#?4|ExS>d=$a!>oyJGVHYbFb`j0$$I| zxzaUtr5uf1EQf;=@U0k9k%@PCMcTo$@=|CC-+^o2MH%tjD?6hQT>tgEvyxO%!SU|2 z-rt;`fDpMSxWDzZ+<5uba_QyQlhD@XOb%W@)&H_RG{y16*ckGaH*8o%6$1jFOJjGH znDKsqgX4r@Vwh+fGcA)+*VwV(tY@49W@sYgayA{_ilRdnnURi+vC{zK=6H32U3Ti_ zz@LSsi{;^+FUtCh7aR}Ex%jMq91|VsAR9c+sG~y}PR)*%rKRaII*zeohTk~c&X&i6 zP80lKWCS?6i?UfZQ%2!IiyBm-iMbeE2LqduwIOxj&Dc`!k}>ikp*)ai>8#-LQu3@vcgje~5EBcDl7w@C4dcYwf46G6IFq7sGwCEF7t-sD-2Th&<0 z9OUFL%NZ(bK_nC7Jd~jer=i0n0u(g$3cXw}oKU&MY+%sMtYxbVJhJaB z5F{kp0DKzxgv~=lx9g##*`t@`-xxa;H*G_h8gHX_S1n7m}+dRA`z!&{o1D>Rs{fqM#0E6*0pJ2 z6GGca_sAqK?8~PCRaz;KRQIyYfR)mD`?%CJBS@ZE^Lf zv)_ZVPO_6src>EO=$(mekuhy}c64I2%(9)#NtAgA=470|JX_`#$2e>Q2U5Bbz8z#) zr%86X3e}4xHec+N#qnl&>FOfkeiG;!8>Q>;8Oyl`I0qR0L6YSROYU`WxC|jgW}EvP zk6BOUaGOz1t0UWfe&OY^zWkuf5t7|pc~n-O?Ux6v*YEGrFZ-xJ`|PgKXl9Mi*qFFm z&R-ZK$*Grpw|(V^HTczg%jNm=-Q<2W*~Ziun<+<}j>jqH`R6*cJmSTVFmmvp?U5+V z6OKa;eZg_12LqLU@{H4AjKA_3rG){IK~SOmp%8?2mWRJn`dZJ*xv9}|JO%g8cj*v(; zJ2y(?gN*Gy+d%Q-1on);Ph6&BiD!TczLmX0lKjqd!rCjWF((@z zTo z1%G`YFD2p^XUzx`zrZCwm}h0jIiG;CF8z+M)1nu9VGhRZ@WR|!GW1`* zIM2a4v(URH->u{5Z?gqf{bKhy_dq~Np!7ka8uTq2C}ry3I7re$rr^hWGN1L}lq$oTfd>MEBB`I#m%)P8DUe z8)uFP74o_B&7}+fGR#GznY_W!`X_{Zs7fVTv5~6Wt8faCgZDsw`QLlS4Ouuu7?|iI zr170IZbCy%GBz+1cy9yFt=>OS~@WCS;lI3KD&QGC@rK$ID+w0-WHwdn*fARFgkp8 zyQ7RrAouU~ z>5uxJ|89?`gLB5iv9yA!>aVb%@SrK+;T{R3KlVw9>{UkzRs88VMjK)T+>OnFEzh2p z1CCW4XO`862R_Oco(c9Kk23ilW?%Ti+(cPo(S2#Qzj2w<#9%j+^)bm;+hq{NenR3| z$H7in<$#KtpWh{ft|QwhH{buN@zF;&8=K2qw6eTX`Yv7N;*@@lpF|o^{EmK{etNpd z$Yu?>iGdks#?Dr_#^C5oV+Wr3@X!CxGQsS;hgsv$Fl+jYv*noUI}bLN$^U4SVUDGB zN5Qk_^uNJ&%%T3q=y-qQ(SvnvquMXeA3P<=ZimA~1_LGwM0X@QXE#_N#vXpqun}x4 z!)2o}Gt$?1`ZnA0Z(2|>xg&cvK;O= z+>`I&J_B@;`tZWq$}^&8I1rrnhx&tm85=UL!B^Ka6K`nHhuGI0Mil_rmNC+pz;mM- z4$-B{<*N^W%Wi;H8DutZKG5FIE?j3sj^sZ?$uk0u3C$W+>A)~I$7jlJ*C4P0fc-{^ zD_=%Hf~m-o9m?${?W!94P;!?R&oN`JVQ~}dd&#$VI7N}|lsY38N9`>he(fH z?sUN@-E+>eke`Lw{zD&z2Y&b>{OVhEpPES7Qo0zouhm77DS>u|?BxJz80ATRVVo#l zN9ed}9pDDf{)Be415RWD3eZr z{-M9Zsi%6v)R`GNCWU&&8$j;KVQ1OpZx>(nQo6av@T7$x^4-G$R5W=DALkUa&I?S>$)f)9-h;Bd^1SU{{pZ{abcg~-xRy**qhtz(GkX() z6fTqQL|Txi?1=YMW&!QMs4!RN4s-DYLJzCWRpMII!*$(dS@OOao-?>18 zS#{?*wzZLs{W;su-uT*V>1F0QKqmDO#9!yV2XY12bap{fA+tzK7Wz@&BJBx zjXx-xhBOb>*!DF_c$OR02s2M|6Wq*P5}exW%!K~>!}T&T2brM3ou54_ue`xh5p8=` zegDaBdx)Fc8YD=y4_G~Cmd@-K+(9=nui6o-%s6vK06gVwS{}iiqcFpJ(uEIaVOcO6 zRp4LhdEU6j!bF}XPS0r9Rv9L#z5MtO|7rP?Kl;}!!!V=gKoXbUj-g#$l3+DE@ar(E$LjK2 zc$t2jNkvh@gO>9sTl;c^$?J&xiqIa`&j+15&^|^wVgkQn)Gg0HActPki2jwO7&}@L z4?6W&DvdW!kc)7e>VERmaw}ISCRyf2Mm>DU~Mbe&kz- zgDV-HACn$?z|#QN8iN~j8%_&#vaMNu1;<8QX}dkh(%S`gi##Fvw!)H^J1@+R&*Uce zj}TF!%`=B3Sak~F1ucv_IuHu^SsY~c8#U&4_*GjkGVqk7N6=-9@^I)!IsTeQq%-NH zSBF~%R=UJU!wVpdrfM3V}w&^2NGmv7szaq{teT6a6vDG 
zcZS|5<6Qgq>Wx>p&X3UR)GP-xFw0`yt%o&vQR(={@a`v3*h8G}IYfwc0uS&U;iv0Y z&a;){TshBL+$EB)W;xJ8W52~7+O#i4#=a%2 zKQO$Es$sI9v!6H};?Z~1^+1VU4iV|**wo#%7o|D7Sf1XzU!FaBQd&bZW%-kj%fb)- zfcvs1%Hx~&*kU$RHaQ1Kqv*eu6e9LBM>3j4B` zo&D<@k8hD+*C?Zl*XgaZym8H1^0@8s}YBE#due>+bWVr(4_%_CT7+Ep#u*dVeX)jAgv>7W!?fis!V1m#Q zK#VS>U&`}X1uG0WNsRA@YzsU-*y55T&hiFyqjTHjL71>Qf;jxpj2TpBfW3SAkA z8zkz{$sTjz^|^hz)S~L}2wKDWL!2eBFn_Lm`rfZO&bpH=ZBxL2(PVc*qJ!|f3NyOL zi-?8@xfj;ZgH;_$aGNo?Bo)pDez7Fa%KN100{a{no-A>JjxrwWi6at1SQk7y_O^B4eY#%m1y&JH`PH?301;jMi|POx8|R z#a$TG_f|nR-tyihUnUyhLbRwSMVpfksizzmm5_h?n}g`+gR-(lawPmFK0;IV6Lpdc zC9T1op%3LIxZ$08P2DZ_P5Sgl&;?X{$JjzE+lwP7{Loy^(esn;075w@JL*lo3$Hp) zhrn@W(6wRpue6ViJJqkjFSxOu<5JEdcYKOIrB3HJuD$dxb7K$>kI-*zJRWAmgh_~p zgLh_e=u?IQ0wVZyT*m=Y8Ms3cZz5D=YNKpigcxp@xJ^6*&-9I|KV>d4;5(Z;GXN?b zm1T~RMf$|2rMe-Q`|~j9gx(muQ({BL_Dj1FxG)LIi*Y2OhB{`&!ffJ!5EO`{?8xX) zV`P*I9YAFJn3FnqiPIDWMaWSw44UoHxOD<&Iq+b}i;D{y{ako3#WkG^tf5^xw^+ux zaAA-&xi>B^mI10h#ArAu!-cReV`Dr%;IF_ze*OXwpL8JH0>g)l6F`w^L)O*sVRW@+ z5|wAQd>WxS6^yu%=>BDxJX8W`OJ<~h6N6Sid9N|ZKHZLZv5bkK#=}cx@R-2|ZGb@^ zG!qC_2wp2lIs3Lrp{`+EXx1<^NE!amdkayXnvB`cmH8{4&@A)=E=1XXzFdZ{zFLOn&zHj;ZeKg(b|$?02D9J}Jz&C? z`zOuDI>x!5of3xPwl`l8O575#5N*6TiY-cd^9$H zh4A}OS?3~{wUzr^Lc&R-81MMtM0lD5-3_w58xx#%my2C+DyF%hWRRKp2D8e;5zfz^ znQOEr7Rx%x0__&?e0QIy$|1I{IlvdJKI;21dLkZ0r+ zc?!o8<)REt4&iKVJ}Ni3U2b}Ik?{C_qX#D^?i|ssN^TXYHEplb%9dyVdeV+?k!h+@ zjLtxbv4_~>1~9l><;hZ4i#5+KW~aR*T?xmFRhChRxeu}(OM_~J&s-3b$?7^Gb2yxe zfuYeXV-!NwjAX~q?i}nl=Ge16h{Nwhr$)Y1+^S9D z6u_43cG|0WM$Y{aRdiN&0U#yxLltE%@N)w^%Zivwjez`#(E%W646gZwo)z!r0Dxzz zGSg>!B*u-LBMK;!c^pVcWaf6^gGS4&du3W^=+j@vT>HjcfxlklE6L4>r<#Ppr>TsE zuga&g8@%vcV%0=73&*U{uEL&oG|kvuvkiCZTyoq~?3gfA9#Q|qc2XsiI;zXdwy3ir z&t{nbgYHzo2rm31^W!AK2Ob}c_cz!-0N&^coGGW`vxfRzx+0T9QJ_^1COSvr>?Ef| z4UkZ04h&n}2-X00;8w533V}uWJK9Z|6DA2jdF+9>2%g1M21OYM!4Jy@SSvomKUQMU z*(yqPi=>!F1t8^Fc}g7F4!EX}`roGDBl#eoZA<#`ql}!=0a*{SjBMFp=ht7l`mS5M zyuOigm1!g>Os%2!>{-3CWJ%f$<@A>0P4GMeWhAmOAi(UjPshSV5R1Pk4~+(8nPiCs ze({hISZk%89s2{4M5cN@Au`$&ahc^)&nx!22!m%oJI+EB6oyNrB#gy?OuWkaJ&HLq zBF5sDw|WjdAuJUI%&Wmoau@CLk?-`w%(L6@l>MLm4I9)lIxHedK{eS&Voi+Usk2t< z9l=-F9yN>d@R-kSW(yv9Sl6ZLKwLOJgfn-pJ5C|b*ip3f8NsJf2^c03-7*sRR#2^* zm*P7>TF#0T6qr*Q#H%B$wRQiC-0;*jJdHjAR$Xtcid)K7Ij;r#3|yJ~{KS zKRroM!wYZ>%+@v8sUJ0Hvfzo^DN<9qtL*Ey+(pkfXEX{ylmiCR(M4d;@I=~byY#4X z(WqF|?&PM*%<^SzHD#FLwb-BC&53UPoE)}Kw)r5lVYkn0kPO!{Jj`;+5D9TB5AJ7u zZF}`eSw}ApaZKhhS?bOPWq-%5aaA~um^JUZ9gWCLKcQQcG)8_)6muq2KF)A z%Z}H$4Q=nF+#?i!mGin6xU5C${@K6!AnkR`A-h+?#SWa>`3D>WJ1GfKw7q9i4W{|3B9(sA6*=F z;*9cv|G-}i3Dv=t5tOQIp#$SlnM7_;{Nmd>%Bs&*oX#pUYsb)=d^$FWJnTLw*DhZ# z7hZZD7%+1r8M<_mXnl~nX@|`#GiAjr9D+)v1^5hz*^m8keC&syyaLfTw zoyVIfVPur8S8≤O?z2Tn7WX8&U7{hI+idYUnq)8KMtN>T%S@oQU&CW3EUvQMc1TVW71rWl&& z1oaU~f2_rKz{7@%&CgMW#i{5{vZMM4u9e0WPz}BOYZ)Cj^_TfX!iod~UmV96ZzBTs zDFoXU=NobjSmf!X(nZpVS zfC%yc0tc5Yc#!6rL!1s6OJ(0hQjK0dYyJ?AIw&Rk6|tT^l&oP|s3-MESjUMXacs5= z7-1-h#y|C2go)R*hH_Ad$)d*lyTF;<51*`Nrso=u<#lk`!oE{(uDa ztDz$bIQ|%Os?&Fz8`_fSdd^WE5Lnw-+Y*;OWrI-k{_c%_e5J6WlLtdIwF;J8~ zc*3Wud;(Aeo<|f=`0d&VfIXiA#pl|0du^FwEX@!e4eNzb9VbQjaS0qA(v>t@910}fqbd|$~2!F3_u&4YD zmhhXL=61bIPmh+Jo1d2HSKcg#+}8H&>9ewM<88K`?UlVJ_e%$}=`IiwMJ2Q-HjP*Z zd^^UO;L`Hx%}2Rd&A3lueEo zy?r0vKYCK$;DDO3+4E&}g{^fD{;FKMG|vp3{mbJVV@u>98Lhyuu|?7yW7UYetfZlN z%D(g_d+N%a+-BP*IS*P(_Wok0Z$Bh)EsRqvv4NzmXp0GXWUMO zZK9R?d{5YUa&)|fP9XOLqwF4H4rgVyp_lpR%WGzIXH%O*PgC$Dj3lU zmNt4gm%Wpy+tJCMLxwiw8DPw1>|PunZ|}+smNB|kJjU7m({qdEv-f^UWY!b-*j2zO zYdxHG;OyUPK|86x*U;G0plIT+_^lI`G>xn)?ELaxXTz<%lPtqsVCk*Ot29&pX#5Px z(-D88(|8(s(LuIUsat8v3~od2xsxKik1RMSg?wIQA%xCHcIf2qV7w$T9 z_FXkBxIQ>MHJ8XHX%3vyK6up$Q-@f`P_{CqV=ZmuA)}iBzz=x00_Jo 
zM8oXTC`QqCxtf)0Tl3J-P)EI~KCV#++K5eKNd~zxisbq5CPup?;IgpWT;1cIu{kbn z)F}^Sgus-siVtX|_SWgk4vz4JxVJjvkDQ(2jrMKRXPNjqzTlSDp}nXLzS6FAr8n9W zyV-^a!}?ts2zPXTXiN!dXJ7p1U$vpIB1Hu~ zsxTw(nQqV^3LRz<2t6&65OfNJJL~fHiEzr`^c2Dv`iXN8qzecvLAxq<6Qwfq0B~nA z4Q4bdK4NY`pi0%VnVnI&Hk7lYO~46}bR;CC@X{rS(3r>D6#=%b_YW+dJY^=u#DDCNahOp`joD+~1BJ!2-`U|mqCQYc7uo>fkipYk=deT^$Pcg1!aNc(}d)~UqI%i8p9Hni>^^7(5i!Yt$#IC-ww~2Sp zv9M!{m&#CkgPCA|S-Ln>h%A?l7u(^BIoIQtez&Y04TDFF72_LkZe@$w%U4IsRSuh( zUgYXa!nd0noNfkun;7>!PO0lM(USa(juYbbo2xd8vI1U>syIOY!cZ{0ib(phs~|id zT4PW(1}aVEpc-Y$DZkVoLIy_4b{QoZ?u~DJAB^x1Ue@4=WhoUyKwTmtWflZ$cs)fqYzhamolD2`+He)-w?rSg-%_fN|5=Xc73JNHX3 zbnWBN6egwoC<}Z5XF4Sg*IB=grXx}wGLKg8p(yJ*t*TQ+ivz|XJBjwdKPD6&8G*o% z8u@cJ>N|CmBuR!j-<467v4&NeDBGDC+9v<7ITxdx`p;&B+2G3nclLtJU4_;!OkoDAEDu7*xDu9H}2koE%1{sS+< z<=E7>JmR~5s!j?n`JCs_AzS1ur$Ph1@Qw(SpKva8lEk#3b@i*auaRiNb~Mf`!0z=uCOC_`vn+G(w*4D!kxKl>*oDmxJmX1a9K%ypK-_x^D=?<| zUV)a`DhpA?AI2jC2%n$Dk9(MxKmD*=dGmF$ehJ^Q65Pglp*PC#f4ENNnxkfE-+px# zqywYlyUL7#5?Yi#0-L(xl6v)MUfN&=)M7hU*Z4V-#V{(CVnBjpd7X_U=h?6Ap}=a} zD3~y|W0pqsozE(B%B0i**`iAzbZS&4dfzAPL2uKycZ!R{ooA~YV4SLiS3NORH&X!*xb9WHp*&~!k<;rW^DP2YO4nKgSB zY)cm*;Q@?#KlC1E5&rd`e4XW%zJ&V5XXeY!`eyl$|LdQZZnn*NA2u7U%F?;XGB-OI zhxXnIGxvKt<@r8))_dnlW9t!Pl#ej0$K0|;hS>D9=gjMDc9$nByBK$Oonr*JcD2hH z#U|U|W-gUuw-PZXWx+O#e6Um*9uvoKfDF_J*gNr%WpggE*jH%14A zFN`|M&Tw@ET_@k?VwM(%%XAa{GU3$Q6`zxIsNU%#(xNl&HOT6OZe-qTM@LA89GUWl zw0YLU$C9`;fgvB|avXWaFtzfbBGia#*v*B|`E$GJ1ZNj4o?j|=zxcRJEG{rk5+*qZ zyc5~)9iJ=PEPseYltJY)T8cb^xEM9YXWqK{NrjxSH;H~Tv^&NOKe7WJG&cU^h*^a@ z3PXj?M8nTeCbLo6bv7q_R;kIfT6KPWr}GTvxcsP_Uk~hxEYQ6A(`J?3xL}lZjFEGVJNQy= ztCQly1m5GBTDeY!{Oc^pdWfDzm$xnlxb6?|aahd{@FE|}`Z-znL_9RoE(f_(qdqZL zq)M#1m)kQj$on`UI{!vr)jR44CzTlk@O$x*xaSRgjV(cjg)=q>y1M^fK6Wnhf;^R2 zCd#XKoy27L0*_3o`;^B-Nx-%JRsAU*_-;SIuas4si$8vKs(~?Qf=~vE^EfTj0;Sct z){Cuy){G&`jP$GhqC@KF%vgtVv{4;m`b<0ajfQkwKYabQclQ~M>vx8|?9MD~Vanim z7Dj}ShS3u8lUwlcPL7>KZ=2&#ux4Za~L z>ooC5RB|~f$aAzw&Z%pI5F}kz#6k@kaV8DYWgf<%b5QSQRygbg)EjmO29>4bk)e~E zVjaw$Fv{F5!g(HKBe}BnhY(4F5G=6rj`uRLZ6w3g79m)c1xVWC{!SdAgEGOP3`-aX zu4XQ)>(96^+vF9-iLwvzfGq+}*RGX|antDPS`-k_U#t)C#{_Lot=4q7;V#7-)_6K1Z-~JgDud7*xjo&?hkH zOjyv}4bsMTDa5-*)>HSNuatfg-O%`DW0`ZjIWfv@Qd`@EuOHs#4D^k%@U0(k4C@f# zS(PV|c#Q?{Jpa`%OUDrVj!_nD4d)a%98U1A!ABF9j_tiN!uFP4UhF=2QZByw5@C7_ z(H1iW%J!0x-Lcs!-~Pk5xP9%oy#E)!E^C}Xb$|7+JRo~EhK*U8*Me?vXY+&4KP#Vo z&K0Pg!}N!Np^>#V%LTg|W&L=fJUf^mB+t>Wckh-XPNj3(qqzy>>o98ta~RZT_nwjn z=!tdQA2xHPv@yIYG=Jb^a9E>G8f~3vqcMI~UPY)*qU(z>7<`wjtZGMb$gOAOvaxhxG?2)gvV?-($C27*JDc~|dlTN~ z#&eF)eay^_)8$r{vp4_ogPY~y&5yI*--mJ4xprqpX2f7gV=4W|x$1W5+(P;4?mNXW8)C#g@nWX#t^)zL8-bcPPK*!TDhezH(bG!Ue_rrp0JPIMA)lK=h*7%(g}MF zz)v4K$~(xTpu@;DIZK`Aj5fMR-0;2Li$L@R9IBqx9~uZ@$P8CmqupxYom?oB%CW{# z1ETy3jK)X3icG7cHI^D}`?p`533ZHh;DEfBYow1+#V%&)p0?NIx0{4cL(tW+uEtkf zQrZ9bISun_*uaJG*th-AdBz}o8a?$K1tK$i1)f^p(Ef?hCTHEsCVwqYBRtx%nF?EL zk8x=DDQj!An{BQTKnGELy083*F=R>Atq|!%9nRRC#q)ba0%q2&sjqX{M!&l>W4?!% z!0d2kv36mcHTCT+?w@D*M_Oo4Y|hAmy3(Hv6Xyr80kYC9c*p>iGw}=<@K-*oTP>Uj ztMVs|w#_e%ML$X>@kCkqKy4JdQqAK22#ZnSs_(@&HiY+ao@mb!{xkZ}`Q6v9yt{{| zrpKiEObi`O!uqLi*?6q9qc>!doSs8G7mbC3 z1Q-=c)}O+(DkOzi0jeIO*Q0`0!tQwBo|SE=NZ3JEolR-PdB*<@0XbMPuHwq}1{srn zg)E*Hdvjwv0-)uAOXb1rHNrtj4e8k#y->UoRICQM5EVv-4N!I-e4|59odOw1k50yr zT=80I%czH!KP+;W_Ek<6n_}H`i#_Z+c%6+7va-QhRl?HUdcZ|HnKGhKvQCs||Nr{2M3%MT0t!}t#W0k0{2VzJE{t1H>Z+{!je_*>=toSWQ6mL|(9 z-+8NSvPO?(FGJ_ZG6#;?fpxZ)O_aNL*2@3^PD+5OPie-Rnb2z29&HRmptCu^;8 zpG^7JZp?B)3rpc7WpTzU(I9ZM&tWvnUFBJGraXGQRi4~_K*Aka^cXRhpjcw&aw2A^ z>|dW{rqAF@d*WK@7#xT9sv-oV=nK7gaA$^l_{G2Sh4RJ+-uX?#uc}oc%nYE^*6x-Y zZ+@pNUAUeIhvk(yeO~S3b$}A7MPH|=L<%LNOF`40D 
znL0^ucSGHc;}V~=a7HN%YIyJhV%+mzbr8=nO3FgF4g46=ocCc zu9TH_l)Gwz#fpFU-G9diOg;9m*OKDH9(v_Z5 zJ{K`*YygeU_)i!GO=i)xRF?>Y$-gLIbQXz%M?0p*<%iU1uI(7Hsw8>Bo8NJkZH%IP;LizS^svutNrzG!Y=`ut+H_JUJ*(5?14j*Az=1#V7Z#fehFAG$ z*g>2`*#ViS?3k#~Mv^(&<85dcX5jy>eThWb$*l@hrk+M#m_P>q z=$Zl$lYSaU7=sSHKI#+0q%1QI!{|X3;d0t>&__6ix)Z7#Th*|N52`#3o9 zhJhb!(H=b+#-ZPYzGW)c)I&r1wSDnPIgfu-pmYsREi{S{j~$N}&f)BIu?1`*J=|T# zAs#$%!{ZseXrj~gdIYGBiK33yaq-?+gUOKgo6ALbOLf|Xp>Yh|GN+(Z6dcqw$|JJG z_lSIO)C%{HKVYUu#x_ZbCUOA@<(wtKE8wY{S>@3Dg|haWUzCoi$wVj)8M6xbWRG*b z8D|fMrrWTiwb>(n`{%#Kz|WT-{No>%2cLdc4pkH!w@8%RDGQgTnL!wSrriejsbgHdtifYXb#&#>I_3N2UneVc_%jtfj zU1eryx@>Gd13wSA_hyr$Sm(+F(H3u}^UitmF&fCH_c%FkdcTTq=EuBx(rF5C_k|4dUU~>P%!q z88DfQW+cNUOl7zarbv8;9)KShgw&Hy0geP*Jj`YSC%CuQ90QPnjXb#06<6QXBq zM(|7u^KhfujXJ3=yV4Mk&@tfJqkO$yIl95wzLQDDZggm$=+PcYc;4V>gz|ui!6E$M zmD-1GcCRpF_i9vkPVVfh6Qq@cMxNjuVbeKO|NBC63rh?xgpjtGiSs#mM(~}HOl@9r z$bv5$1aIgnoi{K-eQ3*5ovCUAz&Ska1lL4$`%*8dQne1kLGP(!lry$Knc=&>e*emJG6tTc zeFzH-nDcjGa1Y*)&~C4Rcj%P=z^UG{p%Js_yBISxC0xi|+lKuqXG_8$0}7``RWm5H zLO}lN)Hsj`5Fag{s7UZeS#WxcLSFvz(=vAHA~TU)l6AJqix%TR_Ds6)CNS5395SR4 zI7UUAe0K&S(`6eQZR_QA!eoJgY+xOl6jOUpPB^p9#n=qPb{a;;5Ie)xInYtlfJ?pP zAE;1x<}G|w$FRfDWgBO$`|;ig&F+_*pWZAx?ZGlPN{=Xr#c>j%$l&f6 zrcFZR{lL(dK_EyIMHw$nreXSAx0`<6*P`?aL;(*RVYb)&O!j+Ihr}V!*(u0 z67AjPGQY?nNpF3V^=uPLO#}tQ8Xa(3*XG+~|8ACBx1W~bt|#UCJhRvpF1^7JHRo<* zy|vvQVAj4?{^(!*kLCTJepRmh?hW!8X3NU`hvmorKgJ^S42IAwrA4*WSF zXJF;wa(VdvPYHRtKI)7MMt}#5^fb6<+AQE+4E4z9IDCM7z-#7+$TuC}PMPXya-Kiq zGjm>KX>#2LcZ}o6USP(893!(`I3bQnM_WZXzj&cM`QQU=6a7kiem$h8&g$Va<_&J| zy~MrSZ@zkkIvm%F<05`9p2L`vq!*_tGN^1JV?+ey?<|Ee(%iNf0jv`N#XyGd!hnO} z@MMz9B3&?~Kc~vwM>V?0bK1@jU5+Y&hP=%9UmlzVdrz4H5^iOgl^a3rE zsSbilU%oowZiK3ToK;hk-}J>hb$MOSRZFN7Xux)bQFo3H>TCYTXD3lr#_Dbe8{i%N zX>=V~3n#(Jitc6{8}cUv+7oY=Z0}K94z%H+s;mq zJAv?^AZ?C0T3t999$s|7meQ8TfF}wnh}bc}InvOgE8T%5eh!Qp@fll*+>Tt z{)&8OV$S-lZq{Y5o+U@NdbOkdcn)(S0D9g5OoND5hM})p>_9kztN{Bx*K#&s0>eE# zk2|1;Na&>nV|l0Vo>|0ns4`jKk^Iv;>^pLl!}B?B?F=1O`cMY%Y6D- z>*mC^)w_?%x4!>I+5FWf<>5ykl?z{g2Y#m=hi&`7ZAN~R+1bG5%S62ga$}mOl7VVw z+=Mr2uQ`B%V-NY(V@`x)W@@t5Axi6u556i(3sYrima_&%2nTj>Ga5j&S}SE|mD2*d z*<%i`x`w*?bhq66a=qMmb)Hk{*2{&fTo%Ee^4niMF2DZdv(j^&SptsTamRcaKO)1t z<9T_^y6_15vPa3NH(6_Zggg?~@z)Nom19H6;9{-gHN=s)J#=)Y=d3C-DR5Ckcq&o8 zFxEI-jJ@Jfx^cQ)KT7amxeU?%>u-LSI`A^a*2K_=vD$&FE9EVd$~MRi`1m(pmN%{t z&EO!VHcrpbm3PX(#1e8yUHV^n@}xZY%~$2W`XBx|cuGZxoQ^vRDr)> z-}vSh=e6Td50BJqFJG`8KFAE*3y{;*p+vkNO+r~4z|rW^W;g|PBVEAEWT83T|0%}larIO$Q9cM zBS6SrGNL_okFv)^U_oFEs`u6lzs>-y1rFO$t^u2Jj!o)_1RrR)D5*kWtclFVN>*Ku zpwo&MsGu|9L5b^*r04^2rQ^=nyE)QW9>uyKgDiu>UCKyH-C-SQvd?U~!48%l@^_2` z8&NRd$($3J)+pQ|vuiS2!FRkea4x?2W#;ca39WX$@aUM?`T=q-GM$W!ahl{f+Z^MV zT}HBwAFw~21Y8`2{Ghw!Q*~7YgnED!yg4SX>*b}QlfQAw!G|bPy+(7Pj2hUa8l7fb z@N}g;bn%lqQQC#4pa(in-IWO(-;xgjhUA|la%;!Hwf~F|$w_PtZ!`wl-8d~G2t0GY z9p~JD8ye5zGzC2Cmahs!56X0qS*#h7n;hx^5s)R`0=#QZz(NJokQQpP8x2?65%!NV|uNWbiJTRP7Ve;Q)<6 zwj{w|xfKh>Y>{QW@!_9=HRcK!1&*t2x4OK8yO-Swz3q->KaacH-OM06F&Kwr5Q*zU z)_k7d`HBRpqjL4l8;vP$85$lQY_AgX>}zclCUj+QV5-qQIm?or;h9Ebe`|-J2D95f zTn(gMW6n#kf85SmD6k_yK_g_5wV-LIY+P*yn8HeO@)sUzS8sa<(#$^pxY1ntdVBSl ztYzCG)ZkiIq9oGe#Guh3XZvBD+XMw=7n)WF5ZZ(rWIPZHh?Ac9!hp^;A(l&d!aGi*KF@Iwq1l31%>^iC23d z*clWCz@zY>pL?&H#~Y;^2k_1B{%w?ESHg}}7*)O+NaO$+Qi$2L(YV0%uK(;OFSmQy zr{3lW+}`O|OZVs;d~MeDF<04sQU2}!^xu~^{{BBG^Gg>RYb#INheW*2Exd&YAfeF0 zEMqR&0Q0J*R%2&t4dsALVHb|~JIZLcO4UdV-<=w(cT^(4p`05Am2de7yGSeK1G;3qqZkXrj+nO z@D-NGMce~(Z0`dT#JywQdV5$kR&|MnmbMCcZ^c)>50D5(&iRZlK*f9 z0c*z-&4&9g(-#hHv?k!dEclG~fsqF6O#FM}^wwYnUh9o3G;c_zx!%k%) zky911xL-luMJ&(^OSLYtkJxGsB#{sW9IlpRR9yg*F?IVQb1+VE!7xS=0`WwFpr$P% 
zsdWnE+-eSVQEM|vif7}+5kb_75Inrr4P(ryR^B1*jlhKHRm?hjO_O4{3NboeE($g# z?!BG#1#Tfw)iv}OND#%`**K7-p0OYtHE5$1T4R-oXFJym$0yQ4UFMJgnU1j7BRON> z%*PlLKgb@n%gpBxZIU*b?bh-lXMW#pA0psO&EgrgLtNdquf@_n+@a>y)1uJ8?%Z08 zkx@FHwFlw=L9)H_BdpA+_Oa>dgayaXf`h`8cvM$6qye*JEw$|f!l!r>$aK|34OH5P z#BcYuH`?8n7O69-amgeTSviP7YYA(rmqsW3rQNpO1|>)?O2+G6i0WxcSU8>gf_erV z74+Z}4*?6>5h@=do(p6Gg>S2yWi7_Y${yxfZ@o&d_lW@%D>GD46uG7o3w4POl>?yh zBJF^wY)9DdQpVO7=g;)S^($`!pi6B#1@~=|ZOvXZ4`C|A5zIDk_fEz}FQ@qrx_D?&(FbA-a9)zQK38RsV%Y#IHJubqTf1(#0JtQ9f^hdrxxz{+!Zq&Hz zfr2mETf$^B^*F7Z9Eiggs`x59Il(Str}W6LP_EXb{@3!(MxBm$-}ra56n-*0;^jbm zzE3B&(TQ=X0h*$;2t01LYsm!Z+=atSD~Wst;8Ii5YGXOWUr>D#UvzyHNkl5)EJ*q< zlY!xa+FOJOHt`0jV2GsbHqyJGWds#C(@EY6l+0LE3qZ>%a)&%4q0xz=jH2C-5IKJ0 zMEvRh_HW{gzw&Ew;)y5YyFYv_7ARA!k{tEyGiT%C*^`vZ8cJg&c=8k)M7{xy_zCPw z7(vWBXO>u|wbnP5_J;2k&MVG}PMOv!xQuu%tIm|_+ zm!+#{Fg6N+!mwQFBNW!pG@w`3&88z}NMv&@{D;Dw_v@{L>ay1!H-kgdG^x%)m zQWtxPicV|j&Ne!6E?t~6c`$@a4Bc<8RVs=jZi%<#wc<(Q4)?)Rg|)(-1-ed@9LB^ug+Y^a zld_jJDzwD~u|;}tTwuh<+~rm=7ZgVp$KnZFfi%Y41dc=m8+Ymw|-Pode0qZhc?D`wHWK=?YRu=LLKm?j-EqQB+nJFpL%EJJ7fSHtH#Xf?$ zVZsH9PYvzbc?=)`sxa*HGgEPQWg~7ZZUQT!f*j#!!xP?+FTlowsE6FwU~Ic24a1lY z=y0VlYtq`Zj%Q?t(yc9eU_05Z#nl*_n*u2gW0N@H4zbVI=C5(649~mvF4wOjq=q=y z1ej2kQ5~O%LtM;SPcrpGTwCsyiEk<+Wx^^7Lh-;OEt4_f8r?@*ZlJ{IMW~TTVwP~{ zKxh2)4(lNExQzE^aGV-tN?{Ut9TR-{z?zmhiU&oUosD2yKp5r?uz8-YH<4D@Du?{q zi!u}dj(E*7mek?ed_4WsC9+pGk~#FzZubCY%t+uY3L1<74-Ys6ei))*NO8`Q6|{BA z&RSGd^kOmh0(aeSZt4(CVYIY3b1+)ewDkdt9<%^#K$E}q1M2zftYiDZt*C-av&4>< zAC}_YJi-DG002M$Nkl`+$^tXo>L2on7#DqQkH zUCCuSr{&}+1MBF05;}qQdgz{qhe#K>0{;D@_ST?F_^e`{a6L;Orf^Z>~g9ekSw7xPV6} zV9gzH0EM?qQJc8IK}5!N>+I>?&Kk-x;vH|I$EY4)mHBsncRTLixt};Xf8%yM`P|3i zW6wTCvfegIwP$LRy-p3hOxC$DAd}M6;kAI5MIo#U43)2GkrmilG}>a1V4Ql4v<&>t z`3c{A4a}2y3vX@%8c8E*wy!#yS@v zva@IMd-sk*tMhY}1g%d)8M=*99^hec&ca$ABX3WjCcGJ6UXS;n2RYKH?y-O5GBE{} z*$?2DSvBVBz>n?8pHoFq5-*&GcyfpWEi5%iDps)Zt2z2h{dhXG4dO%Wa)leSm z)3~uu!o^w6Qt#S|_wFtwyp^5qKf^PYcp=Q4xgYR?f6^$(H|#f;WQ%b5n(*?W{L59m z;xiSIgtPEU!p(0@UiBfF;K{feKsGj#0TjTPRk7XmCNmP<@vCTnq0ConG}m_+Z$Z%6k&U)tAUJTHyp1F_X9xYqL59(_6-}yin)iec){=htEjKU zB}{A%gv7#c@Y~Vyh{l3MGfoBLwgk^o7dd(vyj8mJ~QF5wjl-+n#@EMxn(+iWau8H3)wwj*HFk+)!Ow2u+cnb zpH5_k0TjL!1V+`mV``!Msg+b=SbOUogz{jVfA&&5_T>3^_+T~O{m~n&x|H3?N+j)E zI@d$yJ55%WYH{uETXBAb1Ty9g{zunGzXaF|xV!(2GV2Haod|N{%rYaB_Zpo0-QH)t zj}mLJ6=VL**AV0@oJBs8MtiA!6^2Xav#$4|wY&nHJ`;!3_6iFbpowY1O~zitl_X+5 z;zu}gcmv{_lf7j1WdPd5DrbPi=K->oF_y9DQKS$Sc7`)H--S@Ck{P=}Eou{mHOnTMGZpSw zYNeQX_PMx8ruX>9d-3Y6y;#L({N-Q%aLi)C{`T*`nE0}~xJXi2;fKbGe2#Ml4#|1r*g>F47pXU?}sa;uP_CcjF-Ob$)D@7Yfi zc%(I3u@(n}C{ScY#%iS-J1OdNSD|xV*lN-dYqxIy5HDPvL@ACK$%Mz9JC=9vnFJi;AP@4b5^iPWzcN$E5BonDp0~khMlc;xu&q5{9Z;Y zUh|sobydv7UKPNUjod4~ zxn~686axZ--$X7ZLpFy}OQl*p|B zb1_N~He0LN{kAQoZW z%dtFg#5SZsbA={|wnht3v#xtONys8@`|PiNnoQj(62fR|f%#HvUM40u5L45~5I)2u zLE66Z0oLpi<8gq+!}a*-yPGjEAycNg%KO)%Mx&BWO00H4V3i$W^~CZ)_Owh$4qSK; zyZ7(LdhQWOgf=aL8)`9@-%GY~bDykdvXuLWt8xC+bgb_ljYSlqQ8vkB7V~l>bYWoZ zQ$$SboWC+b6RTw0S}o#6x-H!b1sMM&lX!G@Zjo>Xj}FK=c>S+lPUIh?W8B^M@8SJG zKug?QvwiJ)+`&2%F!c<3<6Bjf%R_pjH}?>pBn}?qxZEKW)EWX{?l=c| z3{_&oGw(UH;nH|D7HK|FWxOpcN{Q=#d5th-j#e&Id-vJhMSnC@?Am~j~ zm=`dX>Wdu`!tR$?=sz+`Sr;^uLP4ellLQ}M`hWjDmeX`R@!^X)SQyIrqXc3BFI~At z{sD}I^_i29jZ#Rm`3yi4TTZ>6wlGlL}xNA#36} zEf~Gey|WfEclH!YAsrQgv-~b?4E`!OGuo3MpeR1_Tx!9pR24#*KW+y9gPDx4a8tO# zy?~u_FBDQXMLK}t%Jk(g&RHJE74W{opz->g1HwW6U}&Q7Kop-Rzk~mJX9r5`qX%;> zd+1OK6n3nG>WaKC;myYiV4i4~$acaS3y^urcWtkoBSM28@Mbi{Vs4JhNXdVhvzDLu zB4f7_A$hzHX5Nqf@kQdK#*5r9!)oBQ&(8!$KGj=LA!O-+Q6NEHb4XmfXbTM93M=<2 zVN1lEYY#f`f{KrPX0op#v<`6?rO*(fI*9Yg#4(j$greXz^h&%E93rfW;Z{S2=_c>j 
zGiSI}4HVkdbEA>d&$GJ8YtguonUwLu&W<0_Phtx>CyZRIatoYLpu%4TzKW1u6kn%s zWee`Z&R^MSPM<2!WiO7F4R3? zTri8pqrKz?fDnqa?6dB4AxLu_$|TfSvaT!8pMnw7y@?oGmCW3JTsDmwqz^`BjG4c> z5TN0-ka%=5Z0489CsB7`rR!o(0I^VAe-edpX8#jtBD=1<4&qKeo2)HI;ue6N3;fTo zJpjVbnj!|@E67@P&RynGP#n#U&Bqm-C#F6B6w&hm!@^c1L-J+mB;5Mo-jJf1J;Dh3$2u80Y8YGjbkVp%8IO*;oc=XX!Tob0E>hQfbb1P%VmlTW7o$#vf@2(l ztf|EjI%tBh4;Su7H*h;~Vj?Dq?SAyq6eq#0#W#OSZ8WE94fdh=lGT}3D2ys=WtnT^ zfFnZpK|C0AA0dfuGOl-MsN!*#9k?#tD>ZAarllL#Xv9$FL@Sb4faS}t+@wYsp%dI=7**gZK>z zkiG6E1k#6#ehhf--dp70js`XSG%mr)-dbq}bpo-wxSbu}u734fvB*{(IW-zT|Kd=5 z>iHuX8(+KqkP3s#?9&W$!TT~sDc3g5WEM8!iN}spp0yUMgaaPjThEjDh>6ETvf?)k zw}3a}Q*@{z{9-M5quE70JIVtd7V{(u#YVVjy%7s!K4x$0V%Di46$rPa7hq;dpq3uU znB-%k6-EwarfV@e$C>H~_y%tOL;7ON4AQDVcnV)bZ7FcEoWb0&81-FwdbiL$UI3g|fUljeNG^&;Fm+gY)LFq_kX6+VNEBC@AxNR}bF|&cwhd zYsJg-B6EB9$~G_8X?uJco(7CiI1g9ud&Uw11X`s)pi1t^;0*l$j-QTCewr4|Fl<}L zvQp@q3xTJ~xGsB@zk)|uq9m+y5v)SS41K!*VD3&ReB_s|5kT_23>*cLI6Pd1>*Q$# zeS`VD^&ZD77`~VAt^WWkVGfVvy`DQg0$QRetazlfKnx@wl?oYjllAjHo*(bbIdYHs z$OnQ$&z0{LggF}otJ%&jLjx;mP{E*Zatu4VIY*z#_lYm;8!6+({v{l|*8LGjoujjc zZ^JVMT9NTjh!cc0UhKCODg_LuGI9|vWgPi(AtbH8W*lId$$1Q~VW+_6Q-q!KQNdNP z8!loHc(b`TD!G82XB!q|?cjb3MbM6eH4F{3$~1XQ>tBhULe$!Db6PI0DV5s%zv17-os7uDHo^*QbpW(Dg#ov;yQsY zC1=n0+;hHUpT#BJ^!Z?vF7eJa$q!i_@N_&M2j2TfewpxREMUfgQ@l-g9)vEj0VwJP z5XJx;gjR$)UpqyaFe9-;qfCf6szR16>LV^_IQC4@TAN5E)32{ym7%!G~GHdHyPRM8Z_JJ#0B2d zT#KWl#j&1dOmkq# zyYFxy1xiGv+!wRdO0QF=IZCX!PS5VcJ;DogB4a@)Di94>{^aqqJrEaM@PJrj9~MvU z0Z7<_D9UKLHj?S2m1#eTO?K;AXFlDyER9bc!)wz^X-NZ%wrt3tEb&uo48e01oOPJJ zf zwc?j}u6Y1yF+qD!0)T+=ssJYAluY;@3>A0y5F+q1eibzCBf`52FWSW94Cu|5e%C4l zZssWfKfQF00W=dA62lR0HsnZmDj|a2>e1}y{vZGNe~wT8*S{2}XsEMCBbaVH3qY9b znlZ%r;HRH@EE$W|jtP-6_YA4w^>glMpZS~*ELr22=&yJXNE!u?H_2o9^MCn+IPui+ zICo(>m9H*{ODdPh(+{T5M4Z#3ynrv6_Mnt8^<_~)!JpxcZ zgGtMmplLk;Dg$~$Quuf?ZC>T?NK~Hdz$_dPr-A@KlE?Xh zLOeY{?tp@bxu#cFe#!s-r18QVR-M8y-yd>aDYanEg<1vQ0J}4v^5>RIGJi|8?Lbh6})};Gh-qFcFeeCZx zW7bsEIQ&^}mi6o1D9^UN-DfU6Bo;L*=%jCv9rpo3@4jP=JBO{^X)gTKr{__$)> z#h-##IWz9fT7Tsj^Wdy?_LD1-JE& zlZG>=fqh7kkxBq#FsO5?)JOm@S*1<}wQ;Eg3XDY;S&`Aqq?w#T(DJdulIJ~|oq%T~ zT({kv)`SK_%yyS#7!%ysH|~Q$s+#bBUJ7tcPO{O3o9tA@buP^7nGuWidT;pPJLbZM z84Jz1sbw1YJS+2hi$T zSQ{e0f?Ne5N@LI%NQ}F=3Sx`Q<{Bf%GqFg)e^8SV7M8k1v4eZogg6s=Tsy^J2n`a- z@B&y9>*ncF@pPXgG8h0wMa)^I3PZyEH;olNd-`-dKg}s!yq9HYm1)VAAjk>{N!vyYPo_^yqre~}*u!4__NV|sYz^?k`fG_{n7!Fs zxF2iaLZh4W$MK3})+$1yLNg0b$ubEK2%}KpHpLp=S!ZvLeLDNXyLB0JV;^-fCO+_4 zW?F-j-6RV34vfTh1>wwpJt&`5i3nod#{yStmY|HAKZN*9Lfl61JWeh3De!s`ExQ{J z#Uk^&{r0{1@TJ+P{OptQ(qCPUj-zC9(;aRL7~i?M9bM!P7;78>GTSC}g#C7eu%lUz_B zWCU7YXDc3s3-^sJ#uC8cLr%7BOj7yaH4%|?3#QBuaLIA7P@KDqrZis20w-g)5wp* zkA|nFXg)I+M~}~;jMrmvc{4uu3qJ?G_kxpj{S#kNXjLdPRt@MhBD8SQ6ItE>C-E|% zXe%o+$47z#_O?*9{Iw9utOb0_Mpo>J^Q5+v>lu&zlF7@!Gjzza^e7ae5cy}po05BT zgA6|1%E{#Tj_0aG7l~x-wTch;tXGD?`5%N)kwa-~vsZ0;AQXZYo{?@|jRyIf23HQo zdpQ~Raewk}-WM@?_#*hxMGuMWe1_tpF5%TCRc6*Z>r){!#$9-O*k6VfhC)GH^4b(3 zehxR4Zd2$u9?)|%%{8%Adi1JW(dTb4|~|-oiW^DUjscx zsBrXIAtXnVr(QB!c`LVSBw$!IW1C+?yLkW*`ys4~y#ZS!NnoI?NhJyVaR0^o4BY|( zRj~AOh(D~E%bg~l`W(FtLX15FYUy$p=w8KM_$oIcAHu!zMhTqjv*b;MoUy3pBmJmF zEjGfBMPr1wIFXUQUJS1MhN#>D@s)8!oB}~40*M)LfWvTj7SKb&5eW*VI(h!Ag_$CV z(Toz;*bWzwKxcZ|9t`O`#YCP{XrlrsH5)1tONW$M>1Ia|*?zXws?q^gXt3FEZc2}s zD?{+!)#$Pjvt)P@pMBID)J3{zv@~#3ty>sUCLx31yBcS`YDL?r$~y&q6Tt>lO1qVv zcD8oJC0Dld9V za4MFiZlDgz%Khuh?K+y|6LZ6{zCvfQiQ!UpcmQxyL)}*`VF}^vfO#Z7vVkyB9AM0k zYI_JtZw<{hN(X!R4O&~JUM%`DfnRdZN&}55bD&`eI7&fX)TuSDA|we-l-g@M?c*n> z$P}%V_Qxyj5$b!d?2W{=0~l);JI^{YzY#ppg6+nKWV0e`Ja(2I;ua-H1FE1>KwvU2 z_JFMwzaSyDMwG&b1m1OHWlcW)4BmoW;`}!-IO*s%JP@;|PQ*Juz8dwFwNe9%@36nO 
zy^8QUbz~sUM$<_x4#NG#KKywP=_x@jBFTL?+@5YUD6YVa9;wXyE2v*`-*MA(J!R_B7 zTX$-9iY==XuRR?<{{CC+%T|ojJx=bxmYKBH?xa-CRVtyGz%+er~Fj}wfY%Gu?0Lt+Y z_9YHTLw@|%C~m3ZTOuf*k-9+XBW3C+}6?ZMw0j(a!Wi|_oSe-eM=4}T}E7nSSG z3RV~($bnCZQbObn-X&Zt%pj+iRWF>T0vkr6_!qYh0-*zDt^!_$E6WpFB)Dm#ww7WDsI@F%}&_&8JX#g@cL5IClrNPnhc3=Mhd3&XHm7J3eg!y;B z`Azakrhx(QQ<+9Es&JSrh{A!lm8=OA_s#VM+Rt)=91MN-JoJznyetd2C7*Hug!`}@ zjn~R>3!F>muU=WFqUW2fP#lbV0)SAPT^I_a$ zzW^JCI8sUgEkTgJ(cWgE&^6XCEjmPUcs={Y+{v$C4a_}-G+tstd7f$s8S;=l`pxW- z(gWisb6wmKHgIE_36c^9V{2g?2|bktc_y42Vqq_ta>lR0Gd&b~dc-rnD0L&esZpn2 zLkZqYb1#rlS;z^RB{~eYRTg)4=>7z~shG$om28i;kf$lUg(Q9vXf@#v{S^i!a8PP6 zSEuEV^U=Gig)gPMoeaqjeyt>J#*+BK1O8zTn5)~E3lo>*k$%PoBPATye}Mq{3!11S z(&0^qHhALW1+JD4=)`y^>uFgywntAoFk%&<*>3lmwa6D8W!zakjrKlgf@h43YmE!= z9L5tF6xw_Qf^a3-iWX71 zr`hfr;3!QH*<`X^V!-<(5A1L_MmNYxLmTczVZnXsnzr}$+mDfPzf4SeCuZ6mgU`hL z4zcA%4;rJ2q3SnCs4~W$Dy%r^jeR!(hV1$YEV$%CkC=z6l^DGTh`FtaG_2bUZ{J(1 zTt*UAIUl=kY>N(X15^ai3}|*P&VK0Q?FCLbyLaOTrD#L(^szw>_*jh}ucCqT{P}re zr*u6WpeFonO3sD{ZpBlV&VV9>5LmA*Rh6`00`MsA=@D1qY4Sr#!=uKQln_dgZhP&> zNHpk?H(R(+j(({ z?Cd_g28dO>CA6I-A+Kvrj}XkC1xw$$;b@Em1o+D{RdXa^iK_q)*y3z_lb`s9uQh#2 zy|GGfc^6>PS;@F=_n9b9S>FuZa#Qatc*NiDe&-$f$q&Tok3QCJZEwcOi>J7+9)Iz# z{&jr(zxbIrbNU2sP*n{?m}Uw0E0Emd13Un>;e}Zyz82>pV1-lO(t#JuMo|p~r}POt zt)4J!(&7ZR47EZ3a3T~s<^wEV_{Hbq__^sAsP-VZdx0U1UZ9N%v8%5Bqo9-CF`fb* zE9&gyZMbj`PlI6@RX}Ujc`9C}ecJD?w-@K{<#DYWYYWun|1^H#cmEk4M*G6U@WnT- zwed?5Fr#{SipqhESC3FW2^hn>a*Z zUt{FpnQmT<8Mre8E9|+SUx)BK3;8pba80q_RuS+G3I*?>l}vzSyB%K!uaT2^LaaU& z1)f!uDmq6761^BdOICT5idA|&d5*l+JyIDFzx;9T`QA0c=k$02BLz6uO5W}{syA&Z zjcsQr#OLIkxJG!Wm?NUAeiAvAw%P_df+M zN|wq6tcwxF8rBZa+gsahr1v+&eHajS?l zS-;3w&dzv*WitHF2gb(uV|;+O8t0z&)UC_SFSx#kWTgWR3h_So!NfB+GTAfV8&g*U z)A$#01wQBpV8~QPvZsb)0+qs*&((aizUhmSf3v+RueCOuhjA=VT1q5l;{=delrbe%S5P#);h?NZh6_H*9b_)HJ&CR+m2A-Zo<$$W~pUrgETNPJDov!p3VHp z3}@MfBTV+Ha|pl=bKr71g)Ng5j%C1ps|>nHd)X)f>w{U^*IBFS;^|pRX2#=d-}@;W z%AA3*3RSvu5TL2LF*mn3V>Z0f^`j!Qk1_%HQsb;q(lc>_gFJZP1LCud@qr}ND#)sZ zsq4#=(p0_*=Fh|ts!B`>i+HZc102{iyAicfI?98cQN{JPPkerl<|Zu^nZ6MOCopLe zJAXj)mA)gh2-b;s{oT7!K0b>3i{95YdNVU;OF@_|uyy8|6e1#^U_L5xG6~2H#f*K| z0^zSj%okq7UzQI}>W;$KCZKV(i5py}tHAvSxZRYYxp$pI6EXF~QzSuE;tp+AKX!rx zanMGub&S$tfWG~VQ9_tY==N_Oo$M#ki!;87nLYR9@wj~TPQ1;jXET#lEvQ+;#+k<^ z+i(PW@5dA$ zf8qCDjAx!b9w%n{NU*`1LVfEtF>|C9;SE~n!c71h}pkDA?Zj; z@<$;e{=oo%7a#Zwu(CM`*ZNj{;EBu)QJWRZHt@6iYckY4QI)5~g z3SNZSr!v63vUP>8OkX;I-u+K&h22ZR3_T?LG&uM)DT!xk$Z$uFsjyDrfUroP~ihw%V;wPOlFuOOv{XeCzksRJDNxh4=_=fzbY0c1*Xz~ zFs|*B?_drM_~-rk3mmd!Sh`LZpwEM!plm0v%rGZ{l+oN96$N`yI8Ua@U&jKkgf&Ku zbX6mFs8%`1GD%MgJV4pgtB4WFz7%xgc&<4!&rzuSDdd~7sMs6vGhO41ubNiuk5|DD z-FtrB)#P{l&D^2n|KOO@WiHM!-|*nXNA^#m&1Zo(j>#feJ3JeOhGVP&9sp3A)oS%f zxPWK$}3-Y4}{p;V|0 zCL*XT2uy*2ZppJ*4rAkKNb8(?7*~%`0#pvnxVC3hIP`EDhUa5@3}!Ye0r9)>KIpO7 zV{?h*B1}O>)}XR5NCHlc7?19p8hf!%H>;8aCF~$-wSL`yYjoq2QC6{JJ>E}mn6=q8 z;514S)p$P^D{GRmcviXKG2Sn6BvD8heD7j1i&Vq_iG`zU=Vng4Lc8SU_CDonv>DvQ zm@ai|?E+J(6|8kKiB9d*lg|{iigO#V2rglU?aXWUUf#@pBVc;Qh>a7E-9>?@!eF^F zO2mpMuskcomw){7^;qEmjuS`D$3}St%g?N9=IwewQjPRPKK50zwVE;qmNbu=y3CbLoD1MdbWN66B@8Woc3E?hhw_ZN3!=OL%69id-45u1IU@1*;9 z?hwGO6PNdy9vD)WVUYkhb$lieWl00wm3pks-)HZhjLF&2nELn=B;`>0a_fG~oSBY& z8n@Kwi7<6~E_(5Lj1R)d`o`i~eL5Dm9>fIWAMm_XdfX$Wu6xb?=W~U}2Z9`UO1p^x z%t;24%mTQ&e@CXq;<^9jAJAul{TJTiIs0#(LN=7LY>8`)^?8EXJFi`iIZk(*I69fn zTv)gtU;FR=dHk(Eq=C%f2*MP&OFrzMmXmwa3+^@#Hsk7>Z(wN*=D5aS*>vXfXx~|n zY{efdPW;silY$=$y#b>h>>5T9LpcNYjC)teU&-ne{66IhjcxcZW4Aoh?D+~bs=yOh zwWiiMP@}uH6W3n*VRVrZzi|C+@&+ETW{d$^IgilFwIze+w?=5JN&Z6CW202E14&oH zH(53F&aCx>sXPSu31tMNRyn2`*Qt~cu)MvF!9BkbWGonIaFP|}WYkRc 
z*66Ag5Nb1Ct2j~nZIYa)C9Y62#FL3*3N4U2kx0uBg&^Y}UQnj9n_8BDVGU(_pp)5K zxUv);YD=sM#Hd?K4$k{pCZ=X??95z%R9~40DG_XxC;zY+%uVGWkMSc(qstm?1cyy@ z;N<4aB*Gep%LEdLyvj8r#Pd3+9pC4m3-P{%Wzs@JMV}>+h(sI4Jiuxs7l^p2-R2+! zkqpZ?Pm~f-fOSS9S(*DLB1w=UK%tF^C~i8>2Ek=~o)$MTG(ZSv5|m73woo&kBTt44I$D3Ti=^ncg!Blc$^3GO^8}KFDX4 zgu7$aID!?wa+f;MA-*SbUAMDZvq)GVu%4Cz++zPUGcIKYk3{?2I?D{!543F?;S@tlzm#k{Na9^f15r z{(4NDJ{hg*T-50J)99UG0hEC1kpX_5J79>)+Tafab~0vWo{2WV_CK>{q5DV}fJfqk zIAYxI;_(s67N1S#!^8j`!1YI>=DndPm2^Mtq2gczp*VNpBuQpxAft_V?WLFF!+-n5 zIB^Nzy&ALM^e&5kk%!8$~ucQUCN=%i;Re3IUz-NZj302aX$#3iUU z*Pv@#gF?@Nu5f8k4~52obCt1bP=ZO&4c-UTksQF`peY;0$f`4O$-b+P^>4bfRV)}wp6M{IEn~VoucSi5^J(wP zy{?JzpwC_7_sAJkW;BU&&RmntzA(50h&-QvX%Ldx|A;ysH|UF7CO?M{EdtF z9`8gdb%m0ha!8PyaEbTZXXFphaO~vKNwdyV8unso9^3>6Dm}y~fIukbYkjBPp|lu^+Fw5x^+gz&yaE)5 z5lkZMHPf{ylQjv2$wKhL&A5lm+i|)R*dVBgj0Ig}B7!V@=W~srkQl%_g)xE-;?N^d z$KtKAVG<&_e*ht!%;}9VyI4y%PleAOrqklMS%MUD5^kmrC>E*YI&&Fmq*PlkDKT{n#zOz9pQUAWz3 z(hdUwNbF3xICV@ClSuh5H8vCu-26uYOs@F$BFF{n$*gNrf$oRZxsJ0sOZ!OTU1vm3Yz7kc0_<01?&F_CJhDfF><63V* z;3@=poCBk{M^%V_0`)50&GIobN~+N4y=kA3YkK7d{p}Bb?eqV;Ke7-No+s&>Z#WWZf_JO>)@F zSlpsR-A)aU5X_|l9d?%KQ4ioG_q5JbFhKx6Nn(^a3MiEgui`Qh#IQ<1QBs!l2VJ5p z?s8taM~xk1e;_v^n-v=+)2Lz}L zyM`zj_r!yF}KY zCC@mhGI7hnDkG0t*71`z5yv?VI;SNC7sf_EDJ zORH4j#Mt7$R(aM)gQK>-O+x@am;?%K6k$E^K|_aqOY9c$rCY%5Z}zfaG2*#G&=cY0 z0}4l3o%@g-#$eWI-2rci31}$fxB!;Pfh6!e=oUU%u^?Ze@aTGJ&NY}&nH|#*lqtk> zY@h2`(iuXBasACuWhzJv!&u53WrUv#170w3KIa*FSF@i0&tQA_Ddweh*g+ms<}Y!N zu6zON8ju_b3zaxM8s=iScIS~7F@CzIQGiuwgt66B?(;t98642sc@afz`QakR7mp`T zQTf(GU{04!rp)889k+tsHFkho+2s;`-wQ4`4&lM%F&!X?EfY>fiPp_}gvf^mM%>Gq z6y)-ReUQI|yIp086mbxiMph^q3m8TFcmKVmaz9b2sHOhnDY!IQGttG;y!>OybxCkCCK;k)=YI!Ch;7UkHb=z zS)vM2Fc{Ye3CFF{j_wZT$VYspjqBVB;cndAQMBqF<>Zk-=7xyFI9NHl!~X3wcVsOJ zNN0kB70iY_xCYQM7(ZgXv*Zhj)4FaGAo!kia;$W6diavfF#8Y-p8e!7%haN9f^k9* z6JRcE0!YV!F1fK8$6+&a)9UH)<#SXDHn{;uCbR(fP%D5%rP|NQJ}?zxejSuDh|#$G zb85h<-A&Vsas>|r?qo9_IY+l$G4V(379}9-(p7NSB&{NGwFj5x2n{xNcJ@oeU)uY? 
zxIt2xF&J*7v`?MX&GJYT+6XnY z5KaKTzPnqxy|AQfmxHlz=Z^Qs^xPz|`ATW#$V|KS^1XQV`gW<>&*}0I9&=?-4q0K5 zU7ZK<m&UJJv2*EbN7$Ib&AKMgsv34I+SrAa~zjUuj~v5{vWe?b}zENm`21;=)?H zF*!zz64pQls$-R5K#&+WBWe?M7#6MrS&1mN<_89R4a|kOYh`ABo1PCX5(Dz0A6^)C z6twrS54->-PI>ONXJhxFsA%n3QgrJluSR99U7GogzZtD#Q|&s8p_`JZ{$sPT{N6js zK(w4?q@tZ;{41eav zW^dkpx3o8OHa`00ue5KRdLLnrS~EcdlnWPF8=e@9lkS-dvCMs=G$DFOJ@+o>JTm;U`9 z{jc%GKlt12Vfq&2iHEG0a|7e8%U9yn|Kl6+^si1)(?de&9?5X+{nEmlTkVO9t$6=O zKP;`U{AD{<-+`&t;wP^!lx{E5MR*}fAN%65c1Q0B3e6=Nyd2^Cp1AP%EOdnXQ-;Pe zy2$uHB#Etq7M)tHcmhiImX_M93wPuJF*!FAXU-lkjU1V7_l}(@m3ohWHxV1}z84Sg zE}~3PE{VcW0Vk?`@F)mfg zK&w)16CeP~|Ih=UV+p6@NcN+*LR@fedi1EKflskn2q^nz;0oe1j3x$@9`YP}U~Xo* zA%qkNVj?>Y-Lrct+a@RZmAlVu7@HyU+DzJ1XhJaHy&p1n=6$#aUO+>h4NbyUfh_Vr zXo?dvg{53X1=$>iG%PTRzSL+_PY%IAaq=?9t-)riUd#M5jfQe*Wr9F6y_i(;7hoG88hEpAK(cTD{d=1#25Q@b47p9Y$sb2>Z}S?eGiD`D>73!mj%L8rDNd~+|sz0AnCfwh~zsx zE5@go%Et0WEKze^$6_2A#*#*Hc{; zfC27+e3ddc@;OQsogxeOIoQb>^vVD*=4sns+pTsIPv1N2Zg7`K&8T@6AYdu6Td9Kf~U;bQ_hKHkx z_1N95B1hi=1d=iTrSE+s8c%;R#*Wcb0EJ+KWV-ps6R?Ty zEgODl!X&Sa?MQ3B1fHqL!~P(0aR=HIe*lQ@RZn;UEtsr^%M~U%b@Ehu=KMKAF&wOd zr>MdC>y5o_y4j%^ZK(v4?*Tq~(2(v#TdHnukHvrd)33Epe&{1{?$U+yQWyhPQC(kI zjr(u@B);$ue=eT?!WT&TyFnj;HSXDNpZvwA;?C6vapRRAwR`VJ{M!HULY#f!*|_l8 zzV<)=$A1z}{KCohIvwrXMp+=m%m;(fAd*vNy$fiH^=7Vm_&}{=cv5IDiIQYW7 zjrQE*XXD&6pQHACnx-*Sc$E9dLPi|6iWAfnXuUN5IQQZg>B6@NGsntfb8(2Fj3Cf~ zPxG*od-Uu=J7`)B+VnQC0;NA;DE{cL0faJXMQqk17o8tvs9xiq05^!&eenm;o&3hQ zwMx1gR_3fQW?Ey49FtFZuCCei#%Yzwkdk#H$RrE)om>!}4)LUzlI_bk5XAC5r5CT| zBTPgkmVddCJLHi*S9u^~wL+(ZSaf2S0wonbF66l#&_w31KzJ;qDT;&u=^B7O>v~jIeT9{9YLwGe~`V`kG zO(P@VEj$GrV8l&zpi{N>T z3H`$KfQ7PGkT|&XEu7pQ@JMeHm#6oG53Lxoo1-9fsmulD$c2c*jPq%|70vLqd=uEI zU8+rZ*ouhn*4R8n_M-B|lXxIdXlkFUsQ^AE?dN#_1fPK3sUdZsy_#8QLL4lMxk_}% zi&T3H1;Upqt0lCqqz+D`vaO?u7`eXGrrjvV;$kyW1%ayuF%#^%dJ+-c9f6Sh!9B$I zy@E}5gN>DRQNrTAsa@$-%|$YIgf=5){HlYlSCm|$AYkDnqOf7iOBi}GP?>c(RxjDh zgSewjtk|KpeU~is9T-)WCL^UDgb`~u%hu4r>Mo5*aE(q44Dfsy1{;?EMwk@>9%kWY zx=+BnGZBnvpxy;w;Th>iXGaowwrNz-M{|VB?~v)vwd;=(V5ro@#argcPrMO?2G~Z% zhCLDBbZ*9sGM5j=u}z|$`2fSzRPWKFyBEuD`s6e>04LU5hAF7cpL&k9u+w!L>7e;` zHCO2<#z}Cs-dHP5$0i->WI!@PaYam1Q55N=PXD^^>Ir_UVB!OJFhBFd^yv8B*C+^H zX9+8GiL0EL8gZUB;f>|nD{=H=b9=P!Q}Jy{*C?@HKmoxcWVBv|pzz{9an>8t z74Gfa>vvXUT$r7SnW;(evq3o=$(AD{C=(`?G6Dpz;_F!j@}P|ELLZrf0Su3vemdTI z?TvW*Prn!c@T-4-2gJ^XCPIQo%S-XSfAL3g=9w4b#ozcl-W=7X8=NG!7FQpF{V?BJ z3u}GqbW~?caqT+EkwdqlfAU0f0!si#sUEn$soNbzl9+axoP<_+Bo^O)9oiqvW6V-{O5&Jo(u##4|KfGV#t(96Abniv(u{5I|9B!n{Es>{FhhMe6rr1^fi>j5MjTj&RQVl(h;J z1%|e^V@d&f2OkMQ{xTN#s7z#%pTeIJGX~84${mG?xS=O76%anl#9?qt9_RNg1-xU; z&Rs#pI+Is=KQMMotwfC{g}D3(_ejbDdLJ_eyo@a|jA0%LcjY=f@59@Ze=#+^Ajx}D zL2|Cljb<;N&*55dka7*7ANCD-g{vD{fY!PV!5}M~hxr^rB3rzAKOT@g=@R}TjCLq* z6DH=@xLz*gDrJfok?x_94)Wx4Q>Pd$~UF|+mYS?8llB@is$?&SHS z=BC-t{J?vJYc9&iF*(Mo0ZbR+44Mi!kexdLXt6--H3yL=d#fHIUI5es%Mt*V9md2W z7HbTPgDhdsR~pC|NgCjjasgs~{Xu<8FPg zE|r0SQQ!?qfQU6p;AA{SY!_u=V>`w+3JH$ryG-&0dvj*7o%@L^68nRII||V7d>^Y7tUYlqsz3iLvHYVS#@zEWQRzP#54(rs z_Et5{j?TxG(nxGT=w?|<4X$2~kjz#qkIpoZg;Y}fm`{<4ZPJ;z;$GlZ%rn1R@z}}P zxHvZl{i3k3ev`<$3@0&H$4v&3TpF52V6h8aM=X-WyngURMfU!c_v6?9r(fe-bE-E; zCbDW}b!8=9{Z^p+et4 z5+_&U@{fNU8_kjUxBuUbxNv$fKK}XV+h7E(JDtp#V||5LdFy3ED_Y9vqk6QGdbHLQvQryc-3~ z{Zp!?0# zau&+3=AAG^kn_BX@&Flw8yqDNUTPi-_+_H5=WtYr;VW48NCM4yL%;F`>A$WrApCfi z32}Oo3A-|Xl`uL#px_k;wh;Q%D1ooY!w%{ia}2>;g^lu2PnK)f>X$pZ24$TxCUbp+ z2k?`(sYLiakL3zri<#_o*_|)t8Z!kyQ@PMMW&efM2e?xhKifucYKTwjyK8m9a z1N+XSbosIGGH;=ELK;|iHb!A;B+ih4h4QHjxgVlG{@CMjlhA>?k>~*~=uS=M%BN+# zB6e;&w44$KVbWu2-qI2K+fCb2@2m8;;?!ejV*SB#TvxwDVQOK)w*=ji^o&+(pbzI7+Q^elscizAnwh#lb8TE7~lqnBWB 
zrFfgO)K8Re#d+$lU)eK`t*3;|!g37GM#%GzRYv&{r-hd*v%Cnv@V~?$+Y@O>+GPH- z<707dZWaLy1L0%CXjY3zU%LFt(31+fOs2y9@{eQ?D79L1R;Mi7=ajQIzY(AK-QNOk zC_GdIY_6}y8(;s!c>dGB5|@7ND-29boxQLNWC!)!5Gz8RAUQ(D1A6HCfp9y-T|XL6 ze&SQny1Wtx<99h@pf_gwOL5^0O3d1QltmPm@#C?L<$vq`dfZ|<1Iw(En&MMWik_U8 zpzMLCi02@X&|qHDu9i{3WYcm&DN#6^vE4;jq>QJx0?azitY^)D0QahxfHx31EM?LDTDBd^LbPz z&&~cTDk8L3Zs7K}Ci@WUvtY=MD)UERiQw?utl(iF(rvjG9t}V0$jTmYg0VZmXOnm} zBv>(f4ZUG_IH?YVyNR1^ya&3Yt;F%b92hv?^Bgo;*`=;>uI7cn{(x^Pl$e7*@H6S8 zfD_N<8Sv z$`W%ryg!@!Ajs%o3EeXm-Wfd+b&R`gf6NrDwt#vFf$bphxr_N)-YiY2JoV~L(-H=M zEJd!9W*~??DRke{{9GSGdl!Y=L&i)_hDGyQDqvmA(H&RbDHAfs1sSM(%j|vk)_F6A zyg&B_coZ;1Ho~7$NDF2O@B?Kh@#7JXWNvJh7Zkt3%SlSV;J14tEvrcSKH<&f!aKK= zXIHw3g9@a=y`dn2kkFc^(9S33I3AIOz4`iU&=J-oeVpx2Eko@D0I9tuLKKTpxT-}L zv38gQ<7)kI3CaK>Bbh7D69GjEzJc{3zyTP7&~8Zz!G@ZWc^$^abpg2`0f73H795Zh zU|k3%vu-;O@?t@G%+sp!*<+JvxJ0GUDt6IySLu0f2;jUWUR=^7}@C5;7)r6G-e#d6> zn!QjF;CrJuS_#ICavX+~Af!KWw!E_%59a6N^kXNI=`YcRZHVN&&H)aA0f(eAty*_p zW@au%03R*1JQM-}WQ}&U8+<~6vy<8k@$xYacv)XYA)6kFx97>22hWZ_dpcJ3H=_I5 zPjG4%o#_TT;_UOCGzJ=t`?R=y`6mzKOD~Ke=t{(UFJ{$5miwScPJZawXx-wJwfEnR z>iHL9y?ZF`91O?F(qas8Oe&6Q=7`dYo6WYHKreq~nnFu~uMZ^!53j@zR>!Zxs1j3j z#JhOrSSIdu!w5|*O;;`qNk$+u1{A=Nzu-8moX-TePzNR`5;E9TEb7G{EXDrcIpE0G zE}F6|##{g9AIFD3|66h4XTQW;=+%z(XJQtB%?$EwnC94=_NAZ&%Mod024KISZa<=(}uRIfz z$7i`4iUJSyY)PY9s4#|HlS(WFhe_uEgtf^ZD&X-Bl{(NnLVF*a+efIYD`{*P-fMzy zTKOrsH8QwYcxnmSyhg5+RvENuS(RSJgth=ry!J0b)_E{1!K-l~pDN1Mco_T!kpWCn z95AlJ%l%?(B|Xn65#v@EXran?3fj;xVpImz>%h0*td%oR3-@XL>bbEhCC>I$naiym%_<4GYteXGvwLA zOT~kUBu$f2nQ%xYgjZgy2SXtv9N_5rY(p7pn$FLC=;C#Y8gu1_ma8&>%lTV8L)>?r z6*e#72w!%elrJi8D!JmDI7U;>ELYpzL3v^RJ+#j5BjGgH#r{{U5WtvopY+zbu59U? z;}ixSOg&-3On@266W?u7y4sLNz=ibS^2{6)g-E4L4}*$|`xYyjVv$05fc_U1RfVB06k0Bh9X&K2@qI+v>|*M zzglCS#|#3c1;~2p2roSrbuz2-5F4(s&%1G)3Jjqput6}Im*N1REQk6^yL)*uUAOhf zDiEx@(^#~}Hm(6btZ0+Qm<6X(O%c-`V*Pbo)VLQ>uKLJ2?j_O8BM!H*2;aN*4z5WK z>);fyz7ZOJOrIhwaS%)NKi^p0ic{yGh{4Bu;+1dz6>-OJ#qpCDu#np^!MW+j&OaGv z&p$C0b^fBDs~#-Dua&G-j@3w?wZ7O=v%R(9gjrD@J%?`Q9KiJ7xD6r({-B$GegzRvNN=jp%&5o9g{aQh&3 z#fSVa(uufk2{7lK+IlK{e3tPzK9g6tZ|N#T6Hac!eF`KAfKIzHg2Ll^bYourci)J) zPrVT1$7#xf@LBok>oIldd<@}sXJgre!umWUgi|L&s*8?tT?i5hzXF`?DAq-3p-iU< zZdj_)fMB*!YML+$a~-PmZGY$H3g>^Hitd?{oWpGfHtXmbh?#R|;~_0&-=ci))VZNp zzH>L`PH^hj9Nv?(uv^d#tz6Nunsn#ce*G12ltW-h3cJyHAx`dJi@C1NSZt0(F9^90 zgDrvUTHuf(KQK99YOL7gQ8WA993f~-+}sU2X`QD-;4u`vE*Q8jcCV57iR3b%O!#t4 ztTC;PnFWPF@me-=1002ofU{(cFD=BCzxbng`WJsY&VTZk*#SHfeA_{&sJ42K z1I}&(uTlEjugA&MPdd$E6pQ5uxz6vhq+YdI$CxOPs zj>A(pQI1Y-?=KvQu_xF8Jac8kK;&$cIYg*~(2W_u*;SIKcc3sF=CQKE1#g2)P-IE? zz?8QrS$yY>xN~cXaF53n+YAH?EUi14HoYiZ95gD9+t5ehk;jCJm?<1(=imb%hXnvP z*_79SzrZntH1{Vn=9-MFBj^mVsccwZEdskg1^5|P{AGMCwB$L7lkhQx7)FY8k@%f- zCm-}$IHgJlbI|jc$-sQ(d=+%YdXaXF$Gmt%o_~Q-tl}&WXHfVYllOXXN*dKTD-N46 zS7?~!_T{|tA&F-$*!hYt&R?rq-l#zGJwi;aglh^VN96;a2Tyj5D#_A$df&W1g?b^@ zG`dtKnGyU^x4RZT?WI*_Sf3tXn^AG5GFLclU?8xB4(qfvYZnd5GQWY(tzZ&~vz(dx z>|B^nBIJ6Afw+tJ&=#;7WwtlgeP5~SQYSoz+h>n3PR<$JXv(VqqARzzfhEN%<-_n9 zn|5rtWX4HrJ1y%%h-N7xBPX2+H^y`xt`3vc9MY@9A7_gbWJ1J$sY zxf-LBQ|nWxiYz9Yxu^;nQUPL@4*(l7E@C4jcq!_$`HX~wm00b?Zoo}BG-Oi~Yg?%` zW?n6{f1U=h!7N1fL^kK)$TqQIef-+zx6X5GxNyszSRcfdTUZx8oYr*6`RZEedEyuX zzKI2=YhFc#S-TsFe2`HJ1ddUG(c-G?Vg4!;2z6asy)ddl8tss7!E@P8;Ip#$koDF% zEsDe_K)geYy+&;;r8S(kMUxR~rXM?VF5dphn@*6WmDWb?H%D?X*WEc0R~KCmSm* z=8l!SObgGGC&_vjMcFk3E`Qip(bFG=Q=&3^uHzZyC^hAxED?DMGb^sN>5cA9MS6iq zuyC~Q(NCU&SodO#+=6}xZup4{bO58q8$uGUvc?M1(ZhDG1`yP_rxWvNU9Gi3-MlX zlq>%=C~WT;q%|zeyu|!P4DpEVW7a6%2Kj^$j71AU_um=3D-)xmxW~=2VBK01;CzM? 
z0Dx{m39yL86-+fp1MhXS9wPXJfJ)Nt2Bkhc>+aP%(OP;dp8AFVHcmbNNth!{jyW}J z>#CZCdn7gMM34x%ubtu3u;386x>pSp*Egr5_$HfEVM z)X;Hr^B&I}jA^`yL&?yw&|&U+29r-A+z-gDFxFb*Oq2TJyL9xs%Hb@}v38Xpq3ry` zBJo#?U(5i8z)|?8V3V17sr2n1ag}-KR?Z|hK1)o5XL^^)nPqRTK#!n|TShGpEZJ_A z@ftN!s(GG*GS`=XVcy_wVQerK*OgInbhE4NY2_7FB#y6l(5EtY{ul2Rw7!=II$?LY zOhT&`p6=|VPhbcz`C58og(;K^!|MZ&n&@M~7hfivJVeD6mf(YCyk~|w_~HDVYbrk~ zP3(OEA%&1I6==c^BMF|0HI?|9{Xg7e@I4cteV|ifKFS2_JQVT1=tKH-cJ0KrJq^Iw zR5bXkI0pbe3oUdaM{M=1)xWbtk~j1>G-h6rr>G4>PUL)$jM<06D1WIDhFK>BC!b59 zt+Is^NPN?19H=(2{KptVGq+yQnk=g$1y2iBX0u5a>39FnGm zg>ut9(<8_LW!XC+M#ZjWP0Gv34`JBX$3BS;%qrs=Mc2_q=g=yYjr;9~0|`*@ur zA4hR*4|9gM=ahGM!Bn?sY(cFopkmXC=iATR8boVrQwX>LA`u8wW{s)!v7s%9-?e0$ zNapBS!r8JCEl!Qt4ejL~MN^{-r9fo@kQ_EZZWx~4yh3X;yA*2|H&FT}UM_03pcT*YO1NEbL@_JbXQ(WRG!6xf*7LaA zE|A@yDGp_Og05%pe)p}o%==@UZ#{@Gw=ci((iu9?-Co+n^*#_I)G7~7lK6+v-dJ48 zJTWgeD-HZCxI>d?~l$nADG89*mw8OpDf2Mm3#*winaL;+M1$Z7+C%6W@ zH#8#tmdWI90h66gm@D3qZrpS_3++eWMNUBpMQ?MR+y#yUvp(N)nJVpKkFzJ={q`HN z`N~@{{L8;eKlaV&-dK;m(W%&fa68)LkKqAhB-{%_R3e=FnUBQMpZ{M`IW-q?yJKOGcrp%gMJ1pxSES3f|=f-vu8Y)oFpZCRwB$vBTsjKyPT&ww*9d4KW-u4U%@ z18urW{*#YFTVOmF@kUl4&G9my$NH_+-r?lHPQv2rcYYL~{`KF9qmMlg&L|eB&t0C6 z9S&Zq^v}^*kAD7yDc0}(FcxmTiy@7LjuHew86Jh}_!&HX=VJcdmw`P8q7tr&Jro@j zz*G`PaF$a!F^Oyb*kqM*IFg$XHqz)Q&W-6)kLOq(>QX3#g=^A9rJI^L$y((>nJ2?K z`=(Bj4Uea8VIew+&vSqu)jBX_=(tbv z)7~OMnI>YEIRaStD+)8AVtXufmbXbnCijJ$m|i@~W8{K$8pcGfxUSxwp?q$GwS#Ub zNaa|-+o_y${`R(zZyEaKv2+~7%f{aOpj3JU0QdRF<%ud@FLtC7Eu4KP@6)TnYeR}E z9#(`Y{1Y|`a|d#4K1@%V%uhoozY(7b;qHE>;>i5v@A5;(N#&h6NF)3cl{?G3rQyN8 zb`Q*~gN6iN?iL^7(%^)ncDveJ+B?{7dt7G?Eyl!zQQC-hz}yz@&hx+~rR}g@9^`*m z;CKK&XlfwwL3Ct3WPKe7n;K~D6i>IG0Mm58B~7y%jp-pmjioaV1z<4E8g6b+q2dxR zE<7yJah)I%Tt{Rd1R_C5fIVi#mW7G zX%nvpK3Hr3mY|7kVsWWtGZ^E!mU*!ud|u;(HZ(>UZ-;JYGAN- z64~<1_x~sA&0-|W(!;zHdq!sDzAu%TwJ%-OGt{P#`FZLu!Vch11>BJ>5OEW-VE{@0pRYNBH~xTSaAc zcE)n=x##@n-?xLKj^{A$phlwF5N_Vi5iTb^8f-!wP(i6zhf}_`UTo7af%D8~CkOLO zYq-s40Z?tQeB<5R%@_NFlbwg@^uPf>5$|~^Em6j2kL5;cja|tM(~bvqO61nA&*z$($Aj*~wRF~{(Si9fZ;+mcu}-nT z1}G^vHfSK1P{=AHxf+Rz0%4NO``&ORcY2Bn0Q1vY@D5|&2!BGp_65l08uiP;G4B5% zGX0J=u^=zqF_0LSIolmxh&-)d(Q)31ZWD#=YO#{!o9#5=#1_Fiq~I#buu&C=vx zuDhKcX{DpQcz{fv@w?&0APDfbNeHniybXYqh-1Gwc<0(`?&``F@CwhAt~YTvlh6Sd zNRrPyIGB5|UV?eRMBEx(b{+v4;Su&j#-SU!MY82OsCD}rzm{9L`#$@?X@!>u?DcD? 
[GIT binary patch data (base85-encoded) omitted — swix_ios_app/swix_ios_app/space_shuttle.png, 416850 bytes]
zYmV0Iy)*K~|5w>#tO-Dmj6$Av!VAKHuZc_qEBtL#aeM1m_9S`J8XDo5laUud5e4Jx zGX}EAR1KC;m6x@@lX4|LvLFYSlsejqM=UDw2#dz`&%-X_<^b1ybd>@{hvv2DSaLXd ziwtHiTUsd5Zair+ECFBmw&CyJ{<#30Q*`H)Sy zQ?e}@7-I;HN9%_rS@~SGv`#Y2(>|4#G~1>d6zF?jt#zO98fP&_qrOw{S)1(~NG4DI zm8VWC4E5%i?D-IERjtEY;2;?pY?2nq(uvpv7o*(@v_8lA=NEvT!b%L9WBFnMqQck8^qrl5pF*Djs$V7+<{F&$kR|?HwSQ-Hq zVge}UnZ^08n)f_}J!>p^0DIawlI(VDy^^e)n(biV`SGYBJ+I=a!$M-Z7;!?1DfD5w ztV45+P-pE1aIFjK+QkH;{(;(F;gnQD>aFlb;pZxN1st8W8XYKP66Lz5z44qd$4W#E zBI_&f$LD_Zk!syDi4i9e=pE!9F}Uspuw}$#3BHwpupUTX^3}at*BlXgXZiUT-^`GT6eT361vEmit&u`r{I_wAB`chS)lJ98|XC&Qr-9=PxmtK7dW|G%f;&;^=-BcV%!ZyBjeSVox}}u}+l{b%2Ow zTTY9_hFRgCvSlxX_S*)R!au`EM@Q>E&z3d{*S1|Memxf^tEyjke}mbs3vDNOo>)?N zA+JC#1tPO2{m$X?Ga>fBuD##?Yws^V{%2n<-}}|yS^oJSf4%(pU%ud-m+fCkSliDu zc8ZSP5)4(WTh#IHh|NvMn%bYcE5`mgZ~eD__vZ4~{^}>o*Pq=lbKdet>+Ua>2cP|H z*}nV($!#Am-+c8GiFG$w!@pR*_~qxzn=6XG*b}~C+&otm@5Gk1(}!~lU)4)(H;-+0M~?+fd9Y$sS4XtleW;4y#k%>jBiwgckivaN4L zPby8DM#V$}2QZ#|`GpF5Uo&2uY-kxF@2iBN@kr5HN_*${j?%Puyedo@Y$@D~k9P*h zC^FA_+HdOHc=X?px;CNplrTI@7<_n;ap`_bK+(x6D>A}Y!*$y7d^lczw2ik*J{&~~ zhB$Hm$_KXg-PRB;Eu@OLTn5a^%;(Yi*;=NP6`6o8`&&&|kpFIVt z#0(9-x!B)aJ~`RCc{6re!{k$Hm4s-YKMH^=ir6--Y}Je1ZN|NMGRnLkYp+0Xf{}Ie zwpT&V<^muNzM$f*1hzN()%n)zH#?j5R~As&dP!vLoNv@OmlPfo?~X{7M6&tF7O%^f zPsl#ri7^!>l%>1%^z6l^(y}}l&b{P5b9hPk?5%a8&p&&<_4Ci}Z+`Xr?{8hZ_IsPh zFE`6S{KvoCx+YA%_2QYD8$yY;u~~AqQsO;hdr{_Vd$=7vFp#6n1^{&1ZkS{PVx}KP->! zVgK|eKUvQAzC+l&kuAEbCVlIEg-aFDxP0WOSfyUicQ=<>HzyP~m#qCsJc+*YR5C(S zvdu)!-0b>5Ky%)638e(b2+<)QG#Xu|98y|gJJqh83B@wVc;E8j9>+(xyDdW2`|o^@ zSN9WDswi~|4Hep=rUsyKUb*F1%`1++vt0&#bs7}(cM=)lk>QI@uiW`~LWxeZV-R*E z=jqm~XHO7i5g2QHY68gCApGFl-(i0eb%IOHZ1KqnlQk z;J7m_XKuCEG@9%3q4efA>8yEIbaGUb5Y*^wT$2 z?g!uf?dAIQ+fev=`Ru`Go6kP|`SSCh{8S+4be5d!{S61WT^u>(do9hvT~X9@K-EIq zqDh4g87d|PJ#?x5CU=1h$Z$UvNj!ViRyqdA%V)@f>uotD<7lUB!&LMfY7+NB9>r!h z;*_pu1f=jH$TGnrm#iWl1IvL%i6SP9uxF0Sz@-MUMU&nnMw4ESAbp#EfJ#4?U_%H( zP=I=pl+1&Ds~uK*kB%iXB^{$}@y=N#a04tEJKoF7C?P2ljfvfxzFde(Nt(&jBv7#$ zqKvLvmvS$FdNI*Oa0gTw{dpKEuO4Y^>tiYKorz{w9H%gL02aEvDg^UT;#PZTtwKk! 
z2&{LT3{TZk_n0~G>AK`L>pBmF@LO*#sH0+O)8BE3B8UPqhPfA%M` z#ecC}`RFfgZhp_!md_n;Rc|viz<8?*CezZuU0^ zWt*N`DyoJY$Fg1^0J-nxw4gz`aH*PLBKl)V>wbSLF{Jp=u{Lvr$?Pd4I?=B~{ z#@+bf#QV45-jmO-2?WIF9zo0t=p?(~WG1NGXUQf>M_?X0;qB_(k0v}={ey&I-o@n`6#mI& zJ)z@Vl)#yzHj|6-S#yLTSVfoBqX&ft+}nEvv#+(vfD5ZcIK;eY$9gB_}Z)nrIZ3CK#s1AytyL1tsE79!PvorO(83e6~ zrB<=?Ck>ZA#R1XoJPTYqPLONoucHULpL(HetRfof$u6~D&$VY;X%oC64FW^nTQCZJfdxl{fwqBT2Tq!7edyRZM|{rigo`l zW?lY@A}eh{DV!WXZ@q16jrC>(cj8|AHP6k*83X3!OWP^-PR;dGL~QGo%*s|qo{Kcx z`|y+H^vRx*vb;hxvibCjI-u~xK*82k)>#vvFAAP)B{ori* z!+-FZk9F$#zG)qqPKj9{6aP z@G$RclT9_rJVLSSmZW&AH8oKbhVALI`&zhW$)lnh(e}FRP$8Zxk^**wPNsWfGUE~M z3Kp83FaC^KADH!thCM-uFj}tP6&|*KzPt$6dNTO(6xPrFz*+oNtSC_n{cy0-YyC|n zGuM)&3F;XZz7t?}FPz0+wU(-0;S0AZ$Y_4HG)T1C&u1LDMo`y(Lx~1b z*v)MQWAK(7=a2&TntPwoaG}HIukd*H$1o00_@Oz-6SlQ=d8Pde5sQi5YHvrz_iS_kl&HB&p@8%0G;Dbg{{Js+8_%fRPzpj9Qcw|DO^fBv&SXWewr zivHm~2eX)m4u`HL_lpeQ*l|;vO;!Pm9Ld?y@J{Z2F5XoL;i1!!r|`V^ z3}ucG8?i+oxCmy=hb+IMBtrAz4aQwk#w9BfqU4*xKOh;exKnlZ_9as#2x2}={-(o0 zq_bCn?{NQkQ#HA^IYsC#UBpAL9KP~w`=y62ZOVH8{&(+eu_rc<9@}=(=Fc~i?+xip z9Ngb>MqGGuST?+0r9KYnG$Kj73J$83sMN^@HVCfB)Wc@~{8Na(L&5%ii@5&x zz*4HHe<;Fy`I|;aIm84B3EvVhGq)*&guY+xBMq>uE-Je4ygAk=Lx>`L=b-__rP`D< zgqCM*PG!2HYkO#V>;+>f2cmVoGH@?#o+Y%ls!x!yg64oz{DrEFA|?^UAOQ?aAPv;p7@YYj|awE%(OY7A+O#^s)8`&r*7JEpCS6#vwb zKfL)v>*CHnDEfs@n;&^b^th-|f_;n}qfL@|NA!7?AxnT7qhw%8t_5g(8IBmyj^zzU z(N(y3W6PO2)2>?WN=G(c$YE5YZ}Mln_`n5s@2Ktxt?hf(@04d|9tcG{XC8Z95eSBR z-Gy6Fqp=LlY0|tk@95LNL0~uutm(6;K+htnx-RryC*nN!~jZu|8yXwe-|OV4}R56p`!%5BlFx+))9G@ZoLMFFadk z!D%EIhTPiP=e?fEMX5&qKIbA5+kW|a!r|pVy?lXpEQ4%|YE}XA^$T9LXu;m`Z4Oy$ z(Xl;6Xzq-n6Z#YpkTAkF9OiT)qm z#2gc15Al@8EYMS$9Zh*+|MhX3`4f5sW(1!XAB(q#FB@S(H$Y7gTQ|{OgMdtODK9vW zyS3*zfTi!QqYd}Xo=J^<$Kuy!Lka;y%zBy76=I4?HLvZa!ZY(^->oT(g#)&P03k8s z1W#GSW2F%&^$^tc6^r+2>f)xvyQfPsH+o~ts5yScx}RB?q(GB%ILE*)%NJistf>ER z36@|@sI0<27<~t5Yh;C|5;j@)>$q0t>eXxr>nI4URq+&)WJ9NuP1J(IYL94wr2+>< zDiY9Pieu-QEYbF%Hj;K36mG;`;aH!~5cUN;T<4|Vb+F0><@4oznWPA+dhVBO#k2S~ zYdQ~~ol0aZyRtC6@b%sI9RDZ#TUfs2u*wit>%P*pXOeh2xh${j=G{BXz3=|IJ>R!P zCMBI&Z|k_$ZAogoN4Hc+A`~DpW8v0&+7$kNY;*|B6Eo-uM0$UU`|>gs6E>p+Lh3U|4-O^Nl;=F_*I(Lm(1lO@9m#)9jqw zvrX)}=T-(80}*}a@ihD4tt~v^9uckI6N(y{GR2e;MPh#4i|%ULPxvT#s$wLsg(&mH z^Q5BvN`$sP7eCFi45N|g5^*6IX0GuOob1Xwm}4LDVTn1r=p)6j9!(izH?(MWg)xsc z4=Px?GqfKw7+a2GhJ}xjT^T!Z?#jWn?=sldJd%LyU64b_$PZ5>huoM}{BNG``|xi4cMDDR{X%y1>la`6 zo@b|@ktyaRV+}7gzf;JG!IsxuR(3^X+9#c{G>Tn+MQ+w!acgVR#yw*QZ$ZzP+O)Rr zxeg#Hi+=ayE`GE1)hU0;pXPVl+I&aLu7+f=f9sL!k5@YQfN#LBI39j5=J0GxCDJ8_ zGDcj$KM~3d7@puS`UZn1S6Dn-{f;V2&Y9dzN|3`tM#gxV+ROl+$b>hit^q^$5*l8& z!r5-gxSq%g{A5tIOtD9PCTmy44h}k|ZC~O4l&AevmA;NwEoxYK+vfYg&X6mzzh64$ zxk`x5iPkiWjz;ptQQF)tRJ8*sl~|l421@df zm36~j?wcx(m9hHs&ur$0lUu%jsl@722Z1!Xjd4|YF-MH9{h2v%MVB6m6?&qP7)cDE#zXNmP?F@P^N})nov*Pqjllj%1Hlwl!R} zP4Jv+_|$tZPYEl;7QsDs*i1N0$h{#v@+8{EH-;8CPpu_Ad;V&9qQ-E~95@oSWR(|A zNqcH(;EM;(G&zWCx(+2$D1^KC2p&JTZm z45Q;4wzR3Dc5wNwf)BTaJl})LYDeb@f%zR?qRa45cy=hmy^s)2fov{gfP_QFVe|tA z0<-0lZEMo2SK75P3u?-CQs{qB~kV#q!7q>lCM+tsDHR?W^~H@lt*wpm(=6bkXz z=4feuT6?UozV{Oi4<6Zhh^BiXgm8otBxMF=~9BwWOYJ--?e8Cc=BWk?BS#xwa4;S0g`Z+_6mxs2UEfcl`_T)caE|ge8PbT zr)IhL=bZF-o&aqrsq@LL4UcTj`A9}X8&I2X0K$3oJ!Tm!IXdzr#RqpObo|zF&b#=d zlN2iv{o*G*DeDmPJ7_DNxw{`5-yZ zqo3i;;Vkd}C<|#-INvf~@35{A+k{$DiiX?gP6G4ZD7YzunB@zwGWN>F$8Dehpd3Yp&Z-+%8!WsDxwvEq6DP-Qx3Xj*d~YYCTm+#r<32+u-#j~8vQdgMTyf&rC7IdL z)I>AhAqICQVD0nAkDjZeIw~EuV{^wr07BCnT!HWnTM3sDyYi)C~7i7 z8@%gJ?Pi#)o?{AZlz2*1TWYed-{G)mQEP>*A%?>eJrkr=!HNhHc+o%+a~tL;^GjJ8F}T`Z+imtKh=7Oy>X%~e zCte%Fdhop}i0Yz4uDih*r$oJe(9H_B@91odU*lP~Oqug}Dy3YT-s*4gO&j1ExQ#y; zg4X<)Y#;qbC;AcWHBa>zt=Hzec4mxS_cIqWy2BM(QfYA<3!hVb;cW7A@^sBNjS6RI 
zzxD>6;L3T?7Z~#9Gq{6ydN9tEYDU0> z!#nLTh}UOsGdvgr#xe=3lr_}*l5zcxhm%~*Yf(pg-_H~~O8K|!Fw1u4y9B#x@)vb{ zsw!J*B`H<%PBIDGtzrGq9eSO#X#$zN>T z5jInJ5JtND-bHZet%K^VGH3%~YV4JToaeOQg!Nc@7iSXkrtpG<4Hj6qfgw=!541uv zV}SUUd}(&tJuoYJtnpnT{4&QAWHH>1J(ZP(jrdE#s}fR-RR8qY@1xSTdkG#jSF(ogQY};PT$>eWe4HZTXT#3NRAU9_eJH>S>L{`TfUW8jg;WR9lj$ zb(@j)#h0I&2hj}UoFC&WvF{o0{}usJ+Es+=KR+>F;uzli>%Yl+XmX7)1voO?vkq5U z@$Z>Jyis+6*+@p{c-ZSPNtExOCKAHowtF?y(rpI8?Dd8C)o5&L`<$^Bk$N;*^KY`( z&EL?Y5{)S$N5y&0T3YA~ULUFuRRN_pmNIM|(BW!ym=H81yqL#2Mnp9DtwJxn>v``j z8$vPxrM-TKzWSKrwN-NrLVPyLMZXy>4KT{edR~=G97UMN`~jEeKR9U~z@4D!RzDGh z_a^KOkKuncCe}eUH{*qd7j3K@-13Fz%vbPbl#F5yw|@GAV!UUJ5nnJghm9vZF)l{Q z!AY>U%#gxajf&cJ4NrAt_;`#La86TAX3DDP0xnt6qh@LPt-ag>f5VFbjFZr7!Pjt3yZfbvpP}ENf27U zbn(R4n-hw|seZ?d*)0krqjB|+;fexExF0Hsdzx38B0)>DtWl-C`qTf4f(#2kjn`ZT z&jo|Nh~Hayix(!eZ%=v1neir@Rl_UlnxaW*bZ;uZN#b)3vio_m#>cZbr)VRs%CoKs zeK5MS7G<)5dEQM1gtH=s89!e1eIN@uKQL!5hb&ri2mBiK=)FFZHB&UEoC8LJ$i%UN zN;zH~!@MK$@VyT|Axq(&!Eh|}e9psu4nIW@%H#;eRcQ;+$;ea1ezW@-Sus z!+<9{UlLpAQ$JJD?9n49c?qqg#*URKz3dF_!p~nnmch(x4lfaA1T_a6Xs>myq71I+ z!?*@6Q)CEe-PPd4Jo;=&Luvvz!*|8`g<=W6JhMEwlxE&+5r8VrRfn9$WT6vxh`P30 zud1eyIDv`YEZ;nqDKBgN&TR+6JULbS_e2@qQ|mls%S%KO1xQI00kHpZpo|7anW7ZO zJ`7GGx^w;S8+QzFCR^WDjUjY9%(P(+TtCm!Zwme9z3=k zMN)~b5O9JMj*o1o%cFm$T;ndUWW4lf^Dlm(5N;AuQ&n)3QZ$T~jXlM?MqR=g8uaXF z-fCH+r`bs}Pe&{8iB>COl408XCr}bR{f>bO@2i(bxx#2ac~KE^TOhNnAM`^lzbFZ4M{VvTh{-yn`G8^&-N*}@_c3x-Gi4EMswHu+8X1-AbWps z5RT{G4BSJ(p7*=X89&Id8wX{nuhoc2Zgqcv8UtnK(btLQ#OprCQ?nIraG>4ZAg@S< z6^F^7o~fzrYfy&Q;2`742ha5UI!_w**>iw>hk0L&E3{{fxjLicWxj0-llk6jo!1sj zi}m`T5G8~b?w+Dq?VnGG^H3#B;YZ`wK?-3;#-ND)-fnn`IWu4#gCv;IW1pG-MxuPM zM6q+HC8|N;n$r?5)P1+kT|{vzf`(&#jA2Omy(y`au~O79<7f;Q25S4hOZv?@d-m{` z%d0)%62kMB9wx16y#nro^i`yLd?E zQTCQ6c10~W4u7exe9%G~Pn?7G#vGJ0Hu+TKSX&t1w!fV&Y@RcGUdb^!Jj&2= z${n1Y+8$f^+Vs`A-9JZgeQ2)Afyv>S=mz;&w4wSN=P&X8;0l89qRsYpQ-}m#y*pcS zlLzU&crzG_7Owg(?7^3NQ<|52c*C(x;|@ZHf#Pw1daQOloFG6D5$6!;P^IhEPyrW( z&#{duF3ev`d1KD?#ysvJkS4y#N|Ddsyc2^jJNhLH>-D=nR?j{`VnB$t?P_Cr0y}R% z#gg)lKZk)($q=&peXpp9)Y1{CDZZwNZkwOl4weK!bq>}o6y7(~PPl2jgXJ%0rK2>^~KC$l@==o`qgCrt*y$ zC_}{Id5FI`08?vFvghScD^ZKw2$W$6gq@}O(wfr-4kGFVR36tE@4fTDs||3zD|DC# z3*lARn@E5?oAqTc@ACM?OGh!DPU2V!>#az}cyx$+HR!;aGD@fpE+cv9Kyxz-q0^nv z+VvP=ym+)Qo_!PD+`IWY1?<652EONz)W$@Fe9x;2o%?no|ADNtc& zQ1!_P2xCcP6-un6WfcUYi@I*VZlT-GW?p%OVKzohYHAH>wgu9_J&gVm(B^!FgV024 zDY7c-Wr#HDVOO8pJ{OJ}Q)8{4SbJ5lE4|IYNBd}>0Wd~@v5w*BTkWNA!o-ws9R;XO z^WdqG2|a_O=X>)IozD-itiCg-W{nqr>0DE*fU3=@P}gY2*sn!!`}eM4*U5}&E(`{9 z{gOMA$V51Ut?|tBO;|0Y0K-T039d2b>H_S|LB>*z_VoIbG7K?Y@pt;rUvQ(Blu=K2 z%7Kp-TCI04MrKgg!Q_v{7fwy#W zA4>kq$d7hMXX~4Vv21^I`_7Lh%P|0SwywKH_(tK)1ZBo&YY-jY(c$U`kXhFAWU`yY z+2Q2*Bi`0{jlu^B;0fD?I1^i^DX2WirgYb&C&bF}DpMQF4B=rwT>Fhv!;Xabn3ZPfX=#~VZ>}b`-UQ!CZBf&?+Ec5~g9W#4yNOAN2?KCsK)Jwx_l3@ejEm>UBZV83hf3D}1 zpMa}-&t-u|u%qn#4USGNdZmr$tZ2q1YkgnEG&O^7oJdp?Qp*EiDP}KHuEFCm41wX7UW(>p^0Grwg42r5-rF>fp4T$k#3kiZVqF+PQD;`FXrQBjp zzznT7R)V=}36fxpz{5lT%{%cmAy93E+8m+dT9@Ooenz2adkjR^;=Xyl=Xzk?_jj!8 zo^QTg)>r)mhQaDOoET$skupK6Q!Crk;ePH--zi?N#M^%BPiq5j9$W1;_F!21caLx$ zS@0KQTl>_&v|Aeq;eN(J7})y`4)A9*ui5k&2)&bh@h30IJerJoH+aV5HDAG8>Es@) zrJhf4H^0$PN;`U~@8GYGLiN*LxYY+PoOi=Ve13Q$(nlH0S}5hMgSB6tV_EL=Lzzxa z1-LJfy;F=wh_^Ek@){4+z29*r_ckTEjwKmJ5zX*dnOIb0EffekqOJ1Er5WTVa{nm}NCaLGmm*n85%tS@BwqK}C?R-2JZiR9&3;il5dtCw#1} z#$JD<-QcZ8=2G;NkuY$t_?a{5t&isfPNE!q7D=0(Amqs|d?c@ey~9~fd(6O*=B>TfVEEW^CJe4clSB!y?e+fYHc~2$7s+G2^)>2k_ ztWvb+jEf$b2(Wfi!u#fCJaUFx>+iAtEOQy($Ne@@bmJ;oFvZ4^N5nQovt4>^xhzu2DRtikmZuOU&4VC3Vu3T{FE z2tGiD;L0hol0=$VF9@Jyr4$6DnKzvbQ(|WWm<&Y&a>)cWcu!8p34N52l%56E1a^1r 
z{e@w)oG6Iq*5~xy9`Eqwpm;m<{9@M5qdkx`P&Ky8mu;Fx93?z;{N`mvTH03h=+X1# zi?5%q#bxyItpK;?{I0J=i^(+k69Vg7%ngXpFoh`KV&~Gu7H@p3zViC@+a6?igbNIm z5mt%734@}dW+)+C7*50_Q$U)51U^Z6SClsGNZV^C2U{oBpf1akKvR5(V=I)JaEe~v6 zl?-x3SiPF0z)qFhGp+hJkhY)%FUxbw34N%{7CW?MslhXOJPbs0@S63^eq?38tTeI9 z)W7*G|Dt+*m3TRfEN?8D4@Ui|KrfA>| zxb<>BQH&Qn+h5NO1N3bbis2YKSSZcxqM5 z9?*X+eXDSNnIQz1tNqWI)};k~&a>gTF?1F}gyaSIS*0QhkmR+)06urdxw`Ip4|cfn z+xvy{GddXj= z_2l_>Iw|aGSx!-zcGbcO1T5?(_#cv}L@nQ03jN$=4 zrl4XO2zlWb#`x-mYtOg7mYQK%hpxLdB*RkRr_&q zqYpESQJC5G>?%e%R z$};9jn0jg^9G|U&Zqof5KI@vnwmxo|z)4k2&IL;%g|}mnty}OyTXT(`&2d(A*~G0$ zB}Smg=LB>Ul4nYITexM9dEPq>0aKI_nZ+#aeUkn|wmNb{e({Dr* zcr%s^ge(e!s6cp>oMhT&&Gjimp!X+XPp1Q)aV88VG)gec9$;{^R(koQ6TK)X3fUvG zC%jW~5+s%EEA&&$$li-ZM%OYpv{mSu@h~0MDd<*3xQ6;# zs|)^#MEL8A->yYr{tQpnN8iI?&%FB_Z1Zdi=$*0S?jT${BUnALM$@zrt^;o1>Pw$% z@12sX|8M6hMPPpVnb36W;BYt|zxhI0zz%<&VZ6s%(PncvZFn~25G+Xn6g6X6VVXv@ z-&jF_X1+BJQUnRf=4I`^cJx7hWXm)c9hCFhAt)D)M%_N@5FHi#o;_fkqZwnOMhEt2 zrK)q~?<7|;>PB%5EGf5SrF-V}P%Zs~E9xMCrFHVW@2RF%8{^HE6iJCkLzZ6hE<CjiOFPgrdEzOBA;0IdFi{ z^}9+cU%fSv)`y?{;qv(FpD!rZA@>qf%*Nv`ePL(Rj}8 zjLh~oo*cuYX^3x(B_+Pj0r`aan&+YqdA${7DJsyB&*kWJF2TqnqiW9jo(#op)81b9 z!Es8_rhwo7&aVi^JF!owy~9ju@0I+47thq?Z~1_XD%o%4;q*lwd~&)0@#HxpFBmi< zOVE3Ufrsa}C_r#WtLGwy&P$lYw_W_2G-|1&(yrTJs;!YH@Y~YBtLNY&+YXi7o{D~W z>O_*?p)GHZ9^41Jc@XI?N&1*$lmpT{CbxDh9b{OH3_xc&v(>k#WON4m$c~Th-1?Dk zk=+2hG7>murXp^?eiBX-LN=&Qs^a*~B&Uq#WEseKM(GYic_%>~3x|YN0Oox~WU*mZ zB4X2KA=Q-Ac^mb$m0+rk5Vn(2qhL-XNmZHWRDwk7Io(cQrPAq5jrz(LA5BJQ~` z!dh(K_$rdt>lr?}9ma3%?uotLPi42C>aXi%Ms`+u%NB(ZTfaW_rLcN?!c&qdSRV2) zY;fi^uSd6L9Wy>+EJ~^MoQbp`mSM)*F*&p1YbO`o@|W!7(F4_l0ez_Q{*OQK`#mHD&6gBtCNSaXkYm)QI>l;H>)S(&y3F^N%0j+1kv)>fpD1Zc^@nscr)v1ar)kg$2i7JiD8wtD03kO%qG}<3#OlVJ% zOP)BQ1WQV63_&zo0gU+}AR_t_rf?Q`;690+_=w!ibEQ1{oUp4;4Gn#5Rm{a4M{|%r zu#BMtCUG@S8joA?(0tH;iajpvCpun75&ys`*oUCaKr*4vtM~4C4Mnq~s0KFQJ>Pvm zYG;)QwVffvJOw=ExVVhHUR8A8KI?nt?HRXO?DO2JY?;8tO9` z*S7oY%d{|^;mP0`10npBlW_KEdHKu#%dVxbmM?$&kCq?*ga2Nho3LL zn7^H;DZ3dRHo?g{{4raX%xi52L*vVXKeLW(;-~#~)wnq0FVyS+Ye1C0!J*f%Ii4wZ z_S`nooTtL}SMGc=vbfChojpqC`uDBziz4h@v2OmQvd#Cyp75~5IJ7!=MAOm4p{2nC ziEDYCDXJ78(d@G&!b)C=o2wz6;Py|95e^s_WJm>d=9tl7cYR-|yEcP)wub5} zqhXYx&ow&q3SmdQ;VV~fF4r8oa^<>x+!Shi(vPlLl7RcCkG>eEbI)=>+3?Y0)KTs9 z^L7aoIZNh`5fnZ{p8uDy8tTDaDeEE^hYmR^BJ||pgGq!dVKIlGF&>@Vk#Ki&*+03C zT=y6eB04&1+4QwUy|$`eah|$EV-(lA>IB6*%Vh@Mj zW1YYvI_Z-1Up*FC7zd2<=L7{p3f7+x9McSu7{Jhj0l|Jl%c>H1v4>W;fERfxi?5p7 zHGF|Mnu5;NHZQt?E)k&C3Hv@d^%`l=ZH21F?68%pW0#Kzag6e{J;*7m*~Z}6JjW)k z!&>%5AKw4)o|C)20w$P(+kb;}USAgec&m9Su9uyk%xlUbqOAJyQoUCwT`(PQz_PK; zvOrSJZ_KkXmK_F5)>^gLUw4QIWuxx?^zlN06^ENqg8J8Q`@Q$icBlMn%#_d`YqYkK zr3tK}VUqGDIx&JZg$frGOk;VYhWffhp+5?`5Xu`{%XTH+ZFuD+x4k?Ul=RRg@D&Mo z!y_P0Oc#Y6!xN4fF1zMwPs9XiiYicigh~o|#f5n>*%d!|s{o8IBOHtj%!Qz3XAT~; zT(~&wA@H`Gbznwy%g@`%u-_UN!i$J0*gOvw4a*ju37sbh_w4NDa$C;9J-=T%{j8Jd zie&8Uw(V_qiZqmQ&x8t*p@|m{?v!qZn=o~2?`7FKR;KWRd2T+(aE_eY`^;H;xlq z)AlYmZ#=AyeSK5-GlM5Q=^SBQf`rDlEKaR+seu<6(2Pb2r{RgxLldUaw;S_rd`_8< zXA0(d9?omR_3)Rl>;HXEt@cv{AS!E+)17uf>>1B#xA#)g6Y=T^-^W4lOnoQ12aLXZ zc&fuS>C+l0wUJ@k{0>dLgF6LZf@<&PT_;;I(t>NE4gu#%wA@aQwv>Gvg6s(BV2c;> zPUo{n^Sp92l6AiIwkE&?ws@uDPN$##i{dg=ED;h1bBZ`!)Xv=ienW0Ju@267{Mw$VkGXA0 zv<~B@qOy@rQQQ=|RZ?(D`n`5b{}SS!eB-^sf zGAucq^Sg1*I;^^uQJW!dzUUMOy0|XU&Kx9Rnww@a;NXfaU|nhiwOxFye>{MlVn~zH z_B2bhgV2OLL;#sh=kqy4@V3+lUu!fqs0r6bB3d1$=ZU7&>8UM_9;58J^5XlOBZJ&O zbEK#V&H6+pX+Q@>N}wX-W6am$O@?7SF=Oj4%0rmUbx&|Y3nL(K)&h_`8L`u@t&3W- zn9S^z^-n5);V zYpO+^f&_)Bxx{Y3Sd^yO-MTnGMGkMas_Fz5GXWJ+PcJANiGCdFgdkH$RKdfJ{5!yt zd|K(Fs>2zL>{s(t!q?rKS5-FN-aLBtaO?Q!iW(S_?6#h6si3=DavbR6X9_u};Y`qN 
zc8sYKlO5nt;)o>=Q*3i(TDRF?of;u$)^n3chG|6$GJ)x=NPT^KGzuy!+4 zO6Zv?SMXApJcB3+aZ!KmJ2R$B$5%~vj3PoHp>Njic*;>h0>XcUQc@+u0ke6!xkGat z;ogIH>zygz6wZ{&G)LAB%u=4$Dq!0a499b3kMk0Yj3O3V4BSo2OJ|?rhsV684<)x> zZ=Flu=qVyXIJ_~uL%lT(ThEXh0aWMaz=92+GD52Q*E3nkH5@Jy!A23tDKFO}m;`^) zXO_kcv>b;C6Nh#0^ek@lZa=l-MldHmVj&m&3_Z=ww1ey7&+a#>z8I#rCOjTr`Br3R zz>0637&U@NfA07W*5SY8k@n(@<2Y4wll_)JFQJLTG^W+dkAr$R8fOf4N7G>- za6-vk2!O_4m-TF2`uM|te>wW__m|y2{RhjXS6?hwe*M37BHYd8)BpW{T)z42i{;wy ze`sZ`Q@qF)FP0mTu`$M61OXSmP9Ghkx+f89jRLuqe5wC=GmL0v=7B=%91TWulCi@& z){_#O`=T`%CVb(+<^#uSs^d#Dx+qdSq9iENJ=gV))6ZU}bWESH?v3|07Zs(ECBK`2 zeD-vC=|G@2&I{OWJ^sW&OZcfZ`8l2M98GCu%bxf=NG$S%y!j{D)U14lq)K-gvNHttIVmTU1rYZ)%ZNi{$VS+Ku^Q4 z7NI-aywbXJ!kszXwr*|G*0$8qr(_@Zf8aT%r0)HyK`4hb)N^ zuJK;7rd){ausMHDj=exkp26rDi(3w>nzi&4G>@D?hZN{G5xvU9 zZhvy?&W|EuGciO~iN#s(!#{=cV&$xdSgdU8YtB5c&iGW%;i6`Ry%x47k z{7Yj_0#wr#&$t8{Eyhm)1fktSjPdKsm%_!u*a^Z}hbACn=scwo1xi%v-f0sV|HUy- zf)rMd6H~UvRM#o=gl>~JTL>b~riAd%0#A|Y+-D7Yf7X<|mN8I!d&2!UuG+GZVr4WG zc0Vwl3d)Q^gvaimNN9M*TmF-uK3G2giUMG9H31=i3=LtHa~4+Nq!BJNDNSMnNcqD? z6A!jLs!kG0{*?@3=oBQpz{xpD-a!S;RaM!VuR<_wYZyFCGYk?+sqr0^L?W@Sq6~+; zmUBw1wewR40+ii8`%bkn3>qV)jD?LJK6z>TM~OIjUPUtC5L{t8;!J^8G^Q({|Noe} zk8Vkm0v|Y<^My@L)1Li!gm| zul;>BiwFOqC(3Xy+zbIv6o-&xNZv3sGS)&@_$=&qp@2l!WVB$`g?`68HBaLuHs|h{ zXl)iI=kfBcQ*^VF!n@H%A;zK;9n26t1!L!+Un$rVU9^rDzIE+c|M`ijCgKwv1#@9~ zHxn>Iu!KSUodaJSlg&(oI5o!uyP<+Nj$Pw9xe|{~{pA!i6C}A=(_I3}XJ2 zTM8Ul{<#+ByBC}u4BFYgl`#=+(Xbb1enzGB(D0(?d~QCY`&plEfHU5V5dAdpdho$) z_yl$|*!q7UPvp+;=AsKN2mGY8>yfM^egbw_OQf~{T6$z~2t~Hp( zR-aK9MERH8U^)(d2M*VgSG`v^;WWH7UCEkYZpbawxa)g&`WH;~74G^PY~kM=@+`03 zy`FcU|MmZ}+B*DX_2|>jR!?kIy|6WRxHdGFphV78?Qr&9>o+eqnVVrz*f$!F^Q%8Zv7D?RlzGfUL@)>)HVli*;5$wGazw5h5P_&wt8XlH% zulmsck??R)uC}@zs7ToMtA1~_7QbKk`SDad<1^}p;g+$>OV)_-7@MAUtn0S+zo)IE z4h9k0e)au7@%r2PWlg(^hm4lJA_K;^yZc~u|Jlz6$IaOyZ(Cc>^Ihh5q1hCwaS!}> zsEC@eO)f*rNR4r5C!A`j>C)eZLcX;$((ag9enR`#JmKDpw_knx|2oe3+kxT0_}+_r zwU+)`A**WPPmvoF9xNFyg~9vXmbhwQytBm6ntJQ-MLsJ1x=)TBIe%i-xIKR)*PwGe z!uKP2KKC5qoK$RnveeT9DeEKi|K#JJO<(6HFU`Z=>d_}(*=zn0+M(PT2nWu+C{gm5 zVRg)S>a@6buO!M@W?qTTouA5GAQKWDv-EA=i@apa?H)8Qb_c=F_RpR@{grzL6NAKL zkQ+e)2bMkKCBU_P>wy@*N;H$a)rR=By~IZpM&9|!DeDpyUP(fzZu=aAYC4B;V>xUg5E2ZTL$B!9gHsSiJu+almnG-9QC34N-lNr8vwU#dH9cx7ME^8HB_uhAhdhg9w}!Gz^o_1>W=@TsP<>JYQXvEO%}U zyb1?pR0p{P!5j4u3dy~fXj1qyft80;SiMlJAe5d6I&CCG56aglp8{@nt?lgy*LBYF zB<$IqlX8!0Di{H&gP&VbiwVbpKSiAZRUeZu=jl82u!Kk(RVI#k&>xg6{Dh_u1z0JG zQDnaRJX>oh=`}%7=sb@?m*7qjwUp7C;PDigNI*>C9U;rr?Ry9}KL0vqvu#rT)3jbE)%4bNVn#{-UD0w62R>!0rYZDTNwajK%ljRQ@)YJH=H6#7V%c_p6fJ3OJ8fu*~Yb8t>{p(Ugg?6l{l4B2VFUn$b1y$rPh z!aR?c>sxJ%;R44o%Y-JAVc^J{p8KvA!jCRA>$wFcUHXg)l|H?B_Zw*m!q9dTtgn6) z!hWXN6x}PH5M$3-2sb?19iiAtx|YB=$}bwy?zSbMoQX=eb}VN*b^>9~{_TF3P`0|0 z6Xd+_4Ntmo?^L!d%1EImBe$(xC%iKTXZ^V3ImTy^u)OygisoFvGx%0!`dw?mU~y*f zB$-y#a@SgIj!p(nIYA5UiVRXDt>0#lTuYiu){IevMuI6ErHHE!zoA@9SR2J_KI-|s zHR-qC{s*6uA#?$yR*pbf;D;2g$upfiIDuPSXD)>j_yBQWHNj{diaHB71?_6 zog}&B+v1$HwNTh#WOdQM3(B(yT5_Rewf#BBiJf9DstT9URJMFue)sJFi06_oCSt)` z!-`MRpXRgs<%v}=D+T<(cD2V(?F3*jmYmnw?XB^LqxnIPIn4Xy?4ysq6hV0`A`q=y ziF$lQeo@}h)seOOj*ylkl|dP=7jPH>wOFVzX6rc-dV&e7Q_$gf=g4yisV#_WN}|~R zh0>=dlzPbl5hER>R(M(N-oY3ZQil{qrK(?^TAYK*6a5xe^{C-n>-e zI~g1pnvRLe{OpS!jnR;&zpTl`sV~5Uo56cTu=KxFoWlY!Lf$I4%ThP$JQw=R zNJ)U?wan6p@n&ohE#5&^`fN8rx~d`Pk!Mut2_eAj!Z!os-~$3%k1yoh319}G|AJQ$ zv+7F((|+4GDATrnZSAJ8tmZy_vii%v_>0wNpMK%l?bSd0r~hm9FaPu(R}VTi(AcXW z*LKGZrFupvt>2xoNb{TY75o%a36)zS6C3MSwVf570&ER#+2;#u*0uI6-*nnr+t|F` z#0{;r233Z9xIITiMHwolG4!CVyrWK|E4w#(88r;}S&y1Ne8xO`7;f8%O|4UIWV0?= z;t3`6!#FETc;S;!PYFju8LOU;&ckhI)Bcr96N#`=!rj$HRd>CmX!+!tatqRDbZ& 
zy-;L+fafP-2{t_E`w}f%48NW&e7U~&GmnumO0r(Q`cIPJ-mSj*5C4Ak#b5o;tAkJe z96!9l8&Bv?VN;Ed46KtBEd}Q0cyuL?PG9IQ!JUCrN#@q`Gry~(R{zO~mYJrkukq|S zww1L2?a@i|ZcZq?@t}=w=CHZ008xsb?7KeuJro_-3ib0AmrHjc7cw^BBs`e+=%^)$ zN^$25g}VxuIN?dP@n=~fKt)35#pepIzA>X70!o|doT zyY;KZ@zCj{e+QI#2%1E73RbomakA z^di~O_l+fp24C%slAt#gn=!Vcbe)!XdC34%WpB8fQ=3i7ew+Xp znI%3lRqC2%6=$h`u5&<-WlC`fx~zsBe3Ylxg~@WB6rjuXurQn19P&! z`I(^)MFCJlfU=}91e1`<+imMsp_`bzl6H3{??{1>)-Knr-?~$H{p77gjI^&to0d_k zew&36ZcCvIeYkXu2s;mYeaqr=p{+aqPN+C39EoU9Q&_AB?;CDZ@es4763;8W-V#LgW&fpJw}ON zIuiBO>gS(ZqcK+VAsl_V`cXmiy!*Va8y?De(F!ky{1i_i)sjsj=nR=rBrPkL0E_9K z49|fa5fP%^Swii#kwuOO;WJSb zu`-CA%vLscneM@C=JZi$m^GgPWAKG=s)L;L!%_&@ykxwM;MNP`=`fbYSxw}s&VBXO zpEyP6Iq!FO_43ur)u*3-Vf))h17E`Hmp}h%_2$QK9goVwBp@3{iCaHqC|@Q_$cm1F zEdo+_tI*-4?I%V^k>-^wedf`({E_o0YYKm#mtS$04t|+uOFk^I#@vn}35{mV zeKfE`uy}DQ2WCoUXm!`Kmojrph$%E$M5pz=9YT1kv+FYq+V*HZZozzaOR)LV94=dR zNQ>sE`DdJ!a5e=f;PvXxb{{-bQq8t8EZfr1Tl3pr&nD~-Sm|ZB?l4piRS(knNhG$V}LHg$Aj~;c*+SXZTBYy=eIGTC;_0q)g9vjuaUSu4}idINhSGF0#phYbECMhZF_$|@n7M@MlmCQ@ZbNlOVkmJRaA+d4K|bw%A8&zu%# zjxNt%Yo|y`xgr<(utP}1@ZiW1&}5E;0ujHWg0CbGS8`YTdEO!yMdj|u(2B{lO|1<6 zErv->);3;C$0c6N4Ok+AaKn(8<=^z9xtznED7SQ4c&zSx$DS7Ljt4U8`oCPBmXNxZ zehLPVkj)vO&86ogRkmeqrb2s@fay*N&kNnssd!el(Y5cX@@d*MvxS^#Sy(~JN$dtT zst|w;;Aq!?z&8pXf`z{&xAh>Up9f#1jYeR=DTyX+SG>GLujq2Z0&_01K&@13K&RF4 zJ%I_J1~ncn*-z1Bd|3m=EEx3U{pI>v-G%T`I-GZ<@6{$Gcrzgq;|#HYC&DLGEi=WO zI%!P?ctYKbqm(Kb))N2pIZQbW$YRo{=`apEO3Rg^6rVnQq`d3P^}F|1b|)nS5ROjI zg|RP$e*r(R*VYt1@qIBxLX9!%EiX6kY1cjVcTAkHX$+RIYzotq#wqg&LK8#L0^}Jb zMd5doW)c`v6HQ1VYb!6z2;n%!BnD*N$D|3T67o(jt~RHX-KE9%^i6BI&9P*%Fpwgb zLv^>gC@HS~#}8*dz+DIhNjM@0M+?N%YVTaFKKb~o&42e-e{BonJu40C&F4SUuEaZQ z+;Zml4m_-T67?QDc(VTOH{Wme4g__T5xSBAySsC>-nB-`s@`{fvc#rv9ef2v8+qmv5 zMJPApowmK8^!Y=c5d3C`A#SX9`gGQIRdhV2Oq!qmSs@ zd^Bkpo>vrbin*hA;gfK~xLL04XZ87pVONk~mzOVJV*{L=D)R|8#!81dpii{DPtN_g zSzmX;R`nYr+zPZrA{HbhBSwtzU)kMJ-4ex!qg}r{NAhT#a7Z~M$X)d>#Snk?FTnwD ze*x#Y#%U2Jg|4iRIGJnO_b>ah>wS+heCV(7L&qPk#$%rDmD)qPkOW*$^#*)v*eC6~ z6i*nGe;W5YSUlvnM-c2tEK;ZWE|!xA-h8Q&2#WZW7&E^lqfU*H99UC^Yxob=Wu6wT(oDbxj1>lDgSJ9c<&K=<_MoI z(KzBq|Ki+h+t-c=-^kPaZ5=&a-&~%FvNLu_1&5K))o=Zkp7TS@d1T1 zG|X9CSU#H8`l3w(itwj2(ZBu3@iHW}MF#RWDWD^3s1Ix}E5p5NVvL?ya}j$Pa=tMb zrP%>%AOIhH`LDONp1@IJALiM<&DYS8gEv^5p1veZ(xW~JryF_LN{yduH@g@|koZu*7?V5#`%t8D@vDk(ErnT*JJPB|dIKkU zg)67lz4iLmX3t>>wcoLU_ntg;65F?is9sjl8rEgrMoSCB0F7B6CHt8Ho^RfT^)zr@ zDcrV3JS`2yLWQ`u)krQp*q+kxKam>0!(sS{DO-=8^5mu2ueQJ*tNcJRQC`ZG!+kcEXEHt|1k0ig#%cgm zOhPuaHMxXO9{092H{8MraDYUpkHh9j$1@P4ooyM;l zJNI0Cl$1@j)QOuVa%PaTA^_WRkKHendGo30BGmcKm|kz+bFKOJ`bZ;i#6#LiPEEWk z*o>db1Fy$DH!$fpgLw{^Dnt%%V_-Fl89bhvB{=sii*4Fdty4<#J*BEc@;*0&-Q_y^ zD_N?`P1@}x5-+jsEN>opSb{s1mHzCHHV2jrS|%-_wM_ThE5&_IUyxM~pb{OZ%#z_- za;yHZNN1J5Aze(ynAIOH- zTgz^?YFJl&0)$zOh!ARhcZmoDQNZ}@g`Vv_p0dG%gV(>GQggkMpn2v?a#ruF(j-|S z5s-k&J~tN#E~H5wGZDiG5u^^{k3`L^zYlRk9%Ak=`wWC&AO6sEh_9~r&~k>wWY`jg zi1C6K-eCcse)M#b@+xin*82Jh!zS2gkGn}oF-PPc zh{}Dq9d$kLfZe1Q-T^*1n^f=BLCRWt3H*gAy{j2W(6)V!NBfhFpYn@cTQ@H}PIsF# z|0uPo(?0|E41`tfE-QGHrExaTd%XM|gkRQt;rotV45xXi*F1>7_{)F2`sG)D+6V}p zMbP8{Oa(M>xAm-|6<4ZP-6z_$%~Mg(#GY>TYZ*b8NFX40)pbwR*6jslBIMq4XK#Oi zHzOM1s%>y*Fqv$M=WNzCD`7nCL@%SX^OP5UEn-n}-8!!v-!z6o&e2K6($=9p%#=Y( zJG?!&g^ekT_MT^;M7QB#@%EU4d4}(QZ)HXGI$7TP5WlLR+E=5@M zH6Cv~%W{tXGaABY8TLMx#Mj#W%n5p~^laCg^LV7hy~aE=1**vv(BG{{(u}Wh!yu7N z2DMzf$cP+;w$oLxYKaPX&YToFE<^sn)~#LJ&}P}9zN~{^ON8ADHav4{yWJRc_K9bh z00vAV3YJNBgwl8KS)+dR=`3Ltg=^VrE#rOcYM0Y7`Pc^ierJ zxE=&<@gh5stAw#VL|+-u61Ya3^3+FIH#S1(v59Cq*ZF${i&$c~+NsJ}2x(BGFzdfr zXdMVX-&kybJ1gg%n(*+bNkLdPRyNfPatQ=T$r@#82|DY9}fYX 
z{|+35f9sn8po^K@X~P5pO}`%!;y@cdYin>__M&Txxk36z*7>uPMs5`8|I0P>;NH23Xs=G6D<_-oactIbvIgIHLlg z*l)eQb!NIfmaTOJ|D3OG4l6>lFBE(Y2Tz|pU;Xh{|BZx2#()r(h8PM67H(^=!vgx= ze*MDCFW$Yeb?u(e`@^Xs*P1@$TCRY9_-m{;H%^hdNZ1PVX@ifUy*zjbAOt1^@Iinl zY7@B`I0?4at@jA26zDB{rdTreI#)gXMJeGnIzB(MZG^|1l8O-{$Z+JQzG94J9x`rr zKP1GYM9Pf_|B+Q5dAP57U-ax143{xe8x(p=DNPkGsvR5A*tP9~9RvB`@kb5|vv&9T z_eJ%#OlKsBHw4oh@}g?oN`~}p-X5?g7XmKI zxK0Tba-Oy1QPjM@*~i@d6kD8=Fzz}3jph4a1VaV^&T~J7=sG-)k_1bgiIr$$W?;tI zn>`=0b?tf2kP+RSEcotqr>@{~3ElcKPlFxY!I05v()F1jjJ|t^{LpXx5qaK80fN1I zltu=`Y@tZHYh`bHmcBcOy*m z5rN==_@K4b#uS`_gO<81quGwt5v%de4I}-ElHR8*a{@Mb>B8XcKq+K?qj1%-buRix zsSK2D@MN4sBP9&3_wWHDAX@GqnxH8xpIoeslZFXN+}m5eEsIp4fzWV~BFeCVDdi*sBq{jQiza8>e}r3Y`ETn2o{ zp;i@SXD_|5M9GpxGgrDm0`R4i7#4jmwJDpoDHtw zHrmWUYu+^IeSY>D z-xD6HzX%DU#)!~nNnalDi1&gY!4+!EcxZuO2-%nx%YI?x>a1@$(y&8USdO94ct8dA z;CW^X8{k)`yxmHZe)9au>eZX8)d|;k5j6U0TyQWM={1nna#sKdG$9-e;mo*3$)v>H z9Yx~ZD&ko!hdaVN9A?!~BwhhWjITNP*;1v=i>~=vH|D@)R-&zGp0AejvGzI$r6W|U z7RL$J4@Hjo6@D3}$`HxpE&SX3mzdW~v@A1;8`_A*o<8B-f%T1q!21&6{^oE0d&(Iz z5*B73!^QZG(;q;lV2i3`_*KR30wWx1^Ybr%p*q~d)$hOkdiC9l7l?Zij?F_^vw61{ z=6kCUDp++r1UrUe^oiKpi(koC!^eY%a2dQr2L|fM*h>jDji)uV1H!c>h7M0CtG#pP z6M*%XlDak5t&eNTa|xlaS$pUwWD_v#@9z3%OuUS#hLxun7s73kspfd-k8rvn$SR9@ zWo#J`qx@|zyS?U_l*n1c0Q7n6d22TbR#B5x3c7V)0^3k-U(JsyFQ`ZA#^+k zuPuH3l!Mc3Pa-9L=y=o=<;WLcHBafA!p?cY!x1BI5_Z7yd1%DA>r!lS#=B&rr`(pg zEZR6Xv==-*+02f@#nX9ir*FRpXJk>v3jB_umlH$rR`u)9-t19IF{U{mqtqM+N0$n@ zmzAEfvrh~>1V68~z35Z_!}wz4{_^i?M7&*NQ2Cw+wy+Yp^7x12(Y#(^uc)EIabD-zJWpf^)%jajZwj8BmTFkxTOma~*2Kx!$4o0pmg z8zX5;8ybOt)^}s;EE*UFnCHDH+sRi5GmAsK5f0(8tP}H$5I~cg#g}mDorI$LKdW6- zmSCxxPL^2)O<{`);wDRBe?3@z4$E#sV4a%V7ibu)ZiEI zD!~}y4d1{J9;IFw%9r_*04mq%KyiTfF21+leqZv{y$&CFxW796QPw@-jE2Yfm>dQ8 zh~egJa4<#iGfux}{pCaJjWe~?q$Ln1C!l-2j%N(-vy=g^6K)1p-qREy0rOaD9xhd6 zi?`DJm5?{^gW)Y$TVtNEJ8S-uq-YLOa_7AG^2`oOu!EM?tCGWW93TM}u^18p-(Svr zx3;_U?f0waUwjFc&FaOEznhTx7(7DqbIhzaCTyhdXhu~y*|5<7lLi47y8pz!*C)@v zT>a8o^56a4|GxUCZ@!sg71uI$HxyHJRbBgLIIkppE%ebiTd+Z2@Yc4f@o)e?UM@OK zsg_NiAyx>l-2!ivmc6h;;|Tog=Xdf2EF2NM-V$wUiJ~Gl zzQWU6o(e+=EEn(X3ucA2hI**t*3SLsaPcXkQttrYmF?V5A^fd1#XVcvBv_fm^_Du- zI~4hjDqoC*gz7CLap)XBRBIuA$hxNqyc@my-MetnPjj|JeHdNf$K@_AibAjMnFs$; zelsJ{`A|%p*u8i_yZFq@eq4{R=O>Dfe&ap(O%bE#ents~_pT4N>$CSf16QN?`|VP+ z*Shk&r}Sxan!%C*R(nf#CL0_n@?=7Dnm0OI{gO`upEc;lZ;VrDD1*uyGjEMMW#{{j z2+I-NwGVv^R{Yje(MP!%5?!>{xCjiLWnfIGxbgekm}jfw+)sg`4a$6DPA7tr+(gJo40)_EBr>8)ZMmP$!5<$+p<4?DcF>93C{YC|Kc??55KJ$ zSF*Gy5jJKFS!aL$9cM%-SQ+vyW1TyZjGgLT`bUO%xh^>yYtgK6V)mLkFOw zlzzff0%Y-CFd`iI4~&9ZHKbj`^b;3nnHtNq*J>c;29GUolr)&)>@4>IKOdIPsGKNtNN}|85=Ra_H14Xk9l__b$4`uHtsRU` z01SagF=%W&g@zs%#2x4;>24l1oOxye7nAVEThUeVpJ+!u8y9v$45zS{@wvM)}Oy)tFRta14v<;)na7ugiQnSK=C7ZkY-UK?+SCuk z&tES#V%Okg@YwzSitzM(aP{GNeDTv-{6f<>gU}x@IOWZVm?g02Z5r`@GC}*3tv-W6 z%k?R8Fmshq%E%}jZO*c}GYBTa5R{(lr>$Q8^Jog*9L})f%-(ekz946T!PuuD%#2g- zXufwx$$P5yoBz9u@6oJ1=afzn&8bdiz9#w@FPo&ov4x-8c9^jg&vx!bKci%4e{(pD zHlxQp^VX|{yDJH5%h}fPFAPLPD8=kN^yoMOm;x$Ad}b}W?O!hE_0GC>#zS+IKyT^L zJ#b_MwGAv9>wEj9J455fa#~AMW6Z%v`@zQ}uheTOPC28Q6y>co;~QJHI>jyexT8Sw zfXh4|{$p$`vbgnQ|2SaPZ-@uy-Q+C(xHBeH##y_Q`1V5kYEc}0G_W2&`C_W1y?gn4 zyU1P&#VW~vt2scW zs%+&k{rd*i3Z&!c&Z~n*C&wue}_=0kGGUc<6KU;m|U&XVM1544DWM+mu zt{@NF&U%m1&nw;`v-WMXT{6;{oFAgft*5u%K6p%jfiK*om&z6L3w{|c_+~Le$+aS- z31M^-&Z3iW=}V*bY_!;$y#pH8ho9VwT}+JY+P@TLj39S~Bi_mKJ5hurgjJoa&ib6^ z4Z6S>BCfk+>?Cp-_=F5bIkGV4kCr2-kQ)%%Y3;1gX@d%eK^RZo?t?*0o?+q8Li~3G zc5B^p${Dy4N|U(Ma9SHdpjQ-YMFXx7bL;L6sv`Bn7HeIx|Ucpm(+?hQv7XOvBIQa^LpO1m;{tdYTUr`#1HD{tV2@OfSC zLYC7Q0Ok&XS70Y&;V@4ROb2bD5faO|=)j($00wF9+gEBV5HMTLB>db{km9fY;$KZ6 zq2GM_-K@Fa8SBuNVksFPoz498(h755oG`t 
zy6w2%*48r8s-e)@Ys^^r_7ZBYY=mD9;K^96;vbs2#TMU9_<~%0*6SopX2dzaaXfjIOoUCqCHIpS>WwJ3`72Hwd5d6aI1WCqL-pm*f`V5{e zX9%>NCNFK3rw`}D8@k9VE994Aor6PI{*iJ&KT)^A>&wH&Kb|(e>zVP4Biaw`Ed1pe z^fAK6vcQtm)*RL-d?uXG#=NB6+MJ)l$E?}$9?jQy_tQ=hnAYLpsQ2_W%B*EJo?f{B zFGfd(Q+=dJ%~Q17(n2((-+=x@4#6^UgYV?U!iOHAl&8NY80}8hz4>ZS@a&2~SMkPx zPU*JB-nkDt)<*P6dGl&BN@o4PoCb1mO*!YS4?mgXt-WXNqZ@otl&WFO^VJbRvkCXR zlCCn&yzj_#&v^)+mph<}yRpT)d9iu9l(4>MqnG)Q@28Adas!SFjTdQfj=+p@qMU`R zYa{tl1aA~{_ZfrecTe`V!TD}nqezPYf@>7G9UQaoyCLOpb-n<_pAo9RWv2=_%-OkR zv6nCZPe#9^v7dZ7)!3q+umAqvuP$Hyv-NJttxrDlK3oM~hDGNpn6nh$72I9E7TIGo zCw#;Gp^$gp@f~O7&H*wvGUmg}!NX5iKmUtQR^R^i*Q=A4->;s1{$H;inv44n?^}}F zJ%bPBDwG?N%VE^~Nd zkCwm7iKy?K;vCD-#UA4d+2ga>+q~nMpE~n_=xv|1M=5lBrFi|dR?B{2e7`c5Bc6(z zc#^T83Boavq|@cQd-H1AE^)WS&9rLfTDNl1x{W>WJL@AmYhtXNsar~-NAZcmZcr3) zm3#y;>S%5*h|#^v=9{g2Z2e*(rB{MK)RCiHlUU=yj7waLuF3XQx zXWW*s+Ybyn9t$)y7Dj?`2bsfEg253rOr=L>E+uS}_uk9^2C-%fr0=#`=zFA-REiaB0 zZDi?z!@bSkoWIRpwbvOHNASd_Jpl-F61otw?PTQNA#8IO002M$Nkl$U?;VxgWJ+H8Fg7BgNZ9Gu4u zwq)uYfy-F#^LX$rm8jLdy7&!(_}Kf0VD!3mohjoQZG|Mym;ci;X9co?)f}Q9FlfJL zEUnD=+Md64V{2o_?KY1|pxpC}-U$!uJ^DC++mwrP!qN12nkk6dW`v}alfQj8csz+e zgrlpN=Eao!)V0V{c76l<%NEnii;;D=<)fHKvI=MpO}Q*f9_meFkzIm+{rpe=W^?pF zp3B*Zr1q`N!NcdP@Biu7>sLSg&ek<$WSu`>!gOKgqD!*af&W0gjJvg+B77xBygvBE zk^lvM^-e!{w@m+JkFuz1g=Wy4zTT~mReaok^yTK`FTe6^Ml(4k3ikfhHz;B4kQLbg zSU{)0@V4BAhiKEOkL$2|d2wbByP5zQLcFTzFx>BxO^Kh5U*6{N&S6mIA72VyY&wk% z%Vv;Rwvee`{R_L?tgyT#-x)es@7Uh@mqtV0+K`VqiJH@962dgJXe?&>@1klGUqxKhGj|U$`46zq( zp3jAdmO&b5un#Kyju+ivqEN~o){ES4Xl{UYkTa~|+YoE=3E-H6cZW81?EuI+7rh3D zTHvP_M(1!7S=!fEit3yugj#cl;Fx>_K&|Pyb`Zo8(j*p=Fqk98?@tkn5nvSN!mx>e z0$3195D%^`iq9^822=0NR+eS>O&kG|vD4H`p1NUcPsBo4G4ICYPgKM0wwhIFroI1@ zP(lfxIofTGyd^-JV|I+5HI67%2=uBDo+N;4YaHiSwv8n6DEqMv$Nm|U&%1fVG9ut} zuUhHNK!V&vP6ePyr@|iu1SOEzKxdTY^%+<~xGaD0{_!9E>E`j{N0u+v4!Zfzt20D= z-x|Kc6kD2Li*OE+A|uCTf^%8W!;k^q69{88uM+H4AD>YcHm6-M^B$G;W2o$ zxo_T2NS{lXyRw#NEt>4NCJD}jT=nBK#rCwhRaq%;#pvN_U%wsjaFej1sH*-J-G#`= zvm~HvOF_{}N)znOkT&Ld%S7($%6Zju{o|6nX?GRF_&gYmq>X+qg= z9yi)o70_PjjfAYWAo1xXfT8m`gStnbD*@P<3gZ=%pVqhS|KcQ{(8yLU#?m`7*AnwI9DUOD@k_xA>h z(aIi=FQ20KDTP!=2rS|;1(V!uJ25m+`c}Ckj6w`rswh!Gy2b0{Ekr5;f%{{E2 z{ZzAs>CL8FiO=W`VVa-zKR4p!8KlC`GKUGYcyJQW(0`s(?^GH!8A%Et2D~!{KsYcz znk^rILWLU6Pxw;_E)P9#JKl-ETPjhM#H+^5yU#eoSpB<)*DamgFOmzHD5 zB=pBqGx^)=1I2ch=-8*9e$LjmU;X_k7<<>>Ifj(C{Ad5e=DXkiZ^GRNt7pIX8$2s= zQQm_2^`y2P`n3CK^W^9MhGAi;0-knMaJ*#fZ%-r#yktbieJ8X{!IzV(Rt-IR;w<}r zLOTxGir2C2l^XVqX4!hnXeJAMWvo}AC;G`=gj=*u5b*ty0Yn8}mA4>pB6?tKTPawy zysb!7^(>-=9ddSCWz}R!Nr&a5?Mw8#X24d|=Ir&iA_$*ber!uAV6=%tD(IE8jUG{3 z4q%cwLnD66u!>VWi+((U2HcMnVQA-qHFud(c9HnQ2BlmLYa`s-&5qgfLqHm56K*gQ z#?jt{aC!JF_2H2+M$==F{XAZ`Bp^M4w+Oy0?*Pf^tpE8XZ*j-*sjcOnO1>b;$`U_w zddM!9cwg4Uu~W@n{vbKcv4Fz%c&xX{xscxHX()t)aFk~2+xmfwEa}{RA&d_K$Lh8Q z7Vpk%EtzDbKF^fGO7Bj2KQjpgN270SUyy~$Li_UhqxFfx?RjmN)>+@YhpiZzAS*Oo zec>6_jNUkmn^04nMO45HLfr_1vOy!`m@X9LCC3!n8TliW&8g8={EvDl#AIQEUdEc$ zoF~oNzEq_y5qi$DY0cQa(Bv%TjiBI^z-kqNk1sDlbsPIdC37cec)T<3p^4qG#Ki`O zTl?;vpBV-34s3P}n8Y0xI55Dux37U9VQ#b>f>YKG(lI_>6=>x?!1WSS$dFK8#ki<9 z^%<3&7>DiTB2nsHO6nA&QEcnypMC~*ndPDn|M9oKK|5uPU#^|Pw-MG~KmY7AjLFa} zTxfu=UcaD_jX$j6pk=+4Z{gX5lv9#kp#+h z00DA0Klx9X%5&QoXQ~bQT|!|>HS4%-J$WETt7SN6?f=hsif*s&)Yc<7+v@1J$kOAzCMs}sR#Lu!JlyO! 
zSY|C`8(+o_(T-W0er|bDW96G0#6Usi%@bJG*E5t_qG>5&vY%6QHQL-^5kEd#d=pA0 z`QQ)LDtX)3(Y9yNp;v~D7t7kkg|egjMLCqia7kXh-(0`{c71a}xhEfuan3FW>Fj%k zHy`}-y#!k0Z7HDaZ^~G6sCa(BWStycVhe^MItHYB@HIRimzW1*oy&7gTRIh2A?FuYXhd9K6nMPx41==U^QwzX^s2GO-FKHSWOM(5h)#fCqbhh@`Gp}g?XHaZ04BECkyK@+tqtx+q zYv{A~KJ*?nS!&{)7YUIqW7&>^MU0 zA5;DQ?84{oHv4seExHhx)XHCaArJ5o4fKC@PVV5p=^uq zOrylym)6MB7k5{VlD&JSJcIS^_NXU6jk^=r$Spjyv-<6?|J~-JXOirkYW9wIerhVF zM*7_@yT=%B&l$0d9ORTZvCf6p$S&eHpgp0-qNj0e`p7dmKq6@E^xBm_L%x7aMDXa* z=3JR_AdqJr;DRsVuL;k9P5w;Py5?=_7I?sQ4e3Jj+G~V~>cm@lI7x~aZlRY9M4uBU zlwcYDG0658&z}9Nm9-%(54+VjLsmXOH33|7Ey6V>guWGFd7?zJOWQR%F|4gD?Qt#?6xkR4 zPeC!uVBv#MfRO}b%vPd7;kMxHb0M+8g5NPdYz(ryKAZUUKHidctIpOIurq~at|^X3 z4|t3n7YnYJ?9Z^NP-B)xSCTwBtDBdd8l53_xVF;hZ%b1hujWk6vz zXC0NEC$uji(&*vhL!w-;j`FI_yk0odee2ef{N)`VW?ouXYFl55_CSKju26BFoyX^$ zkQ<(>5*Q(0SR;PtD8u#(R~@bO-4o>*TSnO>*v={IqB5N}^y4dQP#S6*W!uXh3Y$G} zT;ZdKk5&&bWlHb#^xcAUA}3>xkpY=e2AGYx#Jmn>NaXiD8f#4~$UNg z7!!_4vLpP6vaE4JlJKuS_+E!Ew5>(oNDXZe?Al8RG+%jiyLh4`#O8aw`xJi(-+?DV z)@gAgfUrXec8znp^-QB0Yad-?aFxtDbCq$Dm+Oa=^%D~M4UYi0u@+bf%}K-pO9QXM zV(_Kpmv%=AX$OyiDM}o#oWP^WMcJ+OpT2UI|8|$ zPtlrv%RzbNw<0)~FMqq*vpb_Df}s<0t$v^^~kIU~26175g$lx`trif}^_ z-H0Tt!MpxISK+FJG{_dfkPkBJ-yh}-E^)(nn`>oITWY+fG#~+t;AdnT9D}pyTswD`B)VivKB%|N zNrrH;zBTlmkz0L4e;FLjdCVJs&-yZ2S2=Wniy|GP6c29iOMbIQee_j0%z(k4v1^Ic z`{wafyB!rBJyquTf@0id@N>L)%g2s%c8n<=tkBh=Wx5CVt*J9W=U8y?B-h)qlax!g z0q%n?qw?XSlSAhSX?X}K>5iM8vo)f5JY3%=wiE6-nC8jzU&v=k_myxMJtmW*^Q;9> z4>IwDKSRSVrQ0T%R%ARQA_ykJ05{t||M>Z@V1;7LGKEwP#Hd*r0UAM$pz!d*z%fI@ zvX=dg@khG3-yai`_1;r@HK89EGKC(n0QIH`tnEI#-=iAph|*fQl(aMZVqI%XSQGxv z$!1mtMroC4o&DP0_NBGOa!#4nPC_BmCT3p%ga9B9RKe3T2{Y~X)3_>50M|YvKtgf0 zo&YQX8OgB)Y-32FR>5q=xWH6|T8gzD0;4DpPJG!b;I|W|P0Xv1EV#2K5s>fY84zV%Cyb*4leEYI3B?(Ii zYI4lmQFYD-rz$zAcV5Q+duI%10JQre4?H8S!L&8$Smed1W?ni#CWg`sS)U=qN@rkP z@Y1iGrj=3EPvP;FL_D^@!uzYAOSPu+fTOKKl-nN^O1m?{dBV0e`NpyZp7$s3WYkKs z%Ts{sTibCmm^<3Gd9KYV_~UyUSK7|AE9tG{E=LFfp!M#_zEA1F>NdEyc~vC{?GiXC zzB|b^@oU4*K(dxHie?Ns&o!!W+Oe_Q1i&I7;}Nsl{LLI@3>kMbXLyqkizaT3r)qy4 zo>8`M24*`7Qi|K^?vG*|D`1(Uu6sxWIxTE$4|GZ|kLwP;+rJ%kR6J)6Vle;Qc7O*n z#pcD?GKjVz6g#gZ7LuBLQpw@n>-SlLBvT+Pk_c+9GSqza}fW;oZOsIPh~;JKIzdFqqI!MBQAGiStieWBf=+ZJ8_P znXGvH-~Q<9mXN@4Q)W9E5!)oos)2roPNK!+Syks!{yAS66-8DrygQZB(C8&Y*W!_A zXTX6K-ftxHUBY?BOYfGz+o^*^LQ9Mr2dvpeAIbN<4rn2KJBNNtq-wiU$f800bxd>z z)aZg*rRdE8OVIUbj;@Vh4Yyy7Y5S8WkABsBx>pFrLrfB1qh z>ZH`U1oxu!y*tnPZ+^3=20(++-n0$W6hT6z)>Gzf+8^r-uo(zh*9Q=HDD!Z_xF$Gs z?6cHPPR{%*F@jf22+n!lI?V`)QtKmP$@5(hXdD4E0+%o+d@B~x+U|r^_3x@@npy9q zATk^%s0e0bLQ>EnYK#NpeRSU%y2)HBHZ7beQ>! 
zQ(rS#+G1G;Ys@-Fj)Hf#LLjsXmULuhtIUps@oEYzVL)(aIPh$;gA=}GiwExorPulM zSZHTBmNE6+!~abr*XBaAdwuT}xIhcb0wo5zhjX z$%pU~VR$1HaMcF$RD-z+Yrz+NB-HX&doIuWnj)C<%#}Xvbg1w?!a$CM5zRQ;s&dFm z&X9TG^tSiz&9a6*D=)e4e&?%aD11mp6B!66yQ55JKW(%V-Y`!BH|Fnfg$$DDCB|zz zTZTjiLmBiE?#%UADxQz27P=bznYRlHDSSZ95Juiz^PFWpR=2h~$?2RjuUcQ;>=^-c zEv%Vx&KR@4CaZQ|qTd52(mm(VSkn{w%dmk+_X!STAN&S)lQ0BBxS6kQIM>XasCS_F zMO(Ex0ygh{*2uju34+!V4bvF>5Wr@VJRsD~`y@-CySBhJlz6cDPNDheck0rWtTyAz z<0|223?}@yc)Z30zq8dazVdl_FCwdB$`H=r zE((1jUD2lI!b5NdU;p*JC`A#OekQjh0cf6Dr|jK4zEP?J8@O6iA6R=iB46HZ9+P=f zK6J2K7nCPuSq6C7-m}*E=wnmi9kdZI#${uu&Uk|<&oHRTDv_biY1 zhT+?mtZJ}tRJ9Yja7XkCzOE^=Y95Sto~)@NBs{D|n{o~=@z6ECyS4}X63?~0EWuQi z>Y}p~&|W3QuPd}5Ko2U*D}BeDuqq-5G~ZwUfk3u{FQr zT*OzDaf+}L;FkLDoDtEjm*4%fT}&UXzWnOXCOWt*#h_G-(|HZS7GEs7i;N>2+wl__ z%(7MnuW28>fxkMGDH+-Bk(BVGCn@aCwAZ&92D1$h-yPq3BD?NvNsm@yu~ zLxbNa3=aC3wraAYxt|>2<`|UCiu;SM4cFVxpFIAzC~PikB{XOYiI-L}1S8#Rerr%C zG5dtgA{t8C@@qe?B0>Za|0J9OCWT&Symn3vbwaQW9Fc(7VJq#kzRUtF<6ABkVP*Zn zx?-?YY@~4c#hIi%Zt(?8boD_QIUFosS|1H&#*YyoqdzGWeKZ+Ovl5RYS^+ljYkWak zXp+ZoTrL4Ti)z;F2+sOXsMr4kTkEpcwCh0%08_Po{{C9H6w&3e9b_5$IdY=c#VJ@R zQ38C%oj6&DfEQ^33%{mt#*m;8^VoOwhfu8FfCuSe}C#5>YX=d%Wk8Zu(i5gPTmUlC42V9jb*vxJXSFO-VdppjY7 zbh$W|q9+KeFbnGX0gpA4Jo~A~F3d0)_z4JYp0n7`PTsCBSmUQoRLdwU45z4y>~QN% z`iVL2KX`!eCF}sIu-i4}mqA0A+^#R%381y;XSSc2`Iu~$A6U}I_Nv~qNB%L3H)HGM z&5w+BI|JY}BhK`YXM`Kr%QVM#ga*NoQZKUMvry^!>fEtGDfO&jPq!vrZb`>qCcI<# zqDn*W6Cz779&yU#p@8?ok*^sE8c z<_Q^Q`HcsQx!bdD{I;I4Nz4Udx~t;@b^nj=KX97joHA#O!tTRVAWq2nY^6jv*f06tksHN zAN-MK)C1o6h(LHcb2MAff}P>Eb>LbG)I4vtA4`mR46WgXmkoEfyH5ooB$nrmdh5t=>}WxEw^epT8!{8YpHfH5JpHM}7#^Z~z_zqDiqtNR!s;O`rQY$Qw;8KG zSALj=lFQ*)ZC+j8$UL`ZZ2fchP{w!$qo|;@cCsSfLD7>X*NQxqggSK1Il=_y$4_JO z2dk3{;f)b#N^>%cD(11fjTdH=&M1uT@$AU>B07n!Td*eEGV9vu5$j$u zPT#!v7r5M3Q0BYUtM9%Z-n3@k`7CVBrQ%3ORu28 zk=+Bw^ukb~xV)b%P!#-Jdn6 zV31(K;7GxpTcdtA>(5#CqaZQ*QuY-LEa8;%k+4{W8M4$1NbJEud)fDSwcovd1@{>o zI%V-VZWQzIj1z{Q8_$kWc5Lk}Bjc^YH+f{)X)(ZJ;4o#(*F_=Z3NzA@jT#YLVCf$%PQ9*{syYHj2%3) zj>t#~hY&XO9G)VG1k!k~WnzPC6kbZwb4|%&6p1jXQ6K$b4s*mHF51x?Cu2t0*KeLT z8m@Dr8R`UAuZ6QJZ4F?3ix^YvjVInSo)Y7x53~XSI?sCb3tqM;$#5PHhg9kVy%c?# zb$wa&H@wMnr*s`X6&`O~6iVKA;r)!35+f($+&lrG!TDu}MBfLm-N&q{sf{dy_|zP2l1+0rP?tVY2`#`MawcL$zt+g{%F@yERK z>KBN@fvsVuIGUPUUUZQMd4c**@fAJbe1*&q5KWP1H?~<#-33atWu)lQUezV?RPBSA zCk6y#;8JC{_Fm`l-?kU5C$=+**ZAL(}e%bO3g}(5XKErvkW}Ol0tF|*H zDxci$j)?-B*Gm5;GuwlHtJcE(r@xp|ty&n_)C}f;)z;?2IDIq6#%HOg<$-q&);S>? 
zs!;b2zy3Qi`Z1JQLS?w@^O_5P?+eiv0fdNU4&Y{t(P+^8mV>mX#A|Eo`?BaW%&WUm zL8x{U6`IdO-=*B^JNk<(>gRx$UJgRJS>yEwbM&XD9*j18Sv;B=ccTez?z))}uEkMYG*_YK)+Y`r#XPL~IzA9QBb7N44Lm09o{pD)V4E;`JuKG)Twex~;PIjGXF^ujeo~em1XoPAjY;#I8n#zP z;Z#(hL7^m$CSoe9>J{rf;PBEsCj>k7x$Z~tY}yY zBg@YC`mAlZE5Q*y40gfebOeIgj-nA>F?#^6v|nNimeZ}i!fBq6wzR{ex5mG$J+TAY z4(4PdncRh{-@SjouC}>=KJPuFAt%*lcd8q|V}QKPi}gFqc4Doo3QzZVwS}uL&)=GFJk&?CTu}X3Y@L3CG)hC3` zRi>(%lmjEyLh(FO0t=>E!fDTYLiMM$^@&i0_L#_1DQo!RWp@U=@H!kgSAIg084hz$ z2tzFee2;hjz~LP!`_`h16r7x1glXu6Ns)}k*L+WiQxqrfS-2XW79++fgmw@_!-~_E zCB7q+m1hH=H9Kpb%{qo}mU-)CKErS0YSyU-^Eg!*6W}R%;n*>6%gFZ3lB8zi zU*y4Z8^OWgZtZ;8M&+DuLaz*DPiUjQfH?w&v3GL{fkib4*A%34Vz+pX+(sTGC2IYA zmX3sC>whsqqd#lHl9-nFjNQBOxUEwV1DIe4|A>x}ujjn8C4vO(a0LYY^$?sUOg;VK zw{{0c--qWB9%VNQHX5ero7BaVEHmGC^VvG@>5t#99ypLFgD%?EWS`-tckpA~)L8fW zF1nE+Hs7NkMqY}u_3EOMjki!-nf7Dg6VQ446?1aWxMFI+)c)Yy#|%t=N_d>)O*Cz7 z8u|I{dQOjhG~P8|A{uk8$WuFmWKLHVaf!eHpOVPKD`75UH~O05T+O35E5=hqsAZ!R z`pgfd-5U0_lFfIFuK4Zp^3plxDV{*RC9lyuIWfy@jN3f@q*>dJOX@s-|8jlv-ho05 z3#=Im6Tu0W@IT3rWJLJ}CA_)9J}_>$yvARaTeZ#L@cT+)TYK1dDc4!=Z&rpb(1dmi z=TBiJ{Csix4*l(~J3Qyri+^67zCTf<>hADf3Q2lU>FdOgb?!LRU>_R<~_ zAPco8N|5j``F9*JG+^Fc!i$CHKPc38pdfOx*tO(k523d?xw2dUv=)tyu^FF|yELWe z(2q}?L|^sQoWjwZEt1Ug&e*y$zYv&FiFXznSe(-g^c|MGedIbtlmr%Cs^Mq?AQP&M zwDQvM>nKixgh7AduW)c1-?NPR-*pvh$iD_rg99pX+uaG=Ue$Q_VbOClFdN#0EZlb@ z>FSWcdgk2q6KAw$<&^UO?wxHT6u}s{#vY?L;w4lz-3)w_0g377k(5;o(bhJ<=1>pT z6j7kGv8=|{*8>`4Mm@;NQ=fGtTVxjG#VEo+XVzLhb71?{nG=jIB<)S22VrfbCduX! zg?aY8v?s}|@a9GWREW?+3MaWV47wZ%+&Z(~V@2FB+l~CLkO38EoM87N0Y7{7+Y~Wd zLkui0{TeeDahRMu#Fn7~3yB*-7HlG(8CiKk3kU-D2{_v;QerJD=zqo@qf_bAZ9=Z|9D7Y=+I;xcD9Gjp2+yVZB0N+#^AfMj<>3^XRUX0 z`LYQd+|Ooi*dYykDN4Z*DkeTZKI} zivl=C407n1!$$V>vvGELd>dyQXKq6{kB7Im(2x1Cm2C6&^_$hY6xygO1t22{qXhgR?j|3ku#%&;oHi1B~R-@2K<3-Z(}Gz{}OqO z50=dJ`W+`W4{;S%H=(N|-?Hz2uc2SLeCFZa$JJR0vt> zxmdNv^j-DLsh`i_Z|4`@96iwKeUX3THu})E+Wz?IlV8PrA%F2)dcg$%_8a-llPPp> z4?`9H3Yfg^o=8|P3=Gz+63^8_fEMrPg7~)l%qP&ct~E-{GyQG^dFR#mzVGbyE3{I! 
zVd0#YFNIv&qJsq5@-XWv-W$T!Y>?JrFdD^NiSe_3qNK5g0xOYLU+%T0+eAaj1v7+ple4$?iCOz1l#<^9gH_P z&T9sOfBAX+POz4=)M<9X5+0^qV+xmHyCkxb1urNds&wX|?R$l0OS+mRpU~*t*>^hQ zuPqS=Z7=@@Nh3VMd9$}1d}0g}Mk}WwMu%epEGk)yGcY2`f!6(qG#utZmYhdNfeCZ4 zs(Y_w#ghJdzq-dIpcHy-oqhlCkYZ~tB^DA869$((ex~ejg5;qh3y0R=cFPFW?!M%& zM@Prb7~t*4)bJ4`&GSwI3WHDR(D#xhCxg~A=d+#7vv{XE>V2pHMeA89u@q!@HB{d* zEQO1)hX*324AKb~o38+w^+tG~sf4*D^B&%LoSgub6@MvV=z_<8)iy^Bk0Ke`j^#$)- zLe%xi3n9=~ixMKFTmQ{#x+%My;P0S_S@WJSynV2|sB7L+m8?7n#sp261$)fvZxlU( z!hFxxyje5PlcUt}PloL+gQyzYZDA@LAL#KxO0)SbGIASjzk&U> zh(<}1M2BYp0H%~V&$@O0l$?QQ%%8n|vAWMgkB29EfR@Lz21DcT+LANmdV?96;Fmt* zrQ}rM{TbVM=pXW-nP zTo?2_wEkYDMcXoMMXlz037R`X?rlN4DJo{kqGhbE=KyXW$+Cz4lx62hOwsXkpGb10caFy{3 zVaX4_N5;lGcx8$mCBMd!^Gc76JnLiLc1uOUU35z4XudW&8@{Az+FcvVtw~fyBeQ@G zhWVra?N6UR{Z+38*f4QYG+=7aAHTFvvjIR6x{{yU=Q+%vL3PVJ37f9_vJh+FbSZ*AYqv2T=-PCgi~vqkRM|L zHHA4_n<(+{mXfQwT#0?15?A;&xaVOb8E+sMv*5g&6(VKCY{2%(_Rkx02 z)@Hkk0bmHmj6Fhz?+JK801nfI2FWb9RXqmgD!wcLU` zZH(b%Og;=QAO+3A4W&0^Daw_Xd{+(fo*~F#4E?9jNp>N}#!J?^@bhVaFd2VkdP(A+WlaYzB z%(9yVZA5!TVDyu)T@Lc(8ZZij-q8a_9)Zo$clOUId^u=^mRD8(MmnvdDaoKaTP>HP~#ZJQOIj#iXKg#rRvghgC|;)V-*z?Y?3+hNr( z-+%OUwZrq>xo7>q=u|MEn=6Vw9xXx@jVCjc*LkPWceEFsn&dHl%T=n+W^9xhyk4Ny z8%MAn-{WkJkpZ9SgZ{yDC%7FysxJGn4EK&w77;+J(b7`4qiwskqa8oACjZ&$!J|(M zySZ7$J#z$xqHl*BjXnInM3AIeG?Pcphx~P*K*hCMMd$}yh z&PAbzmENS|3PEO!GzZ4q{cQ$^2Ei4*M1bz~`=|cWD;_AfWBb!*6#59U5qSt*NB)J# zAPa%LyjU1nxb6ishN#JuLV~A(O|SJlFPbQbiJXnzGa)*r^i;oV)1{KP3Vnp2!s;bM z<=t-ELwxG^&%&I?&ftFWim=Q&^IXcjJ{Ck72*eCi2aB-;f8RrLmb5R+H66BvZD1O> zJb}0~W=3FmP-U%ni#3?1iRn||-bf-;8-3QjF<@YpzNAjkYbq8Hh` zdjG!lB=-jK5otmwJQ5l;?cM+xae!qKfI^#o+bXpbD+o?+PzaZVXaZ+Z#2?0KjKS8} zBZ@%EYfo6^g(SdI3QfbTEqZ_PHjK{oAudpav!JN}N&;Mzb%gX3Fd9ZT_uie7v@p|p za_k6F4@Qg;y8D#YzND%7%<>*wQ5=xO+ja=hyLT8C;Zguu%eB+qeCeUq#$@UeM7soc zYuv5>{`l&}>eYK*rbM(DI^u(>iH3}UC(4;%T@LaQ8Myyw_0bc;1pF^w{GgANL%3|- zunrt{j(cIkv$s513aOvLiSf0kJzDcHp-6ccVfdJ3oW8N1i*mH?eL*l@6vAh`-L~Zb z9N|0OSPZW`vevT+W%wP>EF8l}0_Muu0Ncu7-rHU1HKQUUH)F5Lad*7opSF3z(9m!^ zR5ozy-CKnH`MYn7y_^DyxVddscq6-#b*6MI!z0)W-%gTJ-hgg0tdz|J!z<6VlLCb1 zV=|vJCVR;LMbvJ2S;GtZPA*)*VI?$E{w4P1rI&0M-w^^fFtDTubl{AI&o!i4XA?Z2N4Uc-?^Sy0G63@UhJPa@KcoDq3`a=I1gM~-K zaZxgav9y)c4>mN{7SH;bS^>d0Fu+4sfd`y{zpQ<+X>_OH5t=D?VdX%R%8QP?61DtE zdEa(cxW0p746KxTN*P_q3z_+vdKqL_eczSgPR3ybb7hRdn}Tuv_m)B@OBKnUG)DMN z4z{nl==wF~zhy_s_VJ_f81F=AJaA%A?11HVh z_C$3Dy`JMZlt;qY+;g`2mI!P#UWK?_r$at?{PD=e49~6?Eh=(XG^`}KYFhQ~lI0+mvaMsp@- zSB6)knGDgEM&iMRhPzSo+W+{medq=?R%JvMnn5LDEdCK@3A6;2YZ2dsh%~am zm{O(ieVJ!dSe|JD79+uIn}AspLB0Ks=v#N3%-fg%fRmhsGVUKc`352{5pU;9zk74B zdQA~z2qYY1$dqiJP!LRr8vN6Kz=1OS_p#D%F0IpjPt*vDlq4sKGF|!&UnGPos_z1C4;A6mmYx)NJU<-)8cL2t$ z@g?8@K4E+Jn~Cunn!jixcp7aT_~~hdkJ%S$NFd?;u7Twb+wf*3;U`h z6@uZC0vHe3xJU6(40&tEwxVR2zk2Z4P?D@ru*Q`M!|i?)T>vo;H`uy#U@fbgj7|nBIGmc_IW5X=n|LA{TNm7Gd0mu z=n3mQN$p0aDTS1Ihhk)KUEBIrCBYP-fx}MtSZHF5vuKv^ZyhrH&u&21QiS0kFL={> zay7f-C$!i4=Nws#22ys>(%`?gTDN6zY^w>LXMgd!`siR7<;vjAa7Z~31bYfvR98RT zR&Mn2{Ck4=2f|+HJDN3*m+!uJolG3)GjYR(>q|DqQu*6`kzv{`n9 zUu#(A)Dj0}ccV&K(TJWYGQ;Jzwq=iY-bb<^{sPa9_2p=xU4s+VX^D(ewL>;{3~)Oh z%KNw!VLDe4F(rBEn9_%zeJPEDlCF`Nr-gQR<-%m-k5TSc9)I}CC@wK`i*k=vJ13#2 zX!=C)akQ6lgw81B=DQ@DxI21UdAS(JEq^JV5co<)CPY#Z%V zVoY&NE49&_H_zdqXsl0oVd15EN`BP?zL;eKG)6|v6c|S`xnp7Dmx)Zk=NRniz2+Ev zQGYbthn)nCjKjH9N2BNf%MKbLKa2hdt%;72d$fO}^fcaJvYxpKZnLg+lrc~XOV;t? 
z>8Q%i<1)ONf!b>wE(;LS{b^9noEX64&$}0At1mx3bRPFT3h~OiIwAHQm=KLd=G|bO z_eKFPpBH5e7VCtt0X}0wB=89TkV{~-CK1sudpv<^3<_hyxp9v z-oM)sIv1sYlWPfbQ*@(t5zr(K68_>a(@%_dcwfC|-tI|D6a>!2E}?C=Kq0v1rYzyk zRPGR%-ICBCdZIlIH%|#o6-g@hA{#&ZStb@)iOfUR|1s(X6}M-^jm@@p>DJ# zFl7i_o->Fv7JZf+cF0ILyDB7XZg=%vVwZ`X2ulJA&F-p-C8rjQc2cPE{ZOvSh8Ndz zLAWP-wFRF9z_$JBHw?*ZS->e1jlmz<-nx%2TQ3zRcYmT=<}#ir60*6)H+Vn3&qGQ9 zwiK1p&ak;2E+jW>oS=Lk^0#|WSl5~9vo z9Ga@o#W)lgBad#+_MyV`m+!vTm$8qrYjACu%IGm$i-(uON?0u+?O6gu3wgI0xhvb^ zHtVX{dAB)4zrOg~(vCKQtJBlW-&XVkKa24eHI%Or&U(f?*Nu5>n~ldkmmmjkl08{5 zvZyEXp1b6^NyI~oV^EPnWXH;yezl;G109f`%^3jXHj*)P?RxH(;g&-*N~3v{+?br$FtQ6LP6a>w zFp7|&nv|HMS8J1kZLNI$DzBT8*~16fYvg)s;CZSkVq7X1DF%UWYrXXiPe zi12Xj9v%_+Vt6-uI`%UN>osHp+DH{}pqHcTu zOh{%fAYcYIYEC;4LyhxSna?w4Pz{^9rk!udd0aOK^Pa@BX< z%XpQ6G=L3o8l#x>H)%HDwNxYo6a|M&9fRq1Ec0B>h+E)|VLBc&HtvW#SwYhQ_WiX3 zTqlryK6gB%Iw}a`@$`bLF~>X!g3q7qKc8(*VTG%NVxo@V@jIJUgyEKvcqZkQF2b4( zLL0t-1@L1uqCrZ&VHRbNqV{E3^X|PsuCEwMLpfa0qP34Hfd-g@s7Q&Y5eI;^X$<$# zZP`|p0{6LPYI7G8Zb#Wp7~F5d5FwxZZAm_+;VQ(!!jtaK3HBnJ25ugOk;>KmU;tYgrs)9?jq;TQ_O? zTjz%k22W#$XXAmYQ7ZA*4FxkxD0!|lx2Z#F@YIq_q zJ|#RJcM1V2FY#Kkm}8XFm|NMrbz2`#4ML=j8fdb?6L%{tl!iUGZEd70wLNR!$tslIW`Ra*@>bp3b#zmrUEWJWpj{-;Q9Go#+@h8M4d-2M zv2MxAt&e@MT4V~CO0{`Qb8@ zt-JEZ761T107*naR3Mo>=+l{4VPwnt{$zd;!(`15gEgOI)^0X7k>Qk>>_%$_7w$KV zdQt!SQUuJwYF>WBXtb4c0Yg(;_0t^mH3M@k1GTxdg241r4s1)$(r-ECt>4YyyJ4#S z)xfl4Uz=ni`LDhtl`&HU zADcos_ggA@$&lI#m6Ng0$>s-pCVU$=C-2(Im*cl;N_JY=WSr)|JDXe?x^akHVF|{@ zUSO~SpR+xES5paxEts2 zQ&iT|yz8@TWtS&&)z_X?bFto1D2qwQ$%do~J1gjGbxFb9*qCUDf2P)pqm&)pdi3DI z?=+(m|ED}s{!>B^g8oeu>~;pu%^83XQG6J9@&0}HY@kkBFa}9E=`7?sYG-Lj+aY5j z$a(%&zzESSf%0@IqIta*7nvfWekWVazz&*eRTJL@;_k0&0ky`rb1!5k|=H6+Ko+6kJQCBgN4e zFj(o{j7$WcBBs1Su$2i3XBopu(bS@6fl(v60)sbe)Q}d%<7P!EJ(K6Sq>7UO&tiln z?YrQ9No9s@{kk-tykrP{=|A+SKSG)m8E2YhjA!HQG0)CrU-DqJeYylVrS zan8WE$6Ow6fg2rD;H`h}2;?f?n~s@k^7ud-%UXI0%Rb-d703;|D8!(*6JPkVMi(5$ z2Fmzwhoaj(l)_X*mp=&*v^9dFt|IlEC(%+_mz=QE56}FysgCIFTrENJ6pcE6zA2JZ z(-G&Rlop*5R}`9s=Pe%K$WVMnA`764hZTc6JCDc;GmM+gm5NdCZB7 zV!|vn+NF7-(agW6_}> zN4+xU!8D3IrJAAD&CIzb2SZ1H-Akgv#~rC6Y@{c5pA{=u;mzB*z4oU*2pa#yue#$@4-`c zl)M&o&lqcgBl`5KZznT%{^2L^y&J>+>D8a%8*J#=*pdfp=h3=sjK0~W6Dv&Omd?aT zj=DEC4_s|@m7cwO*Ze%aT~=dg&0%UQ+Ujvqll`RU_GK+PR4YEhSVPwwm1HXh94Ay? 
zqix~2`Tin#jlmfx@I!j&UwukOVTMe>tsj;rImuEr3QM!xf$F(YUo&f6W1%GA}oI!{lQ#w zwO3{!k2BQj$eTG~zG$~gL)&-&Q&!zxFfvzp;~RUUo31DY*Yc6hPCplg-m#?Z7vKGA_4LUXre&;2rO-Mh z>EXls4*QYr7kPfNnZ+KZQRMLM)lcLYZSX-6$F(+6bkQf{e%=W{(Ex9>EUR?s&^_Vp znQ#KWj9UB|UXy|7&=kx>YaGAN(;GQLDWK#bI!(ng9F=-6YEPN>Gjufe#y`=UHnzZg zc<1rzu6lsG55BNFp6w+a#Pj%$DI5{YaRwCpod$T!ur?=Ha~0oB#0J)O{3?nrE720Q zUDd`@8D3POVD8}2cfM1ea1c3nDdqipUs24hPRKnBPW)b4pAql3l>H@V>fCatXj@%J zwG%1KWb0H`svYr~8{Svo+UTlw%N+t0njLB?QpHzVq290hppvCiq`PZjktJ0>2>Z;7jC0@#5UG`dMRN-pjk2+rzZ{Bdi0sLj?g9Y)W?FDdP zY}s4z{GkZ7fcg9)5oLZvyCzaap~ySQF`&9{yjche|sl>YdhI}`tSI~533)4|6f=C z^oQphmS!b*J=itP4vfLM^jib^({_%##(YlE z_Z+bRSJ8O=1lwSqQm7I;BUInDJ8krr@?1Rf-qnu&Q=Bp96b)gb8XHxVR?yaX<|3?m ziTL!P>*2Z08iQXa133PsMs|COAb9 z`e-}S=BaU-piS)DKjtZ3*$q%mZVCgJ*)k8Vl6P%XzI zrB3jDAQ$V^L+3Z&mEH^=8HbK|JW!>(OWC}6@fXY090nnw{btc*1W}8E`s# zyW^kuv#RLYz4PSPt2<9A|mhxTU(K0ht zmvg5XHIBa%2(Qr~hqbL%X?|y|-;uH5TyaDyk_)u5r#aY{He3l$I@V8-BDpPvWHdPr zWlj#i`qk>_yWc2H_;Pjdg(F_^V7B7mZc8`Om7HfB``J@9zhmdMZK?ROJG0`U=`Qr0 z0#AB4V9TqH_kf`Szmv*0mRjpWCCNdXT^&S_3!2gV&Na`1rpK8W)LYEYk-?gQ0nXPjU@<-c?Cu7;ZNujGn+2+Jd zbdK1qfzP2W;-A7i{IG1y(>Bl*{Be}R7#)}+9qxu7T%2<-!Y^J~vPSWY8}VXv7@orW z#+0r~R-;e6^6=3^Q*4&){_)xBfB8TDPqitBtDpYx|E|9K_1_IH#}SlVa%JNEWX+<2Iic)=tQh zx;YpkF(b4~bzlQ-Fxu019(C@+9%FuAaam{NJ}?-(yL$HXt7R|=O;*5jZD7)tgiE?~ z0MIVMAoK}fhAXdr%oJs@Y_5_mCx9_B&wtwTT#C46vuzpAsi4jXWA6Fxo#qu&8sku4 zp8kTS7v`DSR0lktjdIWSbiW2Ua1@P=arQ0DV*R8vg0lZ7S0~*Dl&^4_`(9 zpWmsJ=gj11mkKQsctjWvt@TGA8T$?{nspLI>;@!WqJnppOAUJ4Kn^M>9cix`N z8QGS{mFgXuP>9tAbTV5`VTCT?rU-Uu9lVjIi)0~1*XNu+9k`M6V?6O1{I|1Khtgzd zuPBvKknvVDpsn+m@3%B z_1>jm72KuNI+AkNvcQV;cAb^oDvWdUlP!s>sIl=C*|p=^t^&XIyg$cRnYx_%3eILD znH(5}aZ^hG&Phq9Z}him&b+bPr+lmVSq@sbzdC(0G97)!Aw;u@PDMNwxyF|hjSnw= zHu$0@5mwSwq#Alpv%9>a=G79DPL}%$J4ibl5VZn*&NEJsn8Bbad~vp(Cp_A=rl*ChUKnb zcly|Eu#+yloTkNsD<>ybVM;ZPlgP3d zb@j!!-!K=}Ym@tEdfL<)T9rBZq-N(tHg3zfU%z^37t(hVJnq|Gx-GezE_(C!)vR0^ z-Rm7Gu#t;oa~!Z_4&95+<6i?qBcI`Tc#8bCRI{I)k@}u2FC0%jN_A$3!Rpz6zqTp}7|FwQNYxGxkBU`-n z=>DDm7~}MtDIPUKpDVc1azE8xBY@6qRq6Y1noheNL+T@q>TU1Gh+bTj#26Ju&wSUk z)pHrIQWu%AKKu38_a?ZClC?iQ2M8c@1d2xPA3>IVZrk@gVfn*9|D-Ut=j$yTFd#V- z?q~g)|Hvy<@MAyReK5xG5IA8qVvN$n12MMeil(RF4<2}5YpMAe$9PLATtx|nPgmh6i6^6mx{jF)#sdJGYeczv2;2JX6#D`rm|x;P#oa9un6Ks?E6cGi_5; zpMmu51=JMVM5#o-5qC;j`$c=#JG^dCX?6^^2YXwT(V5+SmTAyVb4sX*Z=w^s4Wm@? zC|OE~bu8*X%9?|_=)C#CJ4bu#Q<2YK-#B?FM5G+xq0{GPgPh>e&&(ss0JPTh`1ly! zE}YUvkQwa?8FSW}mu_E!FTVTb>YHB@29fsjzx$b2wvwTX#vOdKRJNWa)lA@DSs$A4HbKr^xZ#W`t@^PvVD(4`0odyW! 
zgquJ;dik!TORV4-lUBf&yOZ^TaRx=2Z?x?gygf=8nb z{z?9^O-2b%<+zn{?Rd}}ADblStS1p=H5l|Qbp_*ilNB-#?v(AKA%K|kqDwd!Is~is zQ^?Y&%Th4Lnj=zSYxSo?;i=7)wet&Zk4daZzf+QMg39?g5xeH)v2#k=wB*?Z0s zqqlL0$et;l-GBXkvO0Y9R2rN^k={qk4xhqbsVvGV`bACDpo4a8$-K)*UzNdera~IJ zd*;QhF12i}IeV{V%-i-Dw`qs2fYkJfigv&9QDWk60+L?2|we*DuPoX&R0Ufg5QUz@)A+GaHNx+rLa=O2IgasAWt z7wa8#N>&ocl;hMxfv4am8X-Qq3b|EcIFVBHcRAG-`CN;HRRxaRSrV z6=IeFnZ}1FG&+5A5<+m893D%p-?Qn>C%cpW*Z=!Jt)3Uia*!W=@y+V)eM|4qcXrB@ z={QHzZK=9xb(9I0XWbG5>)7%_76n#<0rQWtrg&3q4S1rzybm+CW2r*#AfB9uXv{JQ z$INbw*O?$C%oGO0XkP#ff_y24nooUtRv(0+Nr|FfglvxR<0lMwuzmBwUch@4@b|xX zdh^$xUyovM@MZHdwxupmgN^1rw!=>Qmxt6~#z3C(WP*Z7HddxyU1~}oBt|Iz&4%lX zVX5uLTRPC#r!D4x;lCn!0u{+yvX?v5W(Un_HIEDf^r)0q``8-C2(Nbf=QbF83Y@Il zGWasFvM})E2V=M$j^X0g5TdrXi#$B%-5ix=dtImmM|WeU6k#yeh9~`3Bm)lZC-jsG zQ=dWdz6Q#W#DgJ#XlD8|oQ^hLq8vAkK&Kwsvc7Oq_U0m6#kZf*{8$TV4?zq{GLxo>XRUhuqbOSA~_na=`q`!-9}IdGt* zCmE!cf}EUQt==g1X(hrQC6$33o)bY*^nt;UOBGYl&6iz%x>y|%ev)R|MszAtKQa%0 za9{N2-NVDXH!ZPy|LiY3jf`Z%9X}+@cwQeVmEirHd0KoYdfj7XJw`5o7(W!rmLePn z6E;R}asu#1XKZ^zB{#06m^wvp8xnHb<|hx|=b~~nzB$v-1s+uKezVJHom}ivX`+S9 z6BI{hDkNpt*jI;Nf6pM7MzuG74e;QvEUznLka9Vc4!!&M7k@nkUjG?9pSr4z_K~=> ziO}1d%|3;;ch^a?O54=Hw3YA{`Mz)lcuqpq#o43M@jJV))S6`cl=Gk=*;7U{o@`%o z%GCMgcvIBAa#n-sf~L+AlaUK@o|D&VfTpCXJ4udYTG-l=`x)aE1)fQ4Dco|JMD$Jp zkRMkHU|zoBlvy&CT3L3|OBYN~f#>O!WWH3nzS1-4H$28J*RJo)-QtBCygWL<7?uri z3J^kcGMKz;lQvT2FLTbw_D(AfW(~623fZpJkaSGJ<{rI?Z?1RA9dI|Mr5wtk-r&>C z)fBji*YAmP%^|N(-eOm~s)=x0hrdnN4POg<=DdVk{nPw-t9Q1_g0*)+D<4GNMb6t5 zD2iA?YWhPY9;~K5w1dv66l1iws~n6CVQ#j`Rx;7j!({&2F{$S{T)th-Ui!vHFXc#R z()%`x&J1PLpz}gu?6%_8or8x{1JLK8h}d%C7Dz8BNO$hxqdD>L;?vE^dzJc76;5jS z%=EP-UVB@YWeb>RViI>pMX)*V9&6xoc%uZ zh%Y!J7`Y+%YkXwz(U)Hlqr0mwerc*hdxuYcLw>WZ_>E&p``~HQddAYW(@xFM@Sp)X z3_7D+bkIYsNghKSF)`5MQ6iuLrM&tXd*N@F5YiK@_9l+;12Tom@&G=7 z2tr;Wfyo$^<}Q^|%UhUyHnjjp?9QIs-}-X(>f^=g%lq~NSMYQKruO>IP)?Ym&4`c^ zEpnJbE$zlMi^q^5&q=6lf{A(aoEq2kk0?eqpWW~uB}2RQddq1_4Hk{WiV1wF^x1Gg z`aOj~>-V;2QtlMD=UNg~3l-yq+i=qw`(x3U@Qb-ti zoSk5T=f&|MxXo3fOgr!8J&Y|fu5g7H`{j?6D#UHNTG zPD1lh6>H90!vD!U+L2UX^XsMXT9t70;Qs1Yzx`iVfAeque#ZXnFaNxH`RvcDAO86J zF(`9@h%xLk67l!vY*HDXG6dnXowtTJ;yXBKEFGa(fnJ7nitgY(B;_bI=)Che*d&F3 zx5^Y{D01#Z;A{-p7!ByXj^y2or?`*W=-6EO3`&w|AX_b^i9dihJlovh7R8(6(z(!= zBA6=~gWY|G{OiA!GJR?%uV1frEE@{v0Tp)^g>*dcvRVWGnyM(3tJ9H@3>OZVG?4jeU51-dFoE z4tczgUWoS^gRxNH%l4|K5|Z=uDt>Oyhm9$#39yZG8*Y348r+2=?d0Z=9b|%QI17%O zgRdbZye>tA0bF{ybZ);H#*qs_Jm)f>tPJWIx>z>{v8{V6hN zO*_S>ivO4iV0qc1g8zO)m8OHRMk-!0VG6pwBmh?j=v!7pm{Z!GSqBvu$0JW)} z`g-%G>9#Ao$US24WDGe-M|afGG-V+@AGLq{;jc5t-fnPn?g1VvjlaDsWqyHv4DJkaX-~Ezr+#c+_y|kmw6KSQGQ-kl zwkX>IOXyd2C^;Lu%Mou)O9owobk!G%WLrX47O-hT|2PQIS5C}3_yr@5eP}+XYqf_9 zkrMHJHiSv6d^RA#5F{yrUZe_2IlyJg&LI7tr=bA@?NLa;wKQ@Oxo8i4rh(W+BkK!N z!^xiKGC~oLAe6#E;B?wB)V1BQRE%3nZuew+fWLb4e!ZtUrn#$G=LuO2u+z#md9y{a z6yE5B(ojx7%7`12Y)TR7q{qsvEIT6x*I1p}8SMBDjQGj*QiZw(9xp9}rJsZto(UpE z8DgGnk!vl?+?56yZ(x;1p$epOciYysIZvC2oVE-kbfmz8?S`fEgD{5QXdF8UR#_9* zwUZ$Gcgfk&iud3)M(e7D(K#g)2vWjduV~XlwG@IgKn9W~ftRg4wBF(9Cy++7K|}2g z0EtF3Ei6t3=;DgXQEWBTHiTRM1~o*M4ZXsK5K){`b|dfAb%B?f2Ie={$UN z|K`ni->zP(Dt-G?>$JlqDX`^ebJRud*?YjbH-H4i>GpPVql#lxe84!1Udx7*4V-kbzFD#iHy`PQE}gW! 
literal 0
HcmV?d00001

diff --git a/swix_ios_app/swix_ios_app/swix/ScalarArithmetic-bleed.swift b/swix_ios_app/swix_ios_app/swix/ScalarArithmetic-bleed.swift
new file mode 100755
index 0000000..6b49f2c
--- /dev/null
+++ b/swix_ios_app/swix_ios_app/swix/ScalarArithmetic-bleed.swift
@@ -0,0 +1,205 @@
+
+// from https://github.com/seivan/ScalarArithmetic/
+// bleeding edge as of 2014-11-8 (commit from Aug. 15th)
+// commented out because of compile errors; not critical to this release.
+
+import Darwin
+import CoreGraphics
+
+
+protocol FloatingPointMathType {
+//    var acos:Self {get}
+//    var asin:Self {get}
+//    var atan:Self {get}
+//    func atan2(x:Self) -> Self
+//    var cos:Self {get}
+//    var sin:Self {get}
+//    var tan:Self {get}
+//    var exp:Self {get}
+//    var exp2:Self {get}
+//    var log:Self {get}
+//    var log10:Self {get}
+//    var log2:Self {get}
+//    func pow(exponent:Self) -> Self
+//    var sqrt:Self {get}
+}
+
+
+extension Double : FloatingPointMathType {
+//    var abs:Double { return Double.abs(self) }
+//    var acos:Double { return Darwin.acos(self) }
+//    var asin:Double { return Darwin.asin(self) }
+//    var atan:Double { return Darwin.atan(self) }
+//    func atan2(x:Double) -> Double { return Darwin.atan2(self,x) }
+//    var cos:Double { return Darwin.cos(self) }
+//    var sin:Double { return Darwin.sin(self) }
+//    var tan:Double { return Darwin.tan(self) }
+//    var exp:Double { return Darwin.exp(self) }
+//    var exp2:Double { return Darwin.exp2(self) }
+//    var log:Double { return Darwin.log(self) }
+//    var log10:Double{ return Darwin.log10(self) }
+//    var log2:Double { return Darwin.log2(self) }
+//    func pow(exponent:Double)-> Double { return Darwin.pow(self, exponent) }
+//    var sqrt:Double { return Darwin.sqrt(self) }
+    func __conversion() -> CGFloat { return CGFloat(self) }
+}
+
+
+
+protocol ScalarFloatingPointType {
+    var toDouble:Double { get }
+    init(_ value:Double)
+}
+
+extension CGFloat : ScalarFloatingPointType, FloatingPointMathType {
+    var toDouble:Double { return Double(self) }
+//    var abs:CGFloat { return self.abs }
+//    var acos:CGFloat { return Darwin.acos(self) }
+//    var asin:CGFloat { return Darwin.asin(self) }
+//    var atan:CGFloat { return Darwin.atan(self) }
+//    func atan2(x:CGFloat) -> CGFloat { return Darwin.atan2(self, x) }
+//    var cos:CGFloat { return Darwin.cos(self) }
+//    var sin:CGFloat { return Darwin.sin(self) }
+//    var tan:CGFloat { return Darwin.tan(self) }
+//    var exp:CGFloat { return Darwin.exp(self) }
+//    var exp2:CGFloat { return Darwin.exp2(self) }
+//    var log:CGFloat { return Darwin.log(self) }
+//    var log10:CGFloat { return Darwin.log10(self)}
+//    var log2:CGFloat { return Darwin.log2(self)}
+//    func pow(exponent:CGFloat)-> CGFloat { return Darwin.pow(self, exponent) }
+//    var sqrt:CGFloat { return Darwin.sqrt(self) }
+    func __conversion() -> Double { return Double(self) }
+}
+
+extension Float : ScalarFloatingPointType { var toDouble:Double { return Double(self) } }
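+
+// Editorial illustration (not part of the original ScalarArithmetic source): a minimal
+// sketch of what the conformances above are meant to enable once the commented-out code
+// compiles. Because CGFloat, Float, and the integer types all expose `toDouble`, the
+// heterogeneous comparison operators defined later in this file would let code like the
+// following compile without manual casts. The variable names here are hypothetical.
+//
+//     let margin: CGFloat = 10.0
+//     let count: Int = 3
+//     if margin > count {          // compared via toDouble
+//         print("margin exceeds count")
+//     }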
toDouble:Double { return Double(self) } } + +protocol ScalarIntegerType : ScalarFloatingPointType { + var toInt:Int { get } +} + +extension Int : ScalarIntegerType { + var toDouble:Double { return Double(self) } + func __conversion() -> Double { return Double(self) } + var toInt:Int { return Int(self) } + +} +extension Int16 : ScalarIntegerType { + var toDouble:Double { return Double(self) } + func __conversion() -> Double { return Double(self) } + var toInt:Int { return Int(self) } + +} +extension Int32 : ScalarIntegerType { + var toDouble:Double { return Double(self) } + func __conversion() -> Double { return Double(self) } + var toInt:Int { return Int(self) } + +} +extension Int64 : ScalarIntegerType { + var toDouble:Double { return Double(self) } + func __conversion() -> Double { return Double(self) } + var toInt:Int { return Int(self) } + +} +extension UInt : ScalarFloatingPointType { + var toDouble:Double { return Double(self) } + func __conversion() -> Double { return Double(self) } + +} +extension UInt16 : ScalarFloatingPointType { + var toDouble:Double { return Double(self) } + func __conversion() -> Double { return Double(self) } + +} +extension UInt32 : ScalarFloatingPointType { + var toDouble:Double { return Double(self) } + func __conversion() -> Double { return Double(self) } +} +extension UInt64 : ScalarFloatingPointType { + var toDouble:Double { return Double(self) } + func __conversion() -> Double { return Double(self) } + +} + + + + + +func + (lhs:T, rhs:Int) -> Int { return lhs + rhs } +func + (lhs:Int, rhs:T) -> Int { return lhs + rhs.toInt } + +func - (lhs:T, rhs:Int) -> Int { return lhs.toInt - rhs } +func - (lhs:Int, rhs:T) -> Int { return lhs - rhs.toInt } + +func * (lhs:T, rhs:Int) -> Int { return lhs.toInt * rhs } +func * (lhs:Int, rhs:T) -> Int { return lhs * rhs.toInt } + +func / (lhs:T, rhs:Int) -> Int { return lhs.toInt / rhs } +func / (lhs:Int, rhs:T) -> Int { return lhs / rhs.toInt } + + + +//Equality T<===>T +func == (lhs:U,rhs:T) -> Bool { return (lhs.toDouble == rhs.toDouble) } +func == (lhs:Double,rhs:T) -> Bool { return (lhs == rhs.toDouble) } +func == (lhs:T,rhs:Double) -> Bool { return (lhs.toDouble == rhs) } + +func != (lhs:U,rhs:T) -> Bool { return (lhs.toDouble == rhs.toDouble) == false } +func != (lhs:Double,rhs:T) -> Bool { return (lhs == rhs.toDouble) == false } +func != (lhs:T,rhs:Double) -> Bool { return (lhs.toDouble == rhs) == false } + +func <= (lhs:T,rhs:U) -> Bool { return (lhs.toDouble <= rhs.toDouble) } +func <= (lhs:Double, rhs:T) -> Bool { return (lhs <= rhs.toDouble) } +func <= (lhs:T,rhs:Double) -> Bool { return (lhs.toDouble <= rhs) } + +func < (lhs:T,rhs:U) -> Bool { return (lhs.toDouble < rhs.toDouble) } +func < (lhs:Double, rhs:T) -> Bool { return (lhs < rhs.toDouble) } +func < (lhs:T,rhs:Double) -> Bool { return (lhs.toDouble < rhs) } + +func > (lhs:T,rhs:U) -> Bool { return (lhs <= rhs) == false } +func > (lhs:Double, rhs:T) -> Bool { return (lhs <= rhs) == false} +func > (lhs:T,rhs:Double) -> Bool { return (lhs <= rhs) == false } + +func >= (lhs:T,rhs:U) -> Bool { return (lhs < rhs) == false } +func >= (lhs:Double, rhs:T) -> Bool { return (lhs < rhs) == false } +func >= (lhs:T,rhs:Double) -> Bool { return (lhs < rhs) == false } + + + +//SUBTRACTION +func - (lhs:U, rhs:T) -> Double {return (lhs.toDouble - rhs.toDouble) } +func - (lhs:Double, rhs:T) -> T { return T(lhs - rhs.toDouble) } +func - (lhs:T, rhs:Double) -> T { return T(lhs.toDouble - rhs) } +func - (lhs:Double, rhs:T) -> Double { return (lhs - rhs.toDouble) } +func - 
(lhs:T, rhs:Double) -> Double { return (lhs.toDouble - rhs) } +func -= (inout lhs:T, rhs:U) { lhs = T(lhs.toDouble - rhs.toDouble) } +func -= (inout lhs:Double, rhs:T) { lhs = lhs - rhs.toDouble } + +//ADDITION +func + (lhs:U, rhs:T) -> Double {return (lhs.toDouble + rhs.toDouble) } +func + (lhs:Double, rhs:T) -> T { return T(lhs + rhs.toDouble) } +func + (lhs:T, rhs:Double) -> T { return T(lhs.toDouble + rhs) } +func + (lhs:Double, rhs:T) -> Double { return (lhs + rhs.toDouble) } +func + (lhs:T, rhs:Double) -> Double { return (lhs.toDouble + rhs) } +func += (inout lhs:T, rhs:U) { lhs = T(lhs.toDouble + rhs.toDouble) } +func += (inout lhs:Double, rhs:T) { lhs = lhs + rhs.toDouble } + +//MULTIPLICATION +func * (lhs:U, rhs:T) -> Double {return (lhs.toDouble * rhs.toDouble) } +func * (lhs:Double, rhs:T) -> T { return T(lhs * rhs.toDouble) } +func * (lhs:T, rhs:Double) -> T { return T(lhs.toDouble * rhs) } +func * (lhs:Double, rhs:T) -> Double { return (lhs * rhs.toDouble) } +func * (lhs:T, rhs:Double) -> Double { return (lhs.toDouble * rhs) } +func *= (inout lhs:T, rhs:U) { lhs = T(lhs.toDouble * rhs.toDouble) } +func *= (inout lhs:Double, rhs:T) { lhs = lhs * rhs.toDouble } + +//DIVISION +func / (lhs:U, rhs:T) -> Double {return (lhs.toDouble / rhs.toDouble) } +func / (lhs:Double, rhs:T) -> T { return T(lhs / rhs.toDouble) } +func / (lhs:T, rhs:Double) -> T { return T(lhs.toDouble / rhs) } +func / (lhs:Double, rhs:T) -> Double { return (lhs / rhs.toDouble) } +func / (lhs:T, rhs:Double) -> Double { return (lhs.toDouble / rhs) } +func /= (inout lhs:T, rhs:U) { lhs = T(lhs.toDouble / rhs.toDouble) } +func /= (inout lhs:Double, rhs:T) { lhs = lhs / rhs.toDouble } + + diff --git a/swix_ios_app/swix_ios_app/swix/imshow.py b/swix_ios_app/swix_ios_app/swix/imshow.py new file mode 100644 index 0000000..480ecd3 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/imshow.py @@ -0,0 +1,30 @@ +from __future__ import division +import pylab as p +from pandas import read_csv +import sys + +""" +Usage: python imshow.py filename.png shouldSave shouldShow +""" + +def str2bool(string): + """ + Only true if string is one of "yes", "true", "t", "1". Returns false + otherwise. + """ + return string.lower() in ("yes", "true", "t", "1") + +filename = sys.argv[1] +shouldSave = str2bool(sys.argv[2]) +shouldShow = str2bool(sys.argv[3]) + +x = read_csv("temp.csv", header=None) +x = p.asarray(x) + +p.figure() +p.imshow(x, interpolation='nearest') +p.tight_layout() +p.colorbar() + +if shouldShow: p.show() +if shouldSave: p.savefig('../'+filename, dpi=300) diff --git a/swix_ios_app/swix_ios_app/swix/io.swift b/swix_ios_app/swix_ios_app/swix/io.swift new file mode 100644 index 0000000..2058f17 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/io.swift @@ -0,0 +1,119 @@ +// +// io.swift +// swix +// +// Created by Scott Sievert on 11/7/14. +// Copyright (c) 2014 com.scott. All rights reserved. +// + +import Foundation + +// ndarray binary +func write_binary(x:ndarray, filename:String, prefix:String=S2_PREFIX){ + let N = x.n + let data = NSData(bytes:!x, length:N*sizeof(Double)) + data.writeToFile(prefix+"../"+filename, atomically: false) +} +func read_binary(filename:String, prefix:String=S2_PREFIX) -> ndarray{ + let read = NSData(contentsOfFile: prefix+"../"+filename) + let l:Int! 
= read?.length + let sD:Int = sizeof(Double) + let count = (l.double / sD.double) + + let y = zeros(count.int) + read?.getBytes(!y, length: count.int*sizeof(Double)) + return y +} + +// matrix binary +func write_binary(x:matrix, filename:String, prefix:String=S2_PREFIX){ + let y = concat(array(x.shape.0.double, x.shape.1.double), y: x.flat) + write_binary(y, filename:filename, prefix:prefix) +} +func read_binary(filename:String, prefix:String=S2_PREFIX)->matrix{ + var a:ndarray = read_binary(filename, prefix:prefix) + let (w, h) = (a[0], a[1]) + return reshape(a[2.. ndarray{ + var x: String? + do { + x = try String(contentsOfFile: prefix+"../"+filename, encoding: NSUTF8StringEncoding) + } catch _ { + x = nil + } + var array:[Double] = [] + var columns:Int = 0 + var z = x!.componentsSeparatedByString(",") + columns = 0 + for i in 0.. matrix{ + var x: String? + do { + x = try String(contentsOfFile: prefix+"../"+filename, encoding: NSUTF8StringEncoding) + } catch _ { + x = nil + } + var y = x!.componentsSeparatedByString("\n") + let rows = y.count-1 + var array:[Double] = [] + var columns:Int = 0 + for i in 0.. objc --> objc++ --> c++ + var cvsvm:cvSVM; + var svm_type:String + var kernel_type:String + var N:Int + var M:Int + init(){ + self.cvsvm = cvSVM() + + + self.N = -1 + self.M = -1 + + // with linear svc results, we closely match (and do slightly better than) sk-learn + self.svm_type = "C_SVC" + self.kernel_type = "LINEAR" + setParams(svm_type, kernel_type:kernel_type) + } + func setParams(svm_type:String, kernel_type:String, nu:Float=0.5){ + // kernel: LINEAR, SIGMOID + // svm_type: C_SVC, ONE_CLASS, NU_SVC, NU_SVR + + // careful: NU_SVR and SIGMOID throws an exception error + self.cvsvm.setParams(svm_type.nsstring as String, kernel:kernel_type.nsstring as String, nu:nu.cfloat) + } + func train(responses: matrix, _ targets: ndarray){ + // convert matrix2d to NSArray + self.M = responses.shape.0 + self.N = responses.shape.1 + self.cvsvm.train(!responses, targets:!targets, m:self.M.cint, n:self.N.cint) + } + func predict(response: ndarray) -> Double{ + assert(self.N == response.count, "Sizes of input arguments do not match: predict.count != trained.count. The varianbles you're trying to predict a result from must match variables you trained off of.") + let tp = self.cvsvm.predict(!response, n:self.N.cint) + return tp.double + } + func predict(responses: matrix) -> ndarray{ + let y = zeros(responses.shape.0) + assert(self.N == responses.shape.1, "Sizes must match") + self.cvsvm.predict(!responses, into:!y, m:responses.shape.0.cint, n:responses.shape.1.cint); + return y + } +} +class kNearestNeighbors{ + // finds the nearest neighbor over all points. if want to change, dive into knn.mm and change `int k = cvknn.get_max_k();` in `predict(...)` + var T:Double + var knn:kNN; + var N:Int; // variables + var M:Int; // responses + init(){ + assert(false, "Careful! My simple tests failed but it looks like it should work.") + self.T = 1 + self.knn = kNN() + self.N = -1 + self.M = -1 + } + func train(responses: matrix, targets: ndarray){ + self.M = responses.shape.0 + self.N = responses.shape.1 + + self.knn.train(!responses, targets: !targets, m:self.M.cint, n:self.N.cint) + + } + func predict(x: ndarray, k: Int) -> Double{ + assert(self.N == x.count, "Sizes of input arguments do not match: predict.count != trained.count. 
The varianbles you're trying to predict a result from must match variables you trained off of.") + assert(k <= 32, "k <= 32 for performance reasons enforced by OpenCV.") + let result = self.knn.predict(!x, n:x.n.cint, k:k.cint) + return result.double; + } +} + diff --git a/swix_ios_app/swix_ios_app/swix/matrix/m-complex-math.swift b/swix_ios_app/swix_ios_app/swix/matrix/m-complex-math.swift new file mode 100644 index 0000000..f2d8258 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/matrix/m-complex-math.swift @@ -0,0 +1,119 @@ +// +// twoD-complex-math.swift +// swix +// +// Created by Scott Sievert on 7/15/14. +// Copyright (c) 2014 com.scott. All rights reserved. +// + +import Foundation +import Swift +import Accelerate + +func rank(x:matrix)->Double{ + let (_, S, _) = svd(x, compute_uv:false) + let m:Double = (x.shape.0 < x.shape.1 ? x.shape.1 : x.shape.0).double + let tol = S.max() * m * DOUBLE_EPSILON + return sum(S > tol) +} +func dot(x: matrix, y: matrix) -> matrix{ + return x.dot(y) +} +func dot(A: matrix, x: ndarray) -> ndarray{ + return A.dot(x) +} +func svd(x: matrix, compute_uv:Bool=true) -> (matrix, ndarray, matrix){ + let (m, n) = x.shape + let nS = m < n ? m : n // number singular values + let sigma = zeros(nS) + let vt = zeros((n,n)) + var u = zeros((m,m)) + + var xx = zeros_like(x) + xx.flat = x.flat + xx = xx.T + let c_uv:CInt = compute_uv==true ? 1 : 0 + svd_objc(!xx, m.cint, n.cint, !sigma, !vt, !u, c_uv) + + // to get the svd result to match Python + let v = transpose(vt) + u = transpose(u) + + return (u, sigma, v) +} +func pinv(x:matrix)->matrix{ + var (u, s, v) = svd(x) + let m = u.shape.0 + let n = v.shape.1 + let ma = m < n ? n : m + let cutoff = DOUBLE_EPSILON * ma.double * max(s) + let i = s > cutoff + let ipos = argwhere(i) + s[ipos] = 1 / s[ipos] + let ineg = argwhere(1-i) + s[ineg] = zeros_like(ineg) + var z = zeros((n, m)) + z["diag"] = s + let res = v.T.dot(z).dot(u.T) + return res +} +func inv(x: matrix) -> matrix{ + assert(x.shape.0 == x.shape.1, "To take an inverse of a matrix, the matrix must be square. If you want the inverse of a rectangular matrix, use psuedoinverse.") + let y = x.copy() + let (M, N) = x.shape + + var ipiv:Array<__CLPK_integer> = Array(count:M*M, repeatedValue:0) + var lwork:__CLPK_integer = __CLPK_integer(N*N) +// var work:[CDouble] = [CDouble](count:lwork, repeatedValue:0) + var work = [CDouble](count: Int(lwork), repeatedValue: 0.0) + var info:__CLPK_integer=0 + var nc = __CLPK_integer(N) + dgetrf_(&nc, &nc, !y, &nc, &ipiv, &info) + dgetri_(&nc, !y, &nc, &ipiv, &work, &lwork, &info) + return y +} +func solve(A: matrix, b: ndarray) -> ndarray{ + let (m, n) = A.shape + assert(b.n == m, "Ax = b, A.rows == b.n. Sizes must match which makes sense mathematically") + assert(n == m, "Matrix must be square -- dictated by OpenCV") + let x = zeros(n) + CVWrapper.solve(!A, b:!b, x:!x, m:m.cint, n:n.cint) + return x +} +func eig(x: matrix)->ndarray{ + // matrix, value, vectors + let (m, n) = x.shape + assert(m == n, "Input must be square") + + let value_real = zeros(m) + let value_imag = zeros(n) + var vector = zeros((n,n)) + + var work:[Double] = Array(count:n*n, repeatedValue:0.0) + var lwork = __CLPK_integer(4 * n) + var info = __CLPK_integer(1) + + // don't compute right or left eigenvectors + let job = "N" + var jobvl = (job.cStringUsingEncoding(NSUTF8StringEncoding)?[0])! + var jobvr = (job.cStringUsingEncoding(NSUTF8StringEncoding)?[0])! 
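    // dgeev_ is LAPACK's eigensolver for general (non-symmetric) matrices; with
    // jobvl = jobvr = "N" it computes eigenvalues only, so `vector` is just scratch
    // space and only the real parts are returned -- complex eigenvalues lose their
    // imaginary component. dgeev_ also overwrites the buffer passed as its A
    // argument. Note that `work` holds n*n doubles while lwork advertises 4*n, so
    // for n < 4 the workspace is smaller than claimed.
    // Sketch: eig(eye(4)) ~== ones(4)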
+ + work[0] = Double(lwork) + var nc = __CLPK_integer(n) + dgeev_(&jobvl, &jobvr, &nc, !x, &nc, + !value_real, !value_imag, !vector, &nc, !vector, &nc, + &work, &lwork, &info) + + vector = vector.T + + return value_real +} + + + + + + + + + diff --git a/swix_ios_app/swix_ios_app/swix/matrix/m-helper-functions.swift b/swix_ios_app/swix_ios_app/swix/matrix/m-helper-functions.swift new file mode 100644 index 0000000..a34e808 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/matrix/m-helper-functions.swift @@ -0,0 +1,147 @@ +// +// helper-functions.swift +// swix +// +// Created by Scott Sievert on 8/9/14. +// Copyright (c) 2014 com.scott. All rights reserved. +// + +import Foundation + +// NORMs +func norm(x:matrix, ord:String="assumed to be 'fro' for Frobenius")->Double{ + if ord == "fro" {return norm(x.flat, ord:2)} + assert(false, "Norm type assumed to be \"fro\" for Forbenius norm!") + return -1 +} +func norm(x:matrix, ord:Double=2)->Double{ + if ord == inf {return max(sum(abs(x), axis:1))} + else if ord == -inf {return min(sum(abs(x), axis:1))} + else if ord == 1 {return max(sum(abs(x), axis:0))} + else if ord == -1 {return min(sum(abs(x), axis:0))} + else if ord == 2 { + // compute only the largest singular value? + let (_, s, _) = svd(x, compute_uv:false) + return s[0] + } + else if ord == -2 { + // compute only the smallest singular value? + let (_, s, _) = svd(x, compute_uv:false) + return s[-1] + } + + assert(false, "Invalid norm for matrices") + return -1 +} + +func det(x:matrix)->Double{ + var result:CDouble = 0.0 + CVWrapper.det(!x, n:x.shape.0.cint, m:x.shape.1.cint, result:&result) + return result +} + +// basics +func argwhere(idx: matrix) -> ndarray{ + return argwhere(idx.flat) +} +func flipud(x:matrix)->matrix{ + let y = x.copy() + CVWrapper.flip(!x, into:!y, how:"ud", m:x.shape.0.cint, n:x.shape.1.cint) + return y +} +func fliplr(x:matrix)->matrix{ + let y = x.copy() + CVWrapper.flip(!x, into:!y, how:"lr", m:x.shape.0.cint, n:x.shape.1.cint) + return y +} +func rot90(x:matrix, k:Int=1)->matrix{ + // k is assumed to be less than or equal to 3 + let y = x.copy() + if k == 1 {return fliplr(x).T} + if k == 2 {return flipud(fliplr(y))} + if k == 3 {return flipud(x).T} + assert(false, "k is assumed to satisfy 1 <= k <= 3") + return y +} + +// modifying matrices, modifying equations +func transpose (x: matrix) -> matrix{ + let m = x.shape.1 + let n = x.shape.0 + let y = zeros((m, n)) + vDSP_mtransD(!x, 1.stride, !y, 1.stride, m.length, n.length) + return y +} +func kron(A:matrix, B:matrix)->matrix{ + // an O(n^4) operation! + func assign_kron_row(A:matrix, B:matrix,inout C:matrix, p:Int, m:Int, m_max:Int){ + var row = (m+0)*(p+0) + p-0 + row = m_max*m + 1*p + + let i = arange(B.shape.1 * A.shape.1) + let n1 = arange(A.shape.1) + let q1 = arange(B.shape.1) + let (n, q) = meshgrid(n1, y: q1) + C[row, i] = A[m, n.flat] * B[p, q.flat] + } + var C = zeros((A.shape.0*B.shape.0, A.shape.1*B.shape.1)) + for p in 0.. 
ndarray{ + let (m, n) = x.shape + let (mm, nn) = meshgrid(arange(m), y: arange(n)) + var i = mm - nn + let j = (i < 0+S2_THRESHOLD) + i[argwhere(j)] <- 0 + i[argwhere(1-j)] <- 1 + return argwhere(i) +} +func triu(x: matrix)->ndarray{ + let (m, n) = x.shape + let (mm, nn) = meshgrid(arange(m), y: arange(n)) + var i = mm - nn + let j = (i > 0-S2_THRESHOLD) + i[argwhere(j)] <- 0 + i[argwhere(1-j)] <- 1 + return argwhere(i) +} + +// PRINTING +func println(x: matrix, prefix:String="matrix([", postfix:String="])", newline:String="\n", format:String="%.3f", printWholeMatrix:Bool=false){ + print(prefix, terminator: "") + var pre:String + var post:String + var printedSpacer = false + for i in 0..x.shape.0-4{ + print(x[i, 0..Double{ + return x.max() +} +func min(x: matrix, axis:Int = -1)->Double{ + return x.min() +} +func print(x: matrix, prefix:String="matrix([", postfix:String="])", newline:String="\n", format:String="%.3f", printWholeMatrix:Bool=false){ + println(x, prefix:prefix, postfix:postfix, newline:"", format:format, printWholeMatrix:printWholeMatrix) +} diff --git a/swix_ios_app/swix_ios_app/swix/matrix/m-image.swift b/swix_ios_app/swix_ios_app/swix/matrix/m-image.swift new file mode 100644 index 0000000..d069b1a --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/matrix/m-image.swift @@ -0,0 +1,165 @@ +// +// twoD-image.swift +// swix +// +// Created by Scott Sievert on 7/30/14. +// Copyright (c) 2014 com.scott. All rights reserved. +// + +/* + * some other useful tips that need an iOS app to use: + * 1. UIImage to raw array[0]: + * 2. raw array to UIImage[1]: + * + * for a working implementation, see[2] (to be published shortly) + * + * [0]:http://stackoverflow.com/a/1262893/1141256 + * [1]:http://stackoverflow.com/a/12868860/1141256 + * [2]:https://github.com/scottsievert/saliency/blob/master/AVCam/AVCam/saliency/imageToRawArray.m + * + * + */ + +import Foundation +import UIKit // for iOS use + +func rgb2hsv_pixel(R:Double, G:Double, B:Double)->(Double, Double, Double){ + // tested against wikipedia/HSL_and_HSV. returns (H, S_hsv, V) + let M = max(array(R, G, B)) + let m = min(array(R, G, B)) + let C = M - m + var Hp:Double = 0 + if M==R {Hp = ((G-B)/C) % 6} + else if M==G {Hp = ((B-R)/C) + 2} + else if M==B {Hp = ((R-G)/C) + 4} + let H = 60 * Hp + let V = M + var S = 0.0 + if !(V==0) {S = C/V} + + return (H, S, V) +} + + +func rgb2hsv(r:matrix, g:matrix, b:matrix)->(matrix, matrix, matrix){ + assert(r.shape.0 == g.shape.0) + assert(b.shape.0 == g.shape.0) + assert(r.shape.1 == g.shape.1) + assert(b.shape.1 == g.shape.1) + var h = zeros_like(r) + var s = zeros_like(g) + var v = zeros_like(b) + for i in 0..matrix{ + return max(max(r, y: g), y: b) +} + + +func savefig(x:matrix, filename:String, save:Bool=true, show:Bool=false){ + // assumes Python is on your $PATH and pylab/etc are installed + // prefix should point to the swix folder! 
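    // A minimal usage sketch (assumes pylab/pandas are installed and that
    // PYTHON_PATH and S2_PREFIX in numbers.swift point at a working python
    // and at this swix folder):
    //     savefig(rand((64, 64)), filename: "noise.png")  // saves ../noise.png relative to the swix folder
    //     imshow(eye(8))                                   // just pops up a pylab window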
+ // prefix is defined in numbers.swift + // assumes python is on your path + write_csv(x, filename:"swix/temp.csv") + system("cd "+S2_PREFIX+"; "+PYTHON_PATH + " imshow.py \(filename) \(save) \(show)") + system("rm "+S2_PREFIX+"temp.csv") +} +func imshow(x: matrix){ + savefig(x, filename: "junk", save:false, show:true) +} + +func UIImageToRGBA(image:UIImage)->(matrix, matrix, matrix, matrix){ + // returns red, green, blue and alpha channels + + // init'ing + var imageRef = image.CGImage + var width = CGImageGetWidth(imageRef) + var height = CGImageGetHeight(imageRef) + var colorSpace = CGColorSpaceCreateDeviceRGB() + var bytesPerPixel = 4 + var bytesPerRow:UInt = UInt(bytesPerPixel) * UInt(width) + var bitsPerComponent:UInt = 8 + var pix = Int(width) * Int(height) + var count:Int = 4*Int(pix) + + // pulling the color out of the image + var rawData = UnsafeMutablePointer.alloc(4 * width * height) + var temp = CGImageAlphaInfo.PremultipliedLast.rawValue + var bitmapInfo = CGBitmapInfo(rawValue:temp) + var context = CGBitmapContextCreate(rawData, Int(width), Int(height), Int(bitsPerComponent), Int(bytesPerRow), colorSpace, temp) + CGContextDrawImage(context, CGRectMake(0,0,CGFloat(width), CGFloat(height)), imageRef) + + + // unsigned char to double conversion + var rawDataArray = zeros(count)-1 + vDSP_vfltu8D(rawData, 1.stride, !(rawDataArray), 1, count.length) + + // pulling the RGBA channels out of the color + var i = arange(pix) + var r = zeros((Int(height), Int(width)))-1; + r.flat = rawDataArray[4*i+0] + + var g = zeros((Int(height), Int(width))); + g.flat = rawDataArray[4*i+1] + + var b = zeros((Int(height), Int(width))); + b.flat = rawDataArray[4*i+2] + + var a = zeros((Int(height), Int(width))); + a.flat = rawDataArray[4*i+3] + return (r, g, b, a) +} +func RGBAToUIImage(r:matrix, g:matrix, b:matrix, a:matrix)->UIImage{ + // might be useful! [1] + // [1]:http://stackoverflow.com/questions/30958427/pixel-array-to-uiimage-in-swift + // setup + var height = r.shape.0 + var width = r.shape.1 + var area = height * width + var componentsPerPixel = 4 // rgba + var compressedPixelData = zeros(4*area) + var N = width * height + + // double to unsigned int + var i = arange(N) + compressedPixelData[4*i+0] = r.flat + compressedPixelData[4*i+1] = g.flat + compressedPixelData[4*i+2] = b.flat + compressedPixelData[4*i+3] = a.flat + var pixelData:[CUnsignedChar] = Array(count:area*componentsPerPixel, repeatedValue:0) + vDSP_vfixu8D(&compressedPixelData.grid, 1, &pixelData, 1, vDSP_Length(componentsPerPixel*area)) + + // creating the bitmap context + var colorSpace = CGColorSpaceCreateDeviceRGB() + var bitsPerComponent = 8 + var bytesPerRow = ((bitsPerComponent * width) / 8) * componentsPerPixel + var temp = CGImageAlphaInfo.PremultipliedLast.rawValue + var bitmapInfo = CGBitmapInfo(rawValue:temp) + var context = CGBitmapContextCreate(&pixelData, Int(width), Int(height), Int(bitsPerComponent), Int(bytesPerRow), colorSpace, temp) + + // creating the image + var toCGImage = CGBitmapContextCreateImage(context)! 
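    // The bitmap context above is RGBA, 8 bits per channel, premultiplied alpha --
    // the same layout UIImageToRGBA assumes -- so a round trip through the matrix
    // representation should reproduce the image up to premultiplication and
    // rounding. Rough sketch:
    //     let (r, g, b, a) = UIImageToRGBA(image)
    //     let gray = (r + g + b) / 3.0
    //     let back = RGBAToUIImage(gray, g: gray, b: gray, a: a)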
+ var image:UIImage = UIImage.init(CGImage:toCGImage) + return image +} +func resizeImage(image:UIImage, shape:(Int, Int)) -> UIImage{ + // nice variables + var (height, width) = shape + var cgSize = CGSizeMake(CGFloat(width), CGFloat(height)) + + // draw on new CGSize + UIGraphicsBeginImageContextWithOptions(cgSize, false, 0.0) + image.drawInRect(CGRectMake(CGFloat(0), CGFloat(0), CGFloat(width), CGFloat(height))) + var newImage = UIGraphicsGetImageFromCurrentImageContext() + UIGraphicsEndImageContext() + return newImage +} diff --git a/swix_ios_app/swix_ios_app/swix/matrix/m-initing.swift b/swix_ios_app/swix_ios_app/swix/matrix/m-initing.swift new file mode 100644 index 0000000..65886d7 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/matrix/m-initing.swift @@ -0,0 +1,113 @@ +// +// twoD-initing.swift +// swix +// +// Created by Scott Sievert on 7/9/14. +// Copyright (c) 2014 com.scott. All rights reserved. +// + +import Foundation +import Accelerate + + +func zeros(shape: (Int, Int)) -> matrix{ + return matrix(columns: shape.1, rows: shape.0) +} +func zeros_like(x: matrix) -> matrix{ + let y:matrix = zeros((x.shape.0, x.shape.1)) + return y +} +func ones_like(x: matrix) -> matrix{ + return zeros_like(x) + 1 +} +func ones(shape: (Int, Int)) -> matrix{ + return zeros(shape)+1 +} +func eye(N: Int) -> matrix{ + return diag(ones(N)) +} +func diag(x:ndarray)->matrix{ + var y = zeros((x.n, x.n)) + y["diag"] = x + return y +} +func randn(N: (Int, Int), mean: Double=0, sigma: Double=1) -> matrix{ + var x = zeros(N) + let y = randn(N.0 * N.1, mean:mean, sigma:sigma) + x.flat = y + return x +} +func rand(N: (Int, Int)) -> matrix{ + var x = zeros(N) + let y = rand(N.0 * N.1) + x.flat = y + return x +} +func reshape(x: ndarray, shape:(Int, Int))->matrix{ + return x.reshape(shape) +} +func meshgrid(x: ndarray, y:ndarray) -> (matrix, matrix){ + assert(x.n > 0 && y.n > 0, "If these matrices are empty meshgrid fails") + let z1 = reshape(`repeat`(y, N: x.n), shape: (x.n, y.n)) + let z2 = reshape(`repeat`(x, N: y.n, axis: 1), shape: (x.n, y.n)) + return (z2, z1) +} + + +/// array("1 2 3; 4 5 6; 7 8 9") works like matlab. note that string format has to be followed to the dot. String parsing has bugs; I'd use arange(9).reshape((3,3)) or something similar +func array(matlab_like_string: String)->matrix{ + let mls = matlab_like_string + var rows = mls.componentsSeparatedByString(";") + let r = rows.count + var c = 0 + for char in rows[0].characters{ + if char == " " {} + else {c += 1} + } + var x = zeros((r, c)) + var start:Int + var i:Int=0, j:Int=0 + for row in rows{ + var nums = row.componentsSeparatedByCharactersInSet(NSCharacterSet.whitespaceCharacterSet()) + if nums[0] == ""{start=1} + else {start=0} + j = 0 + for n in start..matrix{ + var y = zeros_like(self) + y.flat = self.flat.copy() + return y + } + subscript(i: String) -> ndarray { + get { + assert(i == "diag", "Currently the only support x[string] is x[\"diag\"]") + let size = rows < columns ? rows : columns + let i = arange(size) + return self[i*columns.double + i] + } + set { + assert(i == "diag", "Currently the only support x[string] is x[\"diag\"]") + let m = shape.0 + let n = shape.1 + let min_mn = m < n ? 
m : n + let j = n.double * arange(min_mn) + self[j + j/n.double] = newValue + } + } + func indexIsValidForRow(r: Int, c: Int) -> Bool { + return r >= 0 && r < rows && c>=0 && c < columns + } + func dot(y: matrix) -> matrix{ + let (Mx, Nx) = self.shape + let (My, Ny) = y.shape + assert(Nx == My, "Matrix sizes not compatible for dot product") + let z = zeros((Mx, Ny)) + cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, + Mx.cint, Ny.cint, Nx.cint, 1.0, + !self, Nx.cint, + !y, Ny.cint, 1.0, + !z, Ny.cint) + return z + } + func dot(x: ndarray) -> ndarray{ + var y = zeros((x.n, 1)) + y.flat = x + var z = self.dot(y) + return z.flat + } + func min(axis:Int = -1) -> Double{ + if axis == -1{ + return self.flat.min() + } + assert(axis==0 || axis==1, "Axis must be 0 or 1 as matrix only has two dimensions") + assert(false, "max(x, axis:Int) for maximum of each row is not implemented yet. Use max(A.flat) or A.flat.max() to get the global maximum") + + } + func max(axis:Int = -1) -> Double{ + if axis == -1 { + return self.flat.max() + } + assert(axis==0 || axis==1, "Axis must be 0 or 1 as matrix only has two dimensions") + assert(false, "max(x, axis:Int) for maximum of each row is not implemented yet. Use max(A.flat) or A.flat.max() to get the global maximum") + } + subscript(i: Int, j: Int) -> Double { + // x[0,0] + get { + var nI = i + var nJ = j + if nI < 0 {nI = rows + i} + if nJ < 0 {nJ = rows + j} + assert(indexIsValidForRow(nI, c:nJ), "Index out of range") + return flat[nI * columns + nJ] + } + set { + var nI = i + var nJ = j + if nI < 0 {nI = rows + i} + if nJ < 0 {nJ = rows + j} + assert(indexIsValidForRow(nI, c:nJ), "Index out of range") + flat[nI * columns + nJ] = newValue + } + } + subscript(i: Range, k: Int) -> ndarray { + // x[0..<2, 0] + get { + let idx = asarray(i) + return self[idx, k] + } + set { + let idx = asarray(i) + self[idx, k] = newValue + } + } + subscript(r: Range, c: Range) -> matrix { + // x[0..<2, 0..<2] + get { + let rr = asarray(r) + let cc = asarray(c) + return self[rr, cc] + } + set { + let rr = asarray(r) + let cc = asarray(c) + self[rr, cc] = newValue + } + } + subscript(i: Int, k: Range) -> ndarray { + // x[0, 0..<2] + get { + let idx = asarray(k) + return self[i, idx] + } + set { + let idx = asarray(k) + self[i, idx] = newValue + } + } + subscript(or: ndarray, oc: ndarray) -> matrix { + // the main method. 
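        // Given row indices r and column indices c, meshgrid expands them to every
        // (row, col) pair and the flat offsets row * columns + col are gathered, so
        // A[array(0, 2), array(1, 3)] is the 2x2 block at the intersection of rows
        // {0, 2} and columns {1, 3} (NumPy ix_-style indexing, not element-pair
        // indexing). If every index in r (or c) is negative, the row (or column)
        // count is added once to wrap it around.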
+ // x[array(1,2), array(3,4)] + get { + var r = or.copy() + var c = oc.copy() + if r.max() < 0.0 {r += 1.0 * rows.double} + if c.max() < 0.0 {c += 1.0 * columns.double} + + let (j, i) = meshgrid(r, y: c) + let idx = (j.flat*columns.double + i.flat) + let z = flat[idx] + let zz = reshape(z, shape: (r.n, c.n)) + return zz + } + set { + var r = or.copy() + var c = oc.copy() + if r.max() < 0.0 {r += 1.0 * rows.double} + if c.max() < 0.0 {c += 1.0 * columns.double} + if r.n > 0 && c.n > 0{ + let (j, i) = meshgrid(r, y: c) + let idx = j.flat*columns.double + i.flat + flat[idx] = newValue.flat + } + } + } + subscript(r: ndarray) -> ndarray { + // flat indexing + get {return self.flat[r]} + set {self.flat[r] = newValue } + } + subscript(i: String, k:Int) -> ndarray { + // x["all", 0] + get { + let idx = arange(shape.0) + let x:ndarray = self.flat[idx * self.columns.double + k.double] + return x + } + set { + let idx = arange(shape.0) + self.flat[idx * self.columns.double + k.double] = newValue + } + } + subscript(i: Int, k: String) -> ndarray { + // x[0, "all"] + get { + assert(k == "all", "Only 'all' supported") + let idx = arange(shape.1) + let x:ndarray = self.flat[i.double * self.columns.double + idx] + return x + } + set { + assert(k == "all", "Only 'all' supported") + let idx = arange(shape.1) + self.flat[i.double * self.columns.double + idx] = newValue + } + } + subscript(i: ndarray, k: Int) -> ndarray { + // x[array(1,2), 0] + get { + let idx = i.copy() + let x:ndarray = self.flat[idx * self.columns.double + k.double] + return x + } + set { + let idx = i.copy() + self.flat[idx * self.columns.double + k.double] = newValue + } + } + subscript(i: matrix) -> ndarray { + // x[x < 5] + get { + return self.flat[i.flat] + } + set { + self.flat[i.flat] = newValue + } + } + subscript(i: Int, k: ndarray) -> ndarray { + // x[0, array(1,2)] + get { + let x:ndarray = self.flat[i.double * self.columns.double + k] + return x + } + set { + self.flat[i.double * self.columns.double + k] = newValue + } + } +} + + + + + + + + + + + + + + + + + diff --git a/swix_ios_app/swix_ios_app/swix/matrix/m-operators.swift b/swix_ios_app/swix_ios_app/swix/matrix/m-operators.swift new file mode 100644 index 0000000..1cf7fe0 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/matrix/m-operators.swift @@ -0,0 +1,152 @@ +// +// twoD-operators.swift +// swix +// +// Created by Scott Sievert on 7/9/14. +// Copyright (c) 2014 com.scott. All rights reserved. 
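// The subscripts defined on matrix above (m-matrix.swift) give NumPy-like slicing
// in addition to the fancy indexing; a few sketches, with A any matrix:
//     A[0, "all"]          // row 0 as an ndarray
//     A["all", 1]          // column 1 as an ndarray
//     A["diag"]            // the main diagonal
//     A[0..<2, 0..<2]      // top-left 2x2 block as a matrix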
+// + +import Foundation +import Accelerate + +func make_operator(lhs: matrix, operation: String, rhs: matrix)->matrix{ + assert(lhs.shape.0 == rhs.shape.0, "Sizes must match!") + assert(lhs.shape.1 == rhs.shape.1, "Sizes must match!") + + var result = zeros_like(lhs) // real result + let lhsM = lhs.flat + let rhsM = rhs.flat + var resM:ndarray = zeros_like(lhsM) // flat ndarray + if operation=="+" {resM = lhsM + rhsM} + else if operation=="-" {resM = lhsM - rhsM} + else if operation=="*" {resM = lhsM * rhsM} + else if operation=="/" {resM = lhsM / rhsM} + else if operation=="<" {resM = lhsM < rhsM} + else if operation==">" {resM = lhsM > rhsM} + else if operation==">=" {resM = lhsM >= rhsM} + else if operation=="<=" {resM = lhsM <= rhsM} + result.flat.grid = resM.grid + return result +} +func make_operator(lhs: matrix, operation: String, rhs: Double)->matrix{ + var result = zeros_like(lhs) // real result +// var lhsM = asmatrix(lhs.grid) // flat + let lhsM = lhs.flat + var resM:ndarray = zeros_like(lhsM) // flat matrix + if operation=="+" {resM = lhsM + rhs} + else if operation=="-" {resM = lhsM - rhs} + else if operation=="*" {resM = lhsM * rhs} + else if operation=="/" {resM = lhsM / rhs} + else if operation=="<" {resM = lhsM < rhs} + else if operation==">" {resM = lhsM > rhs} + else if operation==">=" {resM = lhsM >= rhs} + else if operation=="<=" {resM = lhsM <= rhs} + result.flat.grid = resM.grid + return result +} +func make_operator(lhs: Double, operation: String, rhs: matrix)->matrix{ + var result = zeros_like(rhs) // real result +// var rhsM = asmatrix(rhs.grid) // flat + let rhsM = rhs.flat + var resM:ndarray = zeros_like(rhsM) // flat matrix + if operation=="+" {resM = lhs + rhsM} + else if operation=="-" {resM = lhs - rhsM} + else if operation=="*" {resM = lhs * rhsM} + else if operation=="/" {resM = lhs / rhsM} + else if operation=="<" {resM = lhs < rhsM} + else if operation==">" {resM = lhs > rhsM} + else if operation==">=" {resM = lhs >= rhsM} + else if operation=="<=" {resM = lhs <= rhsM} + result.flat.grid = resM.grid + return result +} + +// DOUBLE ASSIGNMENT +func <- (inout lhs:matrix, rhs:Double){ + let assign = ones((lhs.shape)) * rhs + lhs = assign +} + +// SOLVE +infix operator !/ {associativity none precedence 140} +func !/ (lhs: matrix, rhs: ndarray) -> ndarray{ + return solve(lhs, b: rhs)} +// EQUALITY +func ~== (lhs: matrix, rhs: matrix) -> Bool{ + return (rhs.flat ~== lhs.flat)} + +infix operator == {associativity none precedence 140} +func == (lhs: matrix, rhs: matrix)->matrix{ + return (lhs.flat == rhs.flat).reshape(lhs.shape) +} +infix operator !== {associativity none precedence 140} +func !== (lhs: matrix, rhs: matrix)->matrix{ + return (lhs.flat !== rhs.flat).reshape(lhs.shape) +} + +/// ELEMENT WISE OPERATORS +// PLUS +infix operator + {associativity none precedence 140} +func + (lhs: matrix, rhs: matrix) -> matrix{ + return make_operator(lhs, operation: "+", rhs: rhs)} +func + (lhs: Double, rhs: matrix) -> matrix{ + return make_operator(lhs, operation: "+", rhs: rhs)} +func + (lhs: matrix, rhs: Double) -> matrix{ + return make_operator(lhs, operation: "+", rhs: rhs)} +// MINUS +infix operator - {associativity none precedence 140} +func - (lhs: matrix, rhs: matrix) -> matrix{ + return make_operator(lhs, operation: "-", rhs: rhs)} +func - (lhs: Double, rhs: matrix) -> matrix{ + return make_operator(lhs, operation: "-", rhs: rhs)} +func - (lhs: matrix, rhs: Double) -> matrix{ + return make_operator(lhs, operation: "-", rhs: rhs)} +// TIMES +infix operator * 
{associativity none precedence 140} +func * (lhs: matrix, rhs: matrix) -> matrix{ + return make_operator(lhs, operation: "*", rhs: rhs)} +func * (lhs: Double, rhs: matrix) -> matrix{ + return make_operator(lhs, operation: "*", rhs: rhs)} +func * (lhs: matrix, rhs: Double) -> matrix{ + return make_operator(lhs, operation: "*", rhs: rhs)} +// DIVIDE +infix operator / {associativity none precedence 140} +func / (lhs: matrix, rhs: matrix) -> matrix{ + return make_operator(lhs, operation: "/", rhs: rhs) +} +func / (lhs: Double, rhs: matrix) -> matrix{ + return make_operator(lhs, operation: "/", rhs: rhs)} +func / (lhs: matrix, rhs: Double) -> matrix{ + return make_operator(lhs, operation: "/", rhs: rhs)} +// LESS THAN +infix operator < {associativity none precedence 140} +func < (lhs: matrix, rhs: Double) -> matrix{ + return make_operator(lhs, operation: "<", rhs: rhs)} +func < (lhs: matrix, rhs: matrix) -> matrix{ + return make_operator(lhs, operation: "<", rhs: rhs)} +func < (lhs: Double, rhs: matrix) -> matrix{ + return make_operator(lhs, operation: "<", rhs: rhs)} +// GREATER THAN +infix operator > {associativity none precedence 140} +func > (lhs: matrix, rhs: Double) -> matrix{ + return make_operator(lhs, operation: ">", rhs: rhs)} +func > (lhs: matrix, rhs: matrix) -> matrix{ + return make_operator(lhs, operation: ">", rhs: rhs)} +func > (lhs: Double, rhs: matrix) -> matrix{ + return make_operator(lhs, operation: ">", rhs: rhs)} +// GREATER THAN OR EQUAL +infix operator >= {associativity none precedence 140} +func >= (lhs: matrix, rhs: Double) -> matrix{ + return make_operator(lhs, operation: ">=", rhs: rhs)} +func >= (lhs: matrix, rhs: matrix) -> matrix{ + return make_operator(lhs, operation: ">=", rhs: rhs)} +func >= (lhs: Double, rhs: matrix) -> matrix{ + return make_operator(lhs, operation: ">=", rhs: rhs)} +// LESS THAN OR EQUAL +infix operator <= {associativity none precedence 140} +func <= (lhs: matrix, rhs: Double) -> matrix{ + return make_operator(lhs, operation: "<=", rhs: rhs)} +func <= (lhs: matrix, rhs: matrix) -> matrix{ + return make_operator(lhs, operation: "<=", rhs: rhs)} +func <= (lhs: Double, rhs: matrix) -> matrix{ + return make_operator(lhs, operation: "<=", rhs: rhs)} \ No newline at end of file diff --git a/swix_ios_app/swix_ios_app/swix/matrix/m-simple-math.swift b/swix_ios_app/swix_ios_app/swix/matrix/m-simple-math.swift new file mode 100644 index 0000000..6113bbc --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/matrix/m-simple-math.swift @@ -0,0 +1,110 @@ +// +// twoD-math.swift +// swix +// +// Created by Scott Sievert on 7/10/14. +// Copyright (c) 2014 com.scott. All rights reserved. 
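// Recap of the operators defined in m-operators.swift above: +, -, *, / and the
// comparisons are all element-wise (use A.dot(B) or dot(A, B) for true matrix
// multiplication), comparisons return 0/1 matrices, `A <- 3.0` broadcasts a
// scalar into every entry of a var matrix, and `A !/ b` solves A x = b. Sketch:
//     let A = eye(3) * 2.0
//     let b = ones(3)
//     let x = A !/ b       // x ~== ones(3) * 0.5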
+// + +import Foundation +import Accelerate + +func apply_function(function: ndarray->ndarray, x: matrix)->matrix{ + let y = function(x.flat) + var z = zeros_like(x) + z.flat = y + return z +} + +// TRIG +func sin(x: matrix) -> matrix{ + return apply_function(sin, x: x) +} +func cos(x: matrix) -> matrix{ + return apply_function(cos, x: x) +} +func tan(x: matrix) -> matrix{ + return apply_function(tan, x: x) +} +func tanh(x: matrix) -> matrix { + return apply_function(tanh, x: x) +} + +// BASIC INFO +func abs(x: matrix) -> matrix{ + return apply_function(abs, x: x) +} +func sign(x: matrix) -> matrix{ + return apply_function(sign, x: x) +} + +// POWER FUNCTION +func pow(x: matrix, power: Double) -> matrix{ + let y = pow(x.flat, power: power) + var z = zeros_like(x) + z.flat = y + return z +} +func sqrt(x: matrix) -> matrix{ + return apply_function(sqrt, x: x) +} + +// ROUND +func floor(x: matrix) -> matrix{ + return apply_function(floor, x: x) +} +func ceil(x: matrix) -> matrix{ + return apply_function(ceil, x: x) +} +func round(x: matrix) -> matrix{ + return apply_function(round, x: x) +} + +// LOG +func log(x: matrix) -> matrix{ + return apply_function(log, x: x) +} + +// BASIC STATS +func min(x:matrix, y:matrix)->matrix{ + var z = zeros_like(x) + z.flat = min(x.flat, y: y.flat) + return z +} +func max(x:matrix, y:matrix)->matrix{ + var z = zeros_like(x) + z.flat = max(x.flat, y: y.flat) + return z +} + + +// AXIS +func sum(x: matrix, axis:Int = -1) -> ndarray{ + // arg dim: indicating what dimension you want to sum over. For example, if dim==0, then it'll sum over dimension 0 -- it will add all the numbers in the 0th dimension, x[0.. ndarray{ + assert(axis==0 || axis==1, "if you want to sum over the entire matrix, call `sum(x.flat)`.") + let y = log(x) + let z = sum(y, axis:axis) + return exp(z) +} +func mean(x:matrix, axis:Int = -1) -> ndarray{ + assert(axis==0 || axis==1, "If you want to find the average of the whole matrix call `mean(x.flat)`") + let div = axis==0 ? x.shape.0 : x.shape.1 + return sum(x, axis:axis) / div.double +} diff --git a/swix_ios_app/swix_ios_app/swix/ndarray/complex-math.swift b/swix_ios_app/swix_ios_app/swix/ndarray/complex-math.swift new file mode 100644 index 0000000..6491261 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/ndarray/complex-math.swift @@ -0,0 +1,101 @@ +// +// math.swift +// swix +// +// Created by Scott Sievert on 7/11/14. +// Copyright (c) 2014 com.scott. All rights reserved. +// + +import Foundation +import Accelerate + + +// integration +func cumtrapz(x:ndarray)->ndarray{ + // integrate and see the steps at each iteration + let y = zeros_like(x) + var dx:CDouble = 1.0 + vDSP_vtrapzD(!x, 1.stride, &dx, !y, 1.stride, x.n.length) + return y +} +func trapz(x:ndarray)->Double{ + // integrate and get the final value + return cumtrapz(x)[-1] +} +// basic definitions +func inner(x:ndarray, y:ndarray)->Double{ + // the inner product. aka dot product, but I use dot product as a short for matrix multiplication + return sum(x * y) +} +func outer(x:ndarray, y:ndarray)->matrix{ + // the outer product. 
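    // outer(x, y)[i, j] == x[i] * y[j]: meshgrid expands both arrays to
    // (x.n, y.n) matrices and multiplies them element-wise.
    // e.g. outer(array(1, 2), y: array(3, 4)) -> [[3, 4], [6, 8]]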
+ let (xm, ym) = meshgrid(x, y: y) + return xm * ym +} +// fourier transforms +func fft(x: ndarray) -> (ndarray, ndarray){ + let N:CInt = x.n.cint + var yr = zeros(N.int) + var yi = zeros(N.int) + + // setup for the accelerate calling + let radix:FFTRadix = FFTRadix(FFT_RADIX2) + let pass:vDSP_Length = vDSP_Length((log2(N.double)+1.0).int) + let setup:FFTSetupD = vDSP_create_fftsetupD(pass, radix) + let log2n:Int = (log2(N.double)+1.0).int + let z = zeros(N.int) + var x2:DSPDoubleSplitComplex = DSPDoubleSplitComplex(realp: !x, imagp:!z) + var y = DSPDoubleSplitComplex(realp:!yr, imagp:!yi) + let dir = FFTDirection(FFT_FORWARD) + let stride = 1.stride + + // perform the actual computation + vDSP_fft_zropD(setup, &x2, stride, &y, stride, log2n.length, dir) + + // free memory + vDSP_destroy_fftsetupD(setup) + + // this divide seems wrong + yr /= 2.0 + yi /= 2.0 + return (yr, yi) +} +func ifft(yr: ndarray, yi: ndarray) -> ndarray{ + let N = yr.n + var x = zeros(N) + + // setup for the accelerate calling + let radix:FFTRadix = FFTRadix(FFT_RADIX2) + let pass:vDSP_Length = vDSP_Length((log2(N.double)+1.0).int) + let setup:FFTSetupD = vDSP_create_fftsetupD(pass, radix) + let log2n:Int = (log2(N.double)+1.0).int + let z = zeros(N) + var x2:DSPDoubleSplitComplex = DSPDoubleSplitComplex(realp: !yr, imagp:!yi) + var result:DSPDoubleSplitComplex = DSPDoubleSplitComplex(realp: !x, imagp:!z) + let dir = FFTDirection(FFT_INVERSE) + let stride = 1.stride + + // doing the actual computation + vDSP_fft_zropD(setup, &x2, stride, &result, stride, log2n.length, dir) + + // this divide seems wrong + x /= 16.0 + return x +} +func fftconvolve(x:ndarray, kernel:ndarray)->ndarray{ + // convolve two arrays using the fourier transform. + // zero padding, assuming kernel is smaller than x + var k_pad = zeros_like(x) + k_pad[0.. Double{ + // takes the norm of an array + if ord==2 { return sqrt(sum(pow(x, power: 2)))} + else if ord==1 { return sum(abs(x))} + else if ord==0 { return sum(abs(x) > S2_THRESHOLD)} + else if ord == -1 || ord == -2{ + return pow(sum(abs(x)^ord.double), 1/ord.double) + } + else if ord.double == inf {return max(abs(x))} + else if ord.double == -inf {return min(abs(x))} + assert(false, "type of norm unrecongnized") + return -1.0} +func count_nonzero(x:ndarray)->Double{ + return sum(abs(x) > S2_THRESHOLD) +} + +// modifying elements of the array +func clip(a:ndarray, a_min:Double, a_max:Double)->ndarray{ + // clip the matrix + var y = a.copy() + y[argwhere(a < a_min)] <- a_min + y[argwhere(a > a_max)] <- a_max + return y +} +func reverse(x:ndarray) -> ndarray{ + // reverse the array + let y = x.copy() + vDSP_vrvrsD(!y, 1.stride, y.n.length) + return y +} +func delete(x:ndarray, idx:ndarray) -> ndarray{ + // delete select elements + var i = ones(x.n) + i[idx] *= 0 + let y = x[argwhere(i)] + return y +} +func `repeat`(x: ndarray, N:Int, axis:Int=0) -> ndarray{ + // repeat the array element wise or as a whole array + var y = zeros((N, x.n)) + + // wrapping using OpenCV + CVWrapper.`repeat`(!x, to:!y, n_x:x.n.cint, n_repeat:N.cint) + + if axis==0{} + else if axis==1 { y = y.T} + return y.flat +} + +// SORTING and the like +func sort(x:ndarray)->ndarray{ + // sort the array and return a new array + let y = x.copy() + y.sort() + return y +} +func unique(x:ndarray)->ndarray{ + var y = sort(x) + var z = concat(zeros(1), y: y) + let diff = abs(z[1.. 
S2_THRESHOLD + let un = y[argwhere(diff)] + if abs(min(x)) < S2_THRESHOLD{ + return sort(concat(zeros(1), y: un)) + } + else{ + return un + } +} +func shuffle(x:ndarray)->ndarray{ + // randomly shuffle the array + let y = x.copy() + CVWrapper.shuffle(!y, n:y.n.cint) + return y +} + +// SETS +func intersection(x: ndarray, y:ndarray)->ndarray{ + return unique(x[argwhere(in1d(x, y: y))]) +} +func union(x:ndarray, y:ndarray)->ndarray{ + return unique(concat(x, y: y)) +} +func in1d(x: ndarray, y:ndarray)->ndarray{ + if (x.n > 0 && y.n > 0){ + let (xx, yy) = meshgrid(x, y: y) + let i = abs(xx-yy) < S2_THRESHOLD + let j = (sum(i, axis:1)) > 0.5 + return 0+j + } + return array() +} +func concat(x:ndarray, y:ndarray)->ndarray{ + // concatenate two matrices + var z = zeros(x.n + y.n) + z[0..Int{ + // find the location of the max + var m:CInt = 0 + CVWrapper.argmax(!x, n: x.n.cint, max: &m) + return Int(m) +} +func argmin(x:ndarray)->Int{ + // find the location of the min + var m:CInt = 0 + CVWrapper.argmin(!x, n: x.n.cint, min: &m) + return Int(m) +} +func argsort(x:ndarray)->ndarray{ + // sort the array but use integers + + // the array of integers that OpenCV needs + var y:[CInt] = Array(count:x.n, repeatedValue:0) + // calling opencv's sortidx + CVWrapper.argsort(!x, n: x.n.cint, into:&y) + // the integer-->double conversion + let z = zeros_like(x) + vDSP_vflt32D(&y, 1.stride, !z, 1.stride, x.n.length) + return z +} +func argwhere(idx: ndarray) -> ndarray{ + // counts non-zero elements, return array of doubles (which can be indexed!). + let i = arange(idx.n) + let args = zeros(sum(idx).int) + vDSP_vcmprsD(!i, 1.stride, !idx, 1.stride, !args, 1.stride, idx.n.length) + return args +} + + +// LOGICAL +func logical_and(x:ndarray, y:ndarray)->ndarray{ + return x * y +} +func logical_or(x:ndarray, y:ndarray)->ndarray{ + var i = x + y + let j = argwhere(i > 0.5) + i[j] <- 1.0 + return i +} +func logical_not(x:ndarray)->ndarray{ + return 1-x +} +func logical_xor(x:ndarray, y:ndarray)->ndarray{ + let i = x + y + let j = (i < 1.5) && (i > 0.5) + return j +} + +// PRINTING +func println(x: ndarray, prefix:String="array([", postfix:String="])", newline:String="\n", format:String="%.3f", seperator:String=", ", printAllElements:Bool=false){ + // print the matrix + print(prefix, terminator: "") + var suffix = seperator + var printed = false + var string:NSString + for i in 0..(x.n-4){ + string = NSString(format: format+suffix, x[i]) + print(String(string), terminator:"") + } + else if printed == false{ + printed = true + print("..., ", terminator: "") + } + } + print(postfix, terminator: "") + print(newline, terminator: "") +} +func print(x: ndarray, prefix:String="ndarray([", postfix:String="])", format:String="%.3f", printWholeMatrix:Bool=false){ + println(x, prefix:prefix, postfix:postfix, newline:"\n", format:format, printAllElements:printWholeMatrix) +} + + + diff --git a/swix_ios_app/swix_ios_app/swix/ndarray/initing.swift b/swix_ios_app/swix_ios_app/swix/ndarray/initing.swift new file mode 100644 index 0000000..c2b51f2 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/ndarray/initing.swift @@ -0,0 +1,135 @@ +// +// initing.swift +// swix +// +// Created by Scott Sievert on 7/9/14. +// Copyright (c) 2014 com.scott. All rights reserved. +// + +import Foundation +import Accelerate +import Swift + +// SLOW PARTS: array(doubles), read_csv, write_csv. 
not a huge deal -- hopefully not used in final code + + +func zeros(N: Int) -> ndarray{ + // N zeros + return ndarray(n: N) +} +func zeros_like(x: ndarray) -> ndarray{ + // make an array like the other array + return zeros(x.n) +} +func ones_like(x: ndarray) -> ndarray{ + // make an array like the other array + return zeros_like(x) + 1 +} +func ones(N: Int) -> ndarray{ + // N ones + return ndarray(n: N)+1 +} +func arange(max: Double, x exclusive:Bool = true) -> ndarray{ + // 0.. ndarray{ + // 0.. ndarray{ + // min, min+step, min+2*step..., max-step, max + return linspace(min, max: max, num:1+((max-min)/step).int) +} +func arange(min: Double, max: Double, x exclusive: Bool = true) -> ndarray{ + // min...max + var pad = 0 + if !exclusive {pad = 1} + let N = max.int - min.int + pad + let x = zeros(N) + var o = CDouble(min) + var l = CDouble(1) + vDSP_vrampD(&o, &l, !x, 1.stride, N.length) + return x +} +func linspace(min: Double, max: Double, num: Int=50) -> ndarray{ + // 0...1 + let x = zeros(num+0) + var min = CDouble(min) + var step = CDouble((max-min).double/(num-1).double) + vDSP_vrampD(&min, &step, !x, 1.stride, x.n.length) + return x +} +func array(numbers: Double...) -> ndarray{ + // array(1, 2, 3, 4) -> arange(4)+1 + // okay to leave unoptimized, only used for testing + var x = zeros(numbers.count) + var i = 0 + for number in numbers{ + x[i] = number + i++ + } + return x +} +func asarray(x: [Double]) -> ndarray{ + // convert a grid of double's to an array + var y = zeros(x.count) + y.grid = x + return y +} +func asarray(seq: Range) -> ndarray { + // make a range a grid of arrays + // improve with [1] + // [1]:https://gist.github.com/nubbel/d5a3639bea96ad568cf2 + let start:Double = seq.startIndex.double * 1.0 + let end:Double = seq.endIndex.double * 1.0 + return arange(start, max: end, x:true) +} + +func copy(x: ndarray) -> ndarray{ + // copy the value + return x.copy() +} + +func seed(n:Int){ + SWIX_SEED = __CLPK_integer(n) +} + +func rand(N: Int, distro:String="uniform") -> ndarray{ + let x = zeros(N) + var i:__CLPK_integer = 1 + if distro=="normal" {i = __CLPK_integer(3)} + var seed:Array<__CLPK_integer> = [SWIX_SEED, 2, 3, 5] + var nn:__CLPK_integer = __CLPK_integer(N) + dlarnv_(&i, &seed, &nn, !x) + SWIX_SEED = seed[0] + return x +} +func randn(N: Int, mean: Double=0, sigma: Double=1) -> ndarray{ + return (rand(N, distro:"normal") * sigma) + mean; +} +func randperm(N:Int)->ndarray{ + let x = arange(N) + let y = shuffle(x) + return y +} + + + + + + + + + + + + + + + + + + + + diff --git a/swix_ios_app/swix_ios_app/swix/ndarray/ndarray.swift b/swix_ios_app/swix_ios_app/swix/ndarray/ndarray.swift new file mode 100644 index 0000000..24bd49c --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/ndarray/ndarray.swift @@ -0,0 +1,163 @@ +// +// initing.swift +// swix +// +// Created by Scott Sievert on 7/9/14. +// Copyright (c) 2014 com.scott. All rights reserved. +// + +import Foundation +import Accelerate + +// the matrix definition and related functions go here + +// SLOW PARTS: x[ndarray, ndarray] set + +struct ndarray { + let n: Int // the number of elements + var count: Int // ditto + var grid: [Double] // the raw values + init(n: Int) { + self.n = n + self.count = n + grid = Array(count: n, repeatedValue: 0.0) + } + func reshape(shape: (Int,Int)) -> matrix{ + // reshape to a matrix of size. 
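        // A shape entry of -1 is inferred from the element count, NumPy style,
        // so arange(6).reshape((2, -1)) gives a 2x3 matrix; the assert below
        // fires if the two dimensions don't multiply out to n.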
+ var (mm, nn) = shape + if mm == -1 {mm = n / nn} + if nn == -1 {nn = n / mm} + assert(mm * nn == n, "Number of elements must not change.") + var y:matrix = zeros((mm, nn)) + y.flat = self + return y + } + func copy() -> ndarray{ + // return a new array just like this one + let y = zeros(n) + cblas_dcopy(self.n.cint, !self, 1.cint, !y, 1.cint) + return y + } + func sort(){ + // sort this array *in place* + vDSP_vsortD(!self, self.n.length, 1.cint) + } + func indexIsValidForRow(index: Int) -> Bool { + // making sure this index is valid + return index >= 0 && index < n + } + func min() -> Double{ + // return the minimum + var m:CDouble=0 + vDSP_minvD(!self, 1.stride, &m, self.n.length) + return Double(m) + } + func max() -> Double{ + // return the maximum + var m:CDouble=0 + vDSP_maxvD(!self, 1.stride, &m, self.n.length) + return m + } + func mean() -> Double{ + // return the mean + return sum(self) / n + } + subscript(index:String)->ndarray{ + // assumed to be x["all"]. returns every element + get { + assert(index == "all", "Currently only \"all\" is supported") + return self + } + set { + assert(index == "all", "Currently only \"all\" is supported") + self[0.. Double { + // x[0] -> Double. access a single element + get { + var newIndex:Int = index + if newIndex < 0 {newIndex = self.n + index} + assert(indexIsValidForRow(newIndex), "Index out of range") + return grid[newIndex] + } + set { + var newIndex:Int = index + if newIndex < 0 {newIndex = self.n + index} + assert(indexIsValidForRow(newIndex), "Index out of range") + grid[newIndex] = newValue + } + } + subscript(r: Range) -> ndarray { + // x[0.. ndarray { + // x[arange(2)]. access a range of values; x[0..<2] depends on this. + get { + // ndarray has fractional parts, and those parts get truncated + var idx:ndarray + if i.n > 0 { + if i.n == self.n && i.max() < 1.5 { + // assumed to be boolean + idx = argwhere(i > 0.5) + } + else { + // it's just indexes + idx = i.copy() + } + if idx.max() < 0 { + // negative indexing + idx += n.double + } + if (idx.n > 0){ + assert((idx.max().int < self.n) && (idx.min() >= 0), "An index is out of bounds") + let y = zeros(idx.n) + vDSP_vindexD(!self, !idx, 1.stride, !y, 1.stride, idx.n.length) + return y + } + } + return array() + } + set { + var idx:ndarray// = oidx.copy() + if i.n > 0{ + if i.n == self.n && i.max() < 1.5{ + // assumed to be boolean + idx = argwhere(i > 0.5) + } + else { + // it's just indexes + idx = i.copy() + } + if idx.n > 0{ + if idx.max() < 0 {idx += n.double } + assert((idx.max().int < self.n) && (idx.min() >= 0), "An index is out of bounds") + index_xa_b_objc(!self, !idx, !newValue, idx.n.cint) + } + } + } + } +} + + + + + + + + + + + + + + + + + diff --git a/swix_ios_app/swix_ios_app/swix/ndarray/operators.swift b/swix_ios_app/swix_ios_app/swix/ndarray/operators.swift new file mode 100644 index 0000000..3b39148 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/ndarray/operators.swift @@ -0,0 +1,244 @@ +// +// oneD-functions.swift +// swix +// +// Created by Scott Sievert on 7/9/14. +// Copyright (c) 2014 com.scott. All rights reserved. 
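// The comparison operators defined below return 0/1 ndarrays, and the ndarray
// subscript above (ndarray.swift) treats a same-length 0/1 array as a boolean
// mask, so NumPy-style filtering works directly. Sketch:
//     let x = randn(100)
//     let positives = x[x > 0]      // boolean-mask indexing
//     let firstFew  = x[arange(3)]  // index-array indexing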
+// + +import Foundation +import Accelerate + +func make_operator(lhs:ndarray, operation:String, rhs:ndarray) -> ndarray{ + assert(lhs.n == rhs.n, "Sizes must match!") + + // see [1] on how to integrate Swift and accelerate + // [1]:https://github.com/haginile/SwiftAccelerate + var result = lhs.copy() + let N = lhs.n + if operation=="+" + {cblas_daxpy(N.cint, 1.0.cdouble, !rhs, 1.cint, !result, 1.cint);} + else if operation=="-" + {cblas_daxpy(N.cint, -1.0.cdouble, !rhs, 1.cint, !result, 1.cint);} + else if operation=="*" + {vDSP_vmulD(!lhs, 1, !rhs, 1, !result, 1, lhs.n.length)} + else if operation=="/" + {vDSP_vdivD(!rhs, 1, !lhs, 1, !result, 1, lhs.n.length)} + else if operation=="%"{ + result = remainder(lhs, x2: rhs) + } + else if operation=="<" || operation==">" || operation==">=" || operation=="<=" { + result = zeros(lhs.n) + CVWrapper.compare(!lhs, with: !rhs, using: operation.nsstring as String, into: !result, ofLength: lhs.n.cint) + // since opencv uses images which use 8-bit values + result /= 255 + } + else if operation == "=="{ + return abs(lhs-rhs) < S2_THRESHOLD + } + else if operation == "!=="{ + return abs(lhs-rhs) > S2_THRESHOLD + } + else {assert(false, "operation not recongized!")} + return result +} +func make_operator(lhs:ndarray, operation:String, rhs:Double) -> ndarray{ + var array = zeros(lhs.n) + var right = [rhs] + if operation == "%"{ + // unoptimized. for loop in c + let r = zeros_like(lhs) + rhs + array = remainder(lhs, x2: r) + } else if operation == "*"{ + var C:CDouble = 0 + var mul = CDouble(rhs) + vDSP_vsmsaD(!lhs, 1.stride, &mul, &C, !array, 1.stride, lhs.n.length) + } + else if operation == "+" + {vDSP_vsaddD(!lhs, 1, &right, !array, 1, lhs.n.length)} + else if operation=="/" + {vDSP_vsdivD(!lhs, 1, &right, !array, 1, lhs.n.length)} + else if operation=="-" + {array = make_operator(lhs, operation: "-", rhs: ones(lhs.n)*rhs)} + else if operation=="<" || operation==">" || operation=="<=" || operation==">="{ + CVWrapper.compare(!lhs, withDouble:rhs.cdouble, using:operation.nsstring as String, into:!array, ofLength:lhs.n.cint) + array /= 255 + } + else {assert(false, "operation not recongnized! 
Error with the speedup?")} + return array +} +func make_operator(lhs:Double, operation:String, rhs:ndarray) -> ndarray{ + var array = zeros(rhs.n) // lhs[i], rhs[i] + let l = ones(rhs.n) * lhs + if operation == "*" + {array = make_operator(rhs, operation: "*", rhs: lhs)} + else if operation=="%"{ + let l = zeros_like(rhs) + lhs + array = remainder(l, x2: rhs) + } + else if operation == "+"{ + array = make_operator(rhs, operation: "+", rhs: lhs)} + else if operation=="-" + {array = -1 * make_operator(rhs, operation: "-", rhs: lhs)} + else if operation=="/"{ + array = make_operator(l, operation: "/", rhs: rhs)} + else if operation=="<"{ + array = make_operator(rhs, operation: ">", rhs: lhs)} + else if operation==">"{ + array = make_operator(rhs, operation: "<", rhs: lhs)} + else if operation=="<="{ + array = make_operator(rhs, operation: ">=", rhs: lhs)} + else if operation==">="{ + array = make_operator(rhs, operation: "<=", rhs: lhs)} + else {assert(false, "Operator not reconginzed")} + return array +} + +// DOUBLE ASSIGNMENT +infix operator <- {} +func <- (inout lhs:ndarray, rhs:Double){ + let assign = ones(lhs.n) * rhs + lhs = assign +} + +// EQUALITY +infix operator ~== {associativity none precedence 140} +func ~== (lhs: ndarray, rhs: ndarray) -> Bool{ + assert(lhs.n == rhs.n, "`~==` only works on arrays of equal size") + return max(abs(lhs - rhs)) > 1e-6 ? false : true; +} +func == (lhs: ndarray, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: "==", rhs: rhs)} +func !== (lhs: ndarray, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: "!==", rhs: rhs)} + +// NICE ARITHMETIC +func += (inout x: ndarray, right: Double){ + x = x + right} +func *= (inout x: ndarray, right: Double){ + x = x * right} +func -= (inout x: ndarray, right: Double){ + x = x - right} +func /= (inout x: ndarray, right: Double){ + x = x / right} + +// MOD +infix operator % {associativity none precedence 140} +func % (lhs: ndarray, rhs: Double) -> ndarray{ + return make_operator(lhs, operation: "%", rhs: rhs)} +func % (lhs: ndarray, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: "%", rhs: rhs)} +func % (lhs: Double, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: "%", rhs: rhs)} +// POW +infix operator ^ {associativity none precedence 140} +func ^ (lhs: ndarray, rhs: Double) -> ndarray{ + return pow(lhs, power: rhs)} +func ^ (lhs: ndarray, rhs: ndarray) -> ndarray{ + return pow(lhs, y: rhs)} +func ^ (lhs: Double, rhs: ndarray) -> ndarray{ + return pow(lhs, y: rhs)} +// PLUS +infix operator + {associativity none precedence 140} +func + (lhs: ndarray, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: "+", rhs: rhs)} +func + (lhs: Double, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: "+", rhs: rhs)} +func + (lhs: ndarray, rhs: Double) -> ndarray{ + return make_operator(lhs, operation: "+", rhs: rhs)} +// MINUS +infix operator - {associativity none precedence 140} +func - (lhs: ndarray, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: "-", rhs: rhs)} +func - (lhs: Double, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: "-", rhs: rhs)} +func - (lhs: ndarray, rhs: Double) -> ndarray{ + return make_operator(lhs, operation: "-", rhs: rhs)} +// TIMES +infix operator * {associativity none precedence 140} +func * (lhs: ndarray, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: "*", rhs: rhs)} +func * (lhs: Double, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: "*", 
rhs: rhs)} +func * (lhs: ndarray, rhs: Double) -> ndarray{ + return make_operator(lhs, operation: "*", rhs: rhs)} +// DIVIDE +infix operator / {associativity none precedence 140} +func / (lhs: ndarray, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: "/", rhs: rhs) + } +func / (lhs: Double, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: "/", rhs: rhs)} +func / (lhs: ndarray, rhs: Double) -> ndarray{ + return make_operator(lhs, operation: "/", rhs: rhs)} +// LESS THAN +infix operator < {associativity none precedence 140} +func < (lhs: ndarray, rhs: Double) -> ndarray{ + return make_operator(lhs, operation: "<", rhs: rhs)} +func < (lhs: ndarray, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: "<", rhs: rhs)} +func < (lhs: Double, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: "<", rhs: rhs)} +// GREATER THAN +infix operator > {associativity none precedence 140} +func > (lhs: ndarray, rhs: Double) -> ndarray{ + return make_operator(lhs, operation: ">", rhs: rhs)} +func > (lhs: ndarray, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: ">", rhs: rhs)} +func > (lhs: Double, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: ">", rhs: rhs)} +// GREATER THAN OR EQUAL +infix operator >= {associativity none precedence 140} +func >= (lhs: ndarray, rhs: Double) -> ndarray{ + return make_operator(lhs, operation: ">=", rhs: rhs)} +func >= (lhs: ndarray, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: ">=", rhs: rhs)} +func >= (lhs: Double, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: ">=", rhs: rhs)} +// LESS THAN OR EQUAL +infix operator <= {associativity none precedence 140} +func <= (lhs: ndarray, rhs: Double) -> ndarray{ + return make_operator(lhs, operation: "<=", rhs: rhs)} +func <= (lhs: ndarray, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: "<=", rhs: rhs)} +func <= (lhs: Double, rhs: ndarray) -> ndarray{ + return make_operator(lhs, operation: "<=", rhs: rhs)} +// LOGICAL AND +infix operator && {associativity none precedence 140} +func && (lhs: ndarray, rhs: ndarray) -> ndarray{ + return logical_and(lhs, y: rhs)} +// LOGICAL OR +func || (lhs: ndarray, rhs: ndarray) -> ndarray { + return logical_or(lhs, y: rhs) +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/swix_ios_app/swix_ios_app/swix/ndarray/simple-math.swift b/swix_ios_app/swix_ios_app/swix/ndarray/simple-math.swift new file mode 100644 index 0000000..050b93c --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/ndarray/simple-math.swift @@ -0,0 +1,251 @@ +// +// oneD_math.swift +// swix +// +// Created by Scott Sievert on 6/11/14. +// Copyright (c) 2014 com.scott. All rights reserved. +// + + +import Foundation +import Accelerate + +func apply_function(function: Double->Double, x: ndarray) -> ndarray{ + // apply a function to every element. 
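The operator overloads defined above in operators.swift give `ndarray` NumPy-style semantics: `+`/`-` go through `cblas_daxpy`, `*` and `/` through vDSP, the comparison operators return 0/1 masks via the OpenCV wrapper (rescaled from 0/255), `==`/`!==` are threshold tests against S2_THRESHOLD, and `~==` collapses to a single Bool. A minimal usage sketch, illustrative only; it assumes the swix sources in this patch are in the same target and that the constructors `arange`, `ones`, and `zeros` from initing.swift behave as their names suggest:

```swift
let x = arange(4)                 // [0, 1, 2, 3]
let y = ones(4) * 2               // [2, 2, 2, 2]

let s = x + y                     // elementwise add via cblas_daxpy
let p = x * y                     // elementwise multiply via vDSP_vmulD
let mask = x > 1.5                // 0/1 mask (OpenCV's 0/255 result divided by 255)
let approx = (x / 2) ~== (x * 0.5) // true: max |difference| is below 1e-6

var z = zeros(4)
z <- 3.0                          // assign the scalar 3.0 to every element
z += 1.0                          // z is now [4, 4, 4, 4]
```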
+ + // I've tried the below, but it doesn't apply the function to every element (at least in Xcode6b4) + //var function:Double->Double = sin + //var x = arange(N)*pi / N + //var y = zeros(x.count) + //dispatch_apply(UInt(N), dispatch_get_global_queue(0,0), {(i)->() in + // y[Int(i)] = function(x[Int(i)]) + // }) + + var y = zeros(x.count) + for i in 0..ndarray{ + // apply select optimized functions + let y = zeros_like(x) + let n = x.n.length + var count = Int32(x.n) + if function=="abs"{ + vDSP_vabsD(!x, 1, !y, 1, n);} + else if function=="sign"{ + var o = CDouble(0) + var l = CDouble(1) + vDSP_vlimD(!x, 1.stride, &o, &l, !y, 1.stride, n) + } + else if function=="cumsum"{ + var scalar:CDouble = 1 + vDSP_vrsumD(!x, 1.stride, &scalar, !y, 1.stride, n) + } + else if function=="floor"{ + vvfloor(!y, !x, &count) + } + else if function=="log10"{ + assert(min(x) > 0, "log must be called with positive values") + vvlog10(!y, !x, &count) + } + else if function=="log2"{ + assert(min(x) > 0, "log must be called with positive values") + vvlog2(!y, !x, &count) + } + else if function=="exp2"{ + vvexp2(!y, !x, &count) + } + else if function=="log"{ + assert(min(x) > 0, "log must be called with positive values") + vvlog(!y, !x, &count) + } + else if function=="exp"{ + vvexp(!y, !x, &count) + } + else if function=="cos"{ + vvcos(!y, !x, &count) + } + else if function=="sin"{ + vvsin(!y, !x, &count) + } + else if function=="tan"{ + vvtan(!y, !x, &count) + } + else if function=="expm1"{ + vvexpm1(!y, !x, &count) + } + else if function=="round"{ + vvnint(!y, !x, &count) + } + else if function=="ceil"{ + vvceil(!y, !x, &count) + } + else if function == "tanh" { + vvtanh(!y, !x, &count) + } + else {assert(false, "Function not recongized")} + return y +} + +// MIN/MAX +func min(x: ndarray) -> Double{ + // finds the min + return x.min()} +func max(x: ndarray) -> Double{ + // finds the max + return x.max()} +func max(x: ndarray, y:ndarray)->ndarray{ + // finds the max of two arrays element wise + assert(x.n == y.n) + let z = zeros_like(x) + vDSP_vmaxD(!x, 1.stride, !y, 1.stride, !z, 1.stride, x.n.length) + return z +} +func min(x: ndarray, y:ndarray)->ndarray{ + // finds the min of two arrays element wise + assert(x.n == y.n) + let z = zeros_like(x) + vDSP_vminD(!x, 1.stride, !y, 1.stride, !z, 1.stride, x.n.length) + return z +} + +// BASIC STATS +func mean(x: ndarray) -> Double{ + // finds the mean + return x.mean() +} +func std(x: ndarray) -> Double{ + // standard deviation + return sqrt(variance(x))} +func variance(x: ndarray) -> Double{ + // the varianace + return sum(pow(x - mean(x), power: 2) / x.count.double)} + +// BASIC INFO +func sign(x: ndarray)->ndarray{ + // finds the sign + return apply_function("sign", x: x)} +func sum(x: ndarray) -> Double{ + // finds the sum of an array + var ret:CDouble = 0 + vDSP_sveD(!x, 1.stride, &ret, x.n.length) + return Double(ret) +} +func remainder(x1:ndarray, x2:ndarray)->ndarray{ + // finds the remainder + return (x1 - floor(x1 / x2) * x2) +} +func cumsum(x: ndarray) -> ndarray{ + // the sum of each element before. 
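The simple-math file above dispatches elementwise functions by name to vDSP/vForce (`vvsin`, `vvlog`, and so on) and builds the statistics on top of the reductions: `variance` is the population variance `sum(pow(x - mean(x), 2)) / n`, `std` its square root, and `remainder` follows the identity `x1 - floor(x1 / x2) * x2`. A small sanity-check sketch, assuming the swix functions defined in this patch are in scope:

```swift
let x = arange(5)                      // [0, 1, 2, 3, 4]

sum(x)                                 // 10.0, via vDSP_sveD
mean(x)                                // 2.0
variance(x)                            // 2.0  = (4 + 1 + 0 + 1 + 4) / 5
std(x)                                 // ≈ 1.414

// remainder(x1, x2) == x1 - floor(x1 / x2) * x2, elementwise
let r = remainder(x, x2: ones(5) * 3)  // [0, 1, 2, 0, 1]
```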
+ return apply_function("cumsum", x: x)} +func abs(x: ndarray) -> ndarray{ + // absolute value + return apply_function("abs", x: x)} +func prod(x:ndarray)->Double{ + var y = x.copy() + var factor = 1.0 + if min(y) < 0{ + y[argwhere(y < 0.0)] *= -1.0 + if sum(x < 0) % 2 == 1 {factor = -1} + } + return factor * exp(sum(log(y))) +} +func cumprod(x:ndarray)->ndarray{ + var y = x.copy() + if min(y) < 0.0{ + let i = y < 0 + y[argwhere(i)] *= -1.0 + let j = 1 - (cumsum(i) % 2.0) < S2_THRESHOLD + var z = exp(cumsum(log(y))) + z[argwhere(j)] *= -1.0 + return z + } + return exp(cumsum(log(y))) +} + + +// POWER FUNCTIONS +func pow(x:ndarray, power:Double)->ndarray{ + // take the power. also callable with ^ + let y = zeros_like(x) + CVWrapper.pow(!x, n:x.n.cint, power:power, into:!y) + return y +} +func pow(x:ndarray, y:ndarray)->ndarray{ + // take the power. also callable with ^ + let z = zeros_like(x) + var num = CInt(x.n) + vvpow(!z, !y, !x, &num) + return z +} +func pow(x:Double, y:ndarray)->ndarray{ + // take the power. also callable with ^ + let xx = ones(y.n) * x + return pow(xx, y: y) +} +func sqrt(x: ndarray) -> ndarray{ + return x^0.5 +} +func exp(x:ndarray)->ndarray{ + return apply_function("exp", x: x) +} +func exp2(x:ndarray)->ndarray{ + return apply_function("exp2", x: x) +} +func expm1(x:ndarray)->ndarray{ + return apply_function("expm1", x: x) +} + +// ROUND +func round(x:ndarray)->ndarray{ + return apply_function("round", x: x) +} +func round(x:ndarray, decimals:Double)->ndarray{ + let factor = pow(10, decimals) + return round(x*factor) / factor +} +func floor(x: ndarray) -> ndarray{ + return apply_function("floor", x: x) +} +func ceil(x: ndarray) -> ndarray{ + return apply_function("ceil", x: x) +} + +// LOG +func log10(x:ndarray)->ndarray{ + // log_10 + return apply_function("log10", x: x) +} +func log2(x:ndarray)->ndarray{ + // log_2 + return apply_function("log2", x: x) +} +func log(x:ndarray)->ndarray{ + // log_e + return apply_function("log", x: x) +} + +// TRIG +func sin(x: ndarray) -> ndarray{ + return apply_function("sin", x: x) +} +func cos(x: ndarray) -> ndarray{ + return apply_function("cos", x: x) +} +func tan(x: ndarray) -> ndarray{ + return apply_function("tan", x: x) +} +func tanh(x: ndarray) -> ndarray { + return apply_function("tanh", x: x) +} + + + + + + + + + diff --git a/swix_ios_app/swix_ios_app/swix/numbers.swift b/swix_ios_app/swix_ios_app/swix/numbers.swift new file mode 100644 index 0000000..fde7014 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/numbers.swift @@ -0,0 +1,129 @@ +// +// constants.swift +// swix +// +// Created by Scott Sievert on 7/9/14. +// Copyright (c) 2014 com.scott. All rights reserved. +// + +import Foundation +import Accelerate + +// should point to the swift folder +let S2_PREFIX = "\(NSHomeDirectory())/Developer/swix/swix/swix/swix/" +let PYTHON_PATH = "~/anaconda/bin/ipython" + +// how close is close? 
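For reference on the power and rounding helpers just defined in simple-math.swift: `prod` avoids a loop by working on the log scale, `exp(sum(log|x|))`, restoring the sign from the count of negative entries (note a zero element would trip the `log` positivity assertion), and `round(_:decimals:)` scales by 10^decimals before rounding. A brief sketch, assuming those functions and `ones` from initing.swift:

```swift
let x = ones(3) * 2.7

pow(x, power: 3)                   // [19.683, ...], via CVWrapper.pow
x ^ 2.0                            // same machinery through the ^ operator
round(x)                           // [3.0, 3.0, 3.0]  (vvnint rounds to nearest)
round(x * 1.2345, decimals: 2)     // [3.33, ...]: scale by 10², round, scale back

// prod works on the log scale, with the sign restored afterwards
prod(ones(4) * -2)                 // 16.0 (four negative factors → positive product)
```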
+let S2_THRESHOLD = 1e-9 + +// The random seed +var SWIX_SEED:__CLPK_integer = 42 + +// various important constants +var pi = 3.1415926535897932384626433832795028841971693993751058 +var π = pi +var tau = 2 * pi +var τ = tau +var phi = (1.0 + sqrt(5))/2 +var φ = phi +var e = exp(1.double) +var euler = 0.57721566490153286060651209008240243104215933593992 + +// largest possible value +var inf = Double.infinity +var nan = Double.NaN + +// smallest possible difference +var DOUBLE_EPSILON = DBL_EPSILON +var FLOAT_EPSILON = FLT_EPSILON + +func close(x: Double, y: Double)->Bool{ + return abs(x-y) < S2_THRESHOLD +} +func ~= (x:Double, y:Double)->Bool{ + return close(x, y: y) +} +func rad2deg(x:Double)->Double{ + return x * 180.0 / pi +} +func deg2rad(x:Double)->Double{ + return x * pi / 180.0 +} +func max(x:Double, y:Double)->Double{ + return x < y ? y : x +} +func min(x:Double, y:Double)->Double{ + return x < y ? x : y +} +func factorial(n:Double)->Double{ + let y = arange(n)+1 + return prod(y) +} +func binom(n:Double, k:Double)->Double{ + // similar to scipy.special.binom + let i = arange(k)+1 + let result = (n+1-i) / i + return prod(result) +} + +// use 3.double or 3.14.int or N.int +extension Int{ + var stride:vDSP_Stride {return vDSP_Stride(self)} + var length:vDSP_Length {return vDSP_Length(self)} + var int:Int {return Int(self)} + var cint:CInt {return CInt(self)} + var float:Float {return Float(self)} + var double:Double {return Double(self)} +} +extension Double{ + var int:Int {return Int(self)} + var float:Float {return Float(self)} + var double:Double {return Double(self)} + var cdouble:CDouble {return CDouble(self)} +} +extension CInt{ + var int:Int {return Int(self)} + var float:Float {return Float(self)} + var double:Double {return Double(self)} +} +extension Float{ + var int:Int {return Int(self)} + var cfloat:CFloat {return CFloat(self)} + var float:Float {return Float(self)} + var double:Double {return Double(self)} + +} +extension String { + var floatValue: Float { + return (self as NSString).floatValue + } + var doubleValue: Double { + return (self as NSString).doubleValue + } + var nsstring:NSString {return NSString(string:self)} +} + +// damn integer division causes headaches +infix operator / {associativity none precedence 140} +func / (lhs: Int, rhs: Int) -> Double{ + return lhs.double / rhs.double} +func / (lhs: Double, rhs: Int) -> Double{ + return lhs / rhs.double} + +// a quick hack to get what I want +func isNumber(x: Double) ->Bool{return true} +func isNumber(x: Float) ->Bool{return true} +func isNumber(x: Int) ->Bool{return true} +func isNumber(x: CInt) ->Bool{return true} +func isNumber(x: ndarray) ->Bool{return false} +func isNumber(x: matrix) ->Bool{return false} +func isNumber(x: AnyObject)->Bool{return false} + + + + + + + + + diff --git a/swix_ios_app/swix_ios_app/swix/objc/OpenCV.h b/swix_ios_app/swix_ios_app/swix/objc/OpenCV.h new file mode 100644 index 0000000..fa5b7e7 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/OpenCV.h @@ -0,0 +1,48 @@ +// +// fft.m +// swix +// +// Created by Scott Sievert on 7/9/14. +// Copyright (c) 2014 com.scott. All rights reserved. 
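The constants and extensions in numbers.swift are what let the rest of swix write `N.cint`, `1.stride`, or `x.n.length` without explicit casts, redefine integer `/` to yield a Double, and turn `~=` on two Doubles into a tolerance comparison against S2_THRESHOLD. A short sketch of the conveniences, assuming the definitions in this file:

```swift
let n = 7
n.cint                    // CInt(7), handy at BLAS/LAPACK call sites
n.double                  // 7.0
1.stride                  // vDSP_Stride(1)

let q: Double = 3 / 4     // 0.75 — the overload above makes Int division yield a Double
0.1 + 0.2 ~= 0.3          // true: |difference| < S2_THRESHOLD (1e-9)

factorial(4)              // 24.0, computed as prod(arange(4) + 1)
binom(5, k: 3)            // 10.0, similar to scipy.special.binom
```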
+// + +#import +#import + +#import +#import +#import +#import + +@interface CVWrapper : NSObject ++ (void) pointerTest; ++ (void) repeat:(double *)x to:(double*)y n_x:(int)Nx n_repeat:(int)Nrepeat; ++ (void) solve:(double *)A b:(double*)b x:(double*)x m:(int)m n:(int)n; ++ (void) compare:(double*)x with:(double*)y using:(NSString*)op into:(double*)z ofLength:(int)N; ++ (void) compare:(double*)x withDouble:(double)y + using:(NSString*)op into:(double*)z ofLength:(int)N; ++ (void) flip:(double*)x into:(double*)y how:(NSString*)how M:(int)M N:(int)N; ++ (void) argmax:(double*)x N:(int)N max:(int*)max; ++ (void) argmin:(double*)x N:(int)N min:(int*)min; ++ (void) argsort:(double*)x N:(int)N into:(int*)y; ++ (void) pow:(double*)x N:(int)N power:(double)power into:(double*)y; ++ (void) shuffle:(double*)x n:(int)n; ++ (void) det:(double*)x n:(int)N m:(int)M result:(double*)result; + +@end + +// #### SVM (svm.mm) +@interface cvSVM : NSObject{ +} +- (void) train:(double *)x targets:(double *)targets m:(int)M n:(int)N; +- (float) predict:(double *)x n:(int)N; +- (double*) predict:(double*)x into:(double*)y m:(int)M n:(int)N; +-(void)setParams:(NSString*)svm_type kernel:(NSString*)kernel nu:(float)nu; +@end + +// #### k nearest neighbors (knn.mm) +@interface kNN : NSObject{ +} +- (void) train:(double *)x targets:(double *)tar m:(int)M n:(int)N; +- (double) predict:(double *)x n:(int)N k:(int)k; +@end \ No newline at end of file diff --git a/swix_ios_app/swix_ios_app/swix/objc/conversion.swift b/swix_ios_app/swix_ios_app/swix/objc/conversion.swift new file mode 100644 index 0000000..3f2ee49 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/conversion.swift @@ -0,0 +1,27 @@ +// +// conversion.swift +// swix +// +// Created by Scott Sievert on 7/11/14. +// Copyright (c) 2014 com.scott. All rights reserved. +// + +import Foundation +import Accelerate + +func matrixToPointer(x: ndarray)->UnsafeMutablePointer{ + // sustains since objc rewrites raw memory! + return UnsafeMutablePointer(x.grid) +} +func matrixToPointer(x: [Int])->UnsafeMutablePointer{ + return UnsafeMutablePointer(x) +} + +/// use !x to get the address. I tried &x but that doesn't work in beta3. +prefix func ! (x: ndarray) -> UnsafeMutablePointer { + return matrixToPointer(x) +} +prefix func ! (x: matrix) -> UnsafeMutablePointer { + return matrixToPointer(x.flat) +} + diff --git a/swix_ios_app/swix_ios_app/swix/objc/machine_learning.mm b/swix_ios_app/swix_ios_app/swix/objc/machine_learning.mm new file mode 100644 index 0000000..b1bbfc0 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/machine_learning.mm @@ -0,0 +1,116 @@ +// +// svm.m +// swix +// +// Created by Scott Sievert on 7/16/14. +// Copyright (c) 2014 com.scott. All rights reserved. 
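The prefix `!` defined in conversion.swift above is how Swift hands raw `double*` buffers to the Objective-C(++) shims: it exposes the array's storage as an `UnsafeMutablePointer` that the wrapper reads from and writes into in place. A hedged sketch of a call site, mirroring what the comparison operators do internally (assumes the swix sources and the CVWrapper interface declared in OpenCV.h):

```swift
let x = arange(4)
let y = ones(4) * 2
let z = zeros(4)

// "x < y" ends up here: raw pointers go into the OpenCV-backed wrapper,
// which fills z with 0/255 values that the Swift side rescales to 0/1.
CVWrapper.compare(!x, with: !y, using: "<", into: !z, ofLength: x.n.cint)
let mask = z / 255
```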
+// + +#import +#import "OpenCV.h" +#import "swix-Bridging-Header.h" +using namespace cv; + +void doubleToFloat(double * x, float * y, int N){ + vDSP_vdpsp(x, 1, y, 1, N); +} + +// #### STATE VECTOR MACHINE +@implementation cvSVM : NSObject +CvSVM ocvSVM; +CvSVMParams params; +int N; // number of variables +int M; // number of responses +void copy_float_to_double(float* x, double* y, int N){ + vDSP_vspdp(x, 1, y, 1, N); +} +void copy_float(float* x, float * y, int N){ + cblas_scopy(N, x, 1, y, 1); +} +-(void)setParams:(NSString*)svm_type kernel:(NSString*)kernel nu:(float)nu{ + if ([svm_type isEqualToString:@"C_SVC"]) + { params.svm_type = CvSVM::C_SVC; } + else if ([svm_type isEqualToString:@"ONE_CLASS"]) + { params.svm_type = CvSVM::ONE_CLASS;} + else if ([svm_type isEqualToString:@"NU_SVC"]) + { params.svm_type = CvSVM::NU_SVC;} + else if ([svm_type isEqualToString:@"NU_SVR"]) + { params.svm_type = CvSVM::NU_SVR;} + + if ([kernel isEqualToString:@"LINEAR"]) + { params.kernel_type = CvSVM::LINEAR;} + else if ([kernel isEqualToString:@"SIGMOID"]) + { params.kernel_type = CvSVM::SIGMOID;} + + params.nu = nu; +} +-(NSObject*)init{ + params.svm_type = CvSVM::C_SVC; + params.kernel_type = CvSVM::LINEAR; + params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6); + return self; +} +-(void) train:(double *)x targets:(double *)targets m:(int)M n:(int)N{ + // M is the number of responses or rows; N is columns or variables + float * x2 = (float *)malloc(sizeof(float) * M * N); + float * t2 = (float *)malloc(sizeof(float) * M); + doubleToFloat(x, x2, M*N); + doubleToFloat(targets, t2, M*1); + Mat xMat(M, N, CV_32FC1, x2); + Mat tMat(M, 1, CV_32FC1, t2); + Mat x3 = Mat(); + ocvSVM.train(xMat, tMat, x3, x3, params); +} +- (float) predict:(double *)x n:(int)N{ + float * x2 = (float *)malloc(sizeof(float) * 1 * N); + doubleToFloat(x, x2, N); + Mat xMat(1, N, CV_32FC1, x2); + float targetPredict = ocvSVM.predict(xMat); + return targetPredict; +} +- (double*) predict:(double*)x into:(double*)y m:(int)M n:(int)N{ + float * x2 = (float *)malloc(sizeof(float) * M * N); + doubleToFloat(x, x2, M*N); + float* y2 = (float *)malloc(sizeof(float) * M); + Mat xMat(M, N, CV_32FC1, x2); + Mat yMat(M, 1, CV_32FC1, y2); + + ocvSVM.predict(xMat, yMat); + copy_float_to_double(y2, y, M); + return y; +} +@end + +// #### STATE VECTOR MACHINE +@implementation kNN : NSObject +int kN; +int kM; + +CvKNearest cvknn; + +-(NSObject*)init{ + return self; +} + +- (void) train:(double *)x targets:(double *)tar m:(int)M n:(int)N{ + float * x2 = (float *)malloc(sizeof(float) * M * N); + float * t2 = (float *)malloc(sizeof(float) * M * 1); + Mat x3(M, N, CV_32FC1, x2); + Mat t3(M, 1, CV_32FC1, t2); + + cvknn.train(x3, t3); +} +- (double) predict:(double *)x n:(int)N k:(int)k{ + float * x2 = (float *)malloc(sizeof(float) * N * 1); + Mat x3(1, N, CV_32FC1, x2); + Mat results(1, 1, CV_32FC1); + float targetPredict = -3.14; + targetPredict = cvknn.find_nearest(x3, k, &results); + + std::cout << results << std::endl; + std::cout << results.at(0,0) << std::endl; + return results.at(0,0); +} +@end + + diff --git a/swix_ios_app/swix_ios_app/swix/objc/math.m b/swix_ios_app/swix_ios_app/swix/objc/math.m new file mode 100644 index 0000000..7933de2 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/math.m @@ -0,0 +1,66 @@ +// +// fft.m +// swix +// +// Created by Scott Sievert on 7/9/14. +// Copyright (c) 2014 com.scott. All rights reserved. 
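The cvSVM and kNN classes in machine_learning.mm above wrap OpenCV's CvSVM and CvKNearest behind plain `double*` interfaces so Swift (or the higher-level machine_learning.swift elsewhere in this patch) can drive them with `!`-extracted pointers. A rough sketch of a Swift call using the interface declared in OpenCV.h; the exact bridged method labels depend on the Swift version, so treat the names as approximate rather than definitive:

```swift
// 4 samples with 2 features each, flattened row-major, and their labels.
let X = ones(8)          // placeholder feature matrix, all ones just for shape
let labels = ones(4)     // every sample assigned class 1

let svm = cvSVM()
svm.setParams("C_SVC", kernel: "LINEAR", nu: 0.5)
svm.train(!X, targets: !labels, m: 4, n: 2)

let sample = ones(2)
let predicted = svm.predict(!sample, n: 2)   // Float class label for one sample
```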
+// + +#import +#import + +void svd_objc(double * xx, int m, int n, double* s, double* vt, double* u, int compute_uv){ + // adapted from the buggy code at http://stackoverflow.com/questions/5047503/lapack-svd-singular-value-decomposition + + char job; + if (compute_uv == 0) job = 'N'; + else if (compute_uv == 1) job = 'A'; + + __CLPK_integer lda = (__CLPK_integer)m; + long numberOfSingularValues = m < n ? m : n; + + // Workspace and status variables: + __CLPK_integer _n = n; + __CLPK_integer _m = m; + double* work = (double*)malloc(sizeof(double) * 2); + __CLPK_integer lwork = -1; + __CLPK_integer * iwork = (__CLPK_integer *)malloc(sizeof(__CLPK_integer) * 8 * numberOfSingularValues); + __CLPK_integer info = 0; + + // Call dgesdd_ with lwork = -1 to query optimal workspace size: + dgesdd_(&job, &_m, &_n, xx, &lda, s, u, &_m, vt, &_n, work, &lwork, iwork, &info); + + // Optimal workspace size is returned in work[0]. + lwork = work[0]; + free(work); + work = (double *)malloc(lwork * sizeof(double)); + + // Call dgesdd_ to do the actual computation: + dgesdd_(&job, &_m, &_n, xx, &lda, s, u, &_m, vt, &_n, work, &lwork, iwork, &info); + + free(work); + free(iwork); +} + + + + + + + + + + + + + + + + + + + + + + + diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv.mm b/swix_ios_app/swix_ios_app/swix/objc/opencv.mm new file mode 100644 index 0000000..5308c72 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv.mm @@ -0,0 +1,140 @@ +// +// knn.m +// swix +// +// Created by Scott Sievert on 7/1/14. +// Copyright (c) 2014 com.scott. All rights reserved. +// + +#import +#import + +#import +#import +#import + +#import "OpenCV.h" +using namespace cv; + +void copy(Mat x, double * y, int N); +@implementation CVWrapper ++ (void) pointerTest{ + // to test whether a matrix copies to the pointer + // makes sense; the raw values have to be stored somewhere + // it does not make sense that repeat( , 1, r, ) vs repeat( , r , 1) has a difference in shared data + int N = 3; + int r = 2; + double * x = (double *)malloc(sizeof(double)* N); + double * y = (double *)malloc(sizeof(double)* N * r); + for (int i=0; i" ]) {zMat = xMat > yMat;} + else if ([op isEqualToString:@"<=" ]) {zMat = xMat <= yMat;} + else if ([op isEqualToString:@">=" ]) {zMat = xMat >= yMat;} + else if ([op isEqualToString:@"==" ]) {zMat = xMat == yMat;} + else if ([op isEqualToString:@"!=="]) {compare(xMat, yMat, zMat, CMP_NE);} + else printf("*** Careful! Your operation isn't recognized!\n"); + matArgWhereConvert(zMat, z, N); +} ++ (void) compare:(double*)x withDouble:(double)y + using:(NSString*)op into:(double*)z ofLength:(int)N{ + + // this isn't working. + // instead, I can do threshold(abs(x - y), 1e-9) + + // threshold: vDSP_vthrscD + Mat xMat(1, N, CV_64F, x); + Mat zMat; + if ([op isEqualToString:@"<" ]) {zMat = xMat < y;} + else if ([op isEqualToString:@">" ]) {zMat = xMat > y;} + else if ([op isEqualToString:@"<=" ]) {zMat = xMat <= y;} + else if ([op isEqualToString:@">=" ]) {zMat = xMat >= y;} + else if ([op isEqualToString:@"==" ]) {zMat = xMat == y;} + else if ([op isEqualToString:@"!=="]) {compare(xMat, y, zMat, CMP_NE);} + else printf("*** Careful! 
Your operation isn't recognized!\n"); + matArgWhereConvert(zMat, z, N); +} + ++ (void) argmax:(double*)x N:(int)N max:(int *)max{ + Mat xMat(N, 1, CV_64F, x); + minMaxIdx(xMat, NULL, NULL, NULL, max); +} ++ (void) argmin:(double*)x N:(int)N min:(int *)min{ + Mat xMat(N, 1, CV_64F, x); + minMaxIdx(xMat, NULL, NULL, min, NULL); +} ++ (void) argsort:(double*)x N:(int)N into:(int*)y{ + Mat xMat(N, 1, CV_64F, x); + Mat yMat(N, 1, CV_32S, y); + sortIdx(xMat, yMat, CV_SORT_ASCENDING + CV_SORT_EVERY_COLUMN); +} ++ (void) pow:(double*)x N:(int)N power:(double)power into:(double*)y{ + Mat xMat(N,1,CV_64F, x); + Mat yMat(N,1,CV_64F, y); + pow(xMat, power, yMat); +} ++ (void) shuffle:(double*)x n:(int)n{ + Mat xMat(n, 1, CV_64F, x); + randShuffle(xMat); +} ++ (void) det:(double*)x n:(int)N m:(int)M result:(double*)result{ + Mat xMat(N, M, CV_64F, x); + double r = determinant(xMat); + *result = r; +} + +void matArgWhereConvert(Mat x, double * y, int N){ + if (!x.isContinuous()){ + printf("Careful! The OpenCV::Mat-->double* conversion didn't go well as x is not continuous in memory! (message printed from swix/objc/opencv.mm:matArgWhereConvert)\n"); + } + uchar* ptr = x.data; + // integer to double conversion + vDSP_vfltu8D(ptr, 1, y, 1, N); +} +void copy(double* x, double * y, int N){ + cblas_dcopy(N, x, 1, y, 1); +} +@end + + diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Headers b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Headers new file mode 120000 index 0000000..a177d2a --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Headers @@ -0,0 +1 @@ +Versions/Current/Headers \ No newline at end of file diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Resources b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Resources new file mode 120000 index 0000000..953ee36 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Resources @@ -0,0 +1 @@ +Versions/Current/Resources \ No newline at end of file diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/calib3d/calib3d.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/calib3d/calib3d.hpp new file mode 100644 index 0000000..f213a11 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/calib3d/calib3d.hpp @@ -0,0 +1,751 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CALIB3D_HPP__ +#define __OPENCV_CALIB3D_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/features2d/features2d.hpp" + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************************\ +* Camera Calibration, Pose Estimation and Stereo * +\****************************************************************************************/ + +typedef struct CvPOSITObject CvPOSITObject; + +/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */ +CVAPI(CvPOSITObject*) cvCreatePOSITObject( CvPoint3D32f* points, int point_count ); + + +/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of + an object given its model and projection in a weak-perspective case */ +CVAPI(void) cvPOSIT( CvPOSITObject* posit_object, CvPoint2D32f* image_points, + double focal_length, CvTermCriteria criteria, + float* rotation_matrix, float* translation_vector); + +/* Releases CvPOSITObject structure */ +CVAPI(void) cvReleasePOSITObject( CvPOSITObject** posit_object ); + +/* updates the number of RANSAC iterations */ +CVAPI(int) cvRANSACUpdateNumIters( double p, double err_prob, + int model_points, int max_iters ); + +CVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst ); + +/* Calculates fundamental matrix given a set of corresponding points */ +#define CV_FM_7POINT 1 +#define CV_FM_8POINT 2 + +#define CV_LMEDS 4 +#define CV_RANSAC 8 + +#define CV_FM_LMEDS_ONLY CV_LMEDS +#define CV_FM_RANSAC_ONLY CV_RANSAC +#define CV_FM_LMEDS CV_LMEDS +#define CV_FM_RANSAC CV_RANSAC + +enum +{ + CV_ITERATIVE = 0, + CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" + CV_P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. 
Chang; "Complete Solution Classification for the Perspective-Three-Point Problem" +}; + +CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2, + CvMat* fundamental_matrix, + int method CV_DEFAULT(CV_FM_RANSAC), + double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99), + CvMat* status CV_DEFAULT(NULL) ); + +/* For each input point on one of images + computes parameters of the corresponding + epipolar line on the other image */ +CVAPI(void) cvComputeCorrespondEpilines( const CvMat* points, + int which_image, + const CvMat* fundamental_matrix, + CvMat* correspondent_lines ); + +/* Triangulation functions */ + +CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2, + CvMat* projPoints1, CvMat* projPoints2, + CvMat* points4D); + +CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2, + CvMat* new_points1, CvMat* new_points2); + + +/* Computes the optimal new camera matrix according to the free scaling parameter alpha: + alpha=0 - only valid pixels will be retained in the undistorted image + alpha=1 - all the source image pixels will be retained in the undistorted image +*/ +CVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix, + const CvMat* dist_coeffs, + CvSize image_size, double alpha, + CvMat* new_camera_matrix, + CvSize new_imag_size CV_DEFAULT(cvSize(0,0)), + CvRect* valid_pixel_ROI CV_DEFAULT(0), + int center_principal_point CV_DEFAULT(0)); + +/* Converts rotation vector to rotation matrix or vice versa */ +CVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst, + CvMat* jacobian CV_DEFAULT(0) ); + +/* Finds perspective transformation between the object plane and image (view) plane */ +CVAPI(int) cvFindHomography( const CvMat* src_points, + const CvMat* dst_points, + CvMat* homography, + int method CV_DEFAULT(0), + double ransacReprojThreshold CV_DEFAULT(3), + CvMat* mask CV_DEFAULT(0)); + +/* Computes RQ decomposition for 3x3 matrices */ +CVAPI(void) cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ, + CvMat *matrixQx CV_DEFAULT(NULL), + CvMat *matrixQy CV_DEFAULT(NULL), + CvMat *matrixQz CV_DEFAULT(NULL), + CvPoint3D64f *eulerAngles CV_DEFAULT(NULL)); + +/* Computes projection matrix decomposition */ +CVAPI(void) cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr, + CvMat *rotMatr, CvMat *posVect, + CvMat *rotMatrX CV_DEFAULT(NULL), + CvMat *rotMatrY CV_DEFAULT(NULL), + CvMat *rotMatrZ CV_DEFAULT(NULL), + CvPoint3D64f *eulerAngles CV_DEFAULT(NULL)); + +/* Computes d(AB)/dA and d(AB)/dB */ +CVAPI(void) cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB ); + +/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)), + t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */ +CVAPI(void) cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1, + const CvMat* _rvec2, const CvMat* _tvec2, + CvMat* _rvec3, CvMat* _tvec3, + CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0), + CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0), + CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0), + CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) ); + +/* Projects object points to the view plane using + the specified extrinsic and intrinsic camera parameters */ +CVAPI(void) cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector, + const CvMat* translation_vector, const CvMat* camera_matrix, + const CvMat* distortion_coeffs, CvMat* image_points, + CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt CV_DEFAULT(NULL), + CvMat* dpdf 
CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL), + CvMat* dpddist CV_DEFAULT(NULL), + double aspect_ratio CV_DEFAULT(0)); + +/* Finds extrinsic camera parameters from + a few known corresponding point pairs and intrinsic parameters */ +CVAPI(void) cvFindExtrinsicCameraParams2( const CvMat* object_points, + const CvMat* image_points, + const CvMat* camera_matrix, + const CvMat* distortion_coeffs, + CvMat* rotation_vector, + CvMat* translation_vector, + int use_extrinsic_guess CV_DEFAULT(0) ); + +/* Computes initial estimate of the intrinsic camera parameters + in case of planar calibration target (e.g. chessboard) */ +CVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points, + const CvMat* image_points, + const CvMat* npoints, CvSize image_size, + CvMat* camera_matrix, + double aspect_ratio CV_DEFAULT(1.) ); + +#define CV_CALIB_CB_ADAPTIVE_THRESH 1 +#define CV_CALIB_CB_NORMALIZE_IMAGE 2 +#define CV_CALIB_CB_FILTER_QUADS 4 +#define CV_CALIB_CB_FAST_CHECK 8 + +// Performs a fast check if a chessboard is in the input image. This is a workaround to +// a problem of cvFindChessboardCorners being slow on images with no chessboard +// - src: input image +// - size: chessboard size +// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called, +// 0 if there is no chessboard, -1 in case of error +CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size); + + /* Detects corners on a chessboard calibration pattern */ +CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size, + CvPoint2D32f* corners, + int* corner_count CV_DEFAULT(NULL), + int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) ); + +/* Draws individual chessboard corners or the whole chessboard detected */ +CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size, + CvPoint2D32f* corners, + int count, int pattern_was_found ); + +#define CV_CALIB_USE_INTRINSIC_GUESS 1 +#define CV_CALIB_FIX_ASPECT_RATIO 2 +#define CV_CALIB_FIX_PRINCIPAL_POINT 4 +#define CV_CALIB_ZERO_TANGENT_DIST 8 +#define CV_CALIB_FIX_FOCAL_LENGTH 16 +#define CV_CALIB_FIX_K1 32 +#define CV_CALIB_FIX_K2 64 +#define CV_CALIB_FIX_K3 128 +#define CV_CALIB_FIX_K4 2048 +#define CV_CALIB_FIX_K5 4096 +#define CV_CALIB_FIX_K6 8192 +#define CV_CALIB_RATIONAL_MODEL 16384 + +/* Finds intrinsic and extrinsic camera parameters + from a few views of known calibration pattern */ +CVAPI(double) cvCalibrateCamera2( const CvMat* object_points, + const CvMat* image_points, + const CvMat* point_counts, + CvSize image_size, + CvMat* camera_matrix, + CvMat* distortion_coeffs, + CvMat* rotation_vectors CV_DEFAULT(NULL), + CvMat* translation_vectors CV_DEFAULT(NULL), + int flags CV_DEFAULT(0), + CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria( + CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) ); + +/* Computes various useful characteristics of the camera from the data computed by + cvCalibrateCamera2 */ +CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix, + CvSize image_size, + double aperture_width CV_DEFAULT(0), + double aperture_height CV_DEFAULT(0), + double *fovx CV_DEFAULT(NULL), + double *fovy CV_DEFAULT(NULL), + double *focal_length CV_DEFAULT(NULL), + CvPoint2D64f *principal_point CV_DEFAULT(NULL), + double *pixel_aspect_ratio CV_DEFAULT(NULL)); + +#define CV_CALIB_FIX_INTRINSIC 256 +#define CV_CALIB_SAME_FOCAL_LENGTH 512 + +/* Computes the transformation from one camera coordinate system to another one + from a few correspondent views of the same calibration target. 
Optionally, calibrates + both cameras */ +CVAPI(double) cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1, + const CvMat* image_points2, const CvMat* npoints, + CvMat* camera_matrix1, CvMat* dist_coeffs1, + CvMat* camera_matrix2, CvMat* dist_coeffs2, + CvSize image_size, CvMat* R, CvMat* T, + CvMat* E CV_DEFAULT(0), CvMat* F CV_DEFAULT(0), + CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria( + CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)), + int flags CV_DEFAULT(CV_CALIB_FIX_INTRINSIC)); + +#define CV_CALIB_ZERO_DISPARITY 1024 + +/* Computes 3D rotations (+ optional shift) for each camera coordinate system to make both + views parallel (=> to make all the epipolar lines horizontal or vertical) */ +CVAPI(void) cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2, + const CvMat* dist_coeffs1, const CvMat* dist_coeffs2, + CvSize image_size, const CvMat* R, const CvMat* T, + CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2, + CvMat* Q CV_DEFAULT(0), + int flags CV_DEFAULT(CV_CALIB_ZERO_DISPARITY), + double alpha CV_DEFAULT(-1), + CvSize new_image_size CV_DEFAULT(cvSize(0,0)), + CvRect* valid_pix_ROI1 CV_DEFAULT(0), + CvRect* valid_pix_ROI2 CV_DEFAULT(0)); + +/* Computes rectification transformations for uncalibrated pair of images using a set + of point correspondences */ +CVAPI(int) cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2, + const CvMat* F, CvSize img_size, + CvMat* H1, CvMat* H2, + double threshold CV_DEFAULT(5)); + + + +/* stereo correspondence parameters and functions */ + +#define CV_STEREO_BM_NORMALIZED_RESPONSE 0 +#define CV_STEREO_BM_XSOBEL 1 + +/* Block matching algorithm structure */ +typedef struct CvStereoBMState +{ + // pre-filtering (normalization of input images) + int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now + int preFilterSize; // averaging window size: ~5x5..21x21 + int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap] + + // correspondence using Sum of Absolute Difference (SAD) + int SADWindowSize; // ~5x5..21x21 + int minDisparity; // minimum disparity (can be negative) + int numberOfDisparities; // maximum disparity - minimum disparity (> 0) + + // post-filtering + int textureThreshold; // the disparity is only computed for pixels + // with textured enough neighborhood + int uniquenessRatio; // accept the computed disparity d* only if + // SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.) + // for any d != d*+/-1 within the search range. 
+ int speckleWindowSize; // disparity variation window + int speckleRange; // acceptable range of variation in window + + int trySmallerWindows; // if 1, the results may be more accurate, + // at the expense of slower processing + CvRect roi1, roi2; + int disp12MaxDiff; + + // temporary buffers + CvMat* preFilteredImg0; + CvMat* preFilteredImg1; + CvMat* slidingSumBuf; + CvMat* cost; + CvMat* disp; +} CvStereoBMState; + +#define CV_STEREO_BM_BASIC 0 +#define CV_STEREO_BM_FISH_EYE 1 +#define CV_STEREO_BM_NARROW 2 + +CVAPI(CvStereoBMState*) cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC), + int numberOfDisparities CV_DEFAULT(0)); + +CVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state ); + +CVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right, + CvArr* disparity, CvStereoBMState* state ); + +CVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity, + int numberOfDisparities, int SADWindowSize ); + +CVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost, + int minDisparity, int numberOfDisparities, + int disp12MaxDiff CV_DEFAULT(1) ); + +/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */ +CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage, + CvArr* _3dImage, const CvMat* Q, + int handleMissingValues CV_DEFAULT(0) ); + + +#ifdef __cplusplus +} + +////////////////////////////////////////////////////////////////////////////////////////// +class CV_EXPORTS CvLevMarq +{ +public: + CvLevMarq(); + CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria= + cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON), + bool completeSymmFlag=false ); + ~CvLevMarq(); + void init( int nparams, int nerrs, CvTermCriteria criteria= + cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON), + bool completeSymmFlag=false ); + bool update( const CvMat*& param, CvMat*& J, CvMat*& err ); + bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm ); + + void clear(); + void step(); + enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 }; + + cv::Ptr mask; + cv::Ptr prevParam; + cv::Ptr param; + cv::Ptr J; + cv::Ptr err; + cv::Ptr JtJ; + cv::Ptr JtJN; + cv::Ptr JtErr; + cv::Ptr JtJV; + cv::Ptr JtJW; + double prevErrNorm, errNorm; + int lambdaLg10; + CvTermCriteria criteria; + int state; + int iters; + bool completeSymmFlag; +}; + +namespace cv +{ +//! converts rotation vector to rotation matrix or vice versa using Rodrigues transformation +CV_EXPORTS_W void Rodrigues(InputArray src, OutputArray dst, OutputArray jacobian=noArray()); + +//! type of the robust estimation algorithm +enum +{ + LMEDS=CV_LMEDS, //!< least-median algorithm + RANSAC=CV_RANSAC //!< RANSAC algorithm +}; + +//! computes the best-fit perspective transformation mapping srcPoints to dstPoints. +CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints, + int method=0, double ransacReprojThreshold=3, + OutputArray mask=noArray()); + +//! variant of findHomography for backward compatibility +CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints, + OutputArray mask, int method=0, double ransacReprojThreshold=3); + +//! Computes RQ decomposition of 3x3 matrix +CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ, + OutputArray Qx=noArray(), + OutputArray Qy=noArray(), + OutputArray Qz=noArray()); + +//! 
Decomposes the projection matrix into camera matrix and the rotation martix and the translation vector +CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix, + OutputArray rotMatrix, OutputArray transVect, + OutputArray rotMatrixX=noArray(), + OutputArray rotMatrixY=noArray(), + OutputArray rotMatrixZ=noArray(), + OutputArray eulerAngles=noArray() ); + +//! computes derivatives of the matrix product w.r.t each of the multiplied matrix coefficients +CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B, + OutputArray dABdA, + OutputArray dABdB ); + +//! composes 2 [R|t] transformations together. Also computes the derivatives of the result w.r.t the arguments +CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1, + InputArray rvec2, InputArray tvec2, + OutputArray rvec3, OutputArray tvec3, + OutputArray dr3dr1=noArray(), OutputArray dr3dt1=noArray(), + OutputArray dr3dr2=noArray(), OutputArray dr3dt2=noArray(), + OutputArray dt3dr1=noArray(), OutputArray dt3dt1=noArray(), + OutputArray dt3dr2=noArray(), OutputArray dt3dt2=noArray() ); + +//! projects points from the model coordinate space to the image coordinates. Also computes derivatives of the image coordinates w.r.t the intrinsic and extrinsic camera parameters +CV_EXPORTS_W void projectPoints( InputArray objectPoints, + InputArray rvec, InputArray tvec, + InputArray cameraMatrix, InputArray distCoeffs, + OutputArray imagePoints, + OutputArray jacobian=noArray(), + double aspectRatio=0 ); + +//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are not handled. +enum +{ + ITERATIVE=CV_ITERATIVE, + EPNP=CV_EPNP, + P3P=CV_P3P +}; +CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints, + InputArray cameraMatrix, InputArray distCoeffs, + OutputArray rvec, OutputArray tvec, + bool useExtrinsicGuess=false, int flags=ITERATIVE); + +//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are possible. +CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints, + InputArray imagePoints, + InputArray cameraMatrix, + InputArray distCoeffs, + OutputArray rvec, + OutputArray tvec, + bool useExtrinsicGuess = false, + int iterationsCount = 100, + float reprojectionError = 8.0, + int minInliersCount = 100, + OutputArray inliers = noArray(), + int flags = ITERATIVE); + +//! initializes camera matrix from a few 3D points and the corresponding projections. +CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints, + InputArrayOfArrays imagePoints, + Size imageSize, double aspectRatio=1. ); + +enum { CALIB_CB_ADAPTIVE_THRESH = 1, CALIB_CB_NORMALIZE_IMAGE = 2, + CALIB_CB_FILTER_QUADS = 4, CALIB_CB_FAST_CHECK = 8 }; + +//! finds checkerboard pattern of the specified size in the image +CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize, + OutputArray corners, + int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE ); + +//! finds subpixel-accurate positions of the chessboard corners +CV_EXPORTS bool find4QuadCornerSubpix(InputArray img, InputOutputArray corners, Size region_size); + +//! draws the checkerboard pattern (found or partly found) in the image +CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize, + InputArray corners, bool patternWasFound ); + +enum { CALIB_CB_SYMMETRIC_GRID = 1, CALIB_CB_ASYMMETRIC_GRID = 2, + CALIB_CB_CLUSTERING = 4 }; + +//! 
finds circles' grid pattern of the specified size in the image +CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize, + OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID, + const Ptr &blobDetector = new SimpleBlobDetector()); + +//! the deprecated function. Use findCirclesGrid() instead of it. +CV_EXPORTS_W bool findCirclesGridDefault( InputArray image, Size patternSize, + OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID ); +enum +{ + CALIB_USE_INTRINSIC_GUESS = CV_CALIB_USE_INTRINSIC_GUESS, + CALIB_FIX_ASPECT_RATIO = CV_CALIB_FIX_ASPECT_RATIO, + CALIB_FIX_PRINCIPAL_POINT = CV_CALIB_FIX_PRINCIPAL_POINT, + CALIB_ZERO_TANGENT_DIST = CV_CALIB_ZERO_TANGENT_DIST, + CALIB_FIX_FOCAL_LENGTH = CV_CALIB_FIX_FOCAL_LENGTH, + CALIB_FIX_K1 = CV_CALIB_FIX_K1, + CALIB_FIX_K2 = CV_CALIB_FIX_K2, + CALIB_FIX_K3 = CV_CALIB_FIX_K3, + CALIB_FIX_K4 = CV_CALIB_FIX_K4, + CALIB_FIX_K5 = CV_CALIB_FIX_K5, + CALIB_FIX_K6 = CV_CALIB_FIX_K6, + CALIB_RATIONAL_MODEL = CV_CALIB_RATIONAL_MODEL, + // only for stereo + CALIB_FIX_INTRINSIC = CV_CALIB_FIX_INTRINSIC, + CALIB_SAME_FOCAL_LENGTH = CV_CALIB_SAME_FOCAL_LENGTH, + // for stereo rectification + CALIB_ZERO_DISPARITY = CV_CALIB_ZERO_DISPARITY +}; + +//! finds intrinsic and extrinsic camera parameters from several fews of a known calibration pattern. +CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints, + InputArrayOfArrays imagePoints, + Size imageSize, + CV_OUT InputOutputArray cameraMatrix, + CV_OUT InputOutputArray distCoeffs, + OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, + int flags=0, TermCriteria criteria = TermCriteria( + TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON) ); + +//! computes several useful camera characteristics from the camera matrix, camera frame resolution and the physical sensor size. +CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix, + Size imageSize, + double apertureWidth, + double apertureHeight, + CV_OUT double& fovx, + CV_OUT double& fovy, + CV_OUT double& focalLength, + CV_OUT Point2d& principalPoint, + CV_OUT double& aspectRatio ); + +//! finds intrinsic and extrinsic parameters of a stereo camera +CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints, + InputArrayOfArrays imagePoints1, + InputArrayOfArrays imagePoints2, + CV_OUT InputOutputArray cameraMatrix1, + CV_OUT InputOutputArray distCoeffs1, + CV_OUT InputOutputArray cameraMatrix2, + CV_OUT InputOutputArray distCoeffs2, + Size imageSize, OutputArray R, + OutputArray T, OutputArray E, OutputArray F, + TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6), + int flags=CALIB_FIX_INTRINSIC ); + + +//! computes the rectification transformation for a stereo camera from its intrinsic and extrinsic parameters +CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1, + InputArray cameraMatrix2, InputArray distCoeffs2, + Size imageSize, InputArray R, InputArray T, + OutputArray R1, OutputArray R2, + OutputArray P1, OutputArray P2, + OutputArray Q, int flags=CALIB_ZERO_DISPARITY, + double alpha=-1, Size newImageSize=Size(), + CV_OUT Rect* validPixROI1=0, CV_OUT Rect* validPixROI2=0 ); + +//! computes the rectification transformation for an uncalibrated stereo camera (zero distortion is assumed) +CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2, + InputArray F, Size imgSize, + OutputArray H1, OutputArray H2, + double threshold=5 ); + +//! 
computes the rectification transformations for 3-head camera, where all the heads are on the same line. +CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1, + InputArray cameraMatrix2, InputArray distCoeffs2, + InputArray cameraMatrix3, InputArray distCoeffs3, + InputArrayOfArrays imgpt1, InputArrayOfArrays imgpt3, + Size imageSize, InputArray R12, InputArray T12, + InputArray R13, InputArray T13, + OutputArray R1, OutputArray R2, OutputArray R3, + OutputArray P1, OutputArray P2, OutputArray P3, + OutputArray Q, double alpha, Size newImgSize, + CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags ); + +//! returns the optimal new camera matrix +CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs, + Size imageSize, double alpha, Size newImgSize=Size(), + CV_OUT Rect* validPixROI=0, bool centerPrincipalPoint=false); + +//! converts point coordinates from normal pixel coordinates to homogeneous coordinates ((x,y)->(x,y,1)) +CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst ); + +//! converts point coordinates from homogeneous to normal pixel coordinates ((x,y,z)->(x/z, y/z)) +CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst ); + +//! for backward compatibility +CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst ); + +//! the algorithm for finding fundamental matrix +enum +{ + FM_7POINT = CV_FM_7POINT, //!< 7-point algorithm + FM_8POINT = CV_FM_8POINT, //!< 8-point algorithm + FM_LMEDS = CV_FM_LMEDS, //!< least-median algorithm + FM_RANSAC = CV_FM_RANSAC //!< RANSAC algorithm +}; + +//! finds fundamental matrix from a set of corresponding 2D points +CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2, + int method=FM_RANSAC, + double param1=3., double param2=0.99, + OutputArray mask=noArray()); + +//! variant of findFundamentalMat for backward compatibility +CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2, + OutputArray mask, int method=FM_RANSAC, + double param1=3., double param2=0.99); + +//! finds coordinates of epipolar lines corresponding the specified points +CV_EXPORTS_W void computeCorrespondEpilines( InputArray points, + int whichImage, InputArray F, + OutputArray lines ); + +CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2, + InputArray projPoints1, InputArray projPoints2, + OutputArray points4D ); + +CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2, + OutputArray newPoints1, OutputArray newPoints2 ); + +template<> CV_EXPORTS void Ptr::delete_obj(); + +/*! + Block Matching Stereo Correspondence Algorithm + + The class implements BM stereo correspondence algorithm by K. Konolige. +*/ +class CV_EXPORTS_W StereoBM +{ +public: + enum { PREFILTER_NORMALIZED_RESPONSE = 0, PREFILTER_XSOBEL = 1, + BASIC_PRESET=0, FISH_EYE_PRESET=1, NARROW_PRESET=2 }; + + //! the default constructor + CV_WRAP StereoBM(); + //! the full constructor taking the camera-specific preset, number of disparities and the SAD window size + CV_WRAP StereoBM(int preset, int ndisparities=0, int SADWindowSize=21); + //! the method that reinitializes the state. The previous content is destroyed + void init(int preset, int ndisparities=0, int SADWindowSize=21); + //! the stereo correspondence operator. 
Finds the disparity for the specified rectified stereo pair + CV_WRAP_AS(compute) void operator()( InputArray left, InputArray right, + OutputArray disparity, int disptype=CV_16S ); + + //! pointer to the underlying CvStereoBMState + Ptr state; +}; + + +/*! + Semi-Global Block Matching Stereo Correspondence Algorithm + + The class implements the original SGBM stereo correspondence algorithm by H. Hirschmuller and some its modification. + */ +class CV_EXPORTS_W StereoSGBM +{ +public: + enum { DISP_SHIFT=4, DISP_SCALE = (1<(X,Y,Z) using the matrix Q returned by cv::stereoRectify +CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity, + OutputArray _3dImage, InputArray Q, + bool handleMissingValues=false, + int ddepth=-1 ); + +CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst, + OutputArray out, OutputArray inliers, + double ransacThreshold=3, double confidence=0.99); + +} + +#endif + +#endif diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/contrib.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/contrib.hpp new file mode 100644 index 0000000..7d881c3 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/contrib.hpp @@ -0,0 +1,985 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CONTRIB_HPP__ +#define __OPENCV_CONTRIB_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/features2d/features2d.hpp" +#include "opencv2/objdetect/objdetect.hpp" + +#ifdef __cplusplus + +/****************************************************************************************\ +* Adaptive Skin Detector * +\****************************************************************************************/ + +class CV_EXPORTS CvAdaptiveSkinDetector +{ +private: + enum { + GSD_HUE_LT = 3, + GSD_HUE_UT = 33, + GSD_INTENSITY_LT = 15, + GSD_INTENSITY_UT = 250 + }; + + class CV_EXPORTS Histogram + { + private: + enum { + HistogramSize = (GSD_HUE_UT - GSD_HUE_LT + 1) + }; + + protected: + int findCoverageIndex(double surfaceToCover, int defaultValue = 0); + + public: + CvHistogram *fHistogram; + Histogram(); + virtual ~Histogram(); + + void findCurveThresholds(int &x1, int &x2, double percent = 0.05); + void mergeWith(Histogram *source, double weight); + }; + + int nStartCounter, nFrameCount, nSkinHueLowerBound, nSkinHueUpperBound, nMorphingMethod, nSamplingDivider; + double fHistogramMergeFactor, fHuePercentCovered; + Histogram histogramHueMotion, skinHueHistogram; + IplImage *imgHueFrame, *imgSaturationFrame, *imgLastGrayFrame, *imgMotionFrame, *imgFilteredFrame; + IplImage *imgShrinked, *imgTemp, *imgGrayFrame, *imgHSVFrame; + +protected: + void initData(IplImage *src, int widthDivider, int heightDivider); + void adaptiveFilter(); + +public: + + enum { + MORPHING_METHOD_NONE = 0, + MORPHING_METHOD_ERODE = 1, + MORPHING_METHOD_ERODE_ERODE = 2, + MORPHING_METHOD_ERODE_DILATE = 3 + }; + + CvAdaptiveSkinDetector(int samplingDivider = 1, int morphingMethod = MORPHING_METHOD_NONE); + virtual ~CvAdaptiveSkinDetector(); + + virtual void process(IplImage *inputBGRImage, IplImage *outputHueMask); +}; + + +/****************************************************************************************\ + * Fuzzy MeanShift Tracker * + \****************************************************************************************/ + +class CV_EXPORTS CvFuzzyPoint { +public: + double x, y, value; + + CvFuzzyPoint(double _x, double _y); +}; + +class CV_EXPORTS CvFuzzyCurve { +private: + std::vector points; + double value, centre; + + bool between(double x, double x1, double x2); + +public: + CvFuzzyCurve(); + ~CvFuzzyCurve(); + + void setCentre(double _centre); + double getCentre(); + void clear(); + void addPoint(double x, double y); + double calcValue(double param); + double getValue(); + void setValue(double _value); +}; + +class CV_EXPORTS CvFuzzyFunction { +public: + std::vector curves; + + CvFuzzyFunction(); + ~CvFuzzyFunction(); + void addCurve(CvFuzzyCurve *curve, double value = 0); + void resetValues(); + double calcValue(); + CvFuzzyCurve *newCurve(); +}; + +class CV_EXPORTS CvFuzzyRule { +private: + CvFuzzyCurve *fuzzyInput1, *fuzzyInput2; + CvFuzzyCurve *fuzzyOutput; +public: + CvFuzzyRule(); + ~CvFuzzyRule(); + void setRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1); + double calcValue(double param1, double param2); + CvFuzzyCurve *getOutputCurve(); +}; + +class CV_EXPORTS CvFuzzyController { +private: + std::vector rules; +public: + CvFuzzyController(); + ~CvFuzzyController(); + void addRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1); + double calcOutput(double param1, double param2); +}; + +class CV_EXPORTS CvFuzzyMeanShiftTracker +{ +private: + class FuzzyResizer + { + private: + CvFuzzyFunction iInput, iOutput; + 
CvFuzzyController fuzzyController; + public: + FuzzyResizer(); + int calcOutput(double edgeDensity, double density); + }; + + class SearchWindow + { + public: + FuzzyResizer *fuzzyResizer; + int x, y; + int width, height, maxWidth, maxHeight, ellipseHeight, ellipseWidth; + int ldx, ldy, ldw, ldh, numShifts, numIters; + int xGc, yGc; + long m00, m01, m10, m11, m02, m20; + double ellipseAngle; + double density; + unsigned int depthLow, depthHigh; + int verticalEdgeLeft, verticalEdgeRight, horizontalEdgeTop, horizontalEdgeBottom; + + SearchWindow(); + ~SearchWindow(); + void setSize(int _x, int _y, int _width, int _height); + void initDepthValues(IplImage *maskImage, IplImage *depthMap); + bool shift(); + void extractInfo(IplImage *maskImage, IplImage *depthMap, bool initDepth); + void getResizeAttribsEdgeDensityLinear(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh); + void getResizeAttribsInnerDensity(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh); + void getResizeAttribsEdgeDensityFuzzy(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh); + bool meanShift(IplImage *maskImage, IplImage *depthMap, int maxIteration, bool initDepth); + }; + +public: + enum TrackingState + { + tsNone = 0, + tsSearching = 1, + tsTracking = 2, + tsSetWindow = 3, + tsDisabled = 10 + }; + + enum ResizeMethod { + rmEdgeDensityLinear = 0, + rmEdgeDensityFuzzy = 1, + rmInnerDensity = 2 + }; + + enum { + MinKernelMass = 1000 + }; + + SearchWindow kernel; + int searchMode; + +private: + enum + { + MaxMeanShiftIteration = 5, + MaxSetSizeIteration = 5 + }; + + void findOptimumSearchWindow(SearchWindow &searchWindow, IplImage *maskImage, IplImage *depthMap, int maxIteration, int resizeMethod, bool initDepth); + +public: + CvFuzzyMeanShiftTracker(); + ~CvFuzzyMeanShiftTracker(); + + void track(IplImage *maskImage, IplImage *depthMap, int resizeMethod, bool resetSearch, int minKernelMass = MinKernelMass); +}; + + +namespace cv +{ + + class CV_EXPORTS Octree + { + public: + struct Node + { + Node() {} + int begin, end; + float x_min, x_max, y_min, y_max, z_min, z_max; + int maxLevels; + bool isLeaf; + int children[8]; + }; + + Octree(); + Octree( const vector& points, int maxLevels = 10, int minPoints = 20 ); + virtual ~Octree(); + + virtual void buildTree( const vector& points, int maxLevels = 10, int minPoints = 20 ); + virtual void getPointsWithinSphere( const Point3f& center, float radius, + vector& points ) const; + const vector& getNodes() const { return nodes; } + private: + int minPoints; + vector points; + vector nodes; + + virtual void buildNext(size_t node_ind); + }; + + + class CV_EXPORTS Mesh3D + { + public: + struct EmptyMeshException {}; + + Mesh3D(); + Mesh3D(const vector& vtx); + ~Mesh3D(); + + void buildOctree(); + void clearOctree(); + float estimateResolution(float tryRatio = 0.1f); + void computeNormals(float normalRadius, int minNeighbors = 20); + void computeNormals(const vector& subset, float normalRadius, int minNeighbors = 20); + + void writeAsVrml(const String& file, const vector& colors = vector()) const; + + vector vtx; + vector normals; + float resolution; + Octree octree; + + const static Point3f allzero; + }; + + class CV_EXPORTS SpinImageModel + { + public: + + /* model parameters, leave unset for default or auto estimate */ + float normalRadius; + int minNeighbors; + + float binSize; + int imageWidth; + + float lambda; + float gamma; + + float T_GeometriccConsistency; + float T_GroupingCorespondances; + + /* public interface */ + SpinImageModel(); + explicit 
SpinImageModel(const Mesh3D& mesh); + ~SpinImageModel(); + + void setLogger(std::ostream* log); + void selectRandomSubset(float ratio); + void setSubset(const vector& subset); + void compute(); + + void match(const SpinImageModel& scene, vector< vector >& result); + + Mat packRandomScaledSpins(bool separateScale = false, size_t xCount = 10, size_t yCount = 10) const; + + size_t getSpinCount() const { return spinImages.rows; } + Mat getSpinImage(size_t index) const { return spinImages.row((int)index); } + const Point3f& getSpinVertex(size_t index) const { return mesh.vtx[subset[index]]; } + const Point3f& getSpinNormal(size_t index) const { return mesh.normals[subset[index]]; } + + const Mesh3D& getMesh() const { return mesh; } + Mesh3D& getMesh() { return mesh; } + + /* static utility functions */ + static bool spinCorrelation(const Mat& spin1, const Mat& spin2, float lambda, float& result); + + static Point2f calcSpinMapCoo(const Point3f& point, const Point3f& vertex, const Point3f& normal); + + static float geometricConsistency(const Point3f& pointScene1, const Point3f& normalScene1, + const Point3f& pointModel1, const Point3f& normalModel1, + const Point3f& pointScene2, const Point3f& normalScene2, + const Point3f& pointModel2, const Point3f& normalModel2); + + static float groupingCreteria(const Point3f& pointScene1, const Point3f& normalScene1, + const Point3f& pointModel1, const Point3f& normalModel1, + const Point3f& pointScene2, const Point3f& normalScene2, + const Point3f& pointModel2, const Point3f& normalModel2, + float gamma); + protected: + void defaultParams(); + + void matchSpinToModel(const Mat& spin, vector& indeces, + vector& corrCoeffs, bool useExtremeOutliers = true) const; + + void repackSpinImages(const vector& mask, Mat& spinImages, bool reAlloc = true) const; + + vector subset; + Mesh3D mesh; + Mat spinImages; + std::ostream* out; + }; + + class CV_EXPORTS TickMeter + { + public: + TickMeter(); + void start(); + void stop(); + + int64 getTimeTicks() const; + double getTimeMicro() const; + double getTimeMilli() const; + double getTimeSec() const; + int64 getCounter() const; + + void reset(); + private: + int64 counter; + int64 sumTime; + int64 startTime; + }; + + CV_EXPORTS std::ostream& operator<<(std::ostream& out, const TickMeter& tm); + + class CV_EXPORTS SelfSimDescriptor + { + public: + SelfSimDescriptor(); + SelfSimDescriptor(int _ssize, int _lsize, + int _startDistanceBucket=DEFAULT_START_DISTANCE_BUCKET, + int _numberOfDistanceBuckets=DEFAULT_NUM_DISTANCE_BUCKETS, + int _nangles=DEFAULT_NUM_ANGLES); + SelfSimDescriptor(const SelfSimDescriptor& ss); + virtual ~SelfSimDescriptor(); + SelfSimDescriptor& operator = (const SelfSimDescriptor& ss); + + size_t getDescriptorSize() const; + Size getGridSize( Size imgsize, Size winStride ) const; + + virtual void compute(const Mat& img, vector& descriptors, Size winStride=Size(), + const vector& locations=vector()) const; + virtual void computeLogPolarMapping(Mat& mappingMask) const; + virtual void SSD(const Mat& img, Point pt, Mat& ssd) const; + + int smallSize; + int largeSize; + int startDistanceBucket; + int numberOfDistanceBuckets; + int numberOfAngles; + + enum { DEFAULT_SMALL_SIZE = 5, DEFAULT_LARGE_SIZE = 41, + DEFAULT_NUM_ANGLES = 20, DEFAULT_START_DISTANCE_BUCKET = 3, + DEFAULT_NUM_DISTANCE_BUCKETS = 7 }; + }; + + + typedef bool (*BundleAdjustCallback)(int iteration, double norm_error, void* user_data); + + class CV_EXPORTS LevMarqSparse { + public: + LevMarqSparse(); + LevMarqSparse(int npoints, // number of 
points + int ncameras, // number of cameras + int nPointParams, // number of params per one point (3 in case of 3D points) + int nCameraParams, // number of parameters per one camera + int nErrParams, // number of parameters in measurement vector + // for 1 point at one camera (2 in case of 2D projections) + Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras + // 1 - point is visible for the camera, 0 - invisible + Mat& P0, // starting vector of parameters, first cameras then points + Mat& X, // measurements, in order of visibility. non visible cases are skipped + TermCriteria criteria, // termination criteria + + // callback for estimation of Jacobian matrices + void (CV_CDECL * fjac)(int i, int j, Mat& point_params, + Mat& cam_params, Mat& A, Mat& B, void* data), + // callback for estimation of backprojection errors + void (CV_CDECL * func)(int i, int j, Mat& point_params, + Mat& cam_params, Mat& estim, void* data), + void* data, // user-specific data passed to the callbacks + BundleAdjustCallback cb, void* user_data + ); + + virtual ~LevMarqSparse(); + + virtual void run( int npoints, // number of points + int ncameras, // number of cameras + int nPointParams, // number of params per one point (3 in case of 3D points) + int nCameraParams, // number of parameters per one camera + int nErrParams, // number of parameters in measurement vector + // for 1 point at one camera (2 in case of 2D projections) + Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras + // 1 - point is visible for the camera, 0 - invisible + Mat& P0, // starting vector of parameters, first cameras then points + Mat& X, // measurements, in order of visibility. non visible cases are skipped + TermCriteria criteria, // termination criteria + + // callback for estimation of Jacobian matrices + void (CV_CDECL * fjac)(int i, int j, Mat& point_params, + Mat& cam_params, Mat& A, Mat& B, void* data), + // callback for estimation of backprojection errors + void (CV_CDECL * func)(int i, int j, Mat& point_params, + Mat& cam_params, Mat& estim, void* data), + void* data // user-specific data passed to the callbacks + ); + + virtual void clear(); + + // useful function to do simple bundle adjustment tasks + static void bundleAdjust(vector& points, // positions of points in global coordinate system (input and output) + const vector >& imagePoints, // projections of 3d points for every camera + const vector >& visibility, // visibility of 3d points for every camera + vector& cameraMatrix, // intrinsic matrices of all cameras (input and output) + vector& R, // rotation matrices of all cameras (input and output) + vector& T, // translation vector of all cameras (input and output) + vector& distCoeffs, // distortion coefficients of all cameras (input and output) + const TermCriteria& criteria= + TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON), + BundleAdjustCallback cb = 0, void* user_data = 0); + + public: + virtual void optimize(CvMat &_vis); //main function that runs minimization + + //iteratively asks for measurement for visible camera-point pairs + void ask_for_proj(CvMat &_vis,bool once=false); + //iteratively asks for Jacobians for every camera_point pair + void ask_for_projac(CvMat &_vis); + + CvMat* err; //error X-hX + double prevErrNorm, errNorm; + double lambda; + CvTermCriteria criteria; + int iters; + + CvMat** U; //size of array is equal to number of cameras + CvMat** V; //size of array is equal to number of points 
+ CvMat** inv_V_star; //inverse of V* + + CvMat** A; + CvMat** B; + CvMat** W; + + CvMat* X; //measurement + CvMat* hX; //current measurement extimation given new parameter vector + + CvMat* prevP; //current already accepted parameter. + CvMat* P; // parameters used to evaluate function with new params + // this parameters may be rejected + + CvMat* deltaP; //computed increase of parameters (result of normal system solution ) + + CvMat** ea; // sum_i AijT * e_ij , used as right part of normal equation + // length of array is j = number of cameras + CvMat** eb; // sum_j BijT * e_ij , used as right part of normal equation + // length of array is i = number of points + + CvMat** Yj; //length of array is i = num_points + + CvMat* S; //big matrix of block Sjk , each block has size num_cam_params x num_cam_params + + CvMat* JtJ_diag; //diagonal of JtJ, used to backup diagonal elements before augmentation + + CvMat* Vis_index; // matrix which element is index of measurement for point i and camera j + + int num_cams; + int num_points; + int num_err_param; + int num_cam_param; + int num_point_param; + + //target function and jacobian pointers, which needs to be initialized + void (*fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data); + void (*func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data); + + void* data; + + BundleAdjustCallback cb; + void* user_data; + }; + + CV_EXPORTS_W int chamerMatching( Mat& img, Mat& templ, + CV_OUT vector >& results, CV_OUT vector& cost, + double templScale=1, int maxMatches = 20, + double minMatchDistance = 1.0, int padX = 3, + int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6, + double orientationWeight = 0.5, double truncate = 20); + + + class CV_EXPORTS_W StereoVar + { + public: + // Flags + enum {USE_INITIAL_DISPARITY = 1, USE_EQUALIZE_HIST = 2, USE_SMART_ID = 4, USE_AUTO_PARAMS = 8, USE_MEDIAN_FILTERING = 16}; + enum {CYCLE_O, CYCLE_V}; + enum {PENALIZATION_TICHONOV, PENALIZATION_CHARBONNIER, PENALIZATION_PERONA_MALIK}; + + //! the default constructor + CV_WRAP StereoVar(); + + //! the full constructor taking all the necessary algorithm parameters + CV_WRAP StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags); + + //! the destructor + virtual ~StereoVar(); + + //! 
the stereo correspondence operator that computes disparity map for the specified rectified stereo pair + CV_WRAP_AS(compute) virtual void operator()(const Mat& left, const Mat& right, CV_OUT Mat& disp); + + CV_PROP_RW int levels; + CV_PROP_RW double pyrScale; + CV_PROP_RW int nIt; + CV_PROP_RW int minDisp; + CV_PROP_RW int maxDisp; + CV_PROP_RW int poly_n; + CV_PROP_RW double poly_sigma; + CV_PROP_RW float fi; + CV_PROP_RW float lambda; + CV_PROP_RW int penalization; + CV_PROP_RW int cycle; + CV_PROP_RW int flags; + + private: + void autoParams(); + void FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level); + void VCycle_MyFAS(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level); + void VariationalSolver(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level); + }; + + CV_EXPORTS void polyfit(const Mat& srcx, const Mat& srcy, Mat& dst, int order); + + class CV_EXPORTS Directory + { + public: + static std::vector GetListFiles ( const std::string& path, const std::string & exten = "*", bool addPath = true ); + static std::vector GetListFilesR ( const std::string& path, const std::string & exten = "*", bool addPath = true ); + static std::vector GetListFolders( const std::string& path, const std::string & exten = "*", bool addPath = true ); + }; + + /* + * Generation of a set of different colors by the following way: + * 1) generate more then need colors (in "factor" times) in RGB, + * 2) convert them to Lab, + * 3) choose the needed count of colors from the set that are more different from + * each other, + * 4) convert the colors back to RGB + */ + CV_EXPORTS void generateColors( std::vector& colors, size_t count, size_t factor=100 ); + + + /* + * Estimate the rigid body motion from frame0 to frame1. The method is based on the paper + * "Real-Time Visual Odometry from Dense RGB-D Images", F. Steinbucker, J. Strum, D. Cremers, ICCV, 2011. + */ + enum { ROTATION = 1, + TRANSLATION = 2, + RIGID_BODY_MOTION = 4 + }; + CV_EXPORTS bool RGBDOdometry( Mat& Rt, const Mat& initRt, + const Mat& image0, const Mat& depth0, const Mat& mask0, + const Mat& image1, const Mat& depth1, const Mat& mask1, + const Mat& cameraMatrix, float minDepth=0.f, float maxDepth=4.f, float maxDepthDiff=0.07f, + const std::vector& iterCounts=std::vector(), + const std::vector& minGradientMagnitudes=std::vector(), + int transformType=RIGID_BODY_MOTION ); + + /** + *Bilinear interpolation technique. + * + *The value of a desired cortical pixel is obtained through a bilinear interpolation of the values + *of the four nearest neighbouring Cartesian pixels to the center of the RF. + *The same principle is applied to the inverse transformation. + * + *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5 + */ + class CV_EXPORTS LogPolar_Interp + { + public: + + LogPolar_Interp() {} + + /** + *Constructor + *\param w the width of the input image + *\param h the height of the input image + *\param center the transformation center: where the output precision is maximal + *\param R the number of rings of the cortical image (default value 70 pixel) + *\param ro0 the radius of the blind spot (default value 3 pixel) + *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle. + * \a 0 means that the retinal image is computed within the inscribed circle. + *\param S the number of sectors of the cortical image (default value 70 pixel). + * Its value is usually internally computed to obtain a pixel aspect ratio equals to 1. 
+ *\param sp \a 1 (default value) means that the parameter \a S is internally computed. + * \a 0 means that the parameter \a S is provided by the user. + */ + LogPolar_Interp(int w, int h, Point2i center, int R=70, double ro0=3.0, + int interp=INTER_LINEAR, int full=1, int S=117, int sp=1); + /** + *Transformation from Cartesian image to cortical (log-polar) image. + *\param source the Cartesian image + *\return the transformed image (cortical image) + */ + const Mat to_cortical(const Mat &source); + /** + *Transformation from cortical image to retinal (inverse log-polar) image. + *\param source the cortical image + *\return the transformed image (retinal image) + */ + const Mat to_cartesian(const Mat &source); + /** + *Destructor + */ + ~LogPolar_Interp(); + + protected: + + Mat Rsri; + Mat Csri; + + int S, R, M, N; + int top, bottom,left,right; + double ro0, romax, a, q; + int interp; + + Mat ETAyx; + Mat CSIyx; + + void create_map(int M, int N, int R, int S, double ro0); + }; + + /** + *Overlapping circular receptive fields technique + * + *The Cartesian plane is divided in two regions: the fovea and the periphery. + *The fovea (oversampling) is handled by using the bilinear interpolation technique described above, whereas in + *the periphery we use the overlapping Gaussian circular RFs. + * + *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5 + */ + class CV_EXPORTS LogPolar_Overlapping + { + public: + LogPolar_Overlapping() {} + + /** + *Constructor + *\param w the width of the input image + *\param h the height of the input image + *\param center the transformation center: where the output precision is maximal + *\param R the number of rings of the cortical image (default value 70 pixel) + *\param ro0 the radius of the blind spot (default value 3 pixel) + *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle. + * \a 0 means that the retinal image is computed within the inscribed circle. + *\param S the number of sectors of the cortical image (default value 70 pixel). + * Its value is usually internally computed to obtain a pixel aspect ratio equals to 1. + *\param sp \a 1 (default value) means that the parameter \a S is internally computed. + * \a 0 means that the parameter \a S is provided by the user. + */ + LogPolar_Overlapping(int w, int h, Point2i center, int R=70, + double ro0=3.0, int full=1, int S=117, int sp=1); + /** + *Transformation from Cartesian image to cortical (log-polar) image. + *\param source the Cartesian image + *\return the transformed image (cortical image) + */ + const Mat to_cortical(const Mat &source); + /** + *Transformation from cortical image to retinal (inverse log-polar) image. + *\param source the cortical image + *\return the transformed image (retinal image) + */ + const Mat to_cartesian(const Mat &source); + /** + *Destructor + */ + ~LogPolar_Overlapping(); + + protected: + + Mat Rsri; + Mat Csri; + vector Rsr; + vector Csr; + vector Wsr; + + int S, R, M, N, ind1; + int top, bottom,left,right; + double ro0, romax, a, q; + + struct kernel + { + kernel() { w = 0; } + vector weights; + int w; + }; + + Mat ETAyx; + Mat CSIyx; + vector w_ker_2D; + + void create_map(int M, int N, int R, int S, double ro0); + }; + + /** + * Adjacent receptive fields technique + * + *All the Cartesian pixels, whose coordinates in the cortical domain share the same integer part, are assigned to the same RF. 
+ *The precision of the boundaries of the RF can be improved by breaking each pixel into subpixels and assigning each of them to the correct RF. + *This technique is implemented from: Traver, V., Pla, F.: Log-polar mapping template design: From task-level requirements + *to geometry parameters. Image Vision Comput. 26(10) (2008) 1354-1370 + * + *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5 + */ + class CV_EXPORTS LogPolar_Adjacent + { + public: + LogPolar_Adjacent() {} + + /** + *Constructor + *\param w the width of the input image + *\param h the height of the input image + *\param center the transformation center: where the output precision is maximal + *\param R the number of rings of the cortical image (default value 70 pixel) + *\param ro0 the radius of the blind spot (default value 3 pixel) + *\param smin the size of the subpixel (default value 0.25 pixel) + *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle. + * \a 0 means that the retinal image is computed within the inscribed circle. + *\param S the number of sectors of the cortical image (default value 70 pixel). + * Its value is usually internally computed to obtain a pixel aspect ratio equals to 1. + *\param sp \a 1 (default value) means that the parameter \a S is internally computed. + * \a 0 means that the parameter \a S is provided by the user. + */ + LogPolar_Adjacent(int w, int h, Point2i center, int R=70, double ro0=3.0, double smin=0.25, int full=1, int S=117, int sp=1); + /** + *Transformation from Cartesian image to cortical (log-polar) image. + *\param source the Cartesian image + *\return the transformed image (cortical image) + */ + const Mat to_cortical(const Mat &source); + /** + *Transformation from cortical image to retinal (inverse log-polar) image. + *\param source the cortical image + *\return the transformed image (retinal image) + */ + const Mat to_cartesian(const Mat &source); + /** + *Destructor + */ + ~LogPolar_Adjacent(); + + protected: + struct pixel + { + pixel() { u = v = 0; a = 0.; } + int u; + int v; + double a; + }; + int S, R, M, N; + int top, bottom,left,right; + double ro0, romax, a, q; + vector > L; + vector A; + + void subdivide_recursively(double x, double y, int i, int j, double length, double smin); + bool get_uv(double x, double y, int&u, int&v); + void create_map(int M, int N, int R, int S, double ro0, double smin); + }; + + CV_EXPORTS Mat subspaceProject(InputArray W, InputArray mean, InputArray src); + CV_EXPORTS Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src); + + class CV_EXPORTS LDA + { + public: + // Initializes a LDA with num_components (default 0) and specifies how + // samples are aligned (default dataAsRow=true). + LDA(int num_components = 0) : + _num_components(num_components) {}; + + // Initializes and performs a Discriminant Analysis with Fisher's + // Optimization Criterion on given data in src and corresponding labels + // in labels. If 0 (or less) number of components are given, they are + // automatically determined for given data in computation. + LDA(const Mat& src, vector labels, + int num_components = 0) : + _num_components(num_components) + { + this->compute(src, labels); //! compute eigenvectors and eigenvalues + } + + // Initializes and performs a Discriminant Analysis with Fisher's + // Optimization Criterion on given data in src and corresponding labels + // in labels. 
If 0 (or less) number of components are given, they are + // automatically determined for given data in computation. + LDA(InputArrayOfArrays src, InputArray labels, + int num_components = 0) : + _num_components(num_components) + { + this->compute(src, labels); //! compute eigenvectors and eigenvalues + } + + // Serializes this object to a given filename. + void save(const string& filename) const; + + // Deserializes this object from a given filename. + void load(const string& filename); + + // Serializes this object to a given cv::FileStorage. + void save(FileStorage& fs) const; + + // Deserializes this object from a given cv::FileStorage. + void load(const FileStorage& node); + + // Destructor. + ~LDA() {} + + //! Compute the discriminants for data in src and labels. + void compute(InputArrayOfArrays src, InputArray labels); + + // Projects samples into the LDA subspace. + Mat project(InputArray src); + + // Reconstructs projections from the LDA subspace. + Mat reconstruct(InputArray src); + + // Returns the eigenvectors of this LDA. + Mat eigenvectors() const { return _eigenvectors; }; + + // Returns the eigenvalues of this LDA. + Mat eigenvalues() const { return _eigenvalues; } + + protected: + bool _dataAsRow; + int _num_components; + Mat _eigenvectors; + Mat _eigenvalues; + + void lda(InputArrayOfArrays src, InputArray labels); + }; + + class CV_EXPORTS_W FaceRecognizer : public Algorithm + { + public: + //! virtual destructor + virtual ~FaceRecognizer() {} + + // Trains a FaceRecognizer. + CV_WRAP virtual void train(InputArrayOfArrays src, InputArray labels) = 0; + + // Updates a FaceRecognizer. + CV_WRAP void update(InputArrayOfArrays src, InputArray labels); + + // Gets a prediction from a FaceRecognizer. + virtual int predict(InputArray src) const = 0; + + // Predicts the label and confidence for a given sample. + CV_WRAP virtual void predict(InputArray src, CV_OUT int &label, CV_OUT double &confidence) const = 0; + + // Serializes this object to a given filename. + CV_WRAP virtual void save(const string& filename) const; + + // Deserializes this object from a given filename. + CV_WRAP virtual void load(const string& filename); + + // Serializes this object to a given cv::FileStorage. + virtual void save(FileStorage& fs) const = 0; + + // Deserializes this object from a given cv::FileStorage. 
+    virtual void load(const FileStorage& fs) = 0;
+
+    };
+
+    CV_EXPORTS_W Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
+    CV_EXPORTS_W Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
+    CV_EXPORTS_W Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius=1, int neighbors=8,
+                                                              int grid_x=8, int grid_y=8, double threshold = DBL_MAX);
+
+    enum
+    {
+        COLORMAP_AUTUMN = 0,
+        COLORMAP_BONE = 1,
+        COLORMAP_JET = 2,
+        COLORMAP_WINTER = 3,
+        COLORMAP_RAINBOW = 4,
+        COLORMAP_OCEAN = 5,
+        COLORMAP_SUMMER = 6,
+        COLORMAP_SPRING = 7,
+        COLORMAP_COOL = 8,
+        COLORMAP_HSV = 9,
+        COLORMAP_PINK = 10,
+        COLORMAP_HOT = 11
+    };
+
+    CV_EXPORTS_W void applyColorMap(InputArray src, OutputArray dst, int colormap);
+
+    CV_EXPORTS bool initModule_contrib();
+}
+
+#include "opencv2/contrib/retina.hpp"
+
+#include "opencv2/contrib/openfabmap.hpp"
+
+#endif
+
+#endif
diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/detection_based_tracker.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/detection_based_tracker.hpp
new file mode 100644
index 0000000..56aa1cc
--- /dev/null
+++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/detection_based_tracker.hpp
@@ -0,0 +1,106 @@
+#pragma once
+
+#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(ANDROID)
+
+#include <opencv2/core/core.hpp>
+#include <opencv2/objdetect/objdetect.hpp>
+
+#include <vector>
+
+class DetectionBasedTracker
+{
+    public:
+        struct Parameters
+        {
+            int minObjectSize;
+            int maxObjectSize;
+            double scaleFactor;
+            int maxTrackLifetime;
+            int minNeighbors;
+            int minDetectionPeriod; //the minimal time between run of the big object detector (on the whole frame) in ms (1000 mean 1 sec), default=0
+
+            Parameters();
+        };
+
+        DetectionBasedTracker(const std::string& cascadeFilename, const Parameters& params);
+        virtual ~DetectionBasedTracker();
+
+        virtual bool run();
+        virtual void stop();
+        virtual void resetTracking();
+
+        virtual void process(const cv::Mat& imageGray);
+
+        bool setParameters(const Parameters& params);
+        const Parameters& getParameters();
+
+
+        typedef std::pair<cv::Rect, int> Object;
+        virtual void getObjects(std::vector<cv::Rect>& result) const;
+        virtual void getObjects(std::vector<Object>& result) const;
+
+    protected:
+        class SeparateDetectionWork;
+        cv::Ptr<SeparateDetectionWork> separateDetectionWork;
+        friend void* workcycleObjectDetectorFunction(void* p);
+
+
+        struct InnerParameters
+        {
+            int numLastPositionsToTrack;
+            int numStepsToWaitBeforeFirstShow;
+            int numStepsToTrackWithoutDetectingIfObjectHasNotBeenShown;
+            int numStepsToShowWithoutDetecting;
+
+            float coeffTrackingWindowSize;
+            float coeffObjectSizeToTrack;
+            float coeffObjectSpeedUsingInPrediction;
+
+            InnerParameters();
+        };
+        Parameters parameters;
+        InnerParameters innerParameters;
+
+        struct TrackedObject
+        {
+            typedef std::vector<cv::Rect> PositionsVector;
+
+            PositionsVector lastPositions;
+
+            int numDetectedFrames;
+            int numFramesNotDetected;
+            int id;
+
+            TrackedObject(const cv::Rect& rect):numDetectedFrames(1), numFramesNotDetected(0)
+            {
+                lastPositions.push_back(rect);
+                id=getNextId();
+            };
+
+            static int getNextId()
+            {
+                static int _id=0;
+                return _id++;
+            }
+        };
+
+        int numTrackedSteps;
+        std::vector<TrackedObject> trackedObjects;
+
+        std::vector<float> weightsPositionsSmoothing;
+        std::vector<float> weightsSizesSmoothing;
+
+        cv::CascadeClassifier cascadeForTracking;
+
+
+        void updateTrackedObjects(const std::vector<cv::Rect>& detectedObjects);
+        cv::Rect calcTrackedObjectPositionToShow(int i) const;
+        void detectInRegion(const cv::Mat&
img, const cv::Rect& r, std::vector& detectedObjectsInRegions); +}; + +namespace cv +{ + using ::DetectionBasedTracker; +} //end of cv namespace + +#endif diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/hybridtracker.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/hybridtracker.hpp new file mode 100644 index 0000000..3a1f722 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/hybridtracker.hpp @@ -0,0 +1,220 @@ +//*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_HYBRIDTRACKER_H_ +#define __OPENCV_HYBRIDTRACKER_H_ + +#include "opencv2/core/core.hpp" +#include "opencv2/core/operations.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/features2d/features2d.hpp" +#include "opencv2/video/tracking.hpp" +#include "opencv2/ml/ml.hpp" + +#ifdef __cplusplus + +namespace cv +{ + +// Motion model for tracking algorithm. Currently supports objects that do not move much. +// To add Kalman filter +struct CV_EXPORTS CvMotionModel +{ + enum {LOW_PASS_FILTER = 0, KALMAN_FILTER = 1, EM = 2}; + + CvMotionModel() + { + } + + float low_pass_gain; // low pass gain +}; + +// Mean Shift Tracker parameters for specifying use of HSV channel and CamShift parameters. 
+struct CV_EXPORTS CvMeanShiftTrackerParams +{ + enum { H = 0, HS = 1, HSV = 2 }; + CvMeanShiftTrackerParams(int tracking_type = CvMeanShiftTrackerParams::HS, + CvTermCriteria term_crit = CvTermCriteria()); + + int tracking_type; + vector h_range; + vector s_range; + vector v_range; + CvTermCriteria term_crit; +}; + +// Feature tracking parameters +struct CV_EXPORTS CvFeatureTrackerParams +{ + enum { SIFT = 0, SURF = 1, OPTICAL_FLOW = 2 }; + CvFeatureTrackerParams(int featureType = 0, int windowSize = 0) + { + feature_type = featureType; + window_size = windowSize; + } + + int feature_type; // Feature type to use + int window_size; // Window size in pixels around which to search for new window +}; + +// Hybrid Tracking parameters for specifying weights of individual trackers and motion model. +struct CV_EXPORTS CvHybridTrackerParams +{ + CvHybridTrackerParams(float ft_tracker_weight = 0.5, float ms_tracker_weight = 0.5, + CvFeatureTrackerParams ft_params = CvFeatureTrackerParams(), + CvMeanShiftTrackerParams ms_params = CvMeanShiftTrackerParams(), + CvMotionModel model = CvMotionModel()); + + float ft_tracker_weight; + float ms_tracker_weight; + CvFeatureTrackerParams ft_params; + CvMeanShiftTrackerParams ms_params; + int motion_model; + float low_pass_gain; +}; + +// Performs Camshift using parameters from MeanShiftTrackerParams +class CV_EXPORTS CvMeanShiftTracker +{ +private: + Mat hsv, hue; + Mat backproj; + Mat mask, maskroi; + MatND hist; + Rect prev_trackwindow; + RotatedRect prev_trackbox; + Point2f prev_center; + +public: + CvMeanShiftTrackerParams params; + + CvMeanShiftTracker(); + explicit CvMeanShiftTracker(CvMeanShiftTrackerParams _params); + ~CvMeanShiftTracker(); + void newTrackingWindow(Mat image, Rect selection); + RotatedRect updateTrackingWindow(Mat image); + Mat getHistogramProjection(int type); + void setTrackingWindow(Rect _window); + Rect getTrackingWindow(); + RotatedRect getTrackingEllipse(); + Point2f getTrackingCenter(); +}; + +// Performs SIFT/SURF feature tracking using parameters from FeatureTrackerParams +class CV_EXPORTS CvFeatureTracker +{ +private: + Ptr dd; + Ptr matcher; + vector matches; + + Mat prev_image; + Mat prev_image_bw; + Rect prev_trackwindow; + Point2d prev_center; + + int ittr; + vector features[2]; + +public: + Mat disp_matches; + CvFeatureTrackerParams params; + + CvFeatureTracker(); + explicit CvFeatureTracker(CvFeatureTrackerParams params); + ~CvFeatureTracker(); + void newTrackingWindow(Mat image, Rect selection); + Rect updateTrackingWindow(Mat image); + Rect updateTrackingWindowWithSIFT(Mat image); + Rect updateTrackingWindowWithFlow(Mat image); + void setTrackingWindow(Rect _window); + Rect getTrackingWindow(); + Point2f getTrackingCenter(); +}; + +// Performs Hybrid Tracking and combines individual trackers using EM or filters +class CV_EXPORTS CvHybridTracker +{ +private: + CvMeanShiftTracker* mstracker; + CvFeatureTracker* fttracker; + + CvMat* samples; + CvMat* labels; + + Rect prev_window; + Point2f prev_center; + Mat prev_proj; + RotatedRect trackbox; + + int ittr; + Point2f curr_center; + + inline float getL2Norm(Point2f p1, Point2f p2); + Mat getDistanceProjection(Mat image, Point2f center); + Mat getGaussianProjection(Mat image, int ksize, double sigma, Point2f center); + void updateTrackerWithEM(Mat image); + void updateTrackerWithLowPassFilter(Mat image); + +public: + CvHybridTrackerParams params; + CvHybridTracker(); + explicit CvHybridTracker(CvHybridTrackerParams params); + ~CvHybridTracker(); + + void newTracker(Mat 
image, Rect selection); + void updateTracker(Mat image); + Rect getTrackingWindow(); +}; + +typedef CvMotionModel MotionModel; +typedef CvMeanShiftTrackerParams MeanShiftTrackerParams; +typedef CvFeatureTrackerParams FeatureTrackerParams; +typedef CvHybridTrackerParams HybridTrackerParams; +typedef CvMeanShiftTracker MeanShiftTracker; +typedef CvFeatureTracker FeatureTracker; +typedef CvHybridTracker HybridTracker; +} + +#endif + +#endif diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/openfabmap.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/openfabmap.hpp new file mode 100644 index 0000000..6b2834e --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/openfabmap.hpp @@ -0,0 +1,405 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// This file originates from the openFABMAP project: +// [http://code.google.com/p/openfabmap/] +// +// For published work which uses all or part of OpenFABMAP, please cite: +// [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6224843] +// +// Original Algorithm by Mark Cummins and Paul Newman: +// [http://ijr.sagepub.com/content/27/6/647.short] +// [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5613942] +// [http://ijr.sagepub.com/content/30/9/1100.abstract] +// +// License Agreement +// +// Copyright (C) 2012 Arren Glover [aj.glover@qut.edu.au] and +// Will Maddern [w.maddern@qut.edu.au], all rights reserved. +// +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_OPENFABMAP_H_ +#define __OPENCV_OPENFABMAP_H_ + +#include "opencv2/core/core.hpp" +#include "opencv2/features2d/features2d.hpp" + +#include +#include +#include +#include +#include + +namespace cv { + +namespace of2 { + +using std::list; +using std::map; +using std::multiset; + +/* + Return data format of a FABMAP compare call +*/ +struct CV_EXPORTS IMatch { + + IMatch() : + queryIdx(-1), imgIdx(-1), likelihood(-DBL_MAX), match(-DBL_MAX) { + } + IMatch(int _queryIdx, int _imgIdx, double _likelihood, double _match) : + queryIdx(_queryIdx), imgIdx(_imgIdx), likelihood(_likelihood), match( + _match) { + } + + int queryIdx; //query index + int imgIdx; //test index + + double likelihood; //raw loglikelihood + double match; //normalised probability + + bool operator<(const IMatch& m) const { + return match < m.match; + } + +}; + +/* + Base FabMap class. Each FabMap method inherits from this class. +*/ +class CV_EXPORTS FabMap { +public: + + //FabMap options + enum { + MEAN_FIELD = 1, + SAMPLED = 2, + NAIVE_BAYES = 4, + CHOW_LIU = 8, + MOTION_MODEL = 16 + }; + + FabMap(const Mat& clTree, double PzGe, double PzGNe, int flags, + int numSamples = 0); + virtual ~FabMap(); + + //methods to add training data for sampling method + virtual void addTraining(const Mat& queryImgDescriptor); + virtual void addTraining(const vector& queryImgDescriptors); + + //methods to add to the test data + virtual void add(const Mat& queryImgDescriptor); + virtual void add(const vector& queryImgDescriptors); + + //accessors + const vector& getTrainingImgDescriptors() const; + const vector& getTestImgDescriptors() const; + + //Main FabMap image comparison + void compare(const Mat& queryImgDescriptor, + vector& matches, bool addQuery = false, + const Mat& mask = Mat()); + void compare(const Mat& queryImgDescriptor, + const Mat& testImgDescriptors, vector& matches, + const Mat& mask = Mat()); + void compare(const Mat& queryImgDescriptor, + const vector& testImgDescriptors, + vector& matches, const Mat& mask = Mat()); + void compare(const vector& queryImgDescriptors, vector< + IMatch>& matches, bool addQuery = false, const Mat& mask = + Mat()); + void compare(const vector& queryImgDescriptors, + const vector& testImgDescriptors, + vector& matches, const Mat& mask = Mat()); + +protected: + + void compareImgDescriptor(const Mat& queryImgDescriptor, + int queryIndex, const vector& testImgDescriptors, + vector& matches); + + void addImgDescriptor(const Mat& queryImgDescriptor); + + //the getLikelihoods method is overwritten for each different FabMap + //method. 
+ virtual void getLikelihoods(const Mat& queryImgDescriptor, + const vector& testImgDescriptors, + vector& matches); + virtual double getNewPlaceLikelihood(const Mat& queryImgDescriptor); + + //turn likelihoods into probabilities (also add in motion model if used) + void normaliseDistribution(vector& matches); + + //Chow-Liu Tree + int pq(int q); + double Pzq(int q, bool zq); + double PzqGzpq(int q, bool zq, bool zpq); + + //FAB-MAP Core + double PzqGeq(bool zq, bool eq); + double PeqGL(int q, bool Lzq, bool eq); + double PzqGL(int q, bool zq, bool zpq, bool Lzq); + double PzqGzpqL(int q, bool zq, bool zpq, bool Lzq); + double (FabMap::*PzGL)(int q, bool zq, bool zpq, bool Lzq); + + //data + Mat clTree; + vector trainingImgDescriptors; + vector testImgDescriptors; + vector priorMatches; + + //parameters + double PzGe; + double PzGNe; + double Pnew; + + double mBias; + double sFactor; + + int flags; + int numSamples; + +}; + +/* + The original FAB-MAP algorithm, developed based on: + http://ijr.sagepub.com/content/27/6/647.short +*/ +class CV_EXPORTS FabMap1: public FabMap { +public: + FabMap1(const Mat& clTree, double PzGe, double PzGNe, int flags, + int numSamples = 0); + virtual ~FabMap1(); +protected: + + //FabMap1 implementation of likelihood comparison + void getLikelihoods(const Mat& queryImgDescriptor, const vector< + Mat>& testImgDescriptors, vector& matches); +}; + +/* + A computationally faster version of the original FAB-MAP algorithm. A look- + up-table is used to precompute many of the reoccuring calculations +*/ +class CV_EXPORTS FabMapLUT: public FabMap { +public: + FabMapLUT(const Mat& clTree, double PzGe, double PzGNe, + int flags, int numSamples = 0, int precision = 6); + virtual ~FabMapLUT(); +protected: + + //FabMap look-up-table implementation of the likelihood comparison + void getLikelihoods(const Mat& queryImgDescriptor, const vector< + Mat>& testImgDescriptors, vector& matches); + + //precomputed data + int (*table)[8]; + + //data precision + int precision; +}; + +/* + The Accelerated FAB-MAP algorithm, developed based on: + http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5613942 +*/ +class CV_EXPORTS FabMapFBO: public FabMap { +public: + FabMapFBO(const Mat& clTree, double PzGe, double PzGNe, int flags, + int numSamples = 0, double rejectionThreshold = 1e-8, double PsGd = + 1e-8, int bisectionStart = 512, int bisectionIts = 9); + virtual ~FabMapFBO(); + +protected: + + //FabMap Fast Bail-out implementation of the likelihood comparison + void getLikelihoods(const Mat& queryImgDescriptor, const vector< + Mat>& testImgDescriptors, vector& matches); + + //stucture used to determine word comparison order + struct WordStats { + WordStats() : + q(0), info(0), V(0), M(0) { + } + + WordStats(int _q, double _info) : + q(_q), info(_info), V(0), M(0) { + } + + int q; + double info; + mutable double V; + mutable double M; + + bool operator<(const WordStats& w) const { + return info < w.info; + } + + }; + + //private fast bail-out necessary functions + void setWordStatistics(const Mat& queryImgDescriptor, multiset& wordData); + double limitbisection(double v, double m); + double bennettInequality(double v, double m, double delta); + static bool compInfo(const WordStats& first, const WordStats& second); + + //parameters + double PsGd; + double rejectionThreshold; + int bisectionStart; + int bisectionIts; +}; + +/* + The FAB-MAP2.0 algorithm, developed based on: + http://ijr.sagepub.com/content/30/9/1100.abstract +*/ +class CV_EXPORTS FabMap2: public FabMap { +public: 
+ + FabMap2(const Mat& clTree, double PzGe, double PzGNe, int flags); + virtual ~FabMap2(); + + //FabMap2 builds the inverted index and requires an additional training/test + //add function + void addTraining(const Mat& queryImgDescriptors) { + FabMap::addTraining(queryImgDescriptors); + } + void addTraining(const vector& queryImgDescriptors); + + void add(const Mat& queryImgDescriptors) { + FabMap::add(queryImgDescriptors); + } + void add(const vector& queryImgDescriptors); + +protected: + + //FabMap2 implementation of the likelihood comparison + void getLikelihoods(const Mat& queryImgDescriptor, const vector< + Mat>& testImgDescriptors, vector& matches); + double getNewPlaceLikelihood(const Mat& queryImgDescriptor); + + //the likelihood function using the inverted index + void getIndexLikelihoods(const Mat& queryImgDescriptor, vector< + double>& defaults, map >& invertedMap, + vector& matches); + void addToIndex(const Mat& queryImgDescriptor, + vector& defaults, + map >& invertedMap); + + //data + vector d1, d2, d3, d4; + vector > children; + + // TODO: inverted map a vector? + + vector trainingDefaults; + map > trainingInvertedMap; + + vector testDefaults; + map > testInvertedMap; + +}; +/* + A Chow-Liu tree is required by FAB-MAP. The Chow-Liu tree provides an + estimate of the full distribution of visual words using a minimum spanning + tree. The tree is generated through training data. +*/ +class CV_EXPORTS ChowLiuTree { +public: + ChowLiuTree(); + virtual ~ChowLiuTree(); + + //add data to the chow-liu tree before calling make + void add(const Mat& imgDescriptor); + void add(const vector& imgDescriptors); + + const vector& getImgDescriptors() const; + + Mat make(double infoThreshold = 0.0); + +private: + vector imgDescriptors; + Mat mergedImgDescriptors; + + typedef struct info { + float score; + short word1; + short word2; + } info; + + //probabilities extracted from mergedImgDescriptors + double P(int a, bool za); + double JP(int a, bool za, int b, bool zb); //a & b + double CP(int a, bool za, int b, bool zb); // a | b + + //calculating mutual information of all edges + void createBaseEdges(list& edges, double infoThreshold); + double calcMutInfo(int word1, int word2); + static bool sortInfoScores(const info& first, const info& second); + + //selecting minimum spanning egdges with maximum information + bool reduceEdgesToMinSpan(list& edges); + + //building the tree sctructure + Mat buildTree(int root_word, list &edges); + void recAddToTree(Mat &cltree, int q, int pq, + list &remaining_edges); + vector extractChildren(list &remaining_edges, int q); + +}; + +/* + A custom vocabulary training method based on: + http://www.springerlink.com/content/d1h6j8x552532003/ +*/ +class CV_EXPORTS BOWMSCTrainer: public BOWTrainer { +public: + BOWMSCTrainer(double clusterSize = 0.4); + virtual ~BOWMSCTrainer(); + + // Returns trained vocabulary (i.e. cluster centers). 
+ virtual Mat cluster() const; + virtual Mat cluster(const Mat& descriptors) const; + +protected: + + double clusterSize; + +}; + +} + +} + +#endif /* OPENFABMAP_H_ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/retina.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/retina.hpp new file mode 100644 index 0000000..3d7c847 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/contrib/retina.hpp @@ -0,0 +1,355 @@ +/*#****************************************************************************** + ** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. + ** + ** By downloading, copying, installing or using the software you agree to this license. + ** If you do not agree to this license, do not download, install, + ** copy or use the software. + ** + ** + ** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab. + ** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping. + ** + ** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications) + ** + ** Creation - enhancement process 2007-2011 + ** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France + ** + ** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr). + ** Refer to the following research paper for more information: + ** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011 + ** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book: + ** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891. + ** + ** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author : + ** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper: + ** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007 + ** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions. + ** ====> more informations in the above cited Jeanny Heraults's book. + ** + ** License Agreement + ** For Open Source Computer Vision Library + ** + ** Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 
+ ** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved. + ** + ** For Human Visual System tools (hvstools) + ** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved. + ** + ** Third party copyrights are property of their respective owners. + ** + ** Redistribution and use in source and binary forms, with or without modification, + ** are permitted provided that the following conditions are met: + ** + ** * Redistributions of source code must retain the above copyright notice, + ** this list of conditions and the following disclaimer. + ** + ** * Redistributions in binary form must reproduce the above copyright notice, + ** this list of conditions and the following disclaimer in the documentation + ** and/or other materials provided with the distribution. + ** + ** * The name of the copyright holders may not be used to endorse or promote products + ** derived from this software without specific prior written permission. + ** + ** This software is provided by the copyright holders and contributors "as is" and + ** any express or implied warranties, including, but not limited to, the implied + ** warranties of merchantability and fitness for a particular purpose are disclaimed. + ** In no event shall the Intel Corporation or contributors be liable for any direct, + ** indirect, incidental, special, exemplary, or consequential damages + ** (including, but not limited to, procurement of substitute goods or services; + ** loss of use, data, or profits; or business interruption) however caused + ** and on any theory of liability, whether in contract, strict liability, + ** or tort (including negligence or otherwise) arising in any way out of + ** the use of this software, even if advised of the possibility of such damage. + *******************************************************************************/ + +#ifndef __OPENCV_CONTRIB_RETINA_HPP__ +#define __OPENCV_CONTRIB_RETINA_HPP__ + +/* + * Retina.hpp + * + * Created on: Jul 19, 2011 + * Author: Alexandre Benoit + */ + +#include "opencv2/core/core.hpp" // for all OpenCV core functionalities access, including cv::Exception support +#include + +namespace cv +{ + +enum RETINA_COLORSAMPLINGMETHOD +{ + RETINA_COLOR_RANDOM, //!< each pixel position is either R, G or B in a random choice + RETINA_COLOR_DIAGONAL,//!< color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3, GBRGBRGBR... + RETINA_COLOR_BAYER//!< standard bayer sampling +}; + +class RetinaFilter; + +/** + * @class Retina a wrapper class which allows the Gipsa/Listic Labs model to be used. + * This retina model allows spatio-temporal image processing (applied on still images, video sequences). 
+ * As a summary, these are the retina model properties:
+ * => It applies a spectral whitening (mid-frequency details enhancement)
+ * => high frequency spatio-temporal noise reduction
+ * => low frequency luminance to be reduced (luminance range compression)
+ * => local logarithmic luminance compression allows details to be enhanced in low light conditions
+ *
+ * USE : this model can be used basically for spatio-temporal video effects but also for :
+ *      _using the getParvo method output matrix : texture analysis with enhanced signal to noise ratio and enhanced details robust against input images luminance ranges
+ *      _using the getMagno method output matrix : motion analysis also with the previously cited properties
+ *
+ * for more information, refer to the following papers :
+ * Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
+ * Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing), By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.
+ *
+ * The retina filter includes the research contributions of phd/research colleagues from which code has been redrawn by the author :
+ * _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper:
+ * ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007
+ * _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions.
+ * ====> more information in the above cited Jeanny Herault's book.
+ */ +class CV_EXPORTS Retina { + +public: + + // parameters structure for better clarity, check explenations on the comments of methods : setupOPLandIPLParvoChannel and setupIPLMagnoChannel + struct RetinaParameters{ + struct OPLandIplParvoParameters{ // Outer Plexiform Layer (OPL) and Inner Plexiform Layer Parvocellular (IplParvo) parameters + OPLandIplParvoParameters():colorMode(true), + normaliseOutput(true), + photoreceptorsLocalAdaptationSensitivity(0.7f), + photoreceptorsTemporalConstant(0.5f), + photoreceptorsSpatialConstant(0.53f), + horizontalCellsGain(0.0f), + hcellsTemporalConstant(1.f), + hcellsSpatialConstant(7.f), + ganglionCellsSensitivity(0.7f){};// default setup + bool colorMode, normaliseOutput; + float photoreceptorsLocalAdaptationSensitivity, photoreceptorsTemporalConstant, photoreceptorsSpatialConstant, horizontalCellsGain, hcellsTemporalConstant, hcellsSpatialConstant, ganglionCellsSensitivity; + }; + struct IplMagnoParameters{ // Inner Plexiform Layer Magnocellular channel (IplMagno) + IplMagnoParameters(): + normaliseOutput(true), + parasolCells_beta(0.f), + parasolCells_tau(0.f), + parasolCells_k(7.f), + amacrinCellsTemporalCutFrequency(1.2f), + V0CompressionParameter(0.95f), + localAdaptintegration_tau(0.f), + localAdaptintegration_k(7.f){};// default setup + bool normaliseOutput; + float parasolCells_beta, parasolCells_tau, parasolCells_k, amacrinCellsTemporalCutFrequency, V0CompressionParameter, localAdaptintegration_tau, localAdaptintegration_k; + }; + struct OPLandIplParvoParameters OPLandIplParvo; + struct IplMagnoParameters IplMagno; + }; + + /** + * Main constructor with most commun use setup : create an instance of color ready retina model + * @param inputSize : the input frame size + */ + Retina(Size inputSize); + + /** + * Complete Retina filter constructor which allows all basic structural parameters definition + * @param inputSize : the input frame size + * @param colorMode : the chosen processing mode : with or without color processing + * @param colorSamplingMethod: specifies which kind of color sampling will be used + * @param useRetinaLogSampling: activate retina log sampling, if true, the 2 following parameters can be used + * @param reductionFactor: only usefull if param useRetinaLogSampling=true, specifies the reduction factor of the output frame (as the center (fovea) is high resolution and corners can be underscaled, then a reduction of the output is allowed without precision leak + * @param samplingStrenght: only usefull if param useRetinaLogSampling=true, specifies the strenght of the log scale that is applied + */ + Retina(Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0); + + virtual ~Retina(); + + /** + * retreive retina input buffer size + */ + Size inputSize(); + + /** + * retreive retina output buffer size + */ + Size outputSize(); + + /** + * try to open an XML retina parameters file to adjust current retina instance setup + * => if the xml file does not exist, then default setup is applied + * => warning, Exceptions are thrown if read XML file is not valid + * @param retinaParameterFile : the parameters filename + * @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error + */ + void setup(std::string retinaParameterFile="", const bool applyDefaultSetupOnFailure=true); + + + /** + * try to open an XML retina parameters file to adjust current 
retina instance setup
+ * => if the xml file does not exist, then default setup is applied
+ * => warning, Exceptions are thrown if read XML file is not valid
+ * @param fs : the open FileStorage which contains retina parameters
+ * @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error
+ */
+ void setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure=true);
+
+ /**
+ * adjust the current retina instance setup from an already filled parameters structure
+ * => warning, Exceptions are thrown if the provided setup is not valid
+ * @param newParameters : a parameters structure updated with the new target configuration
+ */
+ void setup(RetinaParameters newParameters);
+
+ /**
+ * @return the current parameters setup
+ */
+ Retina::RetinaParameters getParameters();
+
+ /**
+ * parameters setup display method
+ * @return a string which contains formatted parameters information
+ */
+ const std::string printSetup();
+
+ /**
+ * write xml/yml formatted parameters information
+ * @param fs : the filename of the xml file that will be opened and written with formatted parameters information
+ */
+ virtual void write( std::string fs ) const;
+
+
+ /**
+ * write xml/yml formatted parameters information
+ * @param fs : a cv::FileStorage object ready to be filled
+ */
+ virtual void write( FileStorage& fs ) const;
+
+ /**
+ * setup the OPL and IPL parvo channels (see biological model)
+ * OPL is referred to as the Outer Plexiform Layer of the retina; it allows the spatio-temporal filtering which whitens the spectrum and reduces spatio-temporal noise while attenuating global luminance (low frequency energy)
+ * IPL parvo is the next processing stage after the OPL; it refers to the Inner Plexiform layer of the retina and allows high contour sensitivity in foveal vision.
+ * for more information, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp.
758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
+ * @param colorMode : specifies if (true) color is processed or not (false); if false, a gray level image is processed
+ * @param normaliseOutput : specifies if (true) output is rescaled between 0 and 255 or not (false)
+ * @param photoreceptorsLocalAdaptationSensitivity: the photoreceptors sensitivity range is 0-1 (more log compression effect when value increases)
+ * @param photoreceptorsTemporalConstant: the time constant of the first order low pass filter of the photoreceptors, use it to cut high temporal frequencies (noise or fast motion), unit is frames, typical value is 1 frame
+ * @param photoreceptorsSpatialConstant: the spatial constant of the first order low pass filter of the photoreceptors, use it to cut high spatial frequencies (noise or thick contours), unit is pixels, typical value is 1 pixel
+ * @param horizontalCellsGain: gain of the horizontal cells network, if 0, then the mean value of the output is zero, if the parameter is near 1, then the luminance is not filtered and is still reachable at the output, typical value is 0
+ * @param HcellsTemporalConstant: the time constant of the first order low pass filter of the horizontal cells, use it to cut low temporal frequencies (local luminance variations), unit is frames, typical value is 1 frame, as for the photoreceptors
+ * @param HcellsSpatialConstant: the spatial constant of the first order low pass filter of the horizontal cells, use it to cut low spatial frequencies (local luminance), unit is pixels, typical value is 5 pixels, this value is also used for local contrast computing when computing the local contrast adaptation at the ganglion cells level (Inner Plexiform Layer parvocellular channel model)
+ * @param ganglionCellsSensitivity: the compression strength of the ganglion cells local adaptation output, set a value between 160 and 250 for best results, a high value increases the low value sensitivity more... and the output saturates faster, recommended value: 230
+ */
+ void setupOPLandIPLParvoChannel(const bool colorMode=true, const bool normaliseOutput = true, const float photoreceptorsLocalAdaptationSensitivity=0.7, const float photoreceptorsTemporalConstant=0.5, const float photoreceptorsSpatialConstant=0.53, const float horizontalCellsGain=0, const float HcellsTemporalConstant=1, const float HcellsSpatialConstant=7, const float ganglionCellsSensitivity=0.7);
+
+ /**
+ * set parameter values for the Inner Plexiform Layer (IPL) magnocellular channel
+ * this channel processes signals output from the OPL processing stage in peripheral vision; it allows motion information enhancement. It is decorrelated from the details channel. See reference paper for more details.
+ * @param normaliseOutput : specifies if (true) output is rescaled between 0 and 255 of not (false) + * @param parasolCells_beta: the low pass filter gain used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), typical value is 0 + * @param parasolCells_tau: the low pass filter time constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical value is 0 (immediate response) + * @param parasolCells_k: the low pass filter spatial constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical value is 5 + * @param amacrinCellsTemporalCutFrequency: the time constant of the first order high pass fiter of the magnocellular way (motion information channel), unit is frames, tipicall value is 5 + * @param V0CompressionParameter: the compression strengh of the ganglion cells local adaptation output, set a value between 160 and 250 for best results, a high value increases more the low value sensitivity... and the output saturates faster, recommended value: 200 + * @param localAdaptintegration_tau: specifies the temporal constant of the low pas filter involved in the computation of the local "motion mean" for the local adaptation computation + * @param localAdaptintegration_k: specifies the spatial constant of the low pas filter involved in the computation of the local "motion mean" for the local adaptation computation + */ + void setupIPLMagnoChannel(const bool normaliseOutput = true, const float parasolCells_beta=0, const float parasolCells_tau=0, const float parasolCells_k=7, const float amacrinCellsTemporalCutFrequency=1.2, const float V0CompressionParameter=0.95, const float localAdaptintegration_tau=0, const float localAdaptintegration_k=7); + + /** + * method which allows retina to be applied on an input image, after run, encapsulated retina module is ready to deliver its outputs using dedicated acccessors, see getParvo and getMagno methods + * @param inputImage : the input cv::Mat image to be processed, can be gray level or BGR coded in any format (from 8bit to 16bits) + */ + void run(const Mat &inputImage); + + /** + * accessor of the details channel of the retina (models foveal vision) + * @param retinaOutput_parvo : the output buffer (reallocated if necessary), this output is rescaled for standard 8bits image processing use in OpenCV + */ + void getParvo(Mat &retinaOutput_parvo); + + /** + * accessor of the details channel of the retina (models foveal vision) + * @param retinaOutput_parvo : the output buffer (reallocated if necessary), this output is the original retina filter model output, without any quantification or rescaling + */ + void getParvo(std::valarray &retinaOutput_parvo); + + /** + * accessor of the motion channel of the retina (models peripheral vision) + * @param retinaOutput_magno : the output buffer (reallocated if necessary), this output is rescaled for standard 8bits image processing use in OpenCV + */ + void getMagno(Mat &retinaOutput_magno); + + /** + * accessor of the motion channel of the retina (models peripheral vision) + * @param retinaOutput_magno : the output buffer (reallocated if necessary), this output is the original retina filter model output, without any quantification or rescaling + */ + void getMagno(std::valarray &retinaOutput_magno); + + // original API level data accessors : get buffers addresses... 
+ const std::valarray & getMagno() const; + const std::valarray & getParvo() const; + + /** + * activate color saturation as the final step of the color demultiplexing process + * -> this saturation is a sigmoide function applied to each channel of the demultiplexed image. + * @param saturateColors: boolean that activates color saturation (if true) or desactivate (if false) + * @param colorSaturationValue: the saturation factor + */ + void setColorSaturation(const bool saturateColors=true, const float colorSaturationValue=4.0); + + /** + * clear all retina buffers (equivalent to opening the eyes after a long period of eye close ;o) + */ + void clearBuffers(); + + /** + * Activate/desactivate the Magnocellular pathway processing (motion information extraction), by default, it is activated + * @param activate: true if Magnocellular output should be activated, false if not + */ + void activateMovingContoursProcessing(const bool activate); + + /** + * Activate/desactivate the Parvocellular pathway processing (contours information extraction), by default, it is activated + * @param activate: true if Parvocellular (contours information extraction) output should be activated, false if not + */ + void activateContoursProcessing(const bool activate); + +protected: + // Parameteres setup members + RetinaParameters _retinaParameters; // structure of parameters + + // Retina model related modules + std::valarray _inputBuffer; //!< buffer used to convert input cv::Mat to internal retina buffers format (valarrays) + + // pointer to retina model + RetinaFilter* _retinaFilter; //!< the pointer to the retina module, allocated with instance construction + + /** + * exports a valarray buffer outing from HVStools objects to a cv::Mat in CV_8UC1 (gray level picture) or CV_8UC3 (color) format + * @param grayMatrixToConvert the valarray to export to OpenCV + * @param nbRows : the number of rows of the valarray flatten matrix + * @param nbColumns : the number of rows of the valarray flatten matrix + * @param colorMode : a flag which mentions if matrix is color (true) or graylevel (false) + * @param outBuffer : the output matrix which is reallocated to satisfy Retina output buffer dimensions + */ + void _convertValarrayBuffer2cvMat(const std::valarray &grayMatrixToConvert, const unsigned int nbRows, const unsigned int nbColumns, const bool colorMode, Mat &outBuffer); + + /** + * + * @param inputMatToConvert : the OpenCV cv::Mat that has to be converted to gray or RGB valarray buffer that will be processed by the retina model + * @param outputValarrayMatrix : the output valarray + * @return the input image color mode (color=true, gray levels=false) + */ + bool _convertCvMat2ValarrayBuffer(const cv::Mat inputMatToConvert, std::valarray &outputValarrayMatrix); + + //! 
private method called by constructors, gathers their parameters and use them in a unified way + void _init(const Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0); + + +}; + +} +#endif /* __OPENCV_CONTRIB_RETINA_HPP__ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/affine.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/affine.hpp new file mode 100644 index 0000000..827d044 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/affine.hpp @@ -0,0 +1,509 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_AFFINE3_HPP__ +#define __OPENCV_CORE_AFFINE3_HPP__ + +#ifdef __cplusplus + +#include + +namespace cv +{ + template + class Affine3 + { + public: + typedef T float_type; + typedef Matx Mat3; + typedef Matx Mat4; + typedef Vec Vec3; + + Affine3(); + + //Augmented affine matrix + Affine3(const Mat4& affine); + + //Rotation matrix + Affine3(const Mat3& R, const Vec3& t = Vec3::all(0)); + + //Rodrigues vector + Affine3(const Vec3& rvec, const Vec3& t = Vec3::all(0)); + + //Combines all contructors above. 
Supports 4x4, 4x3, 3x3, 1x3, 3x1 sizes of data matrix + explicit Affine3(const Mat& data, const Vec3& t = Vec3::all(0)); + + //From 16th element array + explicit Affine3(const float_type* vals); + + static Affine3 Identity(); + + //Rotation matrix + void rotation(const Mat3& R); + + //Rodrigues vector + void rotation(const Vec3& rvec); + + //Combines rotation methods above. Suports 3x3, 1x3, 3x1 sizes of data matrix; + void rotation(const Mat& data); + + void linear(const Mat3& L); + void translation(const Vec3& t); + + Mat3 rotation() const; + Mat3 linear() const; + Vec3 translation() const; + + //Rodrigues vector + Vec3 rvec() const; + + Affine3 inv(int method = cv::DECOMP_SVD) const; + + // a.rotate(R) is equivalent to Affine(R, 0) * a; + Affine3 rotate(const Mat3& R) const; + + // a.rotate(R) is equivalent to Affine(rvec, 0) * a; + Affine3 rotate(const Vec3& rvec) const; + + // a.translate(t) is equivalent to Affine(E, t) * a; + Affine3 translate(const Vec3& t) const; + + // a.concatenate(affine) is equivalent to affine * a; + Affine3 concatenate(const Affine3& affine) const; + + template operator Affine3() const; + + template Affine3 cast() const; + + Mat4 matrix; + +#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H + Affine3(const Eigen::Transform& affine); + Affine3(const Eigen::Transform& affine); + operator Eigen::Transform() const; + operator Eigen::Transform() const; +#endif + }; + + template static + Affine3 operator*(const Affine3& affine1, const Affine3& affine2); + + template static + V operator*(const Affine3& affine, const V& vector); + + typedef Affine3 Affine3f; + typedef Affine3 Affine3d; + + static Vec3f operator*(const Affine3f& affine, const Vec3f& vector); + static Vec3d operator*(const Affine3d& affine, const Vec3d& vector); + + template class DataType< Affine3<_Tp> > + { + public: + typedef Affine3<_Tp> value_type; + typedef Affine3::work_type> work_type; + typedef _Tp channel_type; + + enum { generic_type = 0, + depth = DataType::depth, + channels = 16, + fmt = DataType::fmt + ((channels - 1) << 8), + type = CV_MAKETYPE(depth, channels) + }; + + typedef Vec vec_type; + }; +} + + +/////////////////////////////////////////////////////////////////////////////////// +/// Implementaiton + +template inline +cv::Affine3::Affine3() + : matrix(Mat4::eye()) +{} + +template inline +cv::Affine3::Affine3(const Mat4& affine) + : matrix(affine) +{} + +template inline +cv::Affine3::Affine3(const Mat3& R, const Vec3& t) +{ + rotation(R); + translation(t); + matrix.val[12] = matrix.val[13] = matrix.val[14] = 0; + matrix.val[15] = 1; +} + +template inline +cv::Affine3::Affine3(const Vec3& _rvec, const Vec3& t) +{ + rotation(_rvec); + translation(t); + matrix.val[12] = matrix.val[13] = matrix.val[14] = 0; + matrix.val[15] = 1; +} + +template inline +cv::Affine3::Affine3(const cv::Mat& data, const Vec3& t) +{ + CV_Assert(data.type() == cv::DataType::type); + + if (data.cols == 4 && data.rows == 4) + { + data.copyTo(matrix); + return; + } + else if (data.cols == 4 && data.rows == 3) + { + rotation(data(Rect(0, 0, 3, 3))); + translation(data(Rect(3, 0, 1, 3))); + return; + } + + rotation(data); + translation(t); + matrix.val[12] = matrix.val[13] = matrix.val[14] = 0; + matrix.val[15] = 1; +} + +template inline +cv::Affine3::Affine3(const float_type* vals) : matrix(vals) +{} + +template inline +cv::Affine3 cv::Affine3::Identity() +{ + return Affine3(cv::Affine3::Mat4::eye()); +} + +template inline +void cv::Affine3::rotation(const Mat3& R) +{ + linear(R); +} + +template 
inline +void cv::Affine3::rotation(const Vec3& _rvec) +{ + double rx = _rvec[0], ry = _rvec[1], rz = _rvec[2]; + double theta = std::sqrt(rx*rx + ry*ry + rz*rz); + + if (theta < DBL_EPSILON) + rotation(Mat3::eye()); + else + { + const double I[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 }; + + double c = std::cos(theta); + double s = std::sin(theta); + double c1 = 1. - c; + double itheta = theta ? 1./theta : 0.; + + rx *= itheta; ry *= itheta; rz *= itheta; + + double rrt[] = { rx*rx, rx*ry, rx*rz, rx*ry, ry*ry, ry*rz, rx*rz, ry*rz, rz*rz }; + double _r_x_[] = { 0, -rz, ry, rz, 0, -rx, -ry, rx, 0 }; + Mat3 R; + + // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x] + // where [r_x] is [0 -rz ry; rz 0 -rx; -ry rx 0] + for(int k = 0; k < 9; ++k) + R.val[k] = static_cast(c*I[k] + c1*rrt[k] + s*_r_x_[k]); + + rotation(R); + } +} + +//Combines rotation methods above. Suports 3x3, 1x3, 3x1 sizes of data matrix; +template inline +void cv::Affine3::rotation(const cv::Mat& data) +{ + CV_Assert(data.type() == cv::DataType::type); + + if (data.cols == 3 && data.rows == 3) + { + Mat3 R; + data.copyTo(R); + rotation(R); + } + else if ((data.cols == 3 && data.rows == 1) || (data.cols == 1 && data.rows == 3)) + { + Vec3 _rvec; + data.reshape(1, 3).copyTo(_rvec); + rotation(_rvec); + } + else + CV_Assert(!"Input marix can be 3x3, 1x3 or 3x1"); +} + +template inline +void cv::Affine3::linear(const Mat3& L) +{ + matrix.val[0] = L.val[0]; matrix.val[1] = L.val[1]; matrix.val[ 2] = L.val[2]; + matrix.val[4] = L.val[3]; matrix.val[5] = L.val[4]; matrix.val[ 6] = L.val[5]; + matrix.val[8] = L.val[6]; matrix.val[9] = L.val[7]; matrix.val[10] = L.val[8]; +} + +template inline +void cv::Affine3::translation(const Vec3& t) +{ + matrix.val[3] = t[0]; matrix.val[7] = t[1]; matrix.val[11] = t[2]; +} + +template inline +typename cv::Affine3::Mat3 cv::Affine3::rotation() const +{ + return linear(); +} + +template inline +typename cv::Affine3::Mat3 cv::Affine3::linear() const +{ + typename cv::Affine3::Mat3 R; + R.val[0] = matrix.val[0]; R.val[1] = matrix.val[1]; R.val[2] = matrix.val[ 2]; + R.val[3] = matrix.val[4]; R.val[4] = matrix.val[5]; R.val[5] = matrix.val[ 6]; + R.val[6] = matrix.val[8]; R.val[7] = matrix.val[9]; R.val[8] = matrix.val[10]; + return R; +} + +template inline +typename cv::Affine3::Vec3 cv::Affine3::translation() const +{ + return Vec3(matrix.val[3], matrix.val[7], matrix.val[11]); +} + +template inline +typename cv::Affine3::Vec3 cv::Affine3::rvec() const +{ + cv::Vec3d w; + cv::Matx33d u, vt, R = rotation(); + cv::SVD::compute(R, w, u, vt, cv::SVD::FULL_UV + cv::SVD::MODIFY_A); + R = u * vt; + + double rx = R.val[7] - R.val[5]; + double ry = R.val[2] - R.val[6]; + double rz = R.val[3] - R.val[1]; + + double s = std::sqrt((rx*rx + ry*ry + rz*rz)*0.25); + double c = (R.val[0] + R.val[4] + R.val[8] - 1) * 0.5; + c = c > 1.0 ? 1.0 : c < -1.0 ? -1.0 : c; + double theta = acos(c); + + if( s < 1e-5 ) + { + if( c > 0 ) + rx = ry = rz = 0; + else + { + double t; + t = (R.val[0] + 1) * 0.5; + rx = std::sqrt(std::max(t, 0.0)); + t = (R.val[4] + 1) * 0.5; + ry = std::sqrt(std::max(t, 0.0)) * (R.val[1] < 0 ? -1.0 : 1.0); + t = (R.val[8] + 1) * 0.5; + rz = std::sqrt(std::max(t, 0.0)) * (R.val[2] < 0 ? 
-1.0 : 1.0); + + if( fabs(rx) < fabs(ry) && fabs(rx) < fabs(rz) && (R.val[5] > 0) != (ry*rz > 0) ) + rz = -rz; + theta /= std::sqrt(rx*rx + ry*ry + rz*rz); + rx *= theta; + ry *= theta; + rz *= theta; + } + } + else + { + double vth = 1/(2*s); + vth *= theta; + rx *= vth; ry *= vth; rz *= vth; + } + + return cv::Vec3d(rx, ry, rz); +} + +template inline +cv::Affine3 cv::Affine3::inv(int method) const +{ + return matrix.inv(method); +} + +template inline +cv::Affine3 cv::Affine3::rotate(const Mat3& R) const +{ + Mat3 Lc = linear(); + Vec3 tc = translation(); + Mat4 result; + result.val[12] = result.val[13] = result.val[14] = 0; + result.val[15] = 1; + + for(int j = 0; j < 3; ++j) + { + for(int i = 0; i < 3; ++i) + { + float_type value = 0; + for(int k = 0; k < 3; ++k) + value += R(j, k) * Lc(k, i); + result(j, i) = value; + } + + result(j, 3) = R.row(j).dot(tc.t()); + } + return result; +} + +template inline +cv::Affine3 cv::Affine3::rotate(const Vec3& _rvec) const +{ + return rotate(Affine3f(_rvec).rotation()); +} + +template inline +cv::Affine3 cv::Affine3::translate(const Vec3& t) const +{ + Mat4 m = matrix; + m.val[ 3] += t[0]; + m.val[ 7] += t[1]; + m.val[11] += t[2]; + return m; +} + +template inline +cv::Affine3 cv::Affine3::concatenate(const Affine3& affine) const +{ + return (*this).rotate(affine.rotation()).translate(affine.translation()); +} + +template template inline +cv::Affine3::operator Affine3() const +{ + return Affine3(matrix); +} + +template template inline +cv::Affine3 cv::Affine3::cast() const +{ + return Affine3(matrix); +} + +template inline +cv::Affine3 cv::operator*(const cv::Affine3& affine1, const cv::Affine3& affine2) +{ + return affine2.concatenate(affine1); +} + +template inline +V cv::operator*(const cv::Affine3& affine, const V& v) +{ + const typename Affine3::Mat4& m = affine.matrix; + + V r; + r.x = m.val[0] * v.x + m.val[1] * v.y + m.val[ 2] * v.z + m.val[ 3]; + r.y = m.val[4] * v.x + m.val[5] * v.y + m.val[ 6] * v.z + m.val[ 7]; + r.z = m.val[8] * v.x + m.val[9] * v.y + m.val[10] * v.z + m.val[11]; + return r; +} + +static inline +cv::Vec3f cv::operator*(const cv::Affine3f& affine, const cv::Vec3f& v) +{ + const cv::Matx44f& m = affine.matrix; + cv::Vec3f r; + r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3]; + r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7]; + r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11]; + return r; +} + +static inline +cv::Vec3d cv::operator*(const cv::Affine3d& affine, const cv::Vec3d& v) +{ + const cv::Matx44d& m = affine.matrix; + cv::Vec3d r; + r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3]; + r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7]; + r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11]; + return r; +} + + + +#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H + +template inline +cv::Affine3::Affine3(const Eigen::Transform& affine) +{ + cv::Mat(4, 4, cv::DataType::type, affine.matrix().data()).copyTo(matrix); +} + +template inline +cv::Affine3::Affine3(const Eigen::Transform& affine) +{ + Eigen::Transform a = affine; + cv::Mat(4, 4, cv::DataType::type, a.matrix().data()).copyTo(matrix); +} + +template inline +cv::Affine3::operator Eigen::Transform() const +{ + Eigen::Transform r; + cv::Mat hdr(4, 4, cv::DataType::type, r.matrix().data()); + cv::Mat(matrix, false).copyTo(hdr); + return r; +} + +template inline +cv::Affine3::operator 
Eigen::Transform() const +{ + return this->operator Eigen::Transform(); +} + +#endif /* defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H */ + + +#endif /* __cplusplus */ + +#endif /* __OPENCV_CORE_AFFINE3_HPP__ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/core.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/core.hpp new file mode 100644 index 0000000..2ecb70c --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/core.hpp @@ -0,0 +1,4858 @@ +/*! \file core.hpp + \brief The Core Functionality + */ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_HPP__ +#define __OPENCV_CORE_HPP__ + +#include "opencv2/core/types_c.h" +#include "opencv2/core/version.hpp" + +#ifdef __cplusplus + +#ifndef SKIP_INCLUDES +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif // SKIP_INCLUDES + +/*! 
\namespace cv + Namespace where all the C++ OpenCV functionality resides +*/ +namespace cv { + +#undef abs +#undef min +#undef max +#undef Complex + +using std::vector; +using std::string; +using std::ptrdiff_t; + +template class Size_; +template class Point_; +template class Rect_; +template class Vec; +template class Matx; + +typedef std::string String; + +class Mat; +class SparseMat; +typedef Mat MatND; + +namespace ogl { + class Buffer; + class Texture2D; + class Arrays; +} + +// < Deprecated +class GlBuffer; +class GlTexture; +class GlArrays; +class GlCamera; +// > + +namespace gpu { + class GpuMat; +} + +class CV_EXPORTS MatExpr; +class CV_EXPORTS MatOp_Base; +class CV_EXPORTS MatArg; +class CV_EXPORTS MatConstIterator; + +template class Mat_; +template class MatIterator_; +template class MatConstIterator_; +template class MatCommaInitializer_; + +#if !defined(ANDROID) || (defined(_GLIBCXX_USE_WCHAR_T) && _GLIBCXX_USE_WCHAR_T) +typedef std::basic_string WString; + +CV_EXPORTS string fromUtf16(const WString& str); +CV_EXPORTS WString toUtf16(const string& str); +#endif + +CV_EXPORTS string format( const char* fmt, ... ); +CV_EXPORTS string tempfile( const char* suffix CV_DEFAULT(0)); + +// matrix decomposition types +enum { DECOMP_LU=0, DECOMP_SVD=1, DECOMP_EIG=2, DECOMP_CHOLESKY=3, DECOMP_QR=4, DECOMP_NORMAL=16 }; +enum { NORM_INF=1, NORM_L1=2, NORM_L2=4, NORM_L2SQR=5, NORM_HAMMING=6, NORM_HAMMING2=7, NORM_TYPE_MASK=7, NORM_RELATIVE=8, NORM_MINMAX=32 }; +enum { CMP_EQ=0, CMP_GT=1, CMP_GE=2, CMP_LT=3, CMP_LE=4, CMP_NE=5 }; +enum { GEMM_1_T=1, GEMM_2_T=2, GEMM_3_T=4 }; +enum { DFT_INVERSE=1, DFT_SCALE=2, DFT_ROWS=4, DFT_COMPLEX_OUTPUT=16, DFT_REAL_OUTPUT=32, + DCT_INVERSE = DFT_INVERSE, DCT_ROWS=DFT_ROWS }; + + +/*! + The standard OpenCV exception class. + Instances of the class are thrown by various functions and methods in the case of critical errors. + */ +class CV_EXPORTS Exception : public std::exception +{ +public: + /*! + Default constructor + */ + Exception(); + /*! + Full constructor. Normally the constuctor is not called explicitly. + Instead, the macros CV_Error(), CV_Error_() and CV_Assert() are used. + */ + Exception(int _code, const string& _err, const string& _func, const string& _file, int _line); + virtual ~Exception() throw(); + + /*! + \return the error description and the context as a text string. + */ + virtual const char *what() const throw(); + void formatMessage(); + + string msg; ///< the formatted error message + + int code; ///< error code @see CVStatus + string err; ///< error description + string func; ///< function name. Available only when the compiler supports getting it + string file; ///< source file name where the error has occured + int line; ///< line number in the source file where the error has occured +}; + + +//! Signals an error and raises the exception. + +/*! + By default the function prints information about the error to stderr, + then it either stops if setBreakOnError() had been called before or raises the exception. + It is possible to alternate error processing by using redirectError(). + + \param exc the exception raisen. + */ +CV_EXPORTS void error( const Exception& exc ); + +//! Sets/resets the break-on-error mode. + +/*! + When the break-on-error mode is set, the default error handler + issues a hardware exception, which can make debugging more convenient. 
+ + \return the previous state + */ +CV_EXPORTS bool setBreakOnError(bool flag); + +typedef int (CV_CDECL *ErrorCallback)( int status, const char* func_name, + const char* err_msg, const char* file_name, + int line, void* userdata ); + +//! Sets the new error handler and the optional user data. + +/*! + The function sets the new error handler, called from cv::error(). + + \param errCallback the new error handler. If NULL, the default error handler is used. + \param userdata the optional user data pointer, passed to the callback. + \param prevUserdata the optional output parameter where the previous user data pointer is stored + + \return the previous error handler +*/ +CV_EXPORTS ErrorCallback redirectError( ErrorCallback errCallback, + void* userdata=0, void** prevUserdata=0); + + +#if defined __GNUC__ +#define CV_Func __func__ +#elif defined _MSC_VER +#define CV_Func __FUNCTION__ +#else +#define CV_Func "" +#endif + +#define CV_Error( code, msg ) cv::error( cv::Exception(code, msg, CV_Func, __FILE__, __LINE__) ) +#define CV_Error_( code, args ) cv::error( cv::Exception(code, cv::format args, CV_Func, __FILE__, __LINE__) ) +#define CV_Assert( expr ) if(!!(expr)) ; else cv::error( cv::Exception(CV_StsAssert, #expr, CV_Func, __FILE__, __LINE__) ) + +#ifdef _DEBUG +#define CV_DbgAssert(expr) CV_Assert(expr) +#else +#define CV_DbgAssert(expr) +#endif + +CV_EXPORTS void glob(String pattern, std::vector& result, bool recursive = false); + +CV_EXPORTS void setNumThreads(int nthreads); +CV_EXPORTS int getNumThreads(); +CV_EXPORTS int getThreadNum(); + +CV_EXPORTS_W const string& getBuildInformation(); + +//! Returns the number of ticks. + +/*! + The function returns the number of ticks since the certain event (e.g. when the machine was turned on). + It can be used to initialize cv::RNG or to measure a function execution time by reading the tick count + before and after the function call. The granularity of ticks depends on the hardware and OS used. Use + cv::getTickFrequency() to convert ticks to seconds. +*/ +CV_EXPORTS_W int64 getTickCount(); + +/*! + Returns the number of ticks per seconds. + + The function returns the number of ticks (as returned by cv::getTickCount()) per second. + The following code computes the execution time in milliseconds: + + \code + double exec_time = (double)getTickCount(); + // do something ... + exec_time = ((double)getTickCount() - exec_time)*1000./getTickFrequency(); + \endcode +*/ +CV_EXPORTS_W double getTickFrequency(); + +/*! + Returns the number of CPU ticks. + + On platforms where the feature is available, the function returns the number of CPU ticks + since the certain event (normally, the system power-on moment). Using this function + one can accurately measure the execution time of very small code fragments, + for which cv::getTickCount() granularity is not enough. +*/ +CV_EXPORTS_W int64 getCPUTickCount(); + +/*! + Returns SSE etc. support status + + The function returns true if certain hardware features are available. + Currently, the following features are recognized: + - CV_CPU_MMX - MMX + - CV_CPU_SSE - SSE + - CV_CPU_SSE2 - SSE 2 + - CV_CPU_SSE3 - SSE 3 + - CV_CPU_SSSE3 - SSSE 3 + - CV_CPU_SSE4_1 - SSE 4.1 + - CV_CPU_SSE4_2 - SSE 4.2 + - CV_CPU_POPCNT - POPCOUNT + - CV_CPU_AVX - AVX + + \note {Note that the function output is not static. 
Once you called cv::useOptimized(false), + most of the hardware acceleration is disabled and thus the function will returns false, + until you call cv::useOptimized(true)} +*/ +CV_EXPORTS_W bool checkHardwareSupport(int feature); + +//! returns the number of CPUs (including hyper-threading) +CV_EXPORTS_W int getNumberOfCPUs(); + +/*! + Allocates memory buffer + + This is specialized OpenCV memory allocation function that returns properly aligned memory buffers. + The usage is identical to malloc(). The allocated buffers must be freed with cv::fastFree(). + If there is not enough memory, the function calls cv::error(), which raises an exception. + + \param bufSize buffer size in bytes + \return the allocated memory buffer. +*/ +CV_EXPORTS void* fastMalloc(size_t bufSize); + +/*! + Frees the memory allocated with cv::fastMalloc + + This is the corresponding deallocation function for cv::fastMalloc(). + When ptr==NULL, the function has no effect. +*/ +CV_EXPORTS void fastFree(void* ptr); + +template static inline _Tp* allocate(size_t n) +{ + return new _Tp[n]; +} + +template static inline void deallocate(_Tp* ptr, size_t) +{ + delete[] ptr; +} + +/*! + Aligns pointer by the certain number of bytes + + This small inline function aligns the pointer by the certian number of bytes by shifting + it forward by 0 or a positive offset. +*/ +template static inline _Tp* alignPtr(_Tp* ptr, int n=(int)sizeof(_Tp)) +{ + return (_Tp*)(((size_t)ptr + n-1) & -n); +} + +/*! + Aligns buffer size by the certain number of bytes + + This small inline function aligns a buffer size by the certian number of bytes by enlarging it. +*/ +static inline size_t alignSize(size_t sz, int n) +{ + assert((n & (n - 1)) == 0); // n is a power of 2 + return (sz + n-1) & -n; +} + +/*! + Turns on/off available optimization + + The function turns on or off the optimized code in OpenCV. Some optimization can not be enabled + or disabled, but, for example, most of SSE code in OpenCV can be temporarily turned on or off this way. + + \note{Since optimization may imply using special data structures, it may be unsafe + to call this function anywhere in the code. Instead, call it somewhere at the top level.} +*/ +CV_EXPORTS_W void setUseOptimized(bool onoff); + +/*! + Returns the current optimization status + + The function returns the current optimization status, which is controlled by cv::setUseOptimized(). +*/ +CV_EXPORTS_W bool useOptimized(); + +/*! 
+ The STL-compilant memory Allocator based on cv::fastMalloc() and cv::fastFree() +*/ +template class Allocator +{ +public: + typedef _Tp value_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef size_t size_type; + typedef ptrdiff_t difference_type; + template class rebind { typedef Allocator other; }; + + explicit Allocator() {} + ~Allocator() {} + explicit Allocator(Allocator const&) {} + template + explicit Allocator(Allocator const&) {} + + // address + pointer address(reference r) { return &r; } + const_pointer address(const_reference r) { return &r; } + + pointer allocate(size_type count, const void* =0) + { return reinterpret_cast(fastMalloc(count * sizeof (_Tp))); } + + void deallocate(pointer p, size_type) {fastFree(p); } + + size_type max_size() const + { return max(static_cast<_Tp>(-1)/sizeof(_Tp), 1); } + + void construct(pointer p, const _Tp& v) { new(static_cast(p)) _Tp(v); } + void destroy(pointer p) { p->~_Tp(); } +}; + +/////////////////////// Vec (used as element of multi-channel images ///////////////////// + +/*! + A helper class for cv::DataType + + The class is specialized for each fundamental numerical data type supported by OpenCV. + It provides DataDepth::value constant. +*/ +template class DataDepth {}; + +template<> class DataDepth { public: enum { value = CV_8U, fmt=(int)'u' }; }; +template<> class DataDepth { public: enum { value = CV_8U, fmt=(int)'u' }; }; +template<> class DataDepth { public: enum { value = CV_8S, fmt=(int)'c' }; }; +template<> class DataDepth { public: enum { value = CV_8S, fmt=(int)'c' }; }; +template<> class DataDepth { public: enum { value = CV_16U, fmt=(int)'w' }; }; +template<> class DataDepth { public: enum { value = CV_16S, fmt=(int)'s' }; }; +template<> class DataDepth { public: enum { value = CV_32S, fmt=(int)'i' }; }; +// this is temporary solution to support 32-bit unsigned integers +template<> class DataDepth { public: enum { value = CV_32S, fmt=(int)'i' }; }; +template<> class DataDepth { public: enum { value = CV_32F, fmt=(int)'f' }; }; +template<> class DataDepth { public: enum { value = CV_64F, fmt=(int)'d' }; }; +template class DataDepth<_Tp*> { public: enum { value = CV_USRTYPE1, fmt=(int)'r' }; }; + + +////////////////////////////// Small Matrix /////////////////////////// + +/*! + A short numerical vector. + + This template class represents short numerical vectors (of 1, 2, 3, 4 ... elements) + on which you can perform basic arithmetical operations, access individual elements using [] operator etc. + The vectors are allocated on stack, as opposite to std::valarray, std::vector, cv::Mat etc., + which elements are dynamically allocated in the heap. + + The template takes 2 parameters: + -# _Tp element type + -# cn the number of elements + + In addition to the universal notation like Vec, you can use shorter aliases + for the most popular specialized variants of Vec, e.g. Vec3f ~ Vec. + */ + +struct CV_EXPORTS Matx_AddOp {}; +struct CV_EXPORTS Matx_SubOp {}; +struct CV_EXPORTS Matx_ScaleOp {}; +struct CV_EXPORTS Matx_MulOp {}; +struct CV_EXPORTS Matx_MatMulOp {}; +struct CV_EXPORTS Matx_TOp {}; + +template class Matx +{ +public: + typedef _Tp value_type; + typedef Matx<_Tp, (m < n ? m : n), 1> diag_type; + typedef Matx<_Tp, m, n> mat_type; + enum { depth = DataDepth<_Tp>::value, rows = m, cols = n, channels = rows*cols, + type = CV_MAKETYPE(depth, channels) }; + + //! 
default constructor + Matx(); + + Matx(_Tp v0); //!< 1x1 matrix + Matx(_Tp v0, _Tp v1); //!< 1x2 or 2x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2); //!< 1x3 or 3x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 1x4, 2x2 or 4x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 1x5 or 5x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 1x6, 2x3, 3x2 or 6x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 1x7 or 7x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 1x8, 2x4, 4x2 or 8x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 1x9, 3x3 or 9x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 1x10, 2x5 or 5x2 or 10x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11); //!< 1x12, 2x6, 3x4, 4x3, 6x2 or 12x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11, + _Tp v12, _Tp v13, _Tp v14, _Tp v15); //!< 1x16, 4x4 or 16x1 matrix + explicit Matx(const _Tp* vals); //!< initialize from a plain array + + static Matx all(_Tp alpha); + static Matx zeros(); + static Matx ones(); + static Matx eye(); + static Matx diag(const diag_type& d); + static Matx randu(_Tp a, _Tp b); + static Matx randn(_Tp a, _Tp b); + + //! dot product computed with the default precision + _Tp dot(const Matx<_Tp, m, n>& v) const; + + //! dot product computed in double-precision arithmetics + double ddot(const Matx<_Tp, m, n>& v) const; + + //! convertion to another data type + template operator Matx() const; + + //! change the matrix shape + template Matx<_Tp, m1, n1> reshape() const; + + //! extract part of the matrix + template Matx<_Tp, m1, n1> get_minor(int i, int j) const; + + //! extract the matrix row + Matx<_Tp, 1, n> row(int i) const; + + //! extract the matrix column + Matx<_Tp, m, 1> col(int i) const; + + //! extract the matrix diagonal + diag_type diag() const; + + //! transpose the matrix + Matx<_Tp, n, m> t() const; + + //! invert matrix the matrix + Matx<_Tp, n, m> inv(int method=DECOMP_LU) const; + + //! solve linear system + template Matx<_Tp, n, l> solve(const Matx<_Tp, m, l>& rhs, int flags=DECOMP_LU) const; + Vec<_Tp, n> solve(const Vec<_Tp, m>& rhs, int method) const; + + //! multiply two matrices element-wise + Matx<_Tp, m, n> mul(const Matx<_Tp, m, n>& a) const; + + //! element access + const _Tp& operator ()(int i, int j) const; + _Tp& operator ()(int i, int j); + + //! 
1D element access + const _Tp& operator ()(int i) const; + _Tp& operator ()(int i); + + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp); + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp); + template Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp); + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp); + template Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp); + Matx(const Matx<_Tp, n, m>& a, Matx_TOp); + + _Tp val[m*n]; //< matrix elements +}; + + +typedef Matx Matx12f; +typedef Matx Matx12d; +typedef Matx Matx13f; +typedef Matx Matx13d; +typedef Matx Matx14f; +typedef Matx Matx14d; +typedef Matx Matx16f; +typedef Matx Matx16d; + +typedef Matx Matx21f; +typedef Matx Matx21d; +typedef Matx Matx31f; +typedef Matx Matx31d; +typedef Matx Matx41f; +typedef Matx Matx41d; +typedef Matx Matx61f; +typedef Matx Matx61d; + +typedef Matx Matx22f; +typedef Matx Matx22d; +typedef Matx Matx23f; +typedef Matx Matx23d; +typedef Matx Matx32f; +typedef Matx Matx32d; + +typedef Matx Matx33f; +typedef Matx Matx33d; + +typedef Matx Matx34f; +typedef Matx Matx34d; +typedef Matx Matx43f; +typedef Matx Matx43d; + +typedef Matx Matx44f; +typedef Matx Matx44d; +typedef Matx Matx66f; +typedef Matx Matx66d; + + +/*! + A short numerical vector. + + This template class represents short numerical vectors (of 1, 2, 3, 4 ... elements) + on which you can perform basic arithmetical operations, access individual elements using [] operator etc. + The vectors are allocated on stack, as opposite to std::valarray, std::vector, cv::Mat etc., + which elements are dynamically allocated in the heap. + + The template takes 2 parameters: + -# _Tp element type + -# cn the number of elements + + In addition to the universal notation like Vec, you can use shorter aliases + for the most popular specialized variants of Vec, e.g. Vec3f ~ Vec. +*/ +template class Vec : public Matx<_Tp, cn, 1> +{ +public: + typedef _Tp value_type; + enum { depth = DataDepth<_Tp>::value, channels = cn, type = CV_MAKETYPE(depth, channels) }; + + //! default constructor + Vec(); + + Vec(_Tp v0); //!< 1-element vector constructor + Vec(_Tp v0, _Tp v1); //!< 2-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2); //!< 3-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 4-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 5-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 6-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 7-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 8-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 9-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 10-element vector constructor + explicit Vec(const _Tp* values); + + Vec(const Vec<_Tp, cn>& v); + + static Vec all(_Tp alpha); + + //! per-element multiplication + Vec mul(const Vec<_Tp, cn>& v) const; + + //! conjugation (makes sense for complex numbers and quaternions) + Vec conj() const; + + /*! + cross product of the two 3D vectors. + + For other dimensionalities the exception is raised + */ + Vec cross(const Vec& v) const; + //! convertion to another data type + template operator Vec() const; + //! conversion to 4-element CvScalar. + operator CvScalar() const; + + /*! 
element access */ + const _Tp& operator [](int i) const; + _Tp& operator[](int i); + const _Tp& operator ()(int i) const; + _Tp& operator ()(int i); + + Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp); + Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp); + template Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp); +}; + + +/* \typedef + + Shorter aliases for the most popular specializations of Vec +*/ +typedef Vec Vec2b; +typedef Vec Vec3b; +typedef Vec Vec4b; + +typedef Vec Vec2s; +typedef Vec Vec3s; +typedef Vec Vec4s; + +typedef Vec Vec2w; +typedef Vec Vec3w; +typedef Vec Vec4w; + +typedef Vec Vec2i; +typedef Vec Vec3i; +typedef Vec Vec4i; +typedef Vec Vec6i; +typedef Vec Vec8i; + +typedef Vec Vec2f; +typedef Vec Vec3f; +typedef Vec Vec4f; +typedef Vec Vec6f; + +typedef Vec Vec2d; +typedef Vec Vec3d; +typedef Vec Vec4d; +typedef Vec Vec6d; + + +//////////////////////////////// Complex ////////////////////////////// + +/*! + A complex number class. + + The template class is similar and compatible with std::complex, however it provides slightly + more convenient access to the real and imaginary parts using through the simple field access, as opposite + to std::complex::real() and std::complex::imag(). +*/ +template class Complex +{ +public: + + //! constructors + Complex(); + Complex( _Tp _re, _Tp _im=0 ); + Complex( const std::complex<_Tp>& c ); + + //! conversion to another data type + template operator Complex() const; + //! conjugation + Complex conj() const; + //! conversion to std::complex + operator std::complex<_Tp>() const; + + _Tp re, im; //< the real and the imaginary parts +}; + + +/*! + \typedef +*/ +typedef Complex Complexf; +typedef Complex Complexd; + + +//////////////////////////////// Point_ //////////////////////////////// + +/*! + template 2D point class. + + The class defines a point in 2D space. Data type of the point coordinates is specified + as a template parameter. There are a few shorter aliases available for user convenience. + See cv::Point, cv::Point2i, cv::Point2f and cv::Point2d. +*/ +template class Point_ +{ +public: + typedef _Tp value_type; + + // various constructors + Point_(); + Point_(_Tp _x, _Tp _y); + Point_(const Point_& pt); + Point_(const CvPoint& pt); + Point_(const CvPoint2D32f& pt); + Point_(const Size_<_Tp>& sz); + Point_(const Vec<_Tp, 2>& v); + + Point_& operator = (const Point_& pt); + //! conversion to another data type + template operator Point_<_Tp2>() const; + + //! conversion to the old-style C structures + operator CvPoint() const; + operator CvPoint2D32f() const; + operator Vec<_Tp, 2>() const; + + //! dot product + _Tp dot(const Point_& pt) const; + //! dot product computed in double-precision arithmetics + double ddot(const Point_& pt) const; + //! cross-product + double cross(const Point_& pt) const; + //! checks whether the point is inside the specified rectangle + bool inside(const Rect_<_Tp>& r) const; + + _Tp x, y; //< the point coordinates +}; + +/*! + template 3D point class. + + The class defines a point in 3D space. Data type of the point coordinates is specified + as a template parameter. + + \see cv::Point3i, cv::Point3f and cv::Point3d +*/ +template class Point3_ +{ +public: + typedef _Tp value_type; + + // various constructors + Point3_(); + Point3_(_Tp _x, _Tp _y, _Tp _z); + Point3_(const Point3_& pt); + explicit Point3_(const Point_<_Tp>& pt); + Point3_(const CvPoint3D32f& pt); + Point3_(const Vec<_Tp, 3>& v); + + Point3_& operator = (const Point3_& pt); + //! 
conversion to another data type + template operator Point3_<_Tp2>() const; + //! conversion to the old-style CvPoint... + operator CvPoint3D32f() const; + //! conversion to cv::Vec<> + operator Vec<_Tp, 3>() const; + + //! dot product + _Tp dot(const Point3_& pt) const; + //! dot product computed in double-precision arithmetics + double ddot(const Point3_& pt) const; + //! cross product of the 2 3D points + Point3_ cross(const Point3_& pt) const; + + _Tp x, y, z; //< the point coordinates +}; + +//////////////////////////////// Size_ //////////////////////////////// + +/*! + The 2D size class + + The class represents the size of a 2D rectangle, image size, matrix size etc. + Normally, cv::Size ~ cv::Size_ is used. +*/ +template class Size_ +{ +public: + typedef _Tp value_type; + + //! various constructors + Size_(); + Size_(_Tp _width, _Tp _height); + Size_(const Size_& sz); + Size_(const CvSize& sz); + Size_(const CvSize2D32f& sz); + Size_(const Point_<_Tp>& pt); + + Size_& operator = (const Size_& sz); + //! the area (width*height) + _Tp area() const; + + //! conversion of another data type. + template operator Size_<_Tp2>() const; + + //! conversion to the old-style OpenCV types + operator CvSize() const; + operator CvSize2D32f() const; + + _Tp width, height; // the width and the height +}; + +//////////////////////////////// Rect_ //////////////////////////////// + +/*! + The 2D up-right rectangle class + + The class represents a 2D rectangle with coordinates of the specified data type. + Normally, cv::Rect ~ cv::Rect_ is used. +*/ +template class Rect_ +{ +public: + typedef _Tp value_type; + + //! various constructors + Rect_(); + Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height); + Rect_(const Rect_& r); + Rect_(const CvRect& r); + Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz); + Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2); + + Rect_& operator = ( const Rect_& r ); + //! the top-left corner + Point_<_Tp> tl() const; + //! the bottom-right corner + Point_<_Tp> br() const; + + //! size (width, height) of the rectangle + Size_<_Tp> size() const; + //! area (width*height) of the rectangle + _Tp area() const; + + //! conversion to another data type + template operator Rect_<_Tp2>() const; + //! conversion to the old-style CvRect + operator CvRect() const; + + //! checks whether the rectangle contains the point + bool contains(const Point_<_Tp>& pt) const; + + _Tp x, y, width, height; //< the top-left corner, as well as width and height of the rectangle +}; + + +/*! + \typedef + + shorter aliases for the most popular cv::Point_<>, cv::Size_<> and cv::Rect_<> specializations +*/ +typedef Point_ Point2i; +typedef Point2i Point; +typedef Size_ Size2i; +typedef Size_ Size2d; +typedef Size2i Size; +typedef Rect_ Rect; +typedef Point_ Point2f; +typedef Point_ Point2d; +typedef Size_ Size2f; +typedef Point3_ Point3i; +typedef Point3_ Point3f; +typedef Point3_ Point3d; + + +/*! + The rotated 2D rectangle. + + The class represents rotated (i.e. not up-right) rectangles on a plane. + Each rectangle is described by the center point (mass center), length of each side + (represented by cv::Size2f structure) and the rotation angle in degrees. +*/ +class CV_EXPORTS RotatedRect +{ +public: + //! various constructors + RotatedRect(); + RotatedRect(const Point2f& center, const Size2f& size, float angle); + RotatedRect(const CvBox2D& box); + + //! returns 4 vertices of the rectangle + void points(Point2f pts[]) const; + //! 
returns the minimal up-right rectangle containing the rotated rectangle + Rect boundingRect() const; + //! conversion to the old-style CvBox2D structure + operator CvBox2D() const; + + Point2f center; //< the rectangle mass center + Size2f size; //< width and height of the rectangle + float angle; //< the rotation angle. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle. +}; + +//////////////////////////////// Scalar_ /////////////////////////////// + +/*! + The template scalar class. + + This is partially specialized cv::Vec class with the number of elements = 4, i.e. a short vector of four elements. + Normally, cv::Scalar ~ cv::Scalar_ is used. +*/ +template class Scalar_ : public Vec<_Tp, 4> +{ +public: + //! various constructors + Scalar_(); + Scalar_(_Tp v0, _Tp v1, _Tp v2=0, _Tp v3=0); + Scalar_(const CvScalar& s); + Scalar_(_Tp v0); + + //! returns a scalar with all elements set to v0 + static Scalar_<_Tp> all(_Tp v0); + //! conversion to the old-style CvScalar + operator CvScalar() const; + + //! conversion to another data type + template operator Scalar_() const; + + //! per-element product + Scalar_<_Tp> mul(const Scalar_<_Tp>& t, double scale=1 ) const; + + // returns (v0, -v1, -v2, -v3) + Scalar_<_Tp> conj() const; + + // returns true iff v1 == v2 == v3 == 0 + bool isReal() const; +}; + +typedef Scalar_ Scalar; + +CV_EXPORTS void scalarToRawData(const Scalar& s, void* buf, int type, int unroll_to=0); + +//////////////////////////////// Range ///////////////////////////////// + +/*! + The 2D range class + + This is the class used to specify a continuous subsequence, i.e. part of a contour, or a column span in a matrix. +*/ +class CV_EXPORTS Range +{ +public: + Range(); + Range(int _start, int _end); + Range(const CvSlice& slice); + int size() const; + bool empty() const; + static Range all(); + operator CvSlice() const; + + int start, end; +}; + +/////////////////////////////// DataType //////////////////////////////// + +/*! + Informative template class for OpenCV "scalars". + + The class is specialized for each primitive numerical type supported by OpenCV (such as unsigned char or float), + as well as for more complex types, like cv::Complex<>, std::complex<>, cv::Vec<> etc. + The common property of all such types (called "scalars", do not confuse it with cv::Scalar_) + is that each of them is basically a tuple of numbers of the same type. Each "scalar" can be represented + by the depth id (CV_8U ... CV_64F) and the number of channels. + OpenCV matrices, 2D or nD, dense or sparse, can store "scalars", + as long as the number of channels does not exceed CV_CN_MAX. 
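+
+ For example, generic code can query these traits directly (a minimal illustrative
+ sketch; the values noted in the comments simply restate the standard OpenCV
+ depth/channel constants):
+
+ \code
+ int t0 = cv::DataType<float>::type;              // == CV_32F, 1 channel
+ int t1 = cv::DataType<cv::Vec<uchar, 3> >::type; // == CV_8UC3, 3 channels
+ int ch = cv::DataType<cv::Point2f>::channels;    // == 2
+ \endcode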
+*/ +template class DataType +{ +public: + typedef _Tp value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 1, depth = -1, channels = 1, fmt=0, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef bool value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef uchar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef schar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef schar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef ushort value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef short value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef int value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef float value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef double value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template class DataType > +{ +public: + typedef Matx<_Tp, m, n> value_type; + typedef Matx::work_type, m, n> work_type; + typedef _Tp channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = m*n, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template class DataType > +{ +public: + typedef Vec<_Tp, cn> value_type; + typedef Vec::work_type, cn> work_type; + typedef _Tp channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = cn, + fmt = ((channels-1)<<8) + 
DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template class DataType > +{ +public: + typedef std::complex<_Tp> value_type; + typedef value_type work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Complex<_Tp> value_type; + typedef value_type work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Point_<_Tp> value_type; + typedef Point_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Point3_<_Tp> value_type; + typedef Point3_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 3, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Size_<_Tp> value_type; + typedef Size_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Rect_<_Tp> value_type; + typedef Rect_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 4, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Scalar_<_Tp> value_type; + typedef Scalar_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 4, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template<> class DataType +{ +public: + typedef Range value_type; + typedef value_type work_type; + typedef int channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +//////////////////// generic_type ref-counting pointer class for C/C++ objects //////////////////////// + +/*! + Smart pointer to dynamically allocated objects. + + This is template pointer-wrapping class that stores the associated reference counter along with the + object pointer. The class is similar to std::smart_ptr<> from the recent addons to the C++ standard, + but is shorter to write :) and self-contained (i.e. does add any dependency on the compiler or an external library). + + Basically, you can use "Ptr ptr" (or faster "const Ptr& ptr" for read-only access) + everywhere instead of "MyObjectType* ptr", where MyObjectType is some C structure or a C++ class. 
+ To make it all work, you need to specialize Ptr<>::delete_obj(), like: + + \code + template<> void Ptr::delete_obj() { call_destructor_func(obj); } + \endcode + + \note{if MyObjectType is a C++ class with a destructor, you do not need to specialize delete_obj(), + since the default implementation calls "delete obj;"} + + \note{Another good property of the class is that the operations on the reference counter are atomic, + i.e. it is safe to use the class in multi-threaded applications} +*/ +template class Ptr +{ +public: + //! empty constructor + Ptr(); + //! take ownership of the pointer. The associated reference counter is allocated and set to 1 + Ptr(_Tp* _obj); + //! calls release() + ~Ptr(); + //! copy constructor. Copies the members and calls addref() + Ptr(const Ptr& ptr); + template Ptr(const Ptr<_Tp2>& ptr); + //! copy operator. Calls ptr.addref() and release() before copying the members + Ptr& operator = (const Ptr& ptr); + //! increments the reference counter + void addref(); + //! decrements the reference counter. If it reaches 0, delete_obj() is called + void release(); + //! deletes the object. Override if needed + void delete_obj(); + //! returns true iff obj==NULL + bool empty() const; + + //! cast pointer to another type + template Ptr<_Tp2> ptr(); + template const Ptr<_Tp2> ptr() const; + + //! helper operators making "Ptr ptr" use very similar to "T* ptr". + _Tp* operator -> (); + const _Tp* operator -> () const; + + operator _Tp* (); + operator const _Tp*() const; + + _Tp* obj; //< the object pointer. + int* refcount; //< the associated reference counter +}; + + +//////////////////////// Input/Output Array Arguments ///////////////////////////////// + +/*! + Proxy datatype for passing Mat's and vector<>'s as input parameters + */ +class CV_EXPORTS _InputArray +{ +public: + enum { + KIND_SHIFT = 16, + FIXED_TYPE = 0x8000 << KIND_SHIFT, + FIXED_SIZE = 0x4000 << KIND_SHIFT, + KIND_MASK = ~(FIXED_TYPE|FIXED_SIZE) - (1 << KIND_SHIFT) + 1, + + NONE = 0 << KIND_SHIFT, + MAT = 1 << KIND_SHIFT, + MATX = 2 << KIND_SHIFT, + STD_VECTOR = 3 << KIND_SHIFT, + STD_VECTOR_VECTOR = 4 << KIND_SHIFT, + STD_VECTOR_MAT = 5 << KIND_SHIFT, + EXPR = 6 << KIND_SHIFT, + OPENGL_BUFFER = 7 << KIND_SHIFT, + OPENGL_TEXTURE = 8 << KIND_SHIFT, + GPU_MAT = 9 << KIND_SHIFT, + OCL_MAT =10 << KIND_SHIFT + }; + _InputArray(); + + _InputArray(const Mat& m); + _InputArray(const MatExpr& expr); + template _InputArray(const _Tp* vec, int n); + template _InputArray(const vector<_Tp>& vec); + template _InputArray(const vector >& vec); + _InputArray(const vector& vec); + template _InputArray(const vector >& vec); + template _InputArray(const Mat_<_Tp>& m); + template _InputArray(const Matx<_Tp, m, n>& matx); + _InputArray(const Scalar& s); + _InputArray(const double& val); + // < Deprecated + _InputArray(const GlBuffer& buf); + _InputArray(const GlTexture& tex); + // > + _InputArray(const gpu::GpuMat& d_mat); + _InputArray(const ogl::Buffer& buf); + _InputArray(const ogl::Texture2D& tex); + + virtual Mat getMat(int i=-1) const; + virtual void getMatVector(vector& mv) const; + // < Deprecated + virtual GlBuffer getGlBuffer() const; + virtual GlTexture getGlTexture() const; + // > + virtual gpu::GpuMat getGpuMat() const; + /*virtual*/ ogl::Buffer getOGlBuffer() const; + /*virtual*/ ogl::Texture2D getOGlTexture2D() const; + + virtual int kind() const; + virtual Size size(int i=-1) const; + virtual size_t total(int i=-1) const; + virtual int type(int i=-1) const; + virtual int depth(int i=-1) const; + virtual int 
channels(int i=-1) const; + virtual bool empty() const; + +#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY + virtual ~_InputArray(); +#endif + + int flags; + void* obj; + Size sz; +}; + + +enum +{ + DEPTH_MASK_8U = 1 << CV_8U, + DEPTH_MASK_8S = 1 << CV_8S, + DEPTH_MASK_16U = 1 << CV_16U, + DEPTH_MASK_16S = 1 << CV_16S, + DEPTH_MASK_32S = 1 << CV_32S, + DEPTH_MASK_32F = 1 << CV_32F, + DEPTH_MASK_64F = 1 << CV_64F, + DEPTH_MASK_ALL = (DEPTH_MASK_64F<<1)-1, + DEPTH_MASK_ALL_BUT_8S = DEPTH_MASK_ALL & ~DEPTH_MASK_8S, + DEPTH_MASK_FLT = DEPTH_MASK_32F + DEPTH_MASK_64F +}; + + +/*! + Proxy datatype for passing Mat's and vector<>'s as input parameters + */ +class CV_EXPORTS _OutputArray : public _InputArray +{ +public: + _OutputArray(); + + _OutputArray(Mat& m); + template _OutputArray(vector<_Tp>& vec); + template _OutputArray(vector >& vec); + _OutputArray(vector& vec); + template _OutputArray(vector >& vec); + template _OutputArray(Mat_<_Tp>& m); + template _OutputArray(Matx<_Tp, m, n>& matx); + template _OutputArray(_Tp* vec, int n); + _OutputArray(gpu::GpuMat& d_mat); + _OutputArray(ogl::Buffer& buf); + _OutputArray(ogl::Texture2D& tex); + + _OutputArray(const Mat& m); + template _OutputArray(const vector<_Tp>& vec); + template _OutputArray(const vector >& vec); + _OutputArray(const vector& vec); + template _OutputArray(const vector >& vec); + template _OutputArray(const Mat_<_Tp>& m); + template _OutputArray(const Matx<_Tp, m, n>& matx); + template _OutputArray(const _Tp* vec, int n); + _OutputArray(const gpu::GpuMat& d_mat); + _OutputArray(const ogl::Buffer& buf); + _OutputArray(const ogl::Texture2D& tex); + + virtual bool fixedSize() const; + virtual bool fixedType() const; + virtual bool needed() const; + virtual Mat& getMatRef(int i=-1) const; + /*virtual*/ gpu::GpuMat& getGpuMatRef() const; + /*virtual*/ ogl::Buffer& getOGlBufferRef() const; + /*virtual*/ ogl::Texture2D& getOGlTexture2DRef() const; + virtual void create(Size sz, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + virtual void create(int rows, int cols, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + virtual void release() const; + virtual void clear() const; + +#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY + virtual ~_OutputArray(); +#endif +}; + +typedef const _InputArray& InputArray; +typedef InputArray InputArrayOfArrays; +typedef const _OutputArray& OutputArray; +typedef OutputArray OutputArrayOfArrays; +typedef OutputArray InputOutputArray; +typedef OutputArray InputOutputArrayOfArrays; + +CV_EXPORTS OutputArray noArray(); + +/////////////////////////////////////// Mat /////////////////////////////////////////// + +enum { MAGIC_MASK=0xFFFF0000, TYPE_MASK=0x00000FFF, DEPTH_MASK=7 }; + +static inline size_t getElemSize(int type) { return CV_ELEM_SIZE(type); } + +/*! + Custom array allocator + +*/ +class CV_EXPORTS MatAllocator +{ +public: + MatAllocator() {} + virtual ~MatAllocator() {} + virtual void allocate(int dims, const int* sizes, int type, int*& refcount, + uchar*& datastart, uchar*& data, size_t* step) = 0; + virtual void deallocate(int* refcount, uchar* datastart, uchar* data) = 0; +}; + +/*! + The n-dimensional matrix class. + + The class represents an n-dimensional dense numerical array that can act as + a matrix, image, optical flow map, 3-focal tensor etc. 
+ It is very similar to the CvMat and CvMatND types from earlier versions of OpenCV, + and similarly to those types, the matrix can be multi-channel. It also fully supports the ROI mechanism. + + There are many different ways to create a cv::Mat object. Here are some of the most popular ones: +
    +
  • using the cv::Mat::create(nrows, ncols, type) method or + the similar cv::Mat::Mat(nrows, ncols, type[, fill_value]) constructor. + A new matrix of the specified size and type will be allocated. + "type" has the same meaning as in the cvCreateMat function, + e.g. CV_8UC1 means an 8-bit single-channel matrix, CV_32FC2 means a 2-channel (i.e. complex) + floating-point matrix, etc.: + + \code + // make a 7x7 complex matrix filled with 1+3j. + cv::Mat M(7,7,CV_32FC2,Scalar(1,3)); + // and now turn M into a 100x60 15-channel 8-bit matrix. + // The old content will be deallocated. + M.create(100,60,CV_8UC(15)); + \endcode + + As noted in the introduction of this chapter, Mat::create() + will only allocate a new matrix when the current matrix dimensionality + or type differs from the one specified. +
  • by using a copy constructor or assignment operator, where the right side can + be a matrix or an expression; see below. Again, as noted in the introduction, + matrix assignment is an O(1) operation because it only copies the header + and increases the reference counter. The cv::Mat::clone() method can be used to get a full + (a.k.a. deep) copy of the matrix when you need it. +
  • by constructing a header for a part of another matrix. It can be a single row, single column, + several rows, several columns, rectangular region in the matrix (called a minor in algebra) or + a diagonal. Such operations are also O(1), because the new header will reference the same data. + You can actually modify a part of the matrix using this feature, e.g. + + \code + // add 5-th row, multiplied by 3 to the 3rd row + M.row(3) = M.row(3) + M.row(5)*3; + + // now copy 7-th column to the 1-st column + // M.col(1) = M.col(7); // this will not work + Mat M1 = M.col(1); + M.col(7).copyTo(M1); + + // create new 320x240 image + cv::Mat img(Size(320,240),CV_8UC3); + // select a roi + cv::Mat roi(img, Rect(10,10,100,100)); + // fill the ROI with (0,255,0) (which is green in RGB space); + // the original 320x240 image will be modified + roi = Scalar(0,255,0); + \endcode + + Thanks to the additional cv::Mat::datastart and cv::Mat::dataend members, it is possible to + compute the relative sub-matrix position in the main "container" matrix using cv::Mat::locateROI(): + + \code + Mat A = Mat::eye(10, 10, CV_32S); + // extracts A columns, 1 (inclusive) to 3 (exclusive). + Mat B = A(Range::all(), Range(1, 3)); + // extracts B rows, 5 (inclusive) to 9 (exclusive). + // that is, C ~ A(Range(5, 9), Range(1, 3)) + Mat C = B(Range(5, 9), Range::all()); + Size size; Point ofs; + C.locateROI(size, ofs); + // size will be (width=10,height=10) and the ofs will be (x=1, y=5) + \endcode + + As in the case of whole matrices, if you need a deep copy, use cv::Mat::clone() method + of the extracted sub-matrices. + +
  • by making a header for user-allocated data. It can be useful for +
      +
    1. processing "foreign" data using OpenCV (e.g. when you implement + a DirectShow filter or a processing module for gstreamer etc.), for example: + + \code + void process_video_frame(const unsigned char* pixels, + int width, int height, int step) + { + cv::Mat img(height, width, CV_8UC3, pixels, step); + cv::GaussianBlur(img, img, cv::Size(7,7), 1.5, 1.5); + } + \endcode + +
    2. for quick initialization of small matrices and/or super-fast element access + + \code + double m[3][3] = {{a, b, c}, {d, e, f}, {g, h, i}}; + cv::Mat M = cv::Mat(3, 3, CV_64F, m).inv(); + \endcode +
    + + Partial yet very common instances of this "user-allocated data" case are conversions + from CvMat and IplImage to cv::Mat. For this purpose there are special constructors + taking pointers to CvMat or IplImage and an optional + flag indicating whether to copy the data or not. + + Backward conversion from cv::Mat to CvMat or IplImage is provided via the cast operators + cv::Mat::operator CvMat() and cv::Mat::operator IplImage(). + The operators do not copy the data. + + + \code + IplImage* img = cvLoadImage("greatwave.jpg", 1); + Mat mtx(img); // convert IplImage* -> cv::Mat + CvMat oldmat = mtx; // convert cv::Mat -> CvMat + CV_Assert(oldmat.cols == img->width && oldmat.rows == img->height && + oldmat.data.ptr == (uchar*)img->imageData && oldmat.step == img->widthStep); + \endcode + +
  • by using MATLAB-style matrix initializers, cv::Mat::zeros(), cv::Mat::ones(), cv::Mat::eye(), e.g.: + + \code + // create a double-precision identity matrix and add it to M. + M += Mat::eye(M.rows, M.cols, CV_64F); + \endcode + +
  • by using a comma-separated initializer: + + \code + // create a 3x3 double-precision identity matrix + Mat M = (Mat_<double>(3,3) << 1, 0, 0, 0, 1, 0, 0, 0, 1); + \endcode + + here we first call the constructor of the cv::Mat_ class (described further below) with the proper matrix size, + and then just append the "<<" operator followed by comma-separated values that can be constants, + variables, expressions etc. Also, note the extra parentheses that are needed to avoid compiler errors. +
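+
+ A brief sketch combining two of the styles above (purely illustrative, assuming
+ single-precision data):
+
+ \code
+ Mat A = Mat::zeros(3, 3, CV_32F);                        // MATLAB-style initializer
+ Mat B = (Mat_<float>(3,3) << 1, 2, 3, 4, 5, 6, 7, 8, 9); // comma-separated initializer
+ Mat C = A + B;                                           // matrix expressions work on either
+ \endcode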
+ + Once matrix is created, it will be automatically managed by using reference-counting mechanism + (unless the matrix header is built on top of user-allocated data, + in which case you should handle the data by yourself). + The matrix data will be deallocated when no one points to it; + if you want to release the data pointed by a matrix header before the matrix destructor is called, + use cv::Mat::release(). + + The next important thing to learn about the matrix class is element access. Here is how the matrix is stored. + The elements are stored in row-major order (row by row). The cv::Mat::data member points to the first element of the first row, + cv::Mat::rows contains the number of matrix rows and cv::Mat::cols - the number of matrix columns. There is yet another member, + cv::Mat::step that is used to actually compute address of a matrix element. cv::Mat::step is needed because the matrix can be + a part of another matrix or because there can some padding space in the end of each row for a proper alignment. + + \image html roi.png + + Given these parameters, address of the matrix element M_{ij} is computed as following: + + addr(M_{ij})=M.data + M.step*i + j*M.elemSize() + + if you know the matrix element type, e.g. it is float, then you can use cv::Mat::at() method: + + addr(M_{ij})=&M.at(i,j) + + (where & is used to convert the reference returned by cv::Mat::at() to a pointer). + if you need to process a whole row of matrix, the most efficient way is to get + the pointer to the row first, and then just use plain C operator []: + + \code + // compute sum of positive matrix elements + // (assuming that M is double-precision matrix) + double sum=0; + for(int i = 0; i < M.rows; i++) + { + const double* Mi = M.ptr(i); + for(int j = 0; j < M.cols; j++) + sum += std::max(Mi[j], 0.); + } + \endcode + + Some operations, like the above one, do not actually depend on the matrix shape, + they just process elements of a matrix one by one (or elements from multiple matrices + that are sitting in the same place, e.g. matrix addition). Such operations are called + element-wise and it makes sense to check whether all the input/output matrices are continuous, + i.e. have no gaps in the end of each row, and if yes, process them as a single long row: + + \code + // compute sum of positive matrix elements, optimized variant + double sum=0; + int cols = M.cols, rows = M.rows; + if(M.isContinuous()) + { + cols *= rows; + rows = 1; + } + for(int i = 0; i < rows; i++) + { + const double* Mi = M.ptr(i); + for(int j = 0; j < cols; j++) + sum += std::max(Mi[j], 0.); + } + \endcode + in the case of continuous matrix the outer loop body will be executed just once, + so the overhead will be smaller, which will be especially noticeable in the case of small matrices. + + Finally, there are STL-style iterators that are smart enough to skip gaps between successive rows: + \code + // compute sum of positive matrix elements, iterator-based variant + double sum=0; + MatConstIterator_ it = M.begin(), it_end = M.end(); + for(; it != it_end; ++it) + sum += std::max(*it, 0.); + \endcode + + The matrix iterators are random-access iterators, so they can be passed + to any STL algorithm, including std::sort(). +*/ +class CV_EXPORTS Mat +{ +public: + //! default constructor + Mat(); + //! constructs 2D matrix of the specified size and type + // (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.) + Mat(int rows, int cols, int type); + Mat(Size size, int type); + //! constucts 2D matrix and fills it with the specified value _s. 
+ Mat(int rows, int cols, int type, const Scalar& s); + Mat(Size size, int type, const Scalar& s); + + //! constructs n-dimensional matrix + Mat(int ndims, const int* sizes, int type); + Mat(int ndims, const int* sizes, int type, const Scalar& s); + + //! copy constructor + Mat(const Mat& m); + //! constructor for matrix headers pointing to user-allocated data + Mat(int rows, int cols, int type, void* data, size_t step=AUTO_STEP); + Mat(Size size, int type, void* data, size_t step=AUTO_STEP); + Mat(int ndims, const int* sizes, int type, void* data, const size_t* steps=0); + + //! creates a matrix header for a part of the bigger matrix + Mat(const Mat& m, const Range& rowRange, const Range& colRange=Range::all()); + Mat(const Mat& m, const Rect& roi); + Mat(const Mat& m, const Range* ranges); + //! converts old-style CvMat to the new matrix; the data is not copied by default + Mat(const CvMat* m, bool copyData=false); + //! converts old-style CvMatND to the new matrix; the data is not copied by default + Mat(const CvMatND* m, bool copyData=false); + //! converts old-style IplImage to the new matrix; the data is not copied by default + Mat(const IplImage* img, bool copyData=false); + //! builds matrix from std::vector with or without copying the data + template explicit Mat(const vector<_Tp>& vec, bool copyData=false); + //! builds matrix from cv::Vec; the data is copied by default + template explicit Mat(const Vec<_Tp, n>& vec, bool copyData=true); + //! builds matrix from cv::Matx; the data is copied by default + template explicit Mat(const Matx<_Tp, m, n>& mtx, bool copyData=true); + //! builds matrix from a 2D point + template explicit Mat(const Point_<_Tp>& pt, bool copyData=true); + //! builds matrix from a 3D point + template explicit Mat(const Point3_<_Tp>& pt, bool copyData=true); + //! builds matrix from comma initializer + template explicit Mat(const MatCommaInitializer_<_Tp>& commaInitializer); + + //! download data from GpuMat + explicit Mat(const gpu::GpuMat& m); + + //! destructor - calls release() + ~Mat(); + //! assignment operators + Mat& operator = (const Mat& m); + Mat& operator = (const MatExpr& expr); + + //! returns a new matrix header for the specified row + Mat row(int y) const; + //! returns a new matrix header for the specified column + Mat col(int x) const; + //! ... for the specified row span + Mat rowRange(int startrow, int endrow) const; + Mat rowRange(const Range& r) const; + //! ... for the specified column span + Mat colRange(int startcol, int endcol) const; + Mat colRange(const Range& r) const; + //! ... for the specified diagonal + // (d=0 - the main diagonal, + // >0 - a diagonal from the lower half, + // <0 - a diagonal from the upper half) + Mat diag(int d=0) const; + //! constructs a square diagonal matrix which main diagonal is vector "d" + static Mat diag(const Mat& d); + + //! returns deep copy of the matrix, i.e. the data is copied + Mat clone() const; + //! copies the matrix content to "m". + // It calls m.create(this->size(), this->type()). + void copyTo( OutputArray m ) const; + //! copies those matrix elements to "m" that are marked with non-zero mask elements. + void copyTo( OutputArray m, InputArray mask ) const; + //! converts matrix to another datatype with optional scalng. See cvConvertScale. + void convertTo( OutputArray m, int rtype, double alpha=1, double beta=0 ) const; + + void assignTo( Mat& m, int type=-1 ) const; + + //! sets every matrix element to s + Mat& operator = (const Scalar& s); + //! 
sets some of the matrix elements to s, according to the mask + Mat& setTo(InputArray value, InputArray mask=noArray()); + //! creates alternative matrix header for the same data, with different + // number of channels and/or different number of rows. see cvReshape. + Mat reshape(int cn, int rows=0) const; + Mat reshape(int cn, int newndims, const int* newsz) const; + + //! matrix transposition by means of matrix expressions + MatExpr t() const; + //! matrix inversion by means of matrix expressions + MatExpr inv(int method=DECOMP_LU) const; + //! per-element matrix multiplication by means of matrix expressions + MatExpr mul(InputArray m, double scale=1) const; + + //! computes cross-product of 2 3D vectors + Mat cross(InputArray m) const; + //! computes dot-product + double dot(InputArray m) const; + + //! Matlab-style matrix initialization + static MatExpr zeros(int rows, int cols, int type); + static MatExpr zeros(Size size, int type); + static MatExpr zeros(int ndims, const int* sz, int type); + static MatExpr ones(int rows, int cols, int type); + static MatExpr ones(Size size, int type); + static MatExpr ones(int ndims, const int* sz, int type); + static MatExpr eye(int rows, int cols, int type); + static MatExpr eye(Size size, int type); + + //! allocates new matrix data unless the matrix already has specified size and type. + // previous data is unreferenced if needed. + void create(int rows, int cols, int type); + void create(Size size, int type); + void create(int ndims, const int* sizes, int type); + + //! increases the reference counter; use with care to avoid memleaks + void addref(); + //! decreases reference counter; + // deallocates the data when reference counter reaches 0. + void release(); + + //! deallocates the matrix data + void deallocate(); + //! internal use function; properly re-allocates _size, _step arrays + void copySize(const Mat& m); + + //! reserves enough space to fit sz hyper-planes + void reserve(size_t sz); + //! resizes matrix to the specified number of hyper-planes + void resize(size_t sz); + //! resizes matrix to the specified number of hyper-planes; initializes the newly added elements + void resize(size_t sz, const Scalar& s); + //! internal function + void push_back_(const void* elem); + //! adds element to the end of 1d matrix (or possibly multiple elements when _Tp=Mat) + template void push_back(const _Tp& elem); + template void push_back(const Mat_<_Tp>& elem); + void push_back(const Mat& m); + //! removes several hyper-planes from bottom of the matrix + void pop_back(size_t nelems=1); + + //! locates matrix header within a parent matrix. See below + void locateROI( Size& wholeSize, Point& ofs ) const; + //! moves/resizes the current matrix ROI inside the parent matrix. + Mat& adjustROI( int dtop, int dbottom, int dleft, int dright ); + //! extracts a rectangular sub-matrix + // (this is a generalized form of row, rowRange etc.) + Mat operator()( Range rowRange, Range colRange ) const; + Mat operator()( const Rect& roi ) const; + Mat operator()( const Range* ranges ) const; + + //! converts header to CvMat; no data is copied + operator CvMat() const; + //! converts header to CvMatND; no data is copied + operator CvMatND() const; + //! converts header to IplImage; no data is copied + operator IplImage() const; + + template operator vector<_Tp>() const; + template operator Vec<_Tp, n>() const; + template operator Matx<_Tp, m, n>() const; + + //! returns true iff the matrix data is continuous + // (i.e. when there are no gaps between successive rows). 
+ // similar to CV_IS_MAT_CONT(cvmat->type) + bool isContinuous() const; + + //! returns true if the matrix is a submatrix of another matrix + bool isSubmatrix() const; + + //! returns element size in bytes, + // similar to CV_ELEM_SIZE(cvmat->type) + size_t elemSize() const; + //! returns the size of element channel in bytes. + size_t elemSize1() const; + //! returns element type, similar to CV_MAT_TYPE(cvmat->type) + int type() const; + //! returns element type, similar to CV_MAT_DEPTH(cvmat->type) + int depth() const; + //! returns element type, similar to CV_MAT_CN(cvmat->type) + int channels() const; + //! returns step/elemSize1() + size_t step1(int i=0) const; + //! returns true if matrix data is NULL + bool empty() const; + //! returns the total number of matrix elements + size_t total() const; + + //! returns N if the matrix is 1-channel (N x ptdim) or ptdim-channel (1 x N) or (N x 1); negative number otherwise + int checkVector(int elemChannels, int depth=-1, bool requireContinuous=true) const; + + //! returns pointer to i0-th submatrix along the dimension #0 + uchar* ptr(int i0=0); + const uchar* ptr(int i0=0) const; + + //! returns pointer to (i0,i1) submatrix along the dimensions #0 and #1 + uchar* ptr(int i0, int i1); + const uchar* ptr(int i0, int i1) const; + + //! returns pointer to (i0,i1,i3) submatrix along the dimensions #0, #1, #2 + uchar* ptr(int i0, int i1, int i2); + const uchar* ptr(int i0, int i1, int i2) const; + + //! returns pointer to the matrix element + uchar* ptr(const int* idx); + //! returns read-only pointer to the matrix element + const uchar* ptr(const int* idx) const; + + template uchar* ptr(const Vec& idx); + template const uchar* ptr(const Vec& idx) const; + + //! template version of the above method + template _Tp* ptr(int i0=0); + template const _Tp* ptr(int i0=0) const; + + template _Tp* ptr(int i0, int i1); + template const _Tp* ptr(int i0, int i1) const; + + template _Tp* ptr(int i0, int i1, int i2); + template const _Tp* ptr(int i0, int i1, int i2) const; + + template _Tp* ptr(const int* idx); + template const _Tp* ptr(const int* idx) const; + + template _Tp* ptr(const Vec& idx); + template const _Tp* ptr(const Vec& idx) const; + + //! the same as above, with the pointer dereferencing + template _Tp& at(int i0=0); + template const _Tp& at(int i0=0) const; + + template _Tp& at(int i0, int i1); + template const _Tp& at(int i0, int i1) const; + + template _Tp& at(int i0, int i1, int i2); + template const _Tp& at(int i0, int i1, int i2) const; + + template _Tp& at(const int* idx); + template const _Tp& at(const int* idx) const; + + template _Tp& at(const Vec& idx); + template const _Tp& at(const Vec& idx) const; + + //! special versions for 2D arrays (especially convenient for referencing image pixels) + template _Tp& at(Point pt); + template const _Tp& at(Point pt) const; + + //! template methods for iteration over matrix elements. + // the iterators take care of skipping gaps in the end of rows (if any) + template MatIterator_<_Tp> begin(); + template MatIterator_<_Tp> end(); + template MatConstIterator_<_Tp> begin() const; + template MatConstIterator_<_Tp> end() const; + + enum { MAGIC_VAL=0x42FF0000, AUTO_STEP=0, CONTINUOUS_FLAG=CV_MAT_CONT_FLAG, SUBMATRIX_FLAG=CV_SUBMAT_FLAG }; + + /*! includes several bit-fields: + - the magic signature + - continuity flag + - depth + - number of channels + */ + int flags; + //! the matrix dimensionality, >= 2 + int dims; + //! 
the number of rows and columns or (-1, -1) when the matrix has more than 2 dimensions + int rows, cols; + //! pointer to the data + uchar* data; + + //! pointer to the reference counter; + // when matrix points to user-allocated data, the pointer is NULL + int* refcount; + + //! helper fields used in locateROI and adjustROI + uchar* datastart; + uchar* dataend; + uchar* datalimit; + + //! custom allocator + MatAllocator* allocator; + + struct CV_EXPORTS MSize + { + MSize(int* _p); + Size operator()() const; + const int& operator[](int i) const; + int& operator[](int i); + operator const int*() const; + bool operator == (const MSize& sz) const; + bool operator != (const MSize& sz) const; + + int* p; + }; + + struct CV_EXPORTS MStep + { + MStep(); + MStep(size_t s); + const size_t& operator[](int i) const; + size_t& operator[](int i); + operator size_t() const; + MStep& operator = (size_t s); + + size_t* p; + size_t buf[2]; + protected: + MStep& operator = (const MStep&); + }; + + MSize size; + MStep step; + +protected: + void initEmpty(); +}; + + +/*! + Random Number Generator + + The class implements RNG using Multiply-with-Carry algorithm +*/ +class CV_EXPORTS RNG +{ +public: + enum { UNIFORM=0, NORMAL=1 }; + + RNG(); + RNG(uint64 state); + //! updates the state and returns the next 32-bit unsigned integer random number + unsigned next(); + + operator uchar(); + operator schar(); + operator ushort(); + operator short(); + operator unsigned(); + //! returns a random integer sampled uniformly from [0, N). + unsigned operator ()(unsigned N); + unsigned operator ()(); + operator int(); + operator float(); + operator double(); + //! returns uniformly distributed integer random number from [a,b) range + int uniform(int a, int b); + //! returns uniformly distributed floating-point random number from [a,b) range + float uniform(float a, float b); + //! returns uniformly distributed double-precision floating-point random number from [a,b) range + double uniform(double a, double b); + void fill( InputOutputArray mat, int distType, InputArray a, InputArray b, bool saturateRange=false ); + //! returns Gaussian random variate with mean zero. + double gaussian(double sigma); + + uint64 state; +}; + +/*! + Random Number Generator - MT + + The class implements RNG using the Mersenne Twister algorithm +*/ +class CV_EXPORTS RNG_MT19937 +{ +public: + RNG_MT19937(); + RNG_MT19937(unsigned s); + void seed(unsigned s); + + unsigned next(); + + operator int(); + operator unsigned(); + operator float(); + operator double(); + + unsigned operator ()(unsigned N); + unsigned operator ()(); + + //! returns uniformly distributed integer random number from [a,b) range + int uniform(int a, int b); + //! returns uniformly distributed floating-point random number from [a,b) range + float uniform(float a, float b); + //! returns uniformly distributed double-precision floating-point random number from [a,b) range + double uniform(double a, double b); + +private: + enum PeriodParameters {N = 624, M = 397}; + unsigned state[N]; + int mti; +}; + +/*! + Termination criteria in iterative algorithms + */ +class CV_EXPORTS TermCriteria +{ +public: + enum + { + COUNT=1, //!< the maximum number of iterations or elements to compute + MAX_ITER=COUNT, //!< ditto + EPS=2 //!< the desired accuracy or change in parameters at which the iterative algorithm stops + }; + + //! default constructor + TermCriteria(); + //! full constructor + TermCriteria(int type, int maxCount, double epsilon); + //! 
conversion from CvTermCriteria + TermCriteria(const CvTermCriteria& criteria); + //! conversion to CvTermCriteria + operator CvTermCriteria() const; + + int type; //!< the type of termination criteria: COUNT, EPS or COUNT + EPS + int maxCount; // the maximum number of iterations/elements + double epsilon; // the desired accuracy +}; + + +typedef void (*BinaryFunc)(const uchar* src1, size_t step1, + const uchar* src2, size_t step2, + uchar* dst, size_t step, Size sz, + void*); + +CV_EXPORTS BinaryFunc getConvertFunc(int sdepth, int ddepth); +CV_EXPORTS BinaryFunc getConvertScaleFunc(int sdepth, int ddepth); +CV_EXPORTS BinaryFunc getCopyMaskFunc(size_t esz); + +//! swaps two matrices +CV_EXPORTS void swap(Mat& a, Mat& b); + +//! converts array (CvMat or IplImage) to cv::Mat +CV_EXPORTS Mat cvarrToMat(const CvArr* arr, bool copyData=false, + bool allowND=true, int coiMode=0); +//! extracts Channel of Interest from CvMat or IplImage and makes cv::Mat out of it. +CV_EXPORTS void extractImageCOI(const CvArr* arr, OutputArray coiimg, int coi=-1); +//! inserts single-channel cv::Mat into a multi-channel CvMat or IplImage +CV_EXPORTS void insertImageCOI(InputArray coiimg, CvArr* arr, int coi=-1); + +//! adds one matrix to another (dst = src1 + src2) +CV_EXPORTS_W void add(InputArray src1, InputArray src2, OutputArray dst, + InputArray mask=noArray(), int dtype=-1); +//! subtracts one matrix from another (dst = src1 - src2) +CV_EXPORTS_W void subtract(InputArray src1, InputArray src2, OutputArray dst, + InputArray mask=noArray(), int dtype=-1); + +//! computes element-wise weighted product of the two arrays (dst = scale*src1*src2) +CV_EXPORTS_W void multiply(InputArray src1, InputArray src2, + OutputArray dst, double scale=1, int dtype=-1); + +//! computes element-wise weighted quotient of the two arrays (dst = scale*src1/src2) +CV_EXPORTS_W void divide(InputArray src1, InputArray src2, OutputArray dst, + double scale=1, int dtype=-1); + +//! computes element-wise weighted reciprocal of an array (dst = scale/src2) +CV_EXPORTS_W void divide(double scale, InputArray src2, + OutputArray dst, int dtype=-1); + +//! adds scaled array to another one (dst = alpha*src1 + src2) +CV_EXPORTS_W void scaleAdd(InputArray src1, double alpha, InputArray src2, OutputArray dst); + +//! computes weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma) +CV_EXPORTS_W void addWeighted(InputArray src1, double alpha, InputArray src2, + double beta, double gamma, OutputArray dst, int dtype=-1); + +//! scales array elements, computes absolute values and converts the results to 8-bit unsigned integers: dst(i)=saturate_castabs(src(i)*alpha+beta) +CV_EXPORTS_W void convertScaleAbs(InputArray src, OutputArray dst, + double alpha=1, double beta=0); +//! transforms array of numbers using a lookup table: dst(i)=lut(src(i)) +CV_EXPORTS_W void LUT(InputArray src, InputArray lut, OutputArray dst, + int interpolation=0); + +//! computes sum of array elements +CV_EXPORTS_AS(sumElems) Scalar sum(InputArray src); +//! computes the number of nonzero array elements +CV_EXPORTS_W int countNonZero( InputArray src ); +//! returns the list of locations of non-zero pixels +CV_EXPORTS_W void findNonZero( InputArray src, OutputArray idx ); + +//! computes mean value of selected array elements +CV_EXPORTS_W Scalar mean(InputArray src, InputArray mask=noArray()); +//! 
computes mean value and standard deviation of all or selected array elements +CV_EXPORTS_W void meanStdDev(InputArray src, OutputArray mean, OutputArray stddev, + InputArray mask=noArray()); +//! computes norm of the selected array part +CV_EXPORTS_W double norm(InputArray src1, int normType=NORM_L2, InputArray mask=noArray()); +//! computes norm of selected part of the difference between two arrays +CV_EXPORTS_W double norm(InputArray src1, InputArray src2, + int normType=NORM_L2, InputArray mask=noArray()); + +//! naive nearest neighbor finder +CV_EXPORTS_W void batchDistance(InputArray src1, InputArray src2, + OutputArray dist, int dtype, OutputArray nidx, + int normType=NORM_L2, int K=0, + InputArray mask=noArray(), int update=0, + bool crosscheck=false); + +//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values +CV_EXPORTS_W void normalize( InputArray src, OutputArray dst, double alpha=1, double beta=0, + int norm_type=NORM_L2, int dtype=-1, InputArray mask=noArray()); + +//! finds global minimum and maximum array elements and returns their values and their locations +CV_EXPORTS_W void minMaxLoc(InputArray src, CV_OUT double* minVal, + CV_OUT double* maxVal=0, CV_OUT Point* minLoc=0, + CV_OUT Point* maxLoc=0, InputArray mask=noArray()); +CV_EXPORTS void minMaxIdx(InputArray src, double* minVal, double* maxVal, + int* minIdx=0, int* maxIdx=0, InputArray mask=noArray()); + +//! transforms 2D matrix to 1D row or column vector by taking sum, minimum, maximum or mean value over all the rows +CV_EXPORTS_W void reduce(InputArray src, OutputArray dst, int dim, int rtype, int dtype=-1); + +//! makes multi-channel array out of several single-channel arrays +CV_EXPORTS void merge(const Mat* mv, size_t count, OutputArray dst); +CV_EXPORTS void merge(const vector& mv, OutputArray dst ); + +//! makes multi-channel array out of several single-channel arrays +CV_EXPORTS_W void merge(InputArrayOfArrays mv, OutputArray dst); + +//! copies each plane of a multi-channel array to a dedicated array +CV_EXPORTS void split(const Mat& src, Mat* mvbegin); +CV_EXPORTS void split(const Mat& m, vector& mv ); + +//! copies each plane of a multi-channel array to a dedicated array +CV_EXPORTS_W void split(InputArray m, OutputArrayOfArrays mv); + +//! copies selected channels from the input arrays to the selected channels of the output arrays +CV_EXPORTS void mixChannels(const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts, + const int* fromTo, size_t npairs); +CV_EXPORTS void mixChannels(const vector& src, vector& dst, + const int* fromTo, size_t npairs); +CV_EXPORTS_W void mixChannels(InputArrayOfArrays src, InputArrayOfArrays dst, + const vector& fromTo); + +//! extracts a single channel from src (coi is 0-based index) +CV_EXPORTS_W void extractChannel(InputArray src, OutputArray dst, int coi); + +//! inserts a single channel to dst (coi is 0-based index) +CV_EXPORTS_W void insertChannel(InputArray src, InputOutputArray dst, int coi); + +//! reverses the order of the rows, columns or both in a matrix +CV_EXPORTS_W void flip(InputArray src, OutputArray dst, int flipCode); + +//! 
replicates the input matrix the specified number of times in the horizontal and/or vertical direction +CV_EXPORTS_W void repeat(InputArray src, int ny, int nx, OutputArray dst); +CV_EXPORTS Mat repeat(const Mat& src, int ny, int nx); + +CV_EXPORTS void hconcat(const Mat* src, size_t nsrc, OutputArray dst); +CV_EXPORTS void hconcat(InputArray src1, InputArray src2, OutputArray dst); +CV_EXPORTS_W void hconcat(InputArrayOfArrays src, OutputArray dst); + +CV_EXPORTS void vconcat(const Mat* src, size_t nsrc, OutputArray dst); +CV_EXPORTS void vconcat(InputArray src1, InputArray src2, OutputArray dst); +CV_EXPORTS_W void vconcat(InputArrayOfArrays src, OutputArray dst); + +//! computes bitwise conjunction of the two arrays (dst = src1 & src2) +CV_EXPORTS_W void bitwise_and(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! computes bitwise disjunction of the two arrays (dst = src1 | src2) +CV_EXPORTS_W void bitwise_or(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! computes bitwise exclusive-or of the two arrays (dst = src1 ^ src2) +CV_EXPORTS_W void bitwise_xor(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! inverts each bit of array (dst = ~src) +CV_EXPORTS_W void bitwise_not(InputArray src, OutputArray dst, + InputArray mask=noArray()); +//! computes element-wise absolute difference of two arrays (dst = abs(src1 - src2)) +CV_EXPORTS_W void absdiff(InputArray src1, InputArray src2, OutputArray dst); +//! set mask elements for those array elements which are within the element-specific bounding box (dst = lowerb <= src && src < upperb) +CV_EXPORTS_W void inRange(InputArray src, InputArray lowerb, + InputArray upperb, OutputArray dst); +//! compares elements of two arrays (dst = src1 src2) +CV_EXPORTS_W void compare(InputArray src1, InputArray src2, OutputArray dst, int cmpop); +//! computes per-element minimum of two arrays (dst = min(src1, src2)) +CV_EXPORTS_W void min(InputArray src1, InputArray src2, OutputArray dst); +//! computes per-element maximum of two arrays (dst = max(src1, src2)) +CV_EXPORTS_W void max(InputArray src1, InputArray src2, OutputArray dst); + +//! computes per-element minimum of two arrays (dst = min(src1, src2)) +CV_EXPORTS void min(const Mat& src1, const Mat& src2, Mat& dst); +//! computes per-element minimum of array and scalar (dst = min(src1, src2)) +CV_EXPORTS void min(const Mat& src1, double src2, Mat& dst); +//! computes per-element maximum of two arrays (dst = max(src1, src2)) +CV_EXPORTS void max(const Mat& src1, const Mat& src2, Mat& dst); +//! computes per-element maximum of array and scalar (dst = max(src1, src2)) +CV_EXPORTS void max(const Mat& src1, double src2, Mat& dst); + +//! computes square root of each matrix element (dst = src**0.5) +CV_EXPORTS_W void sqrt(InputArray src, OutputArray dst); +//! raises the input matrix elements to the specified power (b = a**power) +CV_EXPORTS_W void pow(InputArray src, double power, OutputArray dst); +//! computes exponent of each matrix element (dst = e**src) +CV_EXPORTS_W void exp(InputArray src, OutputArray dst); +//! computes natural logarithm of absolute value of each matrix element: dst = log(abs(src)) +CV_EXPORTS_W void log(InputArray src, OutputArray dst); +//! computes cube root of the argument +CV_EXPORTS_W float cubeRoot(float val); +//! 
computes the angle in degrees (0..360) of the vector (x,y) +CV_EXPORTS_W float fastAtan2(float y, float x); + +CV_EXPORTS void exp(const float* src, float* dst, int n); +CV_EXPORTS void log(const float* src, float* dst, int n); +CV_EXPORTS void fastAtan2(const float* y, const float* x, float* dst, int n, bool angleInDegrees); +CV_EXPORTS void magnitude(const float* x, const float* y, float* dst, int n); + +//! converts polar coordinates to Cartesian +CV_EXPORTS_W void polarToCart(InputArray magnitude, InputArray angle, + OutputArray x, OutputArray y, bool angleInDegrees=false); +//! converts Cartesian coordinates to polar +CV_EXPORTS_W void cartToPolar(InputArray x, InputArray y, + OutputArray magnitude, OutputArray angle, + bool angleInDegrees=false); +//! computes angle (angle(i)) of each (x(i), y(i)) vector +CV_EXPORTS_W void phase(InputArray x, InputArray y, OutputArray angle, + bool angleInDegrees=false); +//! computes magnitude (magnitude(i)) of each (x(i), y(i)) vector +CV_EXPORTS_W void magnitude(InputArray x, InputArray y, OutputArray magnitude); +//! checks that each matrix element is within the specified range. +CV_EXPORTS_W bool checkRange(InputArray a, bool quiet=true, CV_OUT Point* pos=0, + double minVal=-DBL_MAX, double maxVal=DBL_MAX); +//! converts NaN's to the given number +CV_EXPORTS_W void patchNaNs(InputOutputArray a, double val=0); + +//! implements generalized matrix product algorithm GEMM from BLAS +CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha, + InputArray src3, double gamma, OutputArray dst, int flags=0); +//! multiplies matrix by its transposition from the left or from the right +CV_EXPORTS_W void mulTransposed( InputArray src, OutputArray dst, bool aTa, + InputArray delta=noArray(), + double scale=1, int dtype=-1 ); +//! transposes the matrix +CV_EXPORTS_W void transpose(InputArray src, OutputArray dst); +//! performs affine transformation of each element of multi-channel input matrix +CV_EXPORTS_W void transform(InputArray src, OutputArray dst, InputArray m ); +//! performs perspective transformation of each element of multi-channel input matrix +CV_EXPORTS_W void perspectiveTransform(InputArray src, OutputArray dst, InputArray m ); + +//! extends the symmetrical matrix from the lower half or from the upper half +CV_EXPORTS_W void completeSymm(InputOutputArray mtx, bool lowerToUpper=false); +//! initializes scaled identity matrix +CV_EXPORTS_W void setIdentity(InputOutputArray mtx, const Scalar& s=Scalar(1)); +//! computes determinant of a square matrix +CV_EXPORTS_W double determinant(InputArray mtx); +//! computes trace of a matrix +CV_EXPORTS_W Scalar trace(InputArray mtx); +//! computes inverse or pseudo-inverse matrix +CV_EXPORTS_W double invert(InputArray src, OutputArray dst, int flags=DECOMP_LU); +//! solves linear system or a least-square problem +CV_EXPORTS_W bool solve(InputArray src1, InputArray src2, + OutputArray dst, int flags=DECOMP_LU); + +enum +{ + SORT_EVERY_ROW=0, + SORT_EVERY_COLUMN=1, + SORT_ASCENDING=0, + SORT_DESCENDING=16 +}; + +//! sorts independently each matrix row or each matrix column +CV_EXPORTS_W void sort(InputArray src, OutputArray dst, int flags); +//! sorts independently each matrix row or each matrix column +CV_EXPORTS_W void sortIdx(InputArray src, OutputArray dst, int flags); +//! finds real roots of a cubic polynomial +CV_EXPORTS_W int solveCubic(InputArray coeffs, OutputArray roots); +//! 
finds real and complex roots of a polynomial +CV_EXPORTS_W double solvePoly(InputArray coeffs, OutputArray roots, int maxIters=300); +//! finds eigenvalues of a symmetric matrix +CV_EXPORTS bool eigen(InputArray src, OutputArray eigenvalues, int lowindex=-1, + int highindex=-1); +//! finds eigenvalues and eigenvectors of a symmetric matrix +CV_EXPORTS bool eigen(InputArray src, OutputArray eigenvalues, + OutputArray eigenvectors, + int lowindex=-1, int highindex=-1); +CV_EXPORTS_W bool eigen(InputArray src, bool computeEigenvectors, + OutputArray eigenvalues, OutputArray eigenvectors); + +enum +{ + COVAR_SCRAMBLED=0, + COVAR_NORMAL=1, + COVAR_USE_AVG=2, + COVAR_SCALE=4, + COVAR_ROWS=8, + COVAR_COLS=16 +}; + +//! computes covariation matrix of a set of samples +CV_EXPORTS void calcCovarMatrix( const Mat* samples, int nsamples, Mat& covar, Mat& mean, + int flags, int ctype=CV_64F); +//! computes covariation matrix of a set of samples +CV_EXPORTS_W void calcCovarMatrix( InputArray samples, OutputArray covar, + OutputArray mean, int flags, int ctype=CV_64F); + +/*! + Principal Component Analysis + + The class PCA is used to compute the special basis for a set of vectors. + The basis will consist of eigenvectors of the covariance matrix computed + from the input set of vectors. After PCA is performed, vectors can be transformed from + the original high-dimensional space to the subspace formed by a few most + prominent eigenvectors (called the principal components), + corresponding to the largest eigenvalues of the covariation matrix. + Thus the dimensionality of the vector and the correlation between the coordinates is reduced. + + The following sample is the function that takes two matrices. The first one stores the set + of vectors (a row per vector) that is used to compute PCA, the second one stores another + "test" set of vectors (a row per vector) that are first compressed with PCA, + then reconstructed back and then the reconstruction error norm is computed and printed for each vector. + + \code + using namespace cv; + + PCA compressPCA(const Mat& pcaset, int maxComponents, + const Mat& testset, Mat& compressed) + { + PCA pca(pcaset, // pass the data + Mat(), // we do not have a pre-computed mean vector, + // so let the PCA engine to compute it + CV_PCA_DATA_AS_ROW, // indicate that the vectors + // are stored as matrix rows + // (use CV_PCA_DATA_AS_COL if the vectors are + // the matrix columns) + maxComponents // specify, how many principal components to retain + ); + // if there is no test data, just return the computed basis, ready-to-use + if( !testset.data ) + return pca; + CV_Assert( testset.cols == pcaset.cols ); + + compressed.create(testset.rows, maxComponents, testset.type()); + + Mat reconstructed; + for( int i = 0; i < testset.rows; i++ ) + { + Mat vec = testset.row(i), coeffs = compressed.row(i), reconstructed; + // compress the vector, the result will be stored + // in the i-th row of the output matrix + pca.project(vec, coeffs); + // and then reconstruct it + pca.backProject(coeffs, reconstructed); + // and measure the error + printf("%d. diff = %g\n", i, norm(vec, reconstructed, NORM_L2)); + } + return pca; + } + \endcode +*/ +class CV_EXPORTS PCA +{ +public: + //! default constructor + PCA(); + //! the constructor that performs PCA + PCA(InputArray data, InputArray mean, int flags, int maxComponents=0); + PCA(InputArray data, InputArray mean, int flags, double retainedVariance); + //! operator that performs PCA. 
The previously stored data, if any, is released + PCA& operator()(InputArray data, InputArray mean, int flags, int maxComponents=0); + PCA& computeVar(InputArray data, InputArray mean, int flags, double retainedVariance); + //! projects vector from the original space to the principal components subspace + Mat project(InputArray vec) const; + //! projects vector from the original space to the principal components subspace + void project(InputArray vec, OutputArray result) const; + //! reconstructs the original vector from the projection + Mat backProject(InputArray vec) const; + //! reconstructs the original vector from the projection + void backProject(InputArray vec, OutputArray result) const; + + Mat eigenvectors; //!< eigenvectors of the covariation matrix + Mat eigenvalues; //!< eigenvalues of the covariation matrix + Mat mean; //!< mean value subtracted before the projection and added after the back projection +}; + +CV_EXPORTS_W void PCACompute(InputArray data, CV_OUT InputOutputArray mean, + OutputArray eigenvectors, int maxComponents=0); + +CV_EXPORTS_W void PCAComputeVar(InputArray data, CV_OUT InputOutputArray mean, + OutputArray eigenvectors, double retainedVariance); + +CV_EXPORTS_W void PCAProject(InputArray data, InputArray mean, + InputArray eigenvectors, OutputArray result); + +CV_EXPORTS_W void PCABackProject(InputArray data, InputArray mean, + InputArray eigenvectors, OutputArray result); + + +/*! + Singular Value Decomposition class + + The class is used to compute Singular Value Decomposition of a floating-point matrix and then + use it to solve least-square problems, under-determined linear systems, invert matrices, + compute condition numbers etc. + + For a bit faster operation you can pass flags=SVD::MODIFY_A|... to modify the decomposed matrix + when it is not necessarily to preserve it. If you want to compute condition number of a matrix + or absolute value of its determinant - you do not need SVD::u or SVD::vt, + so you can pass flags=SVD::NO_UV|... . Another flag SVD::FULL_UV indicates that the full-size SVD::u and SVD::vt + must be computed, which is not necessary most of the time. +*/ +class CV_EXPORTS SVD +{ +public: + enum { MODIFY_A=1, NO_UV=2, FULL_UV=4 }; + //! the default constructor + SVD(); + //! the constructor that performs SVD + SVD( InputArray src, int flags=0 ); + //! the operator that performs SVD. The previously allocated SVD::u, SVD::w are SVD::vt are released. + SVD& operator ()( InputArray src, int flags=0 ); + + //! decomposes matrix and stores the results to user-provided matrices + static void compute( InputArray src, OutputArray w, + OutputArray u, OutputArray vt, int flags=0 ); + //! computes singular values of a matrix + static void compute( InputArray src, OutputArray w, int flags=0 ); + //! performs back substitution + static void backSubst( InputArray w, InputArray u, + InputArray vt, InputArray rhs, + OutputArray dst ); + + template static void compute( const Matx<_Tp, m, n>& a, + Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt ); + template static void compute( const Matx<_Tp, m, n>& a, + Matx<_Tp, nm, 1>& w ); + template static void backSubst( const Matx<_Tp, nm, 1>& w, + const Matx<_Tp, m, nm>& u, const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, Matx<_Tp, n, nb>& dst ); + + //! finds dst = arg min_{|dst|=1} |m*dst| + static void solveZ( InputArray src, OutputArray dst ); + //! 
performs back substitution, so that dst is the solution or pseudo-solution of m*dst = rhs, where m is the decomposed matrix + void backSubst( InputArray rhs, OutputArray dst ) const; + + Mat u, w, vt; +}; + +//! computes SVD of src +CV_EXPORTS_W void SVDecomp( InputArray src, CV_OUT OutputArray w, + CV_OUT OutputArray u, CV_OUT OutputArray vt, int flags=0 ); + +//! performs back substitution for the previously computed SVD +CV_EXPORTS_W void SVBackSubst( InputArray w, InputArray u, InputArray vt, + InputArray rhs, CV_OUT OutputArray dst ); + +//! computes Mahalanobis distance between two vectors: sqrt((v1-v2)'*icovar*(v1-v2)), where icovar is the inverse covariation matrix +CV_EXPORTS_W double Mahalanobis(InputArray v1, InputArray v2, InputArray icovar); +//! a synonym for Mahalanobis +CV_EXPORTS double Mahalonobis(InputArray v1, InputArray v2, InputArray icovar); + +//! performs forward or inverse 1D or 2D Discrete Fourier Transformation +CV_EXPORTS_W void dft(InputArray src, OutputArray dst, int flags=0, int nonzeroRows=0); +//! performs inverse 1D or 2D Discrete Fourier Transformation +CV_EXPORTS_W void idft(InputArray src, OutputArray dst, int flags=0, int nonzeroRows=0); +//! performs forward or inverse 1D or 2D Discrete Cosine Transformation +CV_EXPORTS_W void dct(InputArray src, OutputArray dst, int flags=0); +//! performs inverse 1D or 2D Discrete Cosine Transformation +CV_EXPORTS_W void idct(InputArray src, OutputArray dst, int flags=0); +//! computes element-wise product of the two Fourier spectrums. The second spectrum can optionally be conjugated before the multiplication +CV_EXPORTS_W void mulSpectrums(InputArray a, InputArray b, OutputArray c, + int flags, bool conjB=false); +//! computes the minimal vector size vecsize1 >= vecsize so that the dft() of the vector of length vecsize1 can be computed efficiently +CV_EXPORTS_W int getOptimalDFTSize(int vecsize); + +/*! + Various k-Means flags +*/ +enum +{ + KMEANS_RANDOM_CENTERS=0, // Chooses random centers for k-Means initialization + KMEANS_PP_CENTERS=2, // Uses k-Means++ algorithm for initialization + KMEANS_USE_INITIAL_LABELS=1 // Uses the user-provided labels for K-Means initialization +}; +//! clusters the input data using k-Means algorithm +CV_EXPORTS_W double kmeans( InputArray data, int K, CV_OUT InputOutputArray bestLabels, + TermCriteria criteria, int attempts, + int flags, OutputArray centers=noArray() ); + +//! returns the thread-local Random number generator +CV_EXPORTS RNG& theRNG(); + +//! returns the next unifomly-distributed random number of the specified type +template static inline _Tp randu() { return (_Tp)theRNG(); } + +//! fills array with uniformly-distributed random numbers from the range [low, high) +CV_EXPORTS_W void randu(InputOutputArray dst, InputArray low, InputArray high); + +//! fills array with normally-distributed random numbers with the specified mean and the standard deviation +CV_EXPORTS_W void randn(InputOutputArray dst, InputArray mean, InputArray stddev); + +//! shuffles the input array elements +CV_EXPORTS void randShuffle(InputOutputArray dst, double iterFactor=1., RNG* rng=0); +CV_EXPORTS_AS(randShuffle) void randShuffle_(InputOutputArray dst, double iterFactor=1.); + +//! draws the line segment (pt1, pt2) in the image +CV_EXPORTS_W void line(CV_IN_OUT Mat& img, Point pt1, Point pt2, const Scalar& color, + int thickness=1, int lineType=8, int shift=0); + +//! 
draws the rectangle outline or a solid rectangle with the opposite corners pt1 and pt2 in the image +CV_EXPORTS_W void rectangle(CV_IN_OUT Mat& img, Point pt1, Point pt2, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws the rectangle outline or a solid rectangle covering rec in the image +CV_EXPORTS void rectangle(CV_IN_OUT Mat& img, Rect rec, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws the circle outline or a solid circle in the image +CV_EXPORTS_W void circle(CV_IN_OUT Mat& img, Point center, int radius, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws an elliptic arc, ellipse sector or a rotated ellipse in the image +CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, Point center, Size axes, + double angle, double startAngle, double endAngle, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws a rotated ellipse in the image +CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, const RotatedRect& box, const Scalar& color, + int thickness=1, int lineType=8); + +//! draws a filled convex polygon in the image +CV_EXPORTS void fillConvexPoly(Mat& img, const Point* pts, int npts, + const Scalar& color, int lineType=8, + int shift=0); +CV_EXPORTS_W void fillConvexPoly(InputOutputArray img, InputArray points, + const Scalar& color, int lineType=8, + int shift=0); + +//! fills an area bounded by one or more polygons +CV_EXPORTS void fillPoly(Mat& img, const Point** pts, + const int* npts, int ncontours, + const Scalar& color, int lineType=8, int shift=0, + Point offset=Point() ); + +CV_EXPORTS_W void fillPoly(InputOutputArray img, InputArrayOfArrays pts, + const Scalar& color, int lineType=8, int shift=0, + Point offset=Point() ); + +//! draws one or more polygonal curves +CV_EXPORTS void polylines(Mat& img, const Point** pts, const int* npts, + int ncontours, bool isClosed, const Scalar& color, + int thickness=1, int lineType=8, int shift=0 ); + +CV_EXPORTS_W void polylines(InputOutputArray img, InputArrayOfArrays pts, + bool isClosed, const Scalar& color, + int thickness=1, int lineType=8, int shift=0 ); + +//! clips the line segment by the rectangle Rect(0, 0, imgSize.width, imgSize.height) +CV_EXPORTS bool clipLine(Size imgSize, CV_IN_OUT Point& pt1, CV_IN_OUT Point& pt2); + +//! clips the line segment by the rectangle imgRect +CV_EXPORTS_W bool clipLine(Rect imgRect, CV_OUT CV_IN_OUT Point& pt1, CV_OUT CV_IN_OUT Point& pt2); + +/*! + Line iterator class + + The class is used to iterate over all the pixels on the raster line + segment connecting two specified points. +*/ +class CV_EXPORTS LineIterator +{ +public: + //! intializes the iterator + LineIterator( const Mat& img, Point pt1, Point pt2, + int connectivity=8, bool leftToRight=false ); + //! returns pointer to the current pixel + uchar* operator *(); + //! prefix increment operator (++it). shifts iterator to the next pixel + LineIterator& operator ++(); + //! postfix increment operator (it++). shifts iterator to the next pixel + LineIterator operator ++(int); + //! returns coordinates of the current pixel + Point pos() const; + + uchar* ptr; + const uchar* ptr0; + int step, elemSize; + int err, count; + int minusDelta, plusDelta; + int minusStep, plusStep; +}; + +//! 
converts elliptic arc to a polygonal curve +CV_EXPORTS_W void ellipse2Poly( Point center, Size axes, int angle, + int arcStart, int arcEnd, int delta, + CV_OUT vector& pts ); + +enum +{ + FONT_HERSHEY_SIMPLEX = 0, + FONT_HERSHEY_PLAIN = 1, + FONT_HERSHEY_DUPLEX = 2, + FONT_HERSHEY_COMPLEX = 3, + FONT_HERSHEY_TRIPLEX = 4, + FONT_HERSHEY_COMPLEX_SMALL = 5, + FONT_HERSHEY_SCRIPT_SIMPLEX = 6, + FONT_HERSHEY_SCRIPT_COMPLEX = 7, + FONT_ITALIC = 16 +}; + +//! renders text string in the image +CV_EXPORTS_W void putText( Mat& img, const string& text, Point org, + int fontFace, double fontScale, Scalar color, + int thickness=1, int lineType=8, + bool bottomLeftOrigin=false ); + +//! returns bounding box of the text string +CV_EXPORTS_W Size getTextSize(const string& text, int fontFace, + double fontScale, int thickness, + CV_OUT int* baseLine); + +///////////////////////////////// Mat_<_Tp> //////////////////////////////////// + +/*! + Template matrix class derived from Mat + + The class Mat_ is a "thin" template wrapper on top of cv::Mat. It does not have any extra data fields, + nor it or cv::Mat have any virtual methods and thus references or pointers to these two classes + can be safely converted one to another. But do it with care, for example: + + \code + // create 100x100 8-bit matrix + Mat M(100,100,CV_8U); + // this will compile fine. no any data conversion will be done. + Mat_& M1 = (Mat_&)M; + // the program will likely crash at the statement below + M1(99,99) = 1.f; + \endcode + + While cv::Mat is sufficient in most cases, cv::Mat_ can be more convenient if you use a lot of element + access operations and if you know matrix type at compile time. + Note that cv::Mat::at<_Tp>(int y, int x) and cv::Mat_<_Tp>::operator ()(int y, int x) do absolutely the + same thing and run at the same speed, but the latter is certainly shorter: + + \code + Mat_ M(20,20); + for(int i = 0; i < M.rows; i++) + for(int j = 0; j < M.cols; j++) + M(i,j) = 1./(i+j+1); + Mat E, V; + eigen(M,E,V); + cout << E.at(0,0)/E.at(M.rows-1,0); + \endcode + + It is easy to use Mat_ for multi-channel images/matrices - just pass cv::Vec as cv::Mat_ template parameter: + + \code + // allocate 320x240 color image and fill it with green (in RGB space) + Mat_ img(240, 320, Vec3b(0,255,0)); + // now draw a diagonal white line + for(int i = 0; i < 100; i++) + img(i,i)=Vec3b(255,255,255); + // and now modify the 2nd (red) channel of each pixel + for(int i = 0; i < img.rows; i++) + for(int j = 0; j < img.cols; j++) + img(i,j)[2] ^= (uchar)(i ^ j); // img(y,x)[c] accesses c-th channel of the pixel (x,y) + \endcode +*/ +template class Mat_ : public Mat +{ +public: + typedef _Tp value_type; + typedef typename DataType<_Tp>::channel_type channel_type; + typedef MatIterator_<_Tp> iterator; + typedef MatConstIterator_<_Tp> const_iterator; + + //! default constructor + Mat_(); + //! equivalent to Mat(_rows, _cols, DataType<_Tp>::type) + Mat_(int _rows, int _cols); + //! constructor that sets each matrix element to specified value + Mat_(int _rows, int _cols, const _Tp& value); + //! equivalent to Mat(_size, DataType<_Tp>::type) + explicit Mat_(Size _size); + //! constructor that sets each matrix element to specified value + Mat_(Size _size, const _Tp& value); + //! n-dim array constructor + Mat_(int _ndims, const int* _sizes); + //! n-dim array constructor that sets each matrix element to specified value + Mat_(int _ndims, const int* _sizes, const _Tp& value); + //! copy/conversion contructor. 
If m is of different type, it's converted + Mat_(const Mat& m); + //! copy constructor + Mat_(const Mat_& m); + //! constructs a matrix on top of user-allocated data. step is in bytes(!!!), regardless of the type + Mat_(int _rows, int _cols, _Tp* _data, size_t _step=AUTO_STEP); + //! constructs n-dim matrix on top of user-allocated data. steps are in bytes(!!!), regardless of the type + Mat_(int _ndims, const int* _sizes, _Tp* _data, const size_t* _steps=0); + //! selects a submatrix + Mat_(const Mat_& m, const Range& rowRange, const Range& colRange=Range::all()); + //! selects a submatrix + Mat_(const Mat_& m, const Rect& roi); + //! selects a submatrix, n-dim version + Mat_(const Mat_& m, const Range* ranges); + //! from a matrix expression + explicit Mat_(const MatExpr& e); + //! makes a matrix out of Vec, std::vector, Point_ or Point3_. The matrix will have a single column + explicit Mat_(const vector<_Tp>& vec, bool copyData=false); + template explicit Mat_(const Vec::channel_type, n>& vec, bool copyData=true); + template explicit Mat_(const Matx::channel_type, m, n>& mtx, bool copyData=true); + explicit Mat_(const Point_::channel_type>& pt, bool copyData=true); + explicit Mat_(const Point3_::channel_type>& pt, bool copyData=true); + explicit Mat_(const MatCommaInitializer_<_Tp>& commaInitializer); + + Mat_& operator = (const Mat& m); + Mat_& operator = (const Mat_& m); + //! set all the elements to s. + Mat_& operator = (const _Tp& s); + //! assign a matrix expression + Mat_& operator = (const MatExpr& e); + + //! iterators; they are smart enough to skip gaps in the end of rows + iterator begin(); + iterator end(); + const_iterator begin() const; + const_iterator end() const; + + //! equivalent to Mat::create(_rows, _cols, DataType<_Tp>::type) + void create(int _rows, int _cols); + //! equivalent to Mat::create(_size, DataType<_Tp>::type) + void create(Size _size); + //! equivalent to Mat::create(_ndims, _sizes, DatType<_Tp>::type) + void create(int _ndims, const int* _sizes); + //! cross-product + Mat_ cross(const Mat_& m) const; + //! data type conversion + template operator Mat_() const; + //! overridden forms of Mat::row() etc. + Mat_ row(int y) const; + Mat_ col(int x) const; + Mat_ diag(int d=0) const; + Mat_ clone() const; + + //! overridden forms of Mat::elemSize() etc. + size_t elemSize() const; + size_t elemSize1() const; + int type() const; + int depth() const; + int channels() const; + size_t step1(int i=0) const; + //! returns step()/sizeof(_Tp) + size_t stepT(int i=0) const; + + //! overridden forms of Mat::zeros() etc. Data type is omitted, of course + static MatExpr zeros(int rows, int cols); + static MatExpr zeros(Size size); + static MatExpr zeros(int _ndims, const int* _sizes); + static MatExpr ones(int rows, int cols); + static MatExpr ones(Size size); + static MatExpr ones(int _ndims, const int* _sizes); + static MatExpr eye(int rows, int cols); + static MatExpr eye(Size size); + + //! some more overriden methods + Mat_& adjustROI( int dtop, int dbottom, int dleft, int dright ); + Mat_ operator()( const Range& rowRange, const Range& colRange ) const; + Mat_ operator()( const Rect& roi ) const; + Mat_ operator()( const Range* ranges ) const; + + //! more convenient forms of row and element access operators + _Tp* operator [](int y); + const _Tp* operator [](int y) const; + + //! returns reference to the specified element + _Tp& operator ()(const int* idx); + //! 
returns read-only reference to the specified element + const _Tp& operator ()(const int* idx) const; + + //! returns reference to the specified element + template _Tp& operator ()(const Vec& idx); + //! returns read-only reference to the specified element + template const _Tp& operator ()(const Vec& idx) const; + + //! returns reference to the specified element (1D case) + _Tp& operator ()(int idx0); + //! returns read-only reference to the specified element (1D case) + const _Tp& operator ()(int idx0) const; + //! returns reference to the specified element (2D case) + _Tp& operator ()(int idx0, int idx1); + //! returns read-only reference to the specified element (2D case) + const _Tp& operator ()(int idx0, int idx1) const; + //! returns reference to the specified element (3D case) + _Tp& operator ()(int idx0, int idx1, int idx2); + //! returns read-only reference to the specified element (3D case) + const _Tp& operator ()(int idx0, int idx1, int idx2) const; + + _Tp& operator ()(Point pt); + const _Tp& operator ()(Point pt) const; + + //! conversion to vector. + operator vector<_Tp>() const; + //! conversion to Vec + template operator Vec::channel_type, n>() const; + //! conversion to Matx + template operator Matx::channel_type, m, n>() const; +}; + +typedef Mat_ Mat1b; +typedef Mat_ Mat2b; +typedef Mat_ Mat3b; +typedef Mat_ Mat4b; + +typedef Mat_ Mat1s; +typedef Mat_ Mat2s; +typedef Mat_ Mat3s; +typedef Mat_ Mat4s; + +typedef Mat_ Mat1w; +typedef Mat_ Mat2w; +typedef Mat_ Mat3w; +typedef Mat_ Mat4w; + +typedef Mat_ Mat1i; +typedef Mat_ Mat2i; +typedef Mat_ Mat3i; +typedef Mat_ Mat4i; + +typedef Mat_ Mat1f; +typedef Mat_ Mat2f; +typedef Mat_ Mat3f; +typedef Mat_ Mat4f; + +typedef Mat_ Mat1d; +typedef Mat_ Mat2d; +typedef Mat_ Mat3d; +typedef Mat_ Mat4d; + +//////////// Iterators & Comma initializers ////////////////// + +class CV_EXPORTS MatConstIterator +{ +public: + typedef uchar* value_type; + typedef ptrdiff_t difference_type; + typedef const uchar** pointer; + typedef uchar* reference; + typedef std::random_access_iterator_tag iterator_category; + + //! default constructor + MatConstIterator(); + //! constructor that sets the iterator to the beginning of the matrix + MatConstIterator(const Mat* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, const int* _idx); + //! copy constructor + MatConstIterator(const MatConstIterator& it); + + //! copy operator + MatConstIterator& operator = (const MatConstIterator& it); + //! returns the current matrix element + uchar* operator *() const; + //! returns the i-th matrix element, relative to the current + uchar* operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatConstIterator& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatConstIterator& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatConstIterator& operator --(); + //! decrements the iterator + MatConstIterator operator --(int); + //! increments the iterator + MatConstIterator& operator ++(); + //! increments the iterator + MatConstIterator operator ++(int); + //! returns the current iterator position + Point pos() const; + //! 
returns the current iterator position + void pos(int* _idx) const; + ptrdiff_t lpos() const; + void seek(ptrdiff_t ofs, bool relative=false); + void seek(const int* _idx, bool relative=false); + + const Mat* m; + size_t elemSize; + uchar* ptr; + uchar* sliceStart; + uchar* sliceEnd; +}; + +/*! + Matrix read-only iterator + + */ +template +class MatConstIterator_ : public MatConstIterator +{ +public: + typedef _Tp value_type; + typedef ptrdiff_t difference_type; + typedef const _Tp* pointer; + typedef const _Tp& reference; + typedef std::random_access_iterator_tag iterator_category; + + //! default constructor + MatConstIterator_(); + //! constructor that sets the iterator to the beginning of the matrix + MatConstIterator_(const Mat_<_Tp>* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, const int* _idx); + //! copy constructor + MatConstIterator_(const MatConstIterator_& it); + + //! copy operator + MatConstIterator_& operator = (const MatConstIterator_& it); + //! returns the current matrix element + _Tp operator *() const; + //! returns the i-th matrix element, relative to the current + _Tp operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatConstIterator_& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatConstIterator_& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatConstIterator_& operator --(); + //! decrements the iterator + MatConstIterator_ operator --(int); + //! increments the iterator + MatConstIterator_& operator ++(); + //! increments the iterator + MatConstIterator_ operator ++(int); + //! returns the current iterator position + Point pos() const; +}; + + +/*! + Matrix read-write iterator + +*/ +template +class MatIterator_ : public MatConstIterator_<_Tp> +{ +public: + typedef _Tp* pointer; + typedef _Tp& reference; + typedef std::random_access_iterator_tag iterator_category; + + //! the default constructor + MatIterator_(); + //! constructor that sets the iterator to the beginning of the matrix + MatIterator_(Mat_<_Tp>* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(Mat_<_Tp>* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(const Mat_<_Tp>* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(const Mat_<_Tp>* _m, const int* _idx); + //! copy constructor + MatIterator_(const MatIterator_& it); + //! copy operator + MatIterator_& operator = (const MatIterator_<_Tp>& it ); + + //! returns the current matrix element + _Tp& operator *() const; + //! returns the i-th matrix element, relative to the current + _Tp& operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatIterator_& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatIterator_& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatIterator_& operator --(); + //! decrements the iterator + MatIterator_ operator --(int); + //! 
increments the iterator + MatIterator_& operator ++(); + //! increments the iterator + MatIterator_ operator ++(int); +}; + +template class MatOp_Iter_; + +/*! + Comma-separated Matrix Initializer + + The class instances are usually not created explicitly. + Instead, they are created on "matrix << firstValue" operator. + + The sample below initializes 2x2 rotation matrix: + + \code + double angle = 30, a = cos(angle*CV_PI/180), b = sin(angle*CV_PI/180); + Mat R = (Mat_(2,2) << a, -b, b, a); + \endcode +*/ +template class MatCommaInitializer_ +{ +public: + //! the constructor, created by "matrix << firstValue" operator, where matrix is cv::Mat + MatCommaInitializer_(Mat_<_Tp>* _m); + //! the operator that takes the next value and put it to the matrix + template MatCommaInitializer_<_Tp>& operator , (T2 v); + //! another form of conversion operator + Mat_<_Tp> operator *() const; + operator Mat_<_Tp>() const; +protected: + MatIterator_<_Tp> it; +}; + + +template class MatxCommaInitializer +{ +public: + MatxCommaInitializer(Matx<_Tp, m, n>* _mtx); + template MatxCommaInitializer<_Tp, m, n>& operator , (T2 val); + Matx<_Tp, m, n> operator *() const; + + Matx<_Tp, m, n>* dst; + int idx; +}; + +template class VecCommaInitializer : public MatxCommaInitializer<_Tp, m, 1> +{ +public: + VecCommaInitializer(Vec<_Tp, m>* _vec); + template VecCommaInitializer<_Tp, m>& operator , (T2 val); + Vec<_Tp, m> operator *() const; +}; + +/*! + Automatically Allocated Buffer Class + + The class is used for temporary buffers in functions and methods. + If a temporary buffer is usually small (a few K's of memory), + but its size depends on the parameters, it makes sense to create a small + fixed-size array on stack and use it if it's large enough. If the required buffer size + is larger than the fixed size, another buffer of sufficient size is allocated dynamically + and released after the processing. Therefore, in typical cases, when the buffer size is small, + there is no overhead associated with malloc()/free(). + At the same time, there is no limit on the size of processed data. + + This is what AutoBuffer does. The template takes 2 parameters - type of the buffer elements and + the number of stack-allocated elements. Here is how the class is used: + + \code + void my_func(const cv::Mat& m) + { + cv::AutoBuffer buf; // create automatic buffer containing 1000 floats + + buf.allocate(m.rows); // if m.rows <= 1000, the pre-allocated buffer is used, + // otherwise the buffer of "m.rows" floats will be allocated + // dynamically and deallocated in cv::AutoBuffer destructor + ... + } + \endcode +*/ +template class AutoBuffer +{ +public: + typedef _Tp value_type; + enum { buffer_padding = (int)((16 + sizeof(_Tp) - 1)/sizeof(_Tp)) }; + + //! the default contructor + AutoBuffer(); + //! constructor taking the real buffer size + AutoBuffer(size_t _size); + //! destructor. calls deallocate() + ~AutoBuffer(); + + //! allocates the new buffer of size _size. if the _size is small enough, stack-allocated buffer is used + void allocate(size_t _size); + //! deallocates the buffer if it was dynamically allocated + void deallocate(); + //! returns pointer to the real buffer, stack-allocated or head-allocated + operator _Tp* (); + //! returns read-only pointer to the real buffer, stack-allocated or head-allocated + operator const _Tp* () const; + +protected: + //! pointer to the real buffer, can point to buf if the buffer is small enough + _Tp* ptr; + //! size of the real buffer + size_t size; + //! 
pre-allocated buffer + _Tp buf[fixed_size+buffer_padding]; +}; + +/////////////////////////// multi-dimensional dense matrix ////////////////////////// + +/*! + n-Dimensional Dense Matrix Iterator Class. + + The class cv::NAryMatIterator is used for iterating over one or more n-dimensional dense arrays (cv::Mat's). + + The iterator is completely different from cv::Mat_ and cv::SparseMat_ iterators. + It iterates through the slices (or planes), not the elements, where "slice" is a continuous part of the arrays. + + Here is the example on how the iterator can be used to normalize 3D histogram: + + \code + void normalizeColorHist(Mat& hist) + { + #if 1 + // intialize iterator (the style is different from STL). + // after initialization the iterator will contain + // the number of slices or planes + // the iterator will go through + Mat* arrays[] = { &hist, 0 }; + Mat planes[1]; + NAryMatIterator it(arrays, planes); + double s = 0; + // iterate through the matrix. on each iteration + // it.planes[i] (of type Mat) will be set to the current plane of + // i-th n-dim matrix passed to the iterator constructor. + for(int p = 0; p < it.nplanes; p++, ++it) + s += sum(it.planes[0])[0]; + it = NAryMatIterator(hist); + s = 1./s; + for(int p = 0; p < it.nplanes; p++, ++it) + it.planes[0] *= s; + #elif 1 + // this is a shorter implementation of the above + // using built-in operations on Mat + double s = sum(hist)[0]; + hist.convertTo(hist, hist.type(), 1./s, 0); + #else + // and this is even shorter one + // (assuming that the histogram elements are non-negative) + normalize(hist, hist, 1, 0, NORM_L1); + #endif + } + \endcode + + You can iterate through several matrices simultaneously as long as they have the same geometry + (dimensionality and all the dimension sizes are the same), which is useful for binary + and n-ary operations on such matrices. Just pass those matrices to cv::MatNDIterator. + Then, during the iteration it.planes[0], it.planes[1], ... will + be the slices of the corresponding matrices +*/ +class CV_EXPORTS NAryMatIterator +{ +public: + //! the default constructor + NAryMatIterator(); + //! the full constructor taking arbitrary number of n-dim matrices + NAryMatIterator(const Mat** arrays, uchar** ptrs, int narrays=-1); + //! the full constructor taking arbitrary number of n-dim matrices + NAryMatIterator(const Mat** arrays, Mat* planes, int narrays=-1); + //! the separate iterator initialization method + void init(const Mat** arrays, Mat* planes, uchar** ptrs, int narrays=-1); + + //! proceeds to the next plane of every iterated matrix + NAryMatIterator& operator ++(); + //! proceeds to the next plane of every iterated matrix (postfix increment operator) + NAryMatIterator operator ++(int); + + //! the iterated arrays + const Mat** arrays; + //! the current planes + Mat* planes; + //! data pointers + uchar** ptrs; + //! the number of arrays + int narrays; + //! the number of hyper-planes that the iterator steps through + size_t nplanes; + //! the size of each segment (in elements) + size_t size; +protected: + int iterdepth; + size_t idx; +}; + +//typedef NAryMatIterator NAryMatNDIterator; + +typedef void (*ConvertData)(const void* from, void* to, int cn); +typedef void (*ConvertScaleData)(const void* from, void* to, int cn, double alpha, double beta); + +//! returns the function for converting pixels from one data type to another +CV_EXPORTS ConvertData getConvertElem(int fromType, int toType); +//! 
returns the function for converting pixels from one data type to another with the optional scaling +CV_EXPORTS ConvertScaleData getConvertScaleElem(int fromType, int toType); + + +/////////////////////////// multi-dimensional sparse matrix ////////////////////////// + +class SparseMatIterator; +class SparseMatConstIterator; +template class SparseMatIterator_; +template class SparseMatConstIterator_; + +/*! + Sparse matrix class. + + The class represents multi-dimensional sparse numerical arrays. Such a sparse array can store elements + of any type that cv::Mat is able to store. "Sparse" means that only non-zero elements + are stored (though, as a result of some operations on a sparse matrix, some of its stored elements + can actually become 0. It's user responsibility to detect such elements and delete them using cv::SparseMat::erase(). + The non-zero elements are stored in a hash table that grows when it's filled enough, + so that the search time remains O(1) in average. Elements can be accessed using the following methods: + +
    +
  1. Query operations: cv::SparseMat::ptr() and the higher-level cv::SparseMat::ref(), + cv::SparseMat::value() and cv::SparseMat::find(), for example: + \code + const int dims = 5; + int size[] = {10, 10, 10, 10, 10}; + SparseMat sparse_mat(dims, size, CV_32F); + for(int i = 0; i < 1000; i++) + { + int idx[dims]; + for(int k = 0; k < dims; k++) + idx[k] = rand()%sparse_mat.size(k); + sparse_mat.ref<float>(idx) += 1.f; + } + \endcode + +
  2. Sparse matrix iterators. Like cv::Mat iterators, the sparse matrix iterators are STL-style; + that is, the iteration is done as follows: + \code + // prints elements of a sparse floating-point matrix and the sum of elements. + SparseMatConstIterator_<float> + it = sparse_mat.begin<float>(), + it_end = sparse_mat.end<float>(); + double s = 0; + int dims = sparse_mat.dims(); + for(; it != it_end; ++it) + { + // print element indices and the element value + const SparseMat::Node* n = it.node(); + printf("("); + for(int i = 0; i < dims; i++) + printf("%3d%c", n->idx[i], i < dims-1 ? ',' : ')'); + printf(": %f\n", *it); + s += *it; + } + printf("Element sum is %g\n", s); + \endcode + If you run this loop, you will notice that the elements are not enumerated + in any particular logical order (lexicographical, etc.); + they come in the same order as they are stored in the hash table, i.e. semi-randomly. + + You may collect pointers to the nodes and sort them to get the proper ordering. + Note, however, that pointers to the nodes may become invalid when you add more + elements to the matrix; this is because of possible buffer reallocation. + +
  3. A combination of the above 2 methods when you need to process 2 or more sparse + matrices simultaneously, e.g. this is how you can compute the unnormalized + cross-correlation of 2 floating-point sparse matrices: + \code + double crossCorr(const SparseMat& a, const SparseMat& b) + { + const SparseMat *_a = &a, *_b = &b; + // if b contains fewer elements than a, + // it's faster to iterate through b + if(_a->nzcount() > _b->nzcount()) + std::swap(_a, _b); + SparseMatConstIterator_<float> it = _a->begin<float>(), + it_end = _a->end<float>(); + double ccorr = 0; + for(; it != it_end; ++it) + { + // take the next element from the first matrix + float avalue = *it; + const SparseMat::Node* anode = it.node(); + // and try to find an element with the same index in the second matrix. + // since the hash value depends only on the element index, + // we reuse the hash value stored in the node + size_t hashval = anode->hashval; + float bvalue = _b->value<float>(anode->idx, &hashval); + ccorr += avalue*bvalue; + } + return ccorr; + } + \endcode + 
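+
+  As a quick orientation, here is a minimal end-to-end sketch combining the access patterns above
+  (the CV_32F element type, the sizes and the variable names are illustrative assumptions only):
+
+  \code
+  // build a 3x3 sparse matrix with two non-zero elements
+  int sz[] = {3, 3};
+  SparseMat sm(2, sz, CV_32F);
+  sm.ref<float>(0, 1) = 2.f;                // ref() creates the element if it is missing
+  sm.ref<float>(2, 2) = 5.f;
+
+  // value() returns 0 for a missing element, find() returns NULL for it
+  float v = sm.value<float>(1, 1);          // v == 0.f
+  const float* p = sm.find<float>(2, 2);    // *p == 5.f
+  printf("v=%g, *p=%g\n", v, p ? *p : -1.f);
+
+  // iterate over the stored (non-zero) elements only
+  double s = 0;
+  SparseMatConstIterator_<float> it = sm.begin<float>(), it_end = sm.end<float>();
+  for(; it != it_end; ++it)
+      s += *it;                             // s == 7
+  \endcode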
+*/ +class CV_EXPORTS SparseMat +{ +public: + typedef SparseMatIterator iterator; + typedef SparseMatConstIterator const_iterator; + + //! the sparse matrix header + struct CV_EXPORTS Hdr + { + Hdr(int _dims, const int* _sizes, int _type); + void clear(); + int refcount; + int dims; + int valueOffset; + size_t nodeSize; + size_t nodeCount; + size_t freeList; + vector pool; + vector hashtab; + int size[CV_MAX_DIM]; + }; + + //! sparse matrix node - element of a hash table + struct CV_EXPORTS Node + { + //! hash value + size_t hashval; + //! index of the next node in the same hash table entry + size_t next; + //! index of the matrix element + int idx[CV_MAX_DIM]; + }; + + //! default constructor + SparseMat(); + //! creates matrix of the specified size and type + SparseMat(int dims, const int* _sizes, int _type); + //! copy constructor + SparseMat(const SparseMat& m); + //! converts dense 2d matrix to the sparse form + /*! + \param m the input matrix + */ + explicit SparseMat(const Mat& m); + //! converts old-style sparse matrix to the new-style. All the data is copied + SparseMat(const CvSparseMat* m); + //! the destructor + ~SparseMat(); + + //! assignment operator. This is O(1) operation, i.e. no data is copied + SparseMat& operator = (const SparseMat& m); + //! equivalent to the corresponding constructor + SparseMat& operator = (const Mat& m); + + //! creates full copy of the matrix + SparseMat clone() const; + + //! copies all the data to the destination matrix. All the previous content of m is erased + void copyTo( SparseMat& m ) const; + //! converts sparse matrix to dense matrix. + void copyTo( Mat& m ) const; + //! multiplies all the matrix elements by the specified scale factor alpha and converts the results to the specified data type + void convertTo( SparseMat& m, int rtype, double alpha=1 ) const; + //! converts sparse matrix to dense n-dim matrix with optional type conversion and scaling. + /*! + \param rtype The output matrix data type. When it is =-1, the output array will have the same data type as (*this) + \param alpha The scale factor + \param beta The optional delta added to the scaled values before the conversion + */ + void convertTo( Mat& m, int rtype, double alpha=1, double beta=0 ) const; + + // not used now + void assignTo( SparseMat& m, int type=-1 ) const; + + //! reallocates sparse matrix. + /*! + If the matrix already had the proper size and type, + it is simply cleared with clear(), otherwise, + the old matrix is released (using release()) and the new one is allocated. + */ + void create(int dims, const int* _sizes, int _type); + //! sets all the sparse matrix elements to 0, which means clearing the hash table. + void clear(); + //! manually increments the reference counter to the header. + void addref(); + // decrements the header reference counter. When the counter reaches 0, the header and all the underlying data are deallocated. + void release(); + + //! converts sparse matrix to the old-style representation; all the elements are copied. + operator CvSparseMat*() const; + //! returns the size of each element in bytes (not including the overhead - the space occupied by SparseMat::Node elements) + size_t elemSize() const; + //! returns elemSize()/channels() + size_t elemSize1() const; + + //! returns type of sparse matrix elements + int type() const; + //! returns the depth of sparse matrix elements + int depth() const; + //! returns the number of channels + int channels() const; + + //! 
returns the array of sizes, or NULL if the matrix is not allocated + const int* size() const; + //! returns the size of i-th matrix dimension (or 0) + int size(int i) const; + //! returns the matrix dimensionality + int dims() const; + //! returns the number of non-zero elements (=the number of hash table nodes) + size_t nzcount() const; + + //! computes the element hash value (1D case) + size_t hash(int i0) const; + //! computes the element hash value (2D case) + size_t hash(int i0, int i1) const; + //! computes the element hash value (3D case) + size_t hash(int i0, int i1, int i2) const; + //! computes the element hash value (nD case) + size_t hash(const int* idx) const; + + //@{ + /*! + specialized variants for 1D, 2D, 3D cases and the generic_type one for n-D case. + + return pointer to the matrix element. +
    +
  • if the element is there (it's non-zero), the pointer to it is returned +
  • if it's not there and createMissing=false, a NULL pointer is returned +
  • if it's not there and createMissing=true, then the new element + is created and initialized with 0, and a pointer to it is returned +
  • if the optional hashval pointer is not NULL, the element hash value is + not computed, but *hashval is taken instead. +
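+
+  For instance, a minimal sketch of the last point (the CV_32F element type and the variable
+  names are illustrative assumptions): the hash value is computed once and then reused for both
+  the query and the insertion, so the index is hashed only once.
+
+  \code
+  int sz[] = {10, 10};
+  SparseMat sm(2, sz, CV_32F);
+  size_t hv = sm.hash(3, 7);                  // compute the hash value once
+  if( !sm.ptr(3, 7, false, &hv) )             // element does not exist yet -> NULL
+      *(float*)sm.ptr(3, 7, true, &hv) = 1.f; // create it and set its value
+  \endcode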
+ */ + //! returns pointer to the specified element (1D case) + uchar* ptr(int i0, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (2D case) + uchar* ptr(int i0, int i1, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (3D case) + uchar* ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (nD case) + uchar* ptr(const int* idx, bool createMissing, size_t* hashval=0); + //@} + + //@{ + /*! + return read-write reference to the specified sparse matrix element. + + ref<_Tp>(i0,...[,hashval]) is equivalent to *(_Tp*)ptr(i0,...,true[,hashval]). + The methods always return a valid reference. + If the element did not exist, it is created and initialiazed with 0. + */ + //! returns reference to the specified element (1D case) + template _Tp& ref(int i0, size_t* hashval=0); + //! returns reference to the specified element (2D case) + template _Tp& ref(int i0, int i1, size_t* hashval=0); + //! returns reference to the specified element (3D case) + template _Tp& ref(int i0, int i1, int i2, size_t* hashval=0); + //! returns reference to the specified element (nD case) + template _Tp& ref(const int* idx, size_t* hashval=0); + //@} + + //@{ + /*! + return value of the specified sparse matrix element. + + value<_Tp>(i0,...[,hashval]) is equivalent + + \code + { const _Tp* p = find<_Tp>(i0,...[,hashval]); return p ? *p : _Tp(); } + \endcode + + That is, if the element did not exist, the methods return 0. + */ + //! returns value of the specified element (1D case) + template _Tp value(int i0, size_t* hashval=0) const; + //! returns value of the specified element (2D case) + template _Tp value(int i0, int i1, size_t* hashval=0) const; + //! returns value of the specified element (3D case) + template _Tp value(int i0, int i1, int i2, size_t* hashval=0) const; + //! returns value of the specified element (nD case) + template _Tp value(const int* idx, size_t* hashval=0) const; + //@} + + //@{ + /*! + Return pointer to the specified sparse matrix element if it exists + + find<_Tp>(i0,...[,hashval]) is equivalent to (_const Tp*)ptr(i0,...false[,hashval]). + + If the specified element does not exist, the methods return NULL. + */ + //! returns pointer to the specified element (1D case) + template const _Tp* find(int i0, size_t* hashval=0) const; + //! returns pointer to the specified element (2D case) + template const _Tp* find(int i0, int i1, size_t* hashval=0) const; + //! returns pointer to the specified element (3D case) + template const _Tp* find(int i0, int i1, int i2, size_t* hashval=0) const; + //! returns pointer to the specified element (nD case) + template const _Tp* find(const int* idx, size_t* hashval=0) const; + + //! erases the specified element (2D case) + void erase(int i0, int i1, size_t* hashval=0); + //! erases the specified element (3D case) + void erase(int i0, int i1, int i2, size_t* hashval=0); + //! erases the specified element (nD case) + void erase(const int* idx, size_t* hashval=0); + + //@{ + /*! + return the sparse matrix iterator pointing to the first sparse matrix element + */ + //! returns the sparse matrix iterator at the matrix beginning + SparseMatIterator begin(); + //! returns the sparse matrix iterator at the matrix beginning + template SparseMatIterator_<_Tp> begin(); + //! returns the read-only sparse matrix iterator at the matrix beginning + SparseMatConstIterator begin() const; + //! 
returns the read-only sparse matrix iterator at the matrix beginning + template SparseMatConstIterator_<_Tp> begin() const; + //@} + /*! + return the sparse matrix iterator pointing to the element following the last sparse matrix element + */ + //! returns the sparse matrix iterator at the matrix end + SparseMatIterator end(); + //! returns the read-only sparse matrix iterator at the matrix end + SparseMatConstIterator end() const; + //! returns the typed sparse matrix iterator at the matrix end + template SparseMatIterator_<_Tp> end(); + //! returns the typed read-only sparse matrix iterator at the matrix end + template SparseMatConstIterator_<_Tp> end() const; + + //! returns the value stored in the sparse martix node + template _Tp& value(Node* n); + //! returns the value stored in the sparse martix node + template const _Tp& value(const Node* n) const; + + ////////////// some internal-use methods /////////////// + Node* node(size_t nidx); + const Node* node(size_t nidx) const; + + uchar* newNode(const int* idx, size_t hashval); + void removeNode(size_t hidx, size_t nidx, size_t previdx); + void resizeHashTab(size_t newsize); + + enum { MAGIC_VAL=0x42FD0000, MAX_DIM=CV_MAX_DIM, HASH_SCALE=0x5bd1e995, HASH_BIT=0x80000000 }; + + int flags; + Hdr* hdr; +}; + +//! finds global minimum and maximum sparse array elements and returns their values and their locations +CV_EXPORTS void minMaxLoc(const SparseMat& a, double* minVal, + double* maxVal, int* minIdx=0, int* maxIdx=0); +//! computes norm of a sparse matrix +CV_EXPORTS double norm( const SparseMat& src, int normType ); +//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values +CV_EXPORTS void normalize( const SparseMat& src, SparseMat& dst, double alpha, int normType ); + +/*! + Read-Only Sparse Matrix Iterator. + Here is how to use the iterator to compute the sum of floating-point sparse matrix elements: + + \code + SparseMatConstIterator it = m.begin(), it_end = m.end(); + double s = 0; + CV_Assert( m.type() == CV_32F ); + for( ; it != it_end; ++it ) + s += it.value(); + \endcode +*/ +class CV_EXPORTS SparseMatConstIterator +{ +public: + //! the default constructor + SparseMatConstIterator(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatConstIterator(const SparseMat* _m); + //! the copy constructor + SparseMatConstIterator(const SparseMatConstIterator& it); + + //! the assignment operator + SparseMatConstIterator& operator = (const SparseMatConstIterator& it); + + //! template method returning the current matrix element + template const _Tp& value() const; + //! returns the current node of the sparse matrix. it.node->idx is the current element index + const SparseMat::Node* node() const; + + //! moves iterator to the previous element + SparseMatConstIterator& operator --(); + //! moves iterator to the previous element + SparseMatConstIterator operator --(int); + //! moves iterator to the next element + SparseMatConstIterator& operator ++(); + //! moves iterator to the next element + SparseMatConstIterator operator ++(int); + + //! moves iterator to the element after the last element + void seekEnd(); + + const SparseMat* m; + size_t hashidx; + uchar* ptr; +}; + +/*! + Read-write Sparse Matrix Iterator + + The class is similar to cv::SparseMatConstIterator, + but can be used for in-place modification of the matrix elements. 
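+
+  For example, a minimal sketch (assuming sm is a CV_32F SparseMat) that scales every stored
+  element in place:
+
+  \code
+  SparseMatIterator it = sm.begin(), it_end = sm.end();
+  for(; it != it_end; ++it)
+      it.value<float>() *= 0.5f;            // read-write access to the current element
+  \endcode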
+*/ +class CV_EXPORTS SparseMatIterator : public SparseMatConstIterator +{ +public: + //! the default constructor + SparseMatIterator(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatIterator(SparseMat* _m); + //! the full constructor setting the iterator to the specified sparse matrix element + SparseMatIterator(SparseMat* _m, const int* idx); + //! the copy constructor + SparseMatIterator(const SparseMatIterator& it); + + //! the assignment operator + SparseMatIterator& operator = (const SparseMatIterator& it); + //! returns read-write reference to the current sparse matrix element + template _Tp& value() const; + //! returns pointer to the current sparse matrix node. it.node->idx is the index of the current element (do not modify it!) + SparseMat::Node* node() const; + + //! moves iterator to the next element + SparseMatIterator& operator ++(); + //! moves iterator to the next element + SparseMatIterator operator ++(int); +}; + +/*! + The Template Sparse Matrix class derived from cv::SparseMat + + The class provides slightly more convenient operations for accessing elements. + + \code + SparseMat m; + ... + SparseMat_ m_ = (SparseMat_&)m; + m_.ref(1)++; // equivalent to m.ref(1)++; + m_.ref(2) += m_(3); // equivalent to m.ref(2) += m.value(3); + \endcode +*/ +template class SparseMat_ : public SparseMat +{ +public: + typedef SparseMatIterator_<_Tp> iterator; + typedef SparseMatConstIterator_<_Tp> const_iterator; + + //! the default constructor + SparseMat_(); + //! the full constructor equivelent to SparseMat(dims, _sizes, DataType<_Tp>::type) + SparseMat_(int dims, const int* _sizes); + //! the copy constructor. If DataType<_Tp>.type != m.type(), the m elements are converted + SparseMat_(const SparseMat& m); + //! the copy constructor. This is O(1) operation - no data is copied + SparseMat_(const SparseMat_& m); + //! converts dense matrix to the sparse form + SparseMat_(const Mat& m); + //! converts the old-style sparse matrix to the C++ class. All the elements are copied + SparseMat_(const CvSparseMat* m); + //! the assignment operator. If DataType<_Tp>.type != m.type(), the m elements are converted + SparseMat_& operator = (const SparseMat& m); + //! the assignment operator. This is O(1) operation - no data is copied + SparseMat_& operator = (const SparseMat_& m); + //! converts dense matrix to the sparse form + SparseMat_& operator = (const Mat& m); + + //! makes full copy of the matrix. All the elements are duplicated + SparseMat_ clone() const; + //! equivalent to cv::SparseMat::create(dims, _sizes, DataType<_Tp>::type) + void create(int dims, const int* _sizes); + //! converts sparse matrix to the old-style CvSparseMat. All the elements are copied + operator CvSparseMat*() const; + + //! returns type of the matrix elements + int type() const; + //! returns depth of the matrix elements + int depth() const; + //! returns the number of channels in each matrix element + int channels() const; + + //! equivalent to SparseMat::ref<_Tp>(i0, hashval) + _Tp& ref(int i0, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(i0, i1, hashval) + _Tp& ref(int i0, int i1, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(i0, i1, i2, hashval) + _Tp& ref(int i0, int i1, int i2, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(idx, hashval) + _Tp& ref(const int* idx, size_t* hashval=0); + + //! equivalent to SparseMat::value<_Tp>(i0, hashval) + _Tp operator()(int i0, size_t* hashval=0) const; + //! 
equivalent to SparseMat::value<_Tp>(i0, i1, hashval) + _Tp operator()(int i0, int i1, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(i0, i1, i2, hashval) + _Tp operator()(int i0, int i1, int i2, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(idx, hashval) + _Tp operator()(const int* idx, size_t* hashval=0) const; + + //! returns sparse matrix iterator pointing to the first sparse matrix element + SparseMatIterator_<_Tp> begin(); + //! returns read-only sparse matrix iterator pointing to the first sparse matrix element + SparseMatConstIterator_<_Tp> begin() const; + //! returns sparse matrix iterator pointing to the element following the last sparse matrix element + SparseMatIterator_<_Tp> end(); + //! returns read-only sparse matrix iterator pointing to the element following the last sparse matrix element + SparseMatConstIterator_<_Tp> end() const; +}; + + +/*! + Template Read-Only Sparse Matrix Iterator Class. + + This is the derived from SparseMatConstIterator class that + introduces more convenient operator *() for accessing the current element. +*/ +template class SparseMatConstIterator_ : public SparseMatConstIterator +{ +public: + typedef std::forward_iterator_tag iterator_category; + + //! the default constructor + SparseMatConstIterator_(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatConstIterator_(const SparseMat_<_Tp>* _m); + SparseMatConstIterator_(const SparseMat* _m); + //! the copy constructor + SparseMatConstIterator_(const SparseMatConstIterator_& it); + + //! the assignment operator + SparseMatConstIterator_& operator = (const SparseMatConstIterator_& it); + //! the element access operator + const _Tp& operator *() const; + + //! moves iterator to the next element + SparseMatConstIterator_& operator ++(); + //! moves iterator to the next element + SparseMatConstIterator_ operator ++(int); +}; + +/*! + Template Read-Write Sparse Matrix Iterator Class. + + This is the derived from cv::SparseMatConstIterator_ class that + introduces more convenient operator *() for accessing the current element. +*/ +template class SparseMatIterator_ : public SparseMatConstIterator_<_Tp> +{ +public: + typedef std::forward_iterator_tag iterator_category; + + //! the default constructor + SparseMatIterator_(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatIterator_(SparseMat_<_Tp>* _m); + SparseMatIterator_(SparseMat* _m); + //! the copy constructor + SparseMatIterator_(const SparseMatIterator_& it); + + //! the assignment operator + SparseMatIterator_& operator = (const SparseMatIterator_& it); + //! returns the reference to the current element + _Tp& operator *() const; + + //! moves the iterator to the next element + SparseMatIterator_& operator ++(); + //! moves the iterator to the next element + SparseMatIterator_ operator ++(int); +}; + +//////////////////// Fast Nearest-Neighbor Search Structure //////////////////// + +/*! + Fast Nearest Neighbor Search Class. + + The class implements D. Lowe BBF (Best-Bin-First) algorithm for the last + approximate (or accurate) nearest neighbor search in multi-dimensional spaces. + + First, a set of vectors is passed to KDTree::KDTree() constructor + or KDTree::build() method, where it is reordered. + + Then arbitrary vectors can be passed to KDTree::findNearest() methods, which + find the K nearest neighbors among the vectors from the initial set. 
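+
+  The input vectors are packed into a single matrix with one vector per row; for instance, a
+  minimal setup for the snippet below (the sizes and the names points/query_vec are illustrative
+  assumptions):
+
+  \code
+  // 1000 random 2D points, one point per row
+  Mat points(1000, 2, CV_32F);
+  randu(points, Scalar::all(0), Scalar::all(1));
+  Mat query_vec = points.row(0).clone();    // a query vector of the same dimensionality
+  \endcode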
+ The user can balance between the speed and accuracy of the search by varying Emax + parameter, which is the number of leaves that the algorithm checks. + The larger parameter values yield more accurate results at the expense of lower processing speed. + + \code + KDTree T(points, false); + const int K = 3, Emax = INT_MAX; + int idx[K]; + float dist[K]; + T.findNearest(query_vec, K, Emax, idx, 0, dist); + CV_Assert(dist[0] <= dist[1] && dist[1] <= dist[2]); + \endcode +*/ +class CV_EXPORTS_W KDTree +{ +public: + /*! + The node of the search tree. + */ + struct Node + { + Node() : idx(-1), left(-1), right(-1), boundary(0.f) {} + Node(int _idx, int _left, int _right, float _boundary) + : idx(_idx), left(_left), right(_right), boundary(_boundary) {} + //! split dimension; >=0 for nodes (dim), < 0 for leaves (index of the point) + int idx; + //! node indices of the left and the right branches + int left, right; + //! go to the left if query_vec[node.idx]<=node.boundary, otherwise go to the right + float boundary; + }; + + //! the default constructor + CV_WRAP KDTree(); + //! the full constructor that builds the search tree + CV_WRAP KDTree(InputArray points, bool copyAndReorderPoints=false); + //! the full constructor that builds the search tree + CV_WRAP KDTree(InputArray points, InputArray _labels, + bool copyAndReorderPoints=false); + //! builds the search tree + CV_WRAP void build(InputArray points, bool copyAndReorderPoints=false); + //! builds the search tree + CV_WRAP void build(InputArray points, InputArray labels, + bool copyAndReorderPoints=false); + //! finds the K nearest neighbors of "vec" while looking at Emax (at most) leaves + CV_WRAP int findNearest(InputArray vec, int K, int Emax, + OutputArray neighborsIdx, + OutputArray neighbors=noArray(), + OutputArray dist=noArray(), + OutputArray labels=noArray()) const; + //! finds all the points from the initial set that belong to the specified box + CV_WRAP void findOrthoRange(InputArray minBounds, + InputArray maxBounds, + OutputArray neighborsIdx, + OutputArray neighbors=noArray(), + OutputArray labels=noArray()) const; + //! returns vectors with the specified indices + CV_WRAP void getPoints(InputArray idx, OutputArray pts, + OutputArray labels=noArray()) const; + //! return a vector with the specified index + const float* getPoint(int ptidx, int* label=0) const; + //! returns the search space dimensionality + CV_WRAP int dims() const; + + vector nodes; //!< all the tree nodes + CV_PROP Mat points; //!< all the points. It can be a reordered copy of the input vector set or the original vector set. + CV_PROP vector labels; //!< the parallel array of labels. + CV_PROP int maxDepth; //!< maximum depth of the search tree. Do not modify it + CV_PROP_RW int normType; //!< type of the distance (cv::NORM_L1 or cv::NORM_L2) used for search. Initially set to cv::NORM_L2, but you can modify it +}; + +//////////////////////////////////////// XML & YAML I/O //////////////////////////////////// + +class CV_EXPORTS FileNode; + +/*! + XML/YAML File Storage Class. + + The class describes an object associated with XML or YAML file. + It can be used to store data to such a file or read and decode the data. + + The storage is organized as a tree of nested sequences (or lists) and mappings. + Sequence is a heterogenious array, which elements are accessed by indices or sequentially using an iterator. + Mapping is analogue of std::map or C structure, which elements are accessed by names. + The most top level structure is a mapping. 
+ Leaves of the file storage tree are integers, floating-point numbers and text strings. + + For example, the following code: + + \code + // open file storage for writing. Type of the file is determined from the extension + FileStorage fs("test.yml", FileStorage::WRITE); + fs << "test_int" << 5 << "test_real" << 3.1 << "test_string" << "ABCDEFGH"; + fs << "test_mat" << Mat::eye(3,3,CV_32F); + + fs << "test_list" << "[" << 0.0000000000001 << 2 << CV_PI << -3435345 << "2-502 2-029 3egegeg" << + "{:" << "month" << 12 << "day" << 31 << "year" << 1969 << "}" << "]"; + fs << "test_map" << "{" << "x" << 1 << "y" << 2 << "width" << 100 << "height" << 200 << "lbp" << "[:"; + + const uchar arr[] = {0, 1, 1, 0, 1, 1, 0, 1}; + fs.writeRaw("u", arr, (int)(sizeof(arr)/sizeof(arr[0]))); + + fs << "]" << "}"; + \endcode + + will produce the following file: + + \verbatim + %YAML:1.0 + test_int: 5 + test_real: 3.1000000000000001e+00 + test_string: ABCDEFGH + test_mat: !!opencv-matrix + rows: 3 + cols: 3 + dt: f + data: [ 1., 0., 0., 0., 1., 0., 0., 0., 1. ] + test_list: + - 1.0000000000000000e-13 + - 2 + - 3.1415926535897931e+00 + - -3435345 + - "2-502 2-029 3egegeg" + - { month:12, day:31, year:1969 } + test_map: + x: 1 + y: 2 + width: 100 + height: 200 + lbp: [ 0, 1, 1, 0, 1, 1, 0, 1 ] + \endverbatim + + and to read the file above, the following code can be used: + + \code + // open file storage for reading. + // Type of the file is determined from the content, not the extension + FileStorage fs("test.yml", FileStorage::READ); + int test_int = (int)fs["test_int"]; + double test_real = (double)fs["test_real"]; + string test_string = (string)fs["test_string"]; + + Mat M; + fs["test_mat"] >> M; + + FileNode tl = fs["test_list"]; + CV_Assert(tl.type() == FileNode::SEQ && tl.size() == 6); + double tl0 = (double)tl[0]; + int tl1 = (int)tl[1]; + double tl2 = (double)tl[2]; + int tl3 = (int)tl[3]; + string tl4 = (string)tl[4]; + CV_Assert(tl[5].type() == FileNode::MAP && tl[5].size() == 3); + + int month = (int)tl[5]["month"]; + int day = (int)tl[5]["day"]; + int year = (int)tl[5]["year"]; + + FileNode tm = fs["test_map"]; + + int x = (int)tm["x"]; + int y = (int)tm["y"]; + int width = (int)tm["width"]; + int height = (int)tm["height"]; + + int lbp_val = 0; + FileNodeIterator it = tm["lbp"].begin(); + + for(int k = 0; k < 8; k++, ++it) + lbp_val |= ((int)*it) << k; + \endcode +*/ +class CV_EXPORTS_W FileStorage +{ +public: + //! file storage mode + enum + { + READ=0, //! read mode + WRITE=1, //! write mode + APPEND=2, //! append mode + MEMORY=4, + FORMAT_MASK=(7<<3), + FORMAT_AUTO=0, + FORMAT_XML=(1<<3), + FORMAT_YAML=(2<<3) + }; + enum + { + UNDEFINED=0, + VALUE_EXPECTED=1, + NAME_EXPECTED=2, + INSIDE_MAP=4 + }; + //! the default constructor + CV_WRAP FileStorage(); + //! the full constructor that opens file storage for reading or writing + CV_WRAP FileStorage(const string& source, int flags, const string& encoding=string()); + //! the constructor that takes pointer to the C FileStorage structure + FileStorage(CvFileStorage* fs); + //! the destructor. calls release() + virtual ~FileStorage(); + + //! opens file storage for reading or writing. The previous storage is closed with release() + CV_WRAP virtual bool open(const string& filename, int flags, const string& encoding=string()); + //! returns true if the object is associated with currently opened file. + CV_WRAP virtual bool isOpened() const; + //! closes the file and releases all the memory buffers + CV_WRAP virtual void release(); + //! 
closes the file, releases all the memory buffers and returns the text string + CV_WRAP string releaseAndGetString(); + + //! returns the first element of the top-level mapping + CV_WRAP FileNode getFirstTopLevelNode() const; + //! returns the top-level mapping. YAML supports multiple streams + CV_WRAP FileNode root(int streamidx=0) const; + //! returns the specified element of the top-level mapping + FileNode operator[](const string& nodename) const; + //! returns the specified element of the top-level mapping + CV_WRAP FileNode operator[](const char* nodename) const; + + //! returns pointer to the underlying C FileStorage structure + CvFileStorage* operator *() { return fs; } + //! returns pointer to the underlying C FileStorage structure + const CvFileStorage* operator *() const { return fs; } + //! writes one or more numbers of the specified format to the currently written structure + void writeRaw( const string& fmt, const uchar* vec, size_t len ); + //! writes the registered C structure (CvMat, CvMatND, CvSeq). See cvWrite() + void writeObj( const string& name, const void* obj ); + + //! returns the normalized object name for the specified file name + static string getDefaultObjectName(const string& filename); + + Ptr fs; //!< the underlying C FileStorage structure + string elname; //!< the currently written element + vector structs; //!< the stack of written structures + int state; //!< the writer state +}; + +class CV_EXPORTS FileNodeIterator; + +/*! + File Storage Node class + + The node is used to store each and every element of the file storage opened for reading - + from the primitive objects, such as numbers and text strings, to the complex nodes: + sequences, mappings and the registered objects. + + Note that file nodes are only used for navigating file storages opened for reading. + When a file storage is opened for writing, no data is stored in memory after it is written. +*/ +class CV_EXPORTS_W_SIMPLE FileNode +{ +public: + //! type of the file storage node + enum + { + NONE=0, //!< empty node + INT=1, //!< an integer + REAL=2, //!< floating-point number + FLOAT=REAL, //!< synonym or REAL + STR=3, //!< text string in UTF-8 encoding + STRING=STR, //!< synonym for STR + REF=4, //!< integer of size size_t. Typically used for storing complex dynamic structures where some elements reference the others + SEQ=5, //!< sequence + MAP=6, //!< mapping + TYPE_MASK=7, + FLOW=8, //!< compact representation of a sequence or mapping. Used only by YAML writer + USER=16, //!< a registered object (e.g. a matrix) + EMPTY=32, //!< empty structure (sequence or mapping) + NAMED=64 //!< the node has a name (i.e. it is element of a mapping) + }; + //! the default constructor + CV_WRAP FileNode(); + //! the full constructor wrapping CvFileNode structure. + FileNode(const CvFileStorage* fs, const CvFileNode* node); + //! the copy constructor + FileNode(const FileNode& node); + //! returns element of a mapping node + FileNode operator[](const string& nodename) const; + //! returns element of a mapping node + CV_WRAP FileNode operator[](const char* nodename) const; + //! returns element of a sequence node + CV_WRAP FileNode operator[](int i) const; + //! returns type of the node + CV_WRAP int type() const; + + //! returns true if the node is empty + CV_WRAP bool empty() const; + //! returns true if the node is a "none" object + CV_WRAP bool isNone() const; + //! returns true if the node is a sequence + CV_WRAP bool isSeq() const; + //! 
returns true if the node is a mapping + CV_WRAP bool isMap() const; + //! returns true if the node is an integer + CV_WRAP bool isInt() const; + //! returns true if the node is a floating-point number + CV_WRAP bool isReal() const; + //! returns true if the node is a text string + CV_WRAP bool isString() const; + //! returns true if the node has a name + CV_WRAP bool isNamed() const; + //! returns the node name or an empty string if the node is nameless + CV_WRAP string name() const; + //! returns the number of elements in the node, if it is a sequence or mapping, or 1 otherwise. + CV_WRAP size_t size() const; + //! returns the node content as an integer. If the node stores floating-point number, it is rounded. + operator int() const; + //! returns the node content as float + operator float() const; + //! returns the node content as double + operator double() const; + //! returns the node content as text string + operator string() const; + + //! returns pointer to the underlying file node + CvFileNode* operator *(); + //! returns pointer to the underlying file node + const CvFileNode* operator* () const; + + //! returns iterator pointing to the first node element + FileNodeIterator begin() const; + //! returns iterator pointing to the element following the last node element + FileNodeIterator end() const; + + //! reads node elements to the buffer with the specified format + void readRaw( const string& fmt, uchar* vec, size_t len ) const; + //! reads the registered object and returns pointer to it + void* readObj() const; + + // do not use wrapper pointer classes for better efficiency + const CvFileStorage* fs; + const CvFileNode* node; +}; + + +/*! + File Node Iterator + + The class is used for iterating sequences (usually) and mappings. + */ +class CV_EXPORTS FileNodeIterator +{ +public: + //! the default constructor + FileNodeIterator(); + //! the full constructor set to the ofs-th element of the node + FileNodeIterator(const CvFileStorage* fs, const CvFileNode* node, size_t ofs=0); + //! the copy constructor + FileNodeIterator(const FileNodeIterator& it); + //! returns the currently observed element + FileNode operator *() const; + //! accesses the currently observed element methods + FileNode operator ->() const; + + //! moves iterator to the next node + FileNodeIterator& operator ++ (); + //! moves iterator to the next node + FileNodeIterator operator ++ (int); + //! moves iterator to the previous node + FileNodeIterator& operator -- (); + //! moves iterator to the previous node + FileNodeIterator operator -- (int); + //! moves iterator forward by the specified offset (possibly negative) + FileNodeIterator& operator += (int ofs); + //! moves iterator backward by the specified offset (possibly negative) + FileNodeIterator& operator -= (int ofs); + + //! reads the next maxCount elements (or less, if the sequence/mapping last element occurs earlier) to the buffer with the specified format + FileNodeIterator& readRaw( const string& fmt, uchar* vec, + size_t maxCount=(size_t)INT_MAX ); + + const CvFileStorage* fs; + const CvFileNode* container; + CvSeqReader reader; + size_t remaining; +}; + +////////////// convenient wrappers for operating old-style dynamic structures ////////////// + +template class SeqIterator; + +typedef Ptr MemStorage; + +/*! + Template Sequence Class derived from CvSeq + + The class provides more convenient access to sequence elements, + STL-style operations and iterators. + + \note The class is targeted for simple data types, + i.e. 
no constructors or destructors + are called for the sequence elements. +*/ +template class Seq +{ +public: + typedef SeqIterator<_Tp> iterator; + typedef SeqIterator<_Tp> const_iterator; + + //! the default constructor + Seq(); + //! the constructor for wrapping CvSeq structure. The real element type in CvSeq should match _Tp. + Seq(const CvSeq* seq); + //! creates the empty sequence that resides in the specified storage + Seq(MemStorage& storage, int headerSize = sizeof(CvSeq)); + //! returns read-write reference to the specified element + _Tp& operator [](int idx); + //! returns read-only reference to the specified element + const _Tp& operator[](int idx) const; + //! returns iterator pointing to the beginning of the sequence + SeqIterator<_Tp> begin() const; + //! returns iterator pointing to the element following the last sequence element + SeqIterator<_Tp> end() const; + //! returns the number of elements in the sequence + size_t size() const; + //! returns the type of sequence elements (CV_8UC1 ... CV_64FC(CV_CN_MAX) ...) + int type() const; + //! returns the depth of sequence elements (CV_8U ... CV_64F) + int depth() const; + //! returns the number of channels in each sequence element + int channels() const; + //! returns the size of each sequence element + size_t elemSize() const; + //! returns index of the specified sequence element + size_t index(const _Tp& elem) const; + //! appends the specified element to the end of the sequence + void push_back(const _Tp& elem); + //! appends the specified element to the front of the sequence + void push_front(const _Tp& elem); + //! appends zero or more elements to the end of the sequence + void push_back(const _Tp* elems, size_t count); + //! appends zero or more elements to the front of the sequence + void push_front(const _Tp* elems, size_t count); + //! inserts the specified element to the specified position + void insert(int idx, const _Tp& elem); + //! inserts zero or more elements to the specified position + void insert(int idx, const _Tp* elems, size_t count); + //! removes element at the specified position + void remove(int idx); + //! removes the specified subsequence + void remove(const Range& r); + + //! returns reference to the first sequence element + _Tp& front(); + //! returns read-only reference to the first sequence element + const _Tp& front() const; + //! returns reference to the last sequence element + _Tp& back(); + //! returns read-only reference to the last sequence element + const _Tp& back() const; + //! returns true iff the sequence contains no elements + bool empty() const; + + //! removes all the elements from the sequence + void clear(); + //! removes the first element from the sequence + void pop_front(); + //! removes the last element from the sequence + void pop_back(); + //! removes zero or more elements from the beginning of the sequence + void pop_front(_Tp* elems, size_t count); + //! removes zero or more elements from the end of the sequence + void pop_back(_Tp* elems, size_t count); + + //! copies the whole sequence or the sequence slice to the specified vector + void copyTo(vector<_Tp>& vec, const Range& range=Range::all()) const; + //! returns the vector containing all the sequence elements + operator vector<_Tp>() const; + + CvSeq* seq; +}; + + +/*! + STL-style Sequence Iterator inherited from the CvSeqReader structure +*/ +template class SeqIterator : public CvSeqReader +{ +public: + //! the default constructor + SeqIterator(); + //! 
the constructor setting the iterator to the beginning or to the end of the sequence + SeqIterator(const Seq<_Tp>& seq, bool seekEnd=false); + //! positions the iterator within the sequence + void seek(size_t pos); + //! reports the current iterator position + size_t tell() const; + //! returns reference to the current sequence element + _Tp& operator *(); + //! returns read-only reference to the current sequence element + const _Tp& operator *() const; + //! moves iterator to the next sequence element + SeqIterator& operator ++(); + //! moves iterator to the next sequence element + SeqIterator operator ++(int) const; + //! moves iterator to the previous sequence element + SeqIterator& operator --(); + //! moves iterator to the previous sequence element + SeqIterator operator --(int) const; + + //! moves iterator forward by the specified offset (possibly negative) + SeqIterator& operator +=(int); + //! moves iterator backward by the specified offset (possibly negative) + SeqIterator& operator -=(int); + + // this is index of the current element module seq->total*2 + // (to distinguish between 0 and seq->total) + int index; +}; + + +class CV_EXPORTS Algorithm; +class CV_EXPORTS AlgorithmInfo; +struct CV_EXPORTS AlgorithmInfoData; + +template struct ParamType {}; + +/*! + Base class for high-level OpenCV algorithms +*/ +class CV_EXPORTS_W Algorithm +{ +public: + Algorithm(); + virtual ~Algorithm(); + string name() const; + + template typename ParamType<_Tp>::member_type get(const string& name) const; + template typename ParamType<_Tp>::member_type get(const char* name) const; + + CV_WRAP int getInt(const string& name) const; + CV_WRAP double getDouble(const string& name) const; + CV_WRAP bool getBool(const string& name) const; + CV_WRAP string getString(const string& name) const; + CV_WRAP Mat getMat(const string& name) const; + CV_WRAP vector getMatVector(const string& name) const; + CV_WRAP Ptr getAlgorithm(const string& name) const; + + void set(const string& name, int value); + void set(const string& name, double value); + void set(const string& name, bool value); + void set(const string& name, const string& value); + void set(const string& name, const Mat& value); + void set(const string& name, const vector& value); + void set(const string& name, const Ptr& value); + template void set(const string& name, const Ptr<_Tp>& value); + + CV_WRAP void setInt(const string& name, int value); + CV_WRAP void setDouble(const string& name, double value); + CV_WRAP void setBool(const string& name, bool value); + CV_WRAP void setString(const string& name, const string& value); + CV_WRAP void setMat(const string& name, const Mat& value); + CV_WRAP void setMatVector(const string& name, const vector& value); + CV_WRAP void setAlgorithm(const string& name, const Ptr& value); + template void setAlgorithm(const string& name, const Ptr<_Tp>& value); + + void set(const char* name, int value); + void set(const char* name, double value); + void set(const char* name, bool value); + void set(const char* name, const string& value); + void set(const char* name, const Mat& value); + void set(const char* name, const vector& value); + void set(const char* name, const Ptr& value); + template void set(const char* name, const Ptr<_Tp>& value); + + void setInt(const char* name, int value); + void setDouble(const char* name, double value); + void setBool(const char* name, bool value); + void setString(const char* name, const string& value); + void setMat(const char* name, const Mat& value); + void setMatVector(const char* 
name, const vector& value); + void setAlgorithm(const char* name, const Ptr& value); + template void setAlgorithm(const char* name, const Ptr<_Tp>& value); + + CV_WRAP string paramHelp(const string& name) const; + int paramType(const char* name) const; + CV_WRAP int paramType(const string& name) const; + CV_WRAP void getParams(CV_OUT vector& names) const; + + + virtual void write(FileStorage& fs) const; + virtual void read(const FileNode& fn); + + typedef Algorithm* (*Constructor)(void); + typedef int (Algorithm::*Getter)() const; + typedef void (Algorithm::*Setter)(int); + + CV_WRAP static void getList(CV_OUT vector& algorithms); + CV_WRAP static Ptr _create(const string& name); + template static Ptr<_Tp> create(const string& name); + + virtual AlgorithmInfo* info() const /* TODO: make it = 0;*/ { return 0; } +}; + + +class CV_EXPORTS AlgorithmInfo +{ +public: + friend class Algorithm; + AlgorithmInfo(const string& name, Algorithm::Constructor create); + ~AlgorithmInfo(); + void get(const Algorithm* algo, const char* name, int argType, void* value) const; + void addParam_(Algorithm& algo, const char* name, int argType, + void* value, bool readOnly, + Algorithm::Getter getter, Algorithm::Setter setter, + const string& help=string()); + string paramHelp(const char* name) const; + int paramType(const char* name) const; + void getParams(vector& names) const; + + void write(const Algorithm* algo, FileStorage& fs) const; + void read(Algorithm* algo, const FileNode& fn) const; + string name() const; + + void addParam(Algorithm& algo, const char* name, + int& value, bool readOnly=false, + int (Algorithm::*getter)()=0, + void (Algorithm::*setter)(int)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + short& value, bool readOnly=false, + int (Algorithm::*getter)()=0, + void (Algorithm::*setter)(int)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + bool& value, bool readOnly=false, + int (Algorithm::*getter)()=0, + void (Algorithm::*setter)(int)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + double& value, bool readOnly=false, + double (Algorithm::*getter)()=0, + void (Algorithm::*setter)(double)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + string& value, bool readOnly=false, + string (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const string&)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + Mat& value, bool readOnly=false, + Mat (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const Mat&)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + vector& value, bool readOnly=false, + vector (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const vector&)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + Ptr& value, bool readOnly=false, + Ptr (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const Ptr&)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + float& value, bool readOnly=false, + float (Algorithm::*getter)()=0, + void (Algorithm::*setter)(float)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + unsigned int& value, bool readOnly=false, + unsigned int (Algorithm::*getter)()=0, + void (Algorithm::*setter)(unsigned int)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + uint64& value, bool 
readOnly=false, + uint64 (Algorithm::*getter)()=0, + void (Algorithm::*setter)(uint64)=0, + const string& help=string()); + void addParam(Algorithm& algo, const char* name, + uchar& value, bool readOnly=false, + uchar (Algorithm::*getter)()=0, + void (Algorithm::*setter)(uchar)=0, + const string& help=string()); + template void addParam(Algorithm& algo, const char* name, + Ptr<_Tp>& value, bool readOnly=false, + Ptr<_Tp> (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const Ptr<_Tp>&)=0, + const string& help=string()); + template void addParam(Algorithm& algo, const char* name, + Ptr<_Tp>& value, bool readOnly=false, + Ptr<_Tp> (Algorithm::*getter)()=0, + void (Algorithm::*setter)(const Ptr<_Tp>&)=0, + const string& help=string()); +protected: + AlgorithmInfoData* data; + void set(Algorithm* algo, const char* name, int argType, + const void* value, bool force=false) const; +}; + + +struct CV_EXPORTS Param +{ + enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7, UNSIGNED_INT=8, UINT64=9, SHORT=10, UCHAR=11 }; + + Param(); + Param(int _type, bool _readonly, int _offset, + Algorithm::Getter _getter=0, + Algorithm::Setter _setter=0, + const string& _help=string()); + int type; + int offset; + bool readonly; + Algorithm::Getter getter; + Algorithm::Setter setter; + string help; +}; + +template<> struct ParamType +{ + typedef bool const_param_type; + typedef bool member_type; + + enum { type = Param::BOOLEAN }; +}; + +template<> struct ParamType +{ + typedef int const_param_type; + typedef int member_type; + + enum { type = Param::INT }; +}; + +template<> struct ParamType +{ + typedef int const_param_type; + typedef int member_type; + + enum { type = Param::SHORT }; +}; + +template<> struct ParamType +{ + typedef double const_param_type; + typedef double member_type; + + enum { type = Param::REAL }; +}; + +template<> struct ParamType +{ + typedef const string& const_param_type; + typedef string member_type; + + enum { type = Param::STRING }; +}; + +template<> struct ParamType +{ + typedef const Mat& const_param_type; + typedef Mat member_type; + + enum { type = Param::MAT }; +}; + +template<> struct ParamType > +{ + typedef const vector& const_param_type; + typedef vector member_type; + + enum { type = Param::MAT_VECTOR }; +}; + +template<> struct ParamType +{ + typedef const Ptr& const_param_type; + typedef Ptr member_type; + + enum { type = Param::ALGORITHM }; +}; + +template<> struct ParamType +{ + typedef float const_param_type; + typedef float member_type; + + enum { type = Param::FLOAT }; +}; + +template<> struct ParamType +{ + typedef unsigned const_param_type; + typedef unsigned member_type; + + enum { type = Param::UNSIGNED_INT }; +}; + +template<> struct ParamType +{ + typedef uint64 const_param_type; + typedef uint64 member_type; + + enum { type = Param::UINT64 }; +}; + +template<> struct ParamType +{ + typedef uchar const_param_type; + typedef uchar member_type; + + enum { type = Param::UCHAR }; +}; + +/*! 
+"\nThe CommandLineParser class is designed for command line arguments parsing\n" + "Keys map: \n" + "Before you start to work with CommandLineParser you have to create a map for keys.\n" + " It will look like this\n" + " const char* keys =\n" + " {\n" + " { s| string| 123asd |string parameter}\n" + " { d| digit | 100 |digit parameter }\n" + " { c|noCamera|false |without camera }\n" + " { 1| |some text|help }\n" + " { 2| |333 |another help }\n" + " };\n" + "Usage syntax: \n" + " \"{\" - start of parameter string.\n" + " \"}\" - end of parameter string\n" + " \"|\" - separator between short name, full name, default value and help\n" + "Supported syntax: \n" + " --key1=arg1 \n" + " -key2=arg2 \n" + "Usage: \n" + " Imagine that the input parameters are next:\n" + " -s=string_value --digit=250 --noCamera lena.jpg 10000\n" + " CommandLineParser parser(argc, argv, keys) - create a parser object\n" + " parser.get(\"s\" or \"string\") will return you first parameter value\n" + " parser.get(\"s\", false or \"string\", false) will return you first parameter value\n" + " without spaces in end and begin\n" + " parser.get(\"d\" or \"digit\") will return you second parameter value.\n" + " It also works with 'unsigned int', 'double', and 'float' types>\n" + " parser.get(\"c\" or \"noCamera\") will return you true .\n" + " If you enter this key in commandline>\n" + " It return you false otherwise.\n" + " parser.get(\"1\") will return you the first argument without parameter (lena.jpg) \n" + " parser.get(\"2\") will return you the second argument without parameter (10000)\n" + " It also works with 'unsigned int', 'double', and 'float' types \n" +*/ +class CV_EXPORTS CommandLineParser +{ + public: + + //! the default constructor + CommandLineParser(int argc, const char* const argv[], const char* key_map); + + //! get parameter, you can choose: delete spaces in end and begin or not + template + _Tp get(const std::string& name, bool space_delete=true) + { + if (!has(name)) + { + return _Tp(); + } + std::string str = getString(name); + return analyzeValue<_Tp>(str, space_delete); + } + + //! 
print short name, full name, current value and help for all params + void printParams(); + + protected: + std::map > data; + std::string getString(const std::string& name); + + bool has(const std::string& keys); + + template + _Tp analyzeValue(const std::string& str, bool space_delete=false); + + template + static _Tp getData(const std::string& str) + { + _Tp res = _Tp(); + std::stringstream s1(str); + s1 >> res; + return res; + } + + template + _Tp fromStringNumber(const std::string& str);//the default conversion function for numbers + + }; + +template<> CV_EXPORTS +bool CommandLineParser::get(const std::string& name, bool space_delete); + +template<> CV_EXPORTS +std::string CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +int CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +unsigned int CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +uint64 CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +float CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +double CommandLineParser::analyzeValue(const std::string& str, bool space_delete); + + +/////////////////////////////// Parallel Primitives ////////////////////////////////// + +// a base body class +class CV_EXPORTS ParallelLoopBody +{ +public: + virtual ~ParallelLoopBody(); + virtual void operator() (const Range& range) const = 0; +}; + +CV_EXPORTS void parallel_for_(const Range& range, const ParallelLoopBody& body, double nstripes=-1.); + +/////////////////////////// Synchronization Primitives /////////////////////////////// + +class CV_EXPORTS Mutex +{ +public: + Mutex(); + ~Mutex(); + Mutex(const Mutex& m); + Mutex& operator = (const Mutex& m); + + void lock(); + bool trylock(); + void unlock(); + + struct Impl; +protected: + Impl* impl; +}; + +class CV_EXPORTS AutoLock +{ +public: + AutoLock(Mutex& m) : mutex(&m) { mutex->lock(); } + ~AutoLock() { mutex->unlock(); } +protected: + Mutex* mutex; +private: + AutoLock(const AutoLock&); + AutoLock& operator = (const AutoLock&); +}; + +class TLSDataContainer +{ +private: + int key_; +protected: + CV_EXPORTS TLSDataContainer(); + CV_EXPORTS ~TLSDataContainer(); // virtual is not required +public: + virtual void* createDataInstance() const = 0; + virtual void deleteDataInstance(void* data) const = 0; + + CV_EXPORTS void* getData() const; +}; + +template +class TLSData : protected TLSDataContainer +{ +public: + inline TLSData() {} + inline ~TLSData() {} + inline T* get() const { return (T*)getData(); } +private: + virtual void* createDataInstance() const { return new T; } + virtual void deleteDataInstance(void* data) const { delete (T*)data; } +}; + +} + +#endif // __cplusplus + +#include "opencv2/core/operations.hpp" +#include "opencv2/core/mat.hpp" + +#endif /*__OPENCV_CORE_HPP__*/ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/core_c.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/core_c.h new file mode 100644 index 0000000..38abfc4 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/core_c.h @@ -0,0 +1,1885 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
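For illustration only (an added sketch, not part of the OpenCV sources): a minimal use of the CommandLineParser, parallel_for_ and Mutex/AutoLock facilities declared at the end of core.hpp above (the previous file in this patch). The key map, the SquareBody functor and all variable names are invented for the example, and argc/argv are assumed to come from main().

\code
using namespace cv;

// keys map in the syntax described by the CommandLineParser documentation above
const char* keys =
    "{ s | string | hello | a string parameter }"
    "{ d | digit  | 100   | an integer parameter }";
CommandLineParser args(argc, argv, keys);
std::string s = args.get<std::string>("s");
int d = args.get<int>("d");

// a ParallelLoopBody that squares every element of a shared vector
struct SquareBody : ParallelLoopBody
{
    std::vector<double>& v;
    SquareBody(std::vector<double>& _v) : v(_v) {}
    void operator()(const Range& r) const
    {
        for( int i = r.start; i < r.end; i++ )
            v[i] *= v[i];
    }
};
std::vector<double> data(1000, 2.0);
parallel_for_(Range(0, (int)data.size()), SquareBody(data));

// guard a critical section with Mutex/AutoLock
Mutex mtx;
{
    AutoLock lock(mtx);
    // ... code that must not run concurrently ...
}
\endcode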
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + + +#ifndef __OPENCV_CORE_C_H__ +#define __OPENCV_CORE_C_H__ + +#include "opencv2/core/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************************\ +* Array allocation, deallocation, initialization and access to elements * +\****************************************************************************************/ + +/* wrapper. + If there is no enough memory, the function + (as well as other OpenCV functions that call cvAlloc) + raises an error. */ +CVAPI(void*) cvAlloc( size_t size ); + +/* wrapper. + Here and further all the memory releasing functions + (that all call cvFree) take double pointer in order to + to clear pointer to the data after releasing it. + Passing pointer to NULL pointer is Ok: nothing happens in this case +*/ +CVAPI(void) cvFree_( void* ptr ); +#define cvFree(ptr) (cvFree_(*(ptr)), *(ptr)=0) + +/* Allocates and initializes IplImage header */ +CVAPI(IplImage*) cvCreateImageHeader( CvSize size, int depth, int channels ); + +/* Inializes IplImage header */ +CVAPI(IplImage*) cvInitImageHeader( IplImage* image, CvSize size, int depth, + int channels, int origin CV_DEFAULT(0), + int align CV_DEFAULT(4)); + +/* Creates IPL image (header and data) */ +CVAPI(IplImage*) cvCreateImage( CvSize size, int depth, int channels ); + +/* Releases (i.e. 
deallocates) IPL image header */ +CVAPI(void) cvReleaseImageHeader( IplImage** image ); + +/* Releases IPL image header and data */ +CVAPI(void) cvReleaseImage( IplImage** image ); + +/* Creates a copy of IPL image (widthStep may differ) */ +CVAPI(IplImage*) cvCloneImage( const IplImage* image ); + +/* Sets a Channel Of Interest (only a few functions support COI) - + use cvCopy to extract the selected channel and/or put it back */ +CVAPI(void) cvSetImageCOI( IplImage* image, int coi ); + +/* Retrieves image Channel Of Interest */ +CVAPI(int) cvGetImageCOI( const IplImage* image ); + +/* Sets image ROI (region of interest) (COI is not changed) */ +CVAPI(void) cvSetImageROI( IplImage* image, CvRect rect ); + +/* Resets image ROI and COI */ +CVAPI(void) cvResetImageROI( IplImage* image ); + +/* Retrieves image ROI */ +CVAPI(CvRect) cvGetImageROI( const IplImage* image ); + +/* Allocates and initializes CvMat header */ +CVAPI(CvMat*) cvCreateMatHeader( int rows, int cols, int type ); + +#define CV_AUTOSTEP 0x7fffffff + +/* Initializes CvMat header */ +CVAPI(CvMat*) cvInitMatHeader( CvMat* mat, int rows, int cols, + int type, void* data CV_DEFAULT(NULL), + int step CV_DEFAULT(CV_AUTOSTEP) ); + +/* Allocates and initializes CvMat header and allocates data */ +CVAPI(CvMat*) cvCreateMat( int rows, int cols, int type ); + +/* Releases CvMat header and deallocates matrix data + (reference counting is used for data) */ +CVAPI(void) cvReleaseMat( CvMat** mat ); + +/* Decrements CvMat data reference counter and deallocates the data if + it reaches 0 */ +CV_INLINE void cvDecRefData( CvArr* arr ) +{ + if( CV_IS_MAT( arr )) + { + CvMat* mat = (CvMat*)arr; + mat->data.ptr = NULL; + if( mat->refcount != NULL && --*mat->refcount == 0 ) + cvFree( &mat->refcount ); + mat->refcount = NULL; + } + else if( CV_IS_MATND( arr )) + { + CvMatND* mat = (CvMatND*)arr; + mat->data.ptr = NULL; + if( mat->refcount != NULL && --*mat->refcount == 0 ) + cvFree( &mat->refcount ); + mat->refcount = NULL; + } +} + +/* Increments CvMat data reference counter */ +CV_INLINE int cvIncRefData( CvArr* arr ) +{ + int refcount = 0; + if( CV_IS_MAT( arr )) + { + CvMat* mat = (CvMat*)arr; + if( mat->refcount != NULL ) + refcount = ++*mat->refcount; + } + else if( CV_IS_MATND( arr )) + { + CvMatND* mat = (CvMatND*)arr; + if( mat->refcount != NULL ) + refcount = ++*mat->refcount; + } + return refcount; +} + + +/* Creates an exact copy of the input matrix (except, may be, step value) */ +CVAPI(CvMat*) cvCloneMat( const CvMat* mat ); + + +/* Makes a new matrix from subrectangle of input array. + No data is copied */ +CVAPI(CvMat*) cvGetSubRect( const CvArr* arr, CvMat* submat, CvRect rect ); +#define cvGetSubArr cvGetSubRect + +/* Selects row span of the input array: arr(start_row:delta_row:end_row,:) + (end_row is not included into the span). */ +CVAPI(CvMat*) cvGetRows( const CvArr* arr, CvMat* submat, + int start_row, int end_row, + int delta_row CV_DEFAULT(1)); + +CV_INLINE CvMat* cvGetRow( const CvArr* arr, CvMat* submat, int row ) +{ + return cvGetRows( arr, submat, row, row + 1, 1 ); +} + + +/* Selects column span of the input array: arr(:,start_col:end_col) + (end_col is not included into the span) */ +CVAPI(CvMat*) cvGetCols( const CvArr* arr, CvMat* submat, + int start_col, int end_col ); + +CV_INLINE CvMat* cvGetCol( const CvArr* arr, CvMat* submat, int col ) +{ + return cvGetCols( arr, submat, col, col + 1 ); +} + +/* Select a diagonal of the input array. 
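For illustration (an added sketch, not part of the original header): the sub-array functions above return headers that share data with the parent matrix, so no pixels are copied. 'big', 'sub' and 'row' are invented names.

\code
CvMat* big = cvCreateMat(100, 100, CV_8UC1);
CvMat sub;                                       // header only, no data is allocated
cvGetSubRect(big, &sub, cvRect(10, 10, 20, 20));
cvSet(&sub, cvScalarAll(255), NULL);             // writes into the corresponding block of 'big'
CvMat row;
cvGetRow(big, &row, 0);                          // view of the first row of 'big'
cvReleaseMat(&big);
\endcode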
+ (diag = 0 means the main diagonal, >0 means a diagonal above the main one, + <0 - below the main one). + The diagonal will be represented as a column (nx1 matrix). */ +CVAPI(CvMat*) cvGetDiag( const CvArr* arr, CvMat* submat, + int diag CV_DEFAULT(0)); + +/* low-level scalar <-> raw data conversion functions */ +CVAPI(void) cvScalarToRawData( const CvScalar* scalar, void* data, int type, + int extend_to_12 CV_DEFAULT(0) ); + +CVAPI(void) cvRawDataToScalar( const void* data, int type, CvScalar* scalar ); + +/* Allocates and initializes CvMatND header */ +CVAPI(CvMatND*) cvCreateMatNDHeader( int dims, const int* sizes, int type ); + +/* Allocates and initializes CvMatND header and allocates data */ +CVAPI(CvMatND*) cvCreateMatND( int dims, const int* sizes, int type ); + +/* Initializes preallocated CvMatND header */ +CVAPI(CvMatND*) cvInitMatNDHeader( CvMatND* mat, int dims, const int* sizes, + int type, void* data CV_DEFAULT(NULL) ); + +/* Releases CvMatND */ +CV_INLINE void cvReleaseMatND( CvMatND** mat ) +{ + cvReleaseMat( (CvMat**)mat ); +} + +/* Creates a copy of CvMatND (except, may be, steps) */ +CVAPI(CvMatND*) cvCloneMatND( const CvMatND* mat ); + +/* Allocates and initializes CvSparseMat header and allocates data */ +CVAPI(CvSparseMat*) cvCreateSparseMat( int dims, const int* sizes, int type ); + +/* Releases CvSparseMat */ +CVAPI(void) cvReleaseSparseMat( CvSparseMat** mat ); + +/* Creates a copy of CvSparseMat (except, may be, zero items) */ +CVAPI(CvSparseMat*) cvCloneSparseMat( const CvSparseMat* mat ); + +/* Initializes sparse array iterator + (returns the first node or NULL if the array is empty) */ +CVAPI(CvSparseNode*) cvInitSparseMatIterator( const CvSparseMat* mat, + CvSparseMatIterator* mat_iterator ); + +// returns next sparse array node (or NULL if there is no more nodes) +CV_INLINE CvSparseNode* cvGetNextSparseNode( CvSparseMatIterator* mat_iterator ) +{ + if( mat_iterator->node->next ) + return mat_iterator->node = mat_iterator->node->next; + else + { + int idx; + for( idx = ++mat_iterator->curidx; idx < mat_iterator->mat->hashsize; idx++ ) + { + CvSparseNode* node = (CvSparseNode*)mat_iterator->mat->hashtable[idx]; + if( node ) + { + mat_iterator->curidx = idx; + return mat_iterator->node = node; + } + } + return NULL; + } +} + +/**************** matrix iterator: used for n-ary operations on dense arrays *********/ + +#define CV_MAX_ARR 10 + +typedef struct CvNArrayIterator +{ + int count; /* number of arrays */ + int dims; /* number of dimensions to iterate */ + CvSize size; /* maximal common linear size: { width = size, height = 1 } */ + uchar* ptr[CV_MAX_ARR]; /* pointers to the array slices */ + int stack[CV_MAX_DIM]; /* for internal use */ + CvMatND* hdr[CV_MAX_ARR]; /* pointers to the headers of the + matrices that are processed */ +} +CvNArrayIterator; + +#define CV_NO_DEPTH_CHECK 1 +#define CV_NO_CN_CHECK 2 +#define CV_NO_SIZE_CHECK 4 + +/* initializes iterator that traverses through several arrays simulteneously + (the function together with cvNextArraySlice is used for + N-ari element-wise operations) */ +CVAPI(int) cvInitNArrayIterator( int count, CvArr** arrs, + const CvArr* mask, CvMatND* stubs, + CvNArrayIterator* array_iterator, + int flags CV_DEFAULT(0) ); + +/* returns zero value if iteration is finished, non-zero (slice length) otherwise */ +CVAPI(int) cvNextNArraySlice( CvNArrayIterator* array_iterator ); + + +/* Returns type of array elements: + CV_8UC1 ... CV_64FC4 ... 
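For illustration (an added sketch, not part of the original header): the sparse-matrix iterator declared above is typically used as follows; 'sp', 'sizes' and the loop variables are invented names.

\code
int sizes[] = { 10, 10 };
CvSparseMat* sp = cvCreateSparseMat(2, sizes, CV_32FC1);
cvSetReal2D(sp, 3, 7, 5.f);                      // creates the (3,7) node
CvSparseMatIterator it;
for( CvSparseNode* node = cvInitSparseMatIterator(sp, &it);
     node != 0; node = cvGetNextSparseNode(&it) )
{
    const int* idx = CV_NODE_IDX(sp, node);      // indices of the current non-zero element
    float val = *(float*)CV_NODE_VAL(sp, node);  // its value
}
cvReleaseSparseMat(&sp);
\endcode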
*/ +CVAPI(int) cvGetElemType( const CvArr* arr ); + +/* Retrieves number of an array dimensions and + optionally sizes of the dimensions */ +CVAPI(int) cvGetDims( const CvArr* arr, int* sizes CV_DEFAULT(NULL) ); + + +/* Retrieves size of a particular array dimension. + For 2d arrays cvGetDimSize(arr,0) returns number of rows (image height) + and cvGetDimSize(arr,1) returns number of columns (image width) */ +CVAPI(int) cvGetDimSize( const CvArr* arr, int index ); + + +/* ptr = &arr(idx0,idx1,...). All indexes are zero-based, + the major dimensions go first (e.g. (y,x) for 2D, (z,y,x) for 3D */ +CVAPI(uchar*) cvPtr1D( const CvArr* arr, int idx0, int* type CV_DEFAULT(NULL)); +CVAPI(uchar*) cvPtr2D( const CvArr* arr, int idx0, int idx1, int* type CV_DEFAULT(NULL) ); +CVAPI(uchar*) cvPtr3D( const CvArr* arr, int idx0, int idx1, int idx2, + int* type CV_DEFAULT(NULL)); + +/* For CvMat or IplImage number of indices should be 2 + (row index (y) goes first, column index (x) goes next). + For CvMatND or CvSparseMat number of infices should match number of and + indices order should match the array dimension order. */ +CVAPI(uchar*) cvPtrND( const CvArr* arr, const int* idx, int* type CV_DEFAULT(NULL), + int create_node CV_DEFAULT(1), + unsigned* precalc_hashval CV_DEFAULT(NULL)); + +/* value = arr(idx0,idx1,...) */ +CVAPI(CvScalar) cvGet1D( const CvArr* arr, int idx0 ); +CVAPI(CvScalar) cvGet2D( const CvArr* arr, int idx0, int idx1 ); +CVAPI(CvScalar) cvGet3D( const CvArr* arr, int idx0, int idx1, int idx2 ); +CVAPI(CvScalar) cvGetND( const CvArr* arr, const int* idx ); + +/* for 1-channel arrays */ +CVAPI(double) cvGetReal1D( const CvArr* arr, int idx0 ); +CVAPI(double) cvGetReal2D( const CvArr* arr, int idx0, int idx1 ); +CVAPI(double) cvGetReal3D( const CvArr* arr, int idx0, int idx1, int idx2 ); +CVAPI(double) cvGetRealND( const CvArr* arr, const int* idx ); + +/* arr(idx0,idx1,...) = value */ +CVAPI(void) cvSet1D( CvArr* arr, int idx0, CvScalar value ); +CVAPI(void) cvSet2D( CvArr* arr, int idx0, int idx1, CvScalar value ); +CVAPI(void) cvSet3D( CvArr* arr, int idx0, int idx1, int idx2, CvScalar value ); +CVAPI(void) cvSetND( CvArr* arr, const int* idx, CvScalar value ); + +/* for 1-channel arrays */ +CVAPI(void) cvSetReal1D( CvArr* arr, int idx0, double value ); +CVAPI(void) cvSetReal2D( CvArr* arr, int idx0, int idx1, double value ); +CVAPI(void) cvSetReal3D( CvArr* arr, int idx0, + int idx1, int idx2, double value ); +CVAPI(void) cvSetRealND( CvArr* arr, const int* idx, double value ); + +/* clears element of ND dense array, + in case of sparse arrays it deletes the specified node */ +CVAPI(void) cvClearND( CvArr* arr, const int* idx ); + +/* Converts CvArr (IplImage or CvMat,...) to CvMat. + If the last parameter is non-zero, function can + convert multi(>2)-dimensional array to CvMat as long as + the last array's dimension is continous. The resultant + matrix will be have appropriate (a huge) number of rows */ +CVAPI(CvMat*) cvGetMat( const CvArr* arr, CvMat* header, + int* coi CV_DEFAULT(NULL), + int allowND CV_DEFAULT(0)); + +/* Converts CvArr (IplImage or CvMat) to IplImage */ +CVAPI(IplImage*) cvGetImage( const CvArr* arr, IplImage* image_header ); + + +/* Changes a shape of multi-dimensional array. + new_cn == 0 means that number of channels remains unchanged. 
+ new_dims == 0 means that number and sizes of dimensions remain the same + (unless they need to be changed to set the new number of channels) + if new_dims == 1, there is no need to specify new dimension sizes + The resultant configuration should be achievable w/o data copying. + If the resultant array is sparse, CvSparseMat header should be passed + to the function else if the result is 1 or 2 dimensional, + CvMat header should be passed to the function + else CvMatND header should be passed */ +CVAPI(CvArr*) cvReshapeMatND( const CvArr* arr, + int sizeof_header, CvArr* header, + int new_cn, int new_dims, int* new_sizes ); + +#define cvReshapeND( arr, header, new_cn, new_dims, new_sizes ) \ + cvReshapeMatND( (arr), sizeof(*(header)), (header), \ + (new_cn), (new_dims), (new_sizes)) + +CVAPI(CvMat*) cvReshape( const CvArr* arr, CvMat* header, + int new_cn, int new_rows CV_DEFAULT(0) ); + +/* Repeats source 2d array several times in both horizontal and + vertical direction to fill destination array */ +CVAPI(void) cvRepeat( const CvArr* src, CvArr* dst ); + +/* Allocates array data */ +CVAPI(void) cvCreateData( CvArr* arr ); + +/* Releases array data */ +CVAPI(void) cvReleaseData( CvArr* arr ); + +/* Attaches user data to the array header. The step is reffered to + the pre-last dimension. That is, all the planes of the array + must be joint (w/o gaps) */ +CVAPI(void) cvSetData( CvArr* arr, void* data, int step ); + +/* Retrieves raw data of CvMat, IplImage or CvMatND. + In the latter case the function raises an error if + the array can not be represented as a matrix */ +CVAPI(void) cvGetRawData( const CvArr* arr, uchar** data, + int* step CV_DEFAULT(NULL), + CvSize* roi_size CV_DEFAULT(NULL)); + +/* Returns width and height of array in elements */ +CVAPI(CvSize) cvGetSize( const CvArr* arr ); + +/* Copies source array to destination array */ +CVAPI(void) cvCopy( const CvArr* src, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Sets all or "masked" elements of input array + to the same value*/ +CVAPI(void) cvSet( CvArr* arr, CvScalar value, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Clears all the array elements (sets them to 0) */ +CVAPI(void) cvSetZero( CvArr* arr ); +#define cvZero cvSetZero + + +/* Splits a multi-channel array into the set of single-channel arrays or + extracts particular [color] plane */ +CVAPI(void) cvSplit( const CvArr* src, CvArr* dst0, CvArr* dst1, + CvArr* dst2, CvArr* dst3 ); + +/* Merges a set of single-channel arrays into the single multi-channel array + or inserts one particular [color] plane to the array */ +CVAPI(void) cvMerge( const CvArr* src0, const CvArr* src1, + const CvArr* src2, const CvArr* src3, + CvArr* dst ); + +/* Copies several channels from input arrays to + certain channels of output arrays */ +CVAPI(void) cvMixChannels( const CvArr** src, int src_count, + CvArr** dst, int dst_count, + const int* from_to, int pair_count ); + +/* Performs linear transformation on every source array element: + dst(x,y,c) = scale*src(x,y,c)+shift. 
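For illustration (an added sketch, not part of the original header): a typical depth conversion with cvConvertScale; 'src8u' and 'dst32f' are invented names.

\code
CvMat* src8u  = cvCreateMat(4, 4, CV_8UC1);
CvMat* dst32f = cvCreateMat(4, 4, CV_32FC1);
cvSet(src8u, cvScalarAll(128), NULL);
cvConvertScale(src8u, dst32f, 1.0/255.0, 0);     // dst = src/255, values now in [0,1]
cvReleaseMat(&src8u);
cvReleaseMat(&dst32f);
\endcode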
+ Arbitrary combination of input and output array depths are allowed + (number of channels must be the same), thus the function can be used + for type conversion */ +CVAPI(void) cvConvertScale( const CvArr* src, CvArr* dst, + double scale CV_DEFAULT(1), + double shift CV_DEFAULT(0) ); +#define cvCvtScale cvConvertScale +#define cvScale cvConvertScale +#define cvConvert( src, dst ) cvConvertScale( (src), (dst), 1, 0 ) + + +/* Performs linear transformation on every source array element, + stores absolute value of the result: + dst(x,y,c) = abs(scale*src(x,y,c)+shift). + destination array must have 8u type. + In other cases one may use cvConvertScale + cvAbsDiffS */ +CVAPI(void) cvConvertScaleAbs( const CvArr* src, CvArr* dst, + double scale CV_DEFAULT(1), + double shift CV_DEFAULT(0) ); +#define cvCvtScaleAbs cvConvertScaleAbs + + +/* checks termination criteria validity and + sets eps to default_eps (if it is not set), + max_iter to default_max_iters (if it is not set) +*/ +CVAPI(CvTermCriteria) cvCheckTermCriteria( CvTermCriteria criteria, + double default_eps, + int default_max_iters ); + +/****************************************************************************************\ +* Arithmetic, logic and comparison operations * +\****************************************************************************************/ + +/* dst(mask) = src1(mask) + src2(mask) */ +CVAPI(void) cvAdd( const CvArr* src1, const CvArr* src2, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src(mask) + value */ +CVAPI(void) cvAddS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src1(mask) - src2(mask) */ +CVAPI(void) cvSub( const CvArr* src1, const CvArr* src2, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src(mask) - value = src(mask) + (-value) */ +CV_INLINE void cvSubS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)) +{ + cvAddS( src, cvScalar( -value.val[0], -value.val[1], -value.val[2], -value.val[3]), + dst, mask ); +} + +/* dst(mask) = value - src(mask) */ +CVAPI(void) cvSubRS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) * src2(idx) * scale + (scaled element-wise multiplication of 2 arrays) */ +CVAPI(void) cvMul( const CvArr* src1, const CvArr* src2, + CvArr* dst, double scale CV_DEFAULT(1) ); + +/* element-wise division/inversion with scaling: + dst(idx) = src1(idx) * scale / src2(idx) + or dst(idx) = scale / src2(idx) if src1 == 0 */ +CVAPI(void) cvDiv( const CvArr* src1, const CvArr* src2, + CvArr* dst, double scale CV_DEFAULT(1)); + +/* dst = src1 * scale + src2 */ +CVAPI(void) cvScaleAdd( const CvArr* src1, CvScalar scale, + const CvArr* src2, CvArr* dst ); +#define cvAXPY( A, real_scalar, B, C ) cvScaleAdd(A, cvRealScalar(real_scalar), B, C) + +/* dst = src1 * alpha + src2 * beta + gamma */ +CVAPI(void) cvAddWeighted( const CvArr* src1, double alpha, + const CvArr* src2, double beta, + double gamma, CvArr* dst ); + +/* result = sum_i(src1(i) * src2(i)) (results for all channels are accumulated together) */ +CVAPI(double) cvDotProduct( const CvArr* src1, const CvArr* src2 ); + +/* dst(idx) = src1(idx) & src2(idx) */ +CVAPI(void) cvAnd( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) & value */ +CVAPI(void) cvAndS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) | src2(idx) 
*/ +CVAPI(void) cvOr( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) | value */ +CVAPI(void) cvOrS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) ^ src2(idx) */ +CVAPI(void) cvXor( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) ^ value */ +CVAPI(void) cvXorS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = ~src(idx) */ +CVAPI(void) cvNot( const CvArr* src, CvArr* dst ); + +/* dst(idx) = lower(idx) <= src(idx) < upper(idx) */ +CVAPI(void) cvInRange( const CvArr* src, const CvArr* lower, + const CvArr* upper, CvArr* dst ); + +/* dst(idx) = lower <= src(idx) < upper */ +CVAPI(void) cvInRangeS( const CvArr* src, CvScalar lower, + CvScalar upper, CvArr* dst ); + +#define CV_CMP_EQ 0 +#define CV_CMP_GT 1 +#define CV_CMP_GE 2 +#define CV_CMP_LT 3 +#define CV_CMP_LE 4 +#define CV_CMP_NE 5 + +/* The comparison operation support single-channel arrays only. + Destination image should be 8uC1 or 8sC1 */ + +/* dst(idx) = src1(idx) _cmp_op_ src2(idx) */ +CVAPI(void) cvCmp( const CvArr* src1, const CvArr* src2, CvArr* dst, int cmp_op ); + +/* dst(idx) = src1(idx) _cmp_op_ value */ +CVAPI(void) cvCmpS( const CvArr* src, double value, CvArr* dst, int cmp_op ); + +/* dst(idx) = min(src1(idx),src2(idx)) */ +CVAPI(void) cvMin( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(idx) = max(src1(idx),src2(idx)) */ +CVAPI(void) cvMax( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(idx) = min(src(idx),value) */ +CVAPI(void) cvMinS( const CvArr* src, double value, CvArr* dst ); + +/* dst(idx) = max(src(idx),value) */ +CVAPI(void) cvMaxS( const CvArr* src, double value, CvArr* dst ); + +/* dst(x,y,c) = abs(src1(x,y,c) - src2(x,y,c)) */ +CVAPI(void) cvAbsDiff( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(x,y,c) = abs(src(x,y,c) - value(c)) */ +CVAPI(void) cvAbsDiffS( const CvArr* src, CvArr* dst, CvScalar value ); +#define cvAbs( src, dst ) cvAbsDiffS( (src), (dst), cvScalarAll(0)) + +/****************************************************************************************\ +* Math operations * +\****************************************************************************************/ + +/* Does cartesian->polar coordinates conversion. + Either of output components (magnitude or angle) is optional */ +CVAPI(void) cvCartToPolar( const CvArr* x, const CvArr* y, + CvArr* magnitude, CvArr* angle CV_DEFAULT(NULL), + int angle_in_degrees CV_DEFAULT(0)); + +/* Does polar->cartesian coordinates conversion. + Either of output components (magnitude or angle) is optional. + If magnitude is missing it is assumed to be all 1's */ +CVAPI(void) cvPolarToCart( const CvArr* magnitude, const CvArr* angle, + CvArr* x, CvArr* y, + int angle_in_degrees CV_DEFAULT(0)); + +/* Does powering: dst(idx) = src(idx)^power */ +CVAPI(void) cvPow( const CvArr* src, CvArr* dst, double power ); + +/* Does exponention: dst(idx) = exp(src(idx)). + Overflow is not handled yet. Underflow is handled. + Maximal relative error is ~7e-6 for single-precision input */ +CVAPI(void) cvExp( const CvArr* src, CvArr* dst ); + +/* Calculates natural logarithms: dst(idx) = log(abs(src(idx))). 
+ Logarithm of 0 gives large negative number(~-700) + Maximal relative error is ~3e-7 for single-precision output +*/ +CVAPI(void) cvLog( const CvArr* src, CvArr* dst ); + +/* Fast arctangent calculation */ +CVAPI(float) cvFastArctan( float y, float x ); + +/* Fast cubic root calculation */ +CVAPI(float) cvCbrt( float value ); + +/* Checks array values for NaNs, Infs or simply for too large numbers + (if CV_CHECK_RANGE is set). If CV_CHECK_QUIET is set, + no runtime errors is raised (function returns zero value in case of "bad" values). + Otherwise cvError is called */ +#define CV_CHECK_RANGE 1 +#define CV_CHECK_QUIET 2 +CVAPI(int) cvCheckArr( const CvArr* arr, int flags CV_DEFAULT(0), + double min_val CV_DEFAULT(0), double max_val CV_DEFAULT(0)); +#define cvCheckArray cvCheckArr + +#define CV_RAND_UNI 0 +#define CV_RAND_NORMAL 1 +CVAPI(void) cvRandArr( CvRNG* rng, CvArr* arr, int dist_type, + CvScalar param1, CvScalar param2 ); + +CVAPI(void) cvRandShuffle( CvArr* mat, CvRNG* rng, + double iter_factor CV_DEFAULT(1.)); + +#define CV_SORT_EVERY_ROW 0 +#define CV_SORT_EVERY_COLUMN 1 +#define CV_SORT_ASCENDING 0 +#define CV_SORT_DESCENDING 16 + +CVAPI(void) cvSort( const CvArr* src, CvArr* dst CV_DEFAULT(NULL), + CvArr* idxmat CV_DEFAULT(NULL), + int flags CV_DEFAULT(0)); + +/* Finds real roots of a cubic equation */ +CVAPI(int) cvSolveCubic( const CvMat* coeffs, CvMat* roots ); + +/* Finds all real and complex roots of a polynomial equation */ +CVAPI(void) cvSolvePoly(const CvMat* coeffs, CvMat *roots2, + int maxiter CV_DEFAULT(20), int fig CV_DEFAULT(100)); + +/****************************************************************************************\ +* Matrix operations * +\****************************************************************************************/ + +/* Calculates cross product of two 3d vectors */ +CVAPI(void) cvCrossProduct( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* Matrix transform: dst = A*B + C, C is optional */ +#define cvMatMulAdd( src1, src2, src3, dst ) cvGEMM( (src1), (src2), 1., (src3), 1., (dst), 0 ) +#define cvMatMul( src1, src2, dst ) cvMatMulAdd( (src1), (src2), NULL, (dst)) + +#define CV_GEMM_A_T 1 +#define CV_GEMM_B_T 2 +#define CV_GEMM_C_T 4 +/* Extended matrix transform: + dst = alpha*op(A)*op(B) + beta*op(C), where op(X) is X or X^T */ +CVAPI(void) cvGEMM( const CvArr* src1, const CvArr* src2, double alpha, + const CvArr* src3, double beta, CvArr* dst, + int tABC CV_DEFAULT(0)); +#define cvMatMulAddEx cvGEMM + +/* Transforms each element of source array and stores + resultant vectors in destination array */ +CVAPI(void) cvTransform( const CvArr* src, CvArr* dst, + const CvMat* transmat, + const CvMat* shiftvec CV_DEFAULT(NULL)); +#define cvMatMulAddS cvTransform + +/* Does perspective transform on every element of input array */ +CVAPI(void) cvPerspectiveTransform( const CvArr* src, CvArr* dst, + const CvMat* mat ); + +/* Calculates (A-delta)*(A-delta)^T (order=0) or (A-delta)^T*(A-delta) (order=1) */ +CVAPI(void) cvMulTransposed( const CvArr* src, CvArr* dst, int order, + const CvArr* delta CV_DEFAULT(NULL), + double scale CV_DEFAULT(1.) ); + +/* Tranposes matrix. 
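For illustration (an added sketch, not part of the original header): the generalized matrix product cvGEMM declared above, followed by a transpose. A, B, C, D and Dt are invented names, and the cvReleaseMat calls are omitted for brevity.

\code
CvMat* A  = cvCreateMat(2, 3, CV_64FC1);
CvMat* B  = cvCreateMat(3, 2, CV_64FC1);
CvMat* C  = cvCreateMat(2, 2, CV_64FC1);
CvMat* D  = cvCreateMat(2, 2, CV_64FC1);
CvMat* Dt = cvCreateMat(2, 2, CV_64FC1);
cvSet(A, cvRealScalar(1), NULL);
cvSet(B, cvRealScalar(2), NULL);
cvSet(C, cvRealScalar(1), NULL);
cvGEMM(A, B, 2.0, C, 3.0, D, 0);     // D = 2*A*B + 3*C; every element becomes 15
cvTranspose(D, Dt);                  // Dt = D^T
\endcode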
Square matrices can be transposed in-place */ +CVAPI(void) cvTranspose( const CvArr* src, CvArr* dst ); +#define cvT cvTranspose + +/* Completes the symmetric matrix from the lower (LtoR=0) or from the upper (LtoR!=0) part */ +CVAPI(void) cvCompleteSymm( CvMat* matrix, int LtoR CV_DEFAULT(0) ); + +/* Mirror array data around horizontal (flip=0), + vertical (flip=1) or both(flip=-1) axises: + cvFlip(src) flips images vertically and sequences horizontally (inplace) */ +CVAPI(void) cvFlip( const CvArr* src, CvArr* dst CV_DEFAULT(NULL), + int flip_mode CV_DEFAULT(0)); +#define cvMirror cvFlip + + +#define CV_SVD_MODIFY_A 1 +#define CV_SVD_U_T 2 +#define CV_SVD_V_T 4 + +/* Performs Singular Value Decomposition of a matrix */ +CVAPI(void) cvSVD( CvArr* A, CvArr* W, CvArr* U CV_DEFAULT(NULL), + CvArr* V CV_DEFAULT(NULL), int flags CV_DEFAULT(0)); + +/* Performs Singular Value Back Substitution (solves A*X = B): + flags must be the same as in cvSVD */ +CVAPI(void) cvSVBkSb( const CvArr* W, const CvArr* U, + const CvArr* V, const CvArr* B, + CvArr* X, int flags ); + +#define CV_LU 0 +#define CV_SVD 1 +#define CV_SVD_SYM 2 +#define CV_CHOLESKY 3 +#define CV_QR 4 +#define CV_NORMAL 16 + +/* Inverts matrix */ +CVAPI(double) cvInvert( const CvArr* src, CvArr* dst, + int method CV_DEFAULT(CV_LU)); +#define cvInv cvInvert + +/* Solves linear system (src1)*(dst) = (src2) + (returns 0 if src1 is a singular and CV_LU method is used) */ +CVAPI(int) cvSolve( const CvArr* src1, const CvArr* src2, CvArr* dst, + int method CV_DEFAULT(CV_LU)); + +/* Calculates determinant of input matrix */ +CVAPI(double) cvDet( const CvArr* mat ); + +/* Calculates trace of the matrix (sum of elements on the main diagonal) */ +CVAPI(CvScalar) cvTrace( const CvArr* mat ); + +/* Finds eigen values and vectors of a symmetric matrix */ +CVAPI(void) cvEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, + double eps CV_DEFAULT(0), + int lowindex CV_DEFAULT(-1), + int highindex CV_DEFAULT(-1)); + +///* Finds selected eigen values and vectors of a symmetric matrix */ +//CVAPI(void) cvSelectedEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, +// int lowindex, int highindex ); + +/* Makes an identity matrix (mat_ij = i == j) */ +CVAPI(void) cvSetIdentity( CvArr* mat, CvScalar value CV_DEFAULT(cvRealScalar(1)) ); + +/* Fills matrix with given range of numbers */ +CVAPI(CvArr*) cvRange( CvArr* mat, double start, double end ); + +/* Calculates covariation matrix for a set of vectors */ +/* transpose([v1-avg, v2-avg,...]) * [v1-avg,v2-avg,...] */ +#define CV_COVAR_SCRAMBLED 0 + +/* [v1-avg, v2-avg,...] * transpose([v1-avg,v2-avg,...]) */ +#define CV_COVAR_NORMAL 1 + +/* do not calc average (i.e. 
mean vector) - use the input vector instead + (useful for calculating covariance matrix by parts) */ +#define CV_COVAR_USE_AVG 2 + +/* scale the covariance matrix coefficients by number of the vectors */ +#define CV_COVAR_SCALE 4 + +/* all the input vectors are stored in a single matrix, as its rows */ +#define CV_COVAR_ROWS 8 + +/* all the input vectors are stored in a single matrix, as its columns */ +#define CV_COVAR_COLS 16 + +CVAPI(void) cvCalcCovarMatrix( const CvArr** vects, int count, + CvArr* cov_mat, CvArr* avg, int flags ); + +#define CV_PCA_DATA_AS_ROW 0 +#define CV_PCA_DATA_AS_COL 1 +#define CV_PCA_USE_AVG 2 +CVAPI(void) cvCalcPCA( const CvArr* data, CvArr* mean, + CvArr* eigenvals, CvArr* eigenvects, int flags ); + +CVAPI(void) cvProjectPCA( const CvArr* data, const CvArr* mean, + const CvArr* eigenvects, CvArr* result ); + +CVAPI(void) cvBackProjectPCA( const CvArr* proj, const CvArr* mean, + const CvArr* eigenvects, CvArr* result ); + +/* Calculates Mahalanobis(weighted) distance */ +CVAPI(double) cvMahalanobis( const CvArr* vec1, const CvArr* vec2, const CvArr* mat ); +#define cvMahalonobis cvMahalanobis + +/****************************************************************************************\ +* Array Statistics * +\****************************************************************************************/ + +/* Finds sum of array elements */ +CVAPI(CvScalar) cvSum( const CvArr* arr ); + +/* Calculates number of non-zero pixels */ +CVAPI(int) cvCountNonZero( const CvArr* arr ); + +/* Calculates mean value of array elements */ +CVAPI(CvScalar) cvAvg( const CvArr* arr, const CvArr* mask CV_DEFAULT(NULL) ); + +/* Calculates mean and standard deviation of pixel values */ +CVAPI(void) cvAvgSdv( const CvArr* arr, CvScalar* mean, CvScalar* std_dev, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Finds global minimum, maximum and their positions */ +CVAPI(void) cvMinMaxLoc( const CvArr* arr, double* min_val, double* max_val, + CvPoint* min_loc CV_DEFAULT(NULL), + CvPoint* max_loc CV_DEFAULT(NULL), + const CvArr* mask CV_DEFAULT(NULL) ); + +/* types of array norm */ +#define CV_C 1 +#define CV_L1 2 +#define CV_L2 4 +#define CV_NORM_MASK 7 +#define CV_RELATIVE 8 +#define CV_DIFF 16 +#define CV_MINMAX 32 + +#define CV_DIFF_C (CV_DIFF | CV_C) +#define CV_DIFF_L1 (CV_DIFF | CV_L1) +#define CV_DIFF_L2 (CV_DIFF | CV_L2) +#define CV_RELATIVE_C (CV_RELATIVE | CV_C) +#define CV_RELATIVE_L1 (CV_RELATIVE | CV_L1) +#define CV_RELATIVE_L2 (CV_RELATIVE | CV_L2) + +/* Finds norm, difference norm or relative difference norm for an array (or two arrays) */ +CVAPI(double) cvNorm( const CvArr* arr1, const CvArr* arr2 CV_DEFAULT(NULL), + int norm_type CV_DEFAULT(CV_L2), + const CvArr* mask CV_DEFAULT(NULL) ); + +CVAPI(void) cvNormalize( const CvArr* src, CvArr* dst, + double a CV_DEFAULT(1.), double b CV_DEFAULT(0.), + int norm_type CV_DEFAULT(CV_L2), + const CvArr* mask CV_DEFAULT(NULL) ); + + +#define CV_REDUCE_SUM 0 +#define CV_REDUCE_AVG 1 +#define CV_REDUCE_MAX 2 +#define CV_REDUCE_MIN 3 + +CVAPI(void) cvReduce( const CvArr* src, CvArr* dst, int dim CV_DEFAULT(-1), + int op CV_DEFAULT(CV_REDUCE_SUM) ); + +/****************************************************************************************\ +* Discrete Linear Transforms and Related Functions * +\****************************************************************************************/ + +#define CV_DXT_FORWARD 0 +#define CV_DXT_INVERSE 1 +#define CV_DXT_SCALE 2 /* divide result by size of array */ +#define CV_DXT_INV_SCALE (CV_DXT_INVERSE + 
CV_DXT_SCALE) +#define CV_DXT_INVERSE_SCALE CV_DXT_INV_SCALE +#define CV_DXT_ROWS 4 /* transform each row individually */ +#define CV_DXT_MUL_CONJ 8 /* conjugate the second argument of cvMulSpectrums */ + +/* Discrete Fourier Transform: + complex->complex, + real->ccs (forward), + ccs->real (inverse) */ +CVAPI(void) cvDFT( const CvArr* src, CvArr* dst, int flags, + int nonzero_rows CV_DEFAULT(0) ); +#define cvFFT cvDFT + +/* Multiply results of DFTs: DFT(X)*DFT(Y) or DFT(X)*conj(DFT(Y)) */ +CVAPI(void) cvMulSpectrums( const CvArr* src1, const CvArr* src2, + CvArr* dst, int flags ); + +/* Finds optimal DFT vector size >= size0 */ +CVAPI(int) cvGetOptimalDFTSize( int size0 ); + +/* Discrete Cosine Transform */ +CVAPI(void) cvDCT( const CvArr* src, CvArr* dst, int flags ); + +/****************************************************************************************\ +* Dynamic data structures * +\****************************************************************************************/ + +/* Calculates length of sequence slice (with support of negative indices). */ +CVAPI(int) cvSliceLength( CvSlice slice, const CvSeq* seq ); + + +/* Creates new memory storage. + block_size == 0 means that default, + somewhat optimal size, is used (currently, it is 64K) */ +CVAPI(CvMemStorage*) cvCreateMemStorage( int block_size CV_DEFAULT(0)); + + +/* Creates a memory storage that will borrow memory blocks from parent storage */ +CVAPI(CvMemStorage*) cvCreateChildMemStorage( CvMemStorage* parent ); + + +/* Releases memory storage. All the children of a parent must be released before + the parent. A child storage returns all the blocks to parent when it is released */ +CVAPI(void) cvReleaseMemStorage( CvMemStorage** storage ); + + +/* Clears memory storage. This is the only way(!!!) (besides cvRestoreMemStoragePos) + to reuse memory allocated for the storage - cvClearSeq,cvClearSet ... + do not free any memory. + A child storage returns all the blocks to the parent when it is cleared */ +CVAPI(void) cvClearMemStorage( CvMemStorage* storage ); + +/* Remember a storage "free memory" position */ +CVAPI(void) cvSaveMemStoragePos( const CvMemStorage* storage, CvMemStoragePos* pos ); + +/* Restore a storage "free memory" position */ +CVAPI(void) cvRestoreMemStoragePos( CvMemStorage* storage, CvMemStoragePos* pos ); + +/* Allocates continuous buffer of the specified size in the storage */ +CVAPI(void*) cvMemStorageAlloc( CvMemStorage* storage, size_t size ); + +/* Allocates string in memory storage */ +CVAPI(CvString) cvMemStorageAllocString( CvMemStorage* storage, const char* ptr, + int len CV_DEFAULT(-1) ); + +/* Creates new empty sequence that will reside in the specified storage */ +CVAPI(CvSeq*) cvCreateSeq( int seq_flags, size_t header_size, + size_t elem_size, CvMemStorage* storage ); + +/* Changes default size (granularity) of sequence blocks. + The default size is ~1Kbyte */ +CVAPI(void) cvSetSeqBlockSize( CvSeq* seq, int delta_elems ); + + +/* Adds new element to the end of sequence. Returns pointer to the element */ +CVAPI(schar*) cvSeqPush( CvSeq* seq, const void* element CV_DEFAULT(NULL)); + + +/* Adds new element to the beginning of sequence. 
Returns pointer to it */ +CVAPI(schar*) cvSeqPushFront( CvSeq* seq, const void* element CV_DEFAULT(NULL)); + + +/* Removes the last element from sequence and optionally saves it */ +CVAPI(void) cvSeqPop( CvSeq* seq, void* element CV_DEFAULT(NULL)); + + +/* Removes the first element from sequence and optioanally saves it */ +CVAPI(void) cvSeqPopFront( CvSeq* seq, void* element CV_DEFAULT(NULL)); + + +#define CV_FRONT 1 +#define CV_BACK 0 +/* Adds several new elements to the end of sequence */ +CVAPI(void) cvSeqPushMulti( CvSeq* seq, const void* elements, + int count, int in_front CV_DEFAULT(0) ); + +/* Removes several elements from the end of sequence and optionally saves them */ +CVAPI(void) cvSeqPopMulti( CvSeq* seq, void* elements, + int count, int in_front CV_DEFAULT(0) ); + +/* Inserts a new element in the middle of sequence. + cvSeqInsert(seq,0,elem) == cvSeqPushFront(seq,elem) */ +CVAPI(schar*) cvSeqInsert( CvSeq* seq, int before_index, + const void* element CV_DEFAULT(NULL)); + +/* Removes specified sequence element */ +CVAPI(void) cvSeqRemove( CvSeq* seq, int index ); + + +/* Removes all the elements from the sequence. The freed memory + can be reused later only by the same sequence unless cvClearMemStorage + or cvRestoreMemStoragePos is called */ +CVAPI(void) cvClearSeq( CvSeq* seq ); + + +/* Retrieves pointer to specified sequence element. + Negative indices are supported and mean counting from the end + (e.g -1 means the last sequence element) */ +CVAPI(schar*) cvGetSeqElem( const CvSeq* seq, int index ); + +/* Calculates index of the specified sequence element. + Returns -1 if element does not belong to the sequence */ +CVAPI(int) cvSeqElemIdx( const CvSeq* seq, const void* element, + CvSeqBlock** block CV_DEFAULT(NULL) ); + +/* Initializes sequence writer. The new elements will be added to the end of sequence */ +CVAPI(void) cvStartAppendToSeq( CvSeq* seq, CvSeqWriter* writer ); + + +/* Combination of cvCreateSeq and cvStartAppendToSeq */ +CVAPI(void) cvStartWriteSeq( int seq_flags, int header_size, + int elem_size, CvMemStorage* storage, + CvSeqWriter* writer ); + +/* Closes sequence writer, updates sequence header and returns pointer + to the resultant sequence + (which may be useful if the sequence was created using cvStartWriteSeq)) +*/ +CVAPI(CvSeq*) cvEndWriteSeq( CvSeqWriter* writer ); + + +/* Updates sequence header. May be useful to get access to some of previously + written elements via cvGetSeqElem or sequence reader */ +CVAPI(void) cvFlushSeqWriter( CvSeqWriter* writer ); + + +/* Initializes sequence reader. + The sequence can be read in forward or backward direction */ +CVAPI(void) cvStartReadSeq( const CvSeq* seq, CvSeqReader* reader, + int reverse CV_DEFAULT(0) ); + + +/* Returns current sequence reader position (currently observed sequence element) */ +CVAPI(int) cvGetSeqReaderPos( CvSeqReader* reader ); + + +/* Changes sequence reader position. It may seek to an absolute or + to relative to the current position */ +CVAPI(void) cvSetSeqReaderPos( CvSeqReader* reader, int index, + int is_relative CV_DEFAULT(0)); + +/* Copies sequence content to a continuous piece of memory */ +CVAPI(void*) cvCvtSeqToArray( const CvSeq* seq, void* elements, + CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ) ); + +/* Creates sequence header for array. 
+ After that all the operations on sequences that do not alter the content + can be applied to the resultant sequence */ +CVAPI(CvSeq*) cvMakeSeqHeaderForArray( int seq_type, int header_size, + int elem_size, void* elements, int total, + CvSeq* seq, CvSeqBlock* block ); + +/* Extracts sequence slice (with or without copying sequence elements) */ +CVAPI(CvSeq*) cvSeqSlice( const CvSeq* seq, CvSlice slice, + CvMemStorage* storage CV_DEFAULT(NULL), + int copy_data CV_DEFAULT(0)); + +CV_INLINE CvSeq* cvCloneSeq( const CvSeq* seq, CvMemStorage* storage CV_DEFAULT(NULL)) +{ + return cvSeqSlice( seq, CV_WHOLE_SEQ, storage, 1 ); +} + +/* Removes sequence slice */ +CVAPI(void) cvSeqRemoveSlice( CvSeq* seq, CvSlice slice ); + +/* Inserts a sequence or array into another sequence */ +CVAPI(void) cvSeqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr ); + +/* a < b ? -1 : a > b ? 1 : 0 */ +typedef int (CV_CDECL* CvCmpFunc)(const void* a, const void* b, void* userdata ); + +/* Sorts sequence in-place given element comparison function */ +CVAPI(void) cvSeqSort( CvSeq* seq, CvCmpFunc func, void* userdata CV_DEFAULT(NULL) ); + +/* Finds element in a [sorted] sequence */ +CVAPI(schar*) cvSeqSearch( CvSeq* seq, const void* elem, CvCmpFunc func, + int is_sorted, int* elem_idx, + void* userdata CV_DEFAULT(NULL) ); + +/* Reverses order of sequence elements in-place */ +CVAPI(void) cvSeqInvert( CvSeq* seq ); + +/* Splits sequence into one or more equivalence classes using the specified criteria */ +CVAPI(int) cvSeqPartition( const CvSeq* seq, CvMemStorage* storage, + CvSeq** labels, CvCmpFunc is_equal, void* userdata ); + +/************ Internal sequence functions ************/ +CVAPI(void) cvChangeSeqBlock( void* reader, int direction ); +CVAPI(void) cvCreateSeqBlock( CvSeqWriter* writer ); + + +/* Creates a new set */ +CVAPI(CvSet*) cvCreateSet( int set_flags, int header_size, + int elem_size, CvMemStorage* storage ); + +/* Adds new element to the set and returns pointer to it */ +CVAPI(int) cvSetAdd( CvSet* set_header, CvSetElem* elem CV_DEFAULT(NULL), + CvSetElem** inserted_elem CV_DEFAULT(NULL) ); + +/* Fast variant of cvSetAdd */ +CV_INLINE CvSetElem* cvSetNew( CvSet* set_header ) +{ + CvSetElem* elem = set_header->free_elems; + if( elem ) + { + set_header->free_elems = elem->next_free; + elem->flags = elem->flags & CV_SET_ELEM_IDX_MASK; + set_header->active_count++; + } + else + cvSetAdd( set_header, NULL, (CvSetElem**)&elem ); + return elem; +} + +/* Removes set element given its pointer */ +CV_INLINE void cvSetRemoveByPtr( CvSet* set_header, void* elem ) +{ + CvSetElem* _elem = (CvSetElem*)elem; + assert( _elem->flags >= 0 /*&& (elem->flags & CV_SET_ELEM_IDX_MASK) < set_header->total*/ ); + _elem->next_free = set_header->free_elems; + _elem->flags = (_elem->flags & CV_SET_ELEM_IDX_MASK) | CV_SET_ELEM_FREE_FLAG; + set_header->free_elems = _elem; + set_header->active_count--; +} + +/* Removes element from the set by its index */ +CVAPI(void) cvSetRemove( CvSet* set_header, int index ); + +/* Returns a set element by index. If the element doesn't belong to the set, + NULL is returned */ +CV_INLINE CvSetElem* cvGetSetElem( const CvSet* set_header, int idx ) +{ + CvSetElem* elem = (CvSetElem*)(void *)cvGetSeqElem( (CvSeq*)set_header, idx ); + return elem && CV_IS_SET_ELEM( elem ) ? 
elem : 0; +} + +/* Removes all the elements from the set */ +CVAPI(void) cvClearSet( CvSet* set_header ); + +/* Creates new graph */ +CVAPI(CvGraph*) cvCreateGraph( int graph_flags, int header_size, + int vtx_size, int edge_size, + CvMemStorage* storage ); + +/* Adds new vertex to the graph */ +CVAPI(int) cvGraphAddVtx( CvGraph* graph, const CvGraphVtx* vtx CV_DEFAULT(NULL), + CvGraphVtx** inserted_vtx CV_DEFAULT(NULL) ); + + +/* Removes vertex from the graph together with all incident edges */ +CVAPI(int) cvGraphRemoveVtx( CvGraph* graph, int index ); +CVAPI(int) cvGraphRemoveVtxByPtr( CvGraph* graph, CvGraphVtx* vtx ); + + +/* Link two vertices specifed by indices or pointers if they + are not connected or return pointer to already existing edge + connecting the vertices. + Functions return 1 if a new edge was created, 0 otherwise */ +CVAPI(int) cvGraphAddEdge( CvGraph* graph, + int start_idx, int end_idx, + const CvGraphEdge* edge CV_DEFAULT(NULL), + CvGraphEdge** inserted_edge CV_DEFAULT(NULL) ); + +CVAPI(int) cvGraphAddEdgeByPtr( CvGraph* graph, + CvGraphVtx* start_vtx, CvGraphVtx* end_vtx, + const CvGraphEdge* edge CV_DEFAULT(NULL), + CvGraphEdge** inserted_edge CV_DEFAULT(NULL) ); + +/* Remove edge connecting two vertices */ +CVAPI(void) cvGraphRemoveEdge( CvGraph* graph, int start_idx, int end_idx ); +CVAPI(void) cvGraphRemoveEdgeByPtr( CvGraph* graph, CvGraphVtx* start_vtx, + CvGraphVtx* end_vtx ); + +/* Find edge connecting two vertices */ +CVAPI(CvGraphEdge*) cvFindGraphEdge( const CvGraph* graph, int start_idx, int end_idx ); +CVAPI(CvGraphEdge*) cvFindGraphEdgeByPtr( const CvGraph* graph, + const CvGraphVtx* start_vtx, + const CvGraphVtx* end_vtx ); +#define cvGraphFindEdge cvFindGraphEdge +#define cvGraphFindEdgeByPtr cvFindGraphEdgeByPtr + +/* Remove all vertices and edges from the graph */ +CVAPI(void) cvClearGraph( CvGraph* graph ); + + +/* Count number of edges incident to the vertex */ +CVAPI(int) cvGraphVtxDegree( const CvGraph* graph, int vtx_idx ); +CVAPI(int) cvGraphVtxDegreeByPtr( const CvGraph* graph, const CvGraphVtx* vtx ); + + +/* Retrieves graph vertex by given index */ +#define cvGetGraphVtx( graph, idx ) (CvGraphVtx*)cvGetSetElem((CvSet*)(graph), (idx)) + +/* Retrieves index of a graph vertex given its pointer */ +#define cvGraphVtxIdx( graph, vtx ) ((vtx)->flags & CV_SET_ELEM_IDX_MASK) + +/* Retrieves index of a graph edge given its pointer */ +#define cvGraphEdgeIdx( graph, edge ) ((edge)->flags & CV_SET_ELEM_IDX_MASK) + +#define cvGraphGetVtxCount( graph ) ((graph)->active_count) +#define cvGraphGetEdgeCount( graph ) ((graph)->edges->active_count) + +#define CV_GRAPH_VERTEX 1 +#define CV_GRAPH_TREE_EDGE 2 +#define CV_GRAPH_BACK_EDGE 4 +#define CV_GRAPH_FORWARD_EDGE 8 +#define CV_GRAPH_CROSS_EDGE 16 +#define CV_GRAPH_ANY_EDGE 30 +#define CV_GRAPH_NEW_TREE 32 +#define CV_GRAPH_BACKTRACKING 64 +#define CV_GRAPH_OVER -1 + +#define CV_GRAPH_ALL_ITEMS -1 + +/* flags for graph vertices and edges */ +#define CV_GRAPH_ITEM_VISITED_FLAG (1 << 30) +#define CV_IS_GRAPH_VERTEX_VISITED(vtx) \ + (((CvGraphVtx*)(vtx))->flags & CV_GRAPH_ITEM_VISITED_FLAG) +#define CV_IS_GRAPH_EDGE_VISITED(edge) \ + (((CvGraphEdge*)(edge))->flags & CV_GRAPH_ITEM_VISITED_FLAG) +#define CV_GRAPH_SEARCH_TREE_NODE_FLAG (1 << 29) +#define CV_GRAPH_FORWARD_EDGE_FLAG (1 << 28) + +typedef struct CvGraphScanner +{ + CvGraphVtx* vtx; /* current graph vertex (or current edge origin) */ + CvGraphVtx* dst; /* current graph edge destination vertex */ + CvGraphEdge* edge; /* current edge */ + + CvGraph* 
graph; /* the graph */ + CvSeq* stack; /* the graph vertex stack */ + int index; /* the lower bound of certainly visited vertices */ + int mask; /* event mask */ +} +CvGraphScanner; + +/* Creates new graph scanner. */ +CVAPI(CvGraphScanner*) cvCreateGraphScanner( CvGraph* graph, + CvGraphVtx* vtx CV_DEFAULT(NULL), + int mask CV_DEFAULT(CV_GRAPH_ALL_ITEMS)); + +/* Releases graph scanner. */ +CVAPI(void) cvReleaseGraphScanner( CvGraphScanner** scanner ); + +/* Get next graph element */ +CVAPI(int) cvNextGraphItem( CvGraphScanner* scanner ); + +/* Creates a copy of graph */ +CVAPI(CvGraph*) cvCloneGraph( const CvGraph* graph, CvMemStorage* storage ); + +/****************************************************************************************\ +* Drawing * +\****************************************************************************************/ + +/****************************************************************************************\ +* Drawing functions work with images/matrices of arbitrary type. * +* For color images the channel order is BGR[A] * +* Antialiasing is supported only for 8-bit image now. * +* All the functions include parameter color that means rgb value (that may be * +* constructed with CV_RGB macro) for color images and brightness * +* for grayscale images. * +* If a drawn figure is partially or completely outside of the image, it is clipped.* +\****************************************************************************************/ + +#define CV_RGB( r, g, b ) cvScalar( (b), (g), (r), 0 ) +#define CV_FILLED -1 + +#define CV_AA 16 + +/* Draws 4-connected, 8-connected or antialiased line segment connecting two points */ +CVAPI(void) cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +/* Draws a rectangle given two opposite corners of the rectangle (pt1 & pt2), + if thickness<0 (e.g. thickness == CV_FILLED), the filled box is drawn */ +CVAPI(void) cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + int shift CV_DEFAULT(0)); + +/* Draws a rectangle specified by a CvRect structure */ +CVAPI(void) cvRectangleR( CvArr* img, CvRect r, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + int shift CV_DEFAULT(0)); + + +/* Draws a circle with specified center and radius. + Thickness works in the same way as with cvRectangle */ +CVAPI(void) cvCircle( CvArr* img, CvPoint center, int radius, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +/* Draws ellipse outline, filled ellipse, elliptic arc or filled elliptic sector, + depending on , and parameters. The resultant figure + is rotated by . All the angles are in degrees */ +CVAPI(void) cvEllipse( CvArr* img, CvPoint center, CvSize axes, + double angle, double start_angle, double end_angle, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +CV_INLINE void cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color, + int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ) +{ + CvSize axes; + axes.width = cvRound(box.size.width*0.5); + axes.height = cvRound(box.size.height*0.5); + + cvEllipse( img, cvPointFrom32f( box.center ), axes, box.angle, + 0, 360, color, thickness, line_type, shift ); +} + +/* Fills convex or monotonous polygon. 
*/ +CVAPI(void) cvFillConvexPoly( CvArr* img, const CvPoint* pts, int npts, CvScalar color, + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +/* Fills an area bounded by one or more arbitrary polygons */ +CVAPI(void) cvFillPoly( CvArr* img, CvPoint** pts, const int* npts, + int contours, CvScalar color, + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +/* Draws one or more polygonal curves */ +CVAPI(void) cvPolyLine( CvArr* img, CvPoint** pts, const int* npts, int contours, + int is_closed, CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +#define cvDrawRect cvRectangle +#define cvDrawLine cvLine +#define cvDrawCircle cvCircle +#define cvDrawEllipse cvEllipse +#define cvDrawPolyLine cvPolyLine + +/* Clips the line segment connecting *pt1 and *pt2 + by the rectangular window + (0<=xptr will point + to pt1 (or pt2, see left_to_right description) location in the image. + Returns the number of pixels on the line between the ending points. */ +CVAPI(int) cvInitLineIterator( const CvArr* image, CvPoint pt1, CvPoint pt2, + CvLineIterator* line_iterator, + int connectivity CV_DEFAULT(8), + int left_to_right CV_DEFAULT(0)); + +/* Moves iterator to the next line point */ +#define CV_NEXT_LINE_POINT( line_iterator ) \ +{ \ + int _line_iterator_mask = (line_iterator).err < 0 ? -1 : 0; \ + (line_iterator).err += (line_iterator).minus_delta + \ + ((line_iterator).plus_delta & _line_iterator_mask); \ + (line_iterator).ptr += (line_iterator).minus_step + \ + ((line_iterator).plus_step & _line_iterator_mask); \ +} + + +/* basic font types */ +#define CV_FONT_HERSHEY_SIMPLEX 0 +#define CV_FONT_HERSHEY_PLAIN 1 +#define CV_FONT_HERSHEY_DUPLEX 2 +#define CV_FONT_HERSHEY_COMPLEX 3 +#define CV_FONT_HERSHEY_TRIPLEX 4 +#define CV_FONT_HERSHEY_COMPLEX_SMALL 5 +#define CV_FONT_HERSHEY_SCRIPT_SIMPLEX 6 +#define CV_FONT_HERSHEY_SCRIPT_COMPLEX 7 + +/* font flags */ +#define CV_FONT_ITALIC 16 + +#define CV_FONT_VECTOR0 CV_FONT_HERSHEY_SIMPLEX + + +/* Font structure */ +typedef struct CvFont +{ + const char* nameFont; //Qt:nameFont + CvScalar color; //Qt:ColorFont -> cvScalar(blue_component, green_component, red\_component[, alpha_component]) + int font_face; //Qt: bool italic /* =CV_FONT_* */ + const int* ascii; /* font data and metrics */ + const int* greek; + const int* cyrillic; + float hscale, vscale; + float shear; /* slope coefficient: 0 - normal, >0 - italic */ + int thickness; //Qt: weight /* letters thickness */ + float dx; /* horizontal interval between letters */ + int line_type; //Qt: PointSize +} +CvFont; + +/* Initializes font structure used further in cvPutText */ +CVAPI(void) cvInitFont( CvFont* font, int font_face, + double hscale, double vscale, + double shear CV_DEFAULT(0), + int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8)); + +CV_INLINE CvFont cvFont( double scale, int thickness CV_DEFAULT(1) ) +{ + CvFont font; + cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, scale, scale, 0, thickness, CV_AA ); + return font; +} + +/* Renders text stroke with specified font and color at specified location. 
+ CvFont should be initialized with cvInitFont */ +CVAPI(void) cvPutText( CvArr* img, const char* text, CvPoint org, + const CvFont* font, CvScalar color ); + +/* Calculates bounding box of text stroke (useful for alignment) */ +CVAPI(void) cvGetTextSize( const char* text_string, const CvFont* font, + CvSize* text_size, int* baseline ); + + + +/* Unpacks color value, if arrtype is CV_8UC?, is treated as + packed color value, otherwise the first channels (depending on arrtype) + of destination scalar are set to the same value = */ +CVAPI(CvScalar) cvColorToScalar( double packed_color, int arrtype ); + +/* Returns the polygon points which make up the given ellipse. The ellipse is define by + the box of size 'axes' rotated 'angle' around the 'center'. A partial sweep + of the ellipse arc can be done by spcifying arc_start and arc_end to be something + other than 0 and 360, respectively. The input array 'pts' must be large enough to + hold the result. The total number of points stored into 'pts' is returned by this + function. */ +CVAPI(int) cvEllipse2Poly( CvPoint center, CvSize axes, + int angle, int arc_start, int arc_end, CvPoint * pts, int delta ); + +/* Draws contour outlines or filled interiors on the image */ +CVAPI(void) cvDrawContours( CvArr *img, CvSeq* contour, + CvScalar external_color, CvScalar hole_color, + int max_level, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + CvPoint offset CV_DEFAULT(cvPoint(0,0))); + +/* Does look-up transformation. Elements of the source array + (that should be 8uC1 or 8sC1) are used as indexes in lutarr 256-element table */ +CVAPI(void) cvLUT( const CvArr* src, CvArr* dst, const CvArr* lut ); + + +/******************* Iteration through the sequence tree *****************/ +typedef struct CvTreeNodeIterator +{ + const void* node; + int level; + int max_level; +} +CvTreeNodeIterator; + +CVAPI(void) cvInitTreeNodeIterator( CvTreeNodeIterator* tree_iterator, + const void* first, int max_level ); +CVAPI(void*) cvNextTreeNode( CvTreeNodeIterator* tree_iterator ); +CVAPI(void*) cvPrevTreeNode( CvTreeNodeIterator* tree_iterator ); + +/* Inserts sequence into tree with specified "parent" sequence. + If parent is equal to frame (e.g. the most external contour), + then added contour will have null pointer to parent. */ +CVAPI(void) cvInsertNodeIntoTree( void* node, void* parent, void* frame ); + +/* Removes contour from tree (together with the contour children). */ +CVAPI(void) cvRemoveNodeFromTree( void* node, void* frame ); + +/* Gathers pointers to all the sequences, + accessible from the , to the single sequence */ +CVAPI(CvSeq*) cvTreeToNodeSeq( const void* first, int header_size, + CvMemStorage* storage ); + +/* The function implements the K-means algorithm for clustering an array of sample + vectors in a specified number of classes */ +#define CV_KMEANS_USE_INITIAL_LABELS 1 +CVAPI(int) cvKMeans2( const CvArr* samples, int cluster_count, CvArr* labels, + CvTermCriteria termcrit, int attempts CV_DEFAULT(1), + CvRNG* rng CV_DEFAULT(0), int flags CV_DEFAULT(0), + CvArr* _centers CV_DEFAULT(0), double* compactness CV_DEFAULT(0) ); + +/****************************************************************************************\ +* System functions * +\****************************************************************************************/ + +/* Add the function pointers table with associated information to the IPP primitives list */ +CVAPI(int) cvRegisterModule( const CvModuleInfo* module_info ); + +/* Loads optimized functions from IPP, MKL etc. 
or switches back to pure C code */ +CVAPI(int) cvUseOptimized( int on_off ); + +/* Retrieves information about the registered modules and loaded optimized plugins */ +CVAPI(void) cvGetModuleInfo( const char* module_name, + const char** version, + const char** loaded_addon_plugins ); + +typedef void* (CV_CDECL *CvAllocFunc)(size_t size, void* userdata); +typedef int (CV_CDECL *CvFreeFunc)(void* pptr, void* userdata); + +/* Set user-defined memory managment functions (substitutors for malloc and free) that + will be called by cvAlloc, cvFree and higher-level functions (e.g. cvCreateImage) */ +CVAPI(void) cvSetMemoryManager( CvAllocFunc alloc_func CV_DEFAULT(NULL), + CvFreeFunc free_func CV_DEFAULT(NULL), + void* userdata CV_DEFAULT(NULL)); + + +typedef IplImage* (CV_STDCALL* Cv_iplCreateImageHeader) + (int,int,int,char*,char*,int,int,int,int,int, + IplROI*,IplImage*,void*,IplTileInfo*); +typedef void (CV_STDCALL* Cv_iplAllocateImageData)(IplImage*,int,int); +typedef void (CV_STDCALL* Cv_iplDeallocate)(IplImage*,int); +typedef IplROI* (CV_STDCALL* Cv_iplCreateROI)(int,int,int,int,int); +typedef IplImage* (CV_STDCALL* Cv_iplCloneImage)(const IplImage*); + +/* Makes OpenCV use IPL functions for IplImage allocation/deallocation */ +CVAPI(void) cvSetIPLAllocators( Cv_iplCreateImageHeader create_header, + Cv_iplAllocateImageData allocate_data, + Cv_iplDeallocate deallocate, + Cv_iplCreateROI create_roi, + Cv_iplCloneImage clone_image ); + +#define CV_TURN_ON_IPL_COMPATIBILITY() \ + cvSetIPLAllocators( iplCreateImageHeader, iplAllocateImage, \ + iplDeallocate, iplCreateROI, iplCloneImage ) + +/****************************************************************************************\ +* Data Persistence * +\****************************************************************************************/ + +/********************************** High-level functions ********************************/ + +/* opens existing or creates new file storage */ +CVAPI(CvFileStorage*) cvOpenFileStorage( const char* filename, CvMemStorage* memstorage, + int flags, const char* encoding CV_DEFAULT(NULL) ); + +/* closes file storage and deallocates buffers */ +CVAPI(void) cvReleaseFileStorage( CvFileStorage** fs ); + +/* returns attribute value or 0 (NULL) if there is no such attribute */ +CVAPI(const char*) cvAttrValue( const CvAttrList* attr, const char* attr_name ); + +/* starts writing compound structure (map or sequence) */ +CVAPI(void) cvStartWriteStruct( CvFileStorage* fs, const char* name, + int struct_flags, const char* type_name CV_DEFAULT(NULL), + CvAttrList attributes CV_DEFAULT(cvAttrList())); + +/* finishes writing compound structure */ +CVAPI(void) cvEndWriteStruct( CvFileStorage* fs ); + +/* writes an integer */ +CVAPI(void) cvWriteInt( CvFileStorage* fs, const char* name, int value ); + +/* writes a floating-point number */ +CVAPI(void) cvWriteReal( CvFileStorage* fs, const char* name, double value ); + +/* writes a string */ +CVAPI(void) cvWriteString( CvFileStorage* fs, const char* name, + const char* str, int quote CV_DEFAULT(0) ); + +/* writes a comment */ +CVAPI(void) cvWriteComment( CvFileStorage* fs, const char* comment, + int eol_comment ); + +/* writes instance of a standard type (matrix, image, sequence, graph etc.) 
+ or user-defined type */ +CVAPI(void) cvWrite( CvFileStorage* fs, const char* name, const void* ptr, + CvAttrList attributes CV_DEFAULT(cvAttrList())); + +/* starts the next stream */ +CVAPI(void) cvStartNextStream( CvFileStorage* fs ); + +/* helper function: writes multiple integer or floating-point numbers */ +CVAPI(void) cvWriteRawData( CvFileStorage* fs, const void* src, + int len, const char* dt ); + +/* returns the hash entry corresponding to the specified literal key string or 0 + if there is no such a key in the storage */ +CVAPI(CvStringHashNode*) cvGetHashedKey( CvFileStorage* fs, const char* name, + int len CV_DEFAULT(-1), + int create_missing CV_DEFAULT(0)); + +/* returns file node with the specified key within the specified map + (collection of named nodes) */ +CVAPI(CvFileNode*) cvGetRootFileNode( const CvFileStorage* fs, + int stream_index CV_DEFAULT(0) ); + +/* returns file node with the specified key within the specified map + (collection of named nodes) */ +CVAPI(CvFileNode*) cvGetFileNode( CvFileStorage* fs, CvFileNode* map, + const CvStringHashNode* key, + int create_missing CV_DEFAULT(0) ); + +/* this is a slower version of cvGetFileNode that takes the key as a literal string */ +CVAPI(CvFileNode*) cvGetFileNodeByName( const CvFileStorage* fs, + const CvFileNode* map, + const char* name ); + +CV_INLINE int cvReadInt( const CvFileNode* node, int default_value CV_DEFAULT(0) ) +{ + return !node ? default_value : + CV_NODE_IS_INT(node->tag) ? node->data.i : + CV_NODE_IS_REAL(node->tag) ? cvRound(node->data.f) : 0x7fffffff; +} + + +CV_INLINE int cvReadIntByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, int default_value CV_DEFAULT(0) ) +{ + return cvReadInt( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +CV_INLINE double cvReadReal( const CvFileNode* node, double default_value CV_DEFAULT(0.) ) +{ + return !node ? default_value : + CV_NODE_IS_INT(node->tag) ? (double)node->data.i : + CV_NODE_IS_REAL(node->tag) ? node->data.f : 1e300; +} + + +CV_INLINE double cvReadRealByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, double default_value CV_DEFAULT(0.) ) +{ + return cvReadReal( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +CV_INLINE const char* cvReadString( const CvFileNode* node, + const char* default_value CV_DEFAULT(NULL) ) +{ + return !node ? default_value : CV_NODE_IS_STRING(node->tag) ? 
node->data.str.ptr : 0; +} + + +CV_INLINE const char* cvReadStringByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, const char* default_value CV_DEFAULT(NULL) ) +{ + return cvReadString( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +/* decodes standard or user-defined object and returns it */ +CVAPI(void*) cvRead( CvFileStorage* fs, CvFileNode* node, + CvAttrList* attributes CV_DEFAULT(NULL)); + +/* decodes standard or user-defined object and returns it */ +CV_INLINE void* cvReadByName( CvFileStorage* fs, const CvFileNode* map, + const char* name, CvAttrList* attributes CV_DEFAULT(NULL) ) +{ + return cvRead( fs, cvGetFileNodeByName( fs, map, name ), attributes ); +} + + +/* starts reading data from sequence or scalar numeric node */ +CVAPI(void) cvStartReadRawData( const CvFileStorage* fs, const CvFileNode* src, + CvSeqReader* reader ); + +/* reads multiple numbers and stores them to array */ +CVAPI(void) cvReadRawDataSlice( const CvFileStorage* fs, CvSeqReader* reader, + int count, void* dst, const char* dt ); + +/* combination of two previous functions for easier reading of whole sequences */ +CVAPI(void) cvReadRawData( const CvFileStorage* fs, const CvFileNode* src, + void* dst, const char* dt ); + +/* writes a copy of file node to file storage */ +CVAPI(void) cvWriteFileNode( CvFileStorage* fs, const char* new_node_name, + const CvFileNode* node, int embed ); + +/* returns name of file node */ +CVAPI(const char*) cvGetFileNodeName( const CvFileNode* node ); + +/*********************************** Adding own types ***********************************/ + +CVAPI(void) cvRegisterType( const CvTypeInfo* info ); +CVAPI(void) cvUnregisterType( const char* type_name ); +CVAPI(CvTypeInfo*) cvFirstType(void); +CVAPI(CvTypeInfo*) cvFindType( const char* type_name ); +CVAPI(CvTypeInfo*) cvTypeOf( const void* struct_ptr ); + +/* universal functions */ +CVAPI(void) cvRelease( void** struct_ptr ); +CVAPI(void*) cvClone( const void* struct_ptr ); + +/* simple API for reading/writing data */ +CVAPI(void) cvSave( const char* filename, const void* struct_ptr, + const char* name CV_DEFAULT(NULL), + const char* comment CV_DEFAULT(NULL), + CvAttrList attributes CV_DEFAULT(cvAttrList())); +CVAPI(void*) cvLoad( const char* filename, + CvMemStorage* memstorage CV_DEFAULT(NULL), + const char* name CV_DEFAULT(NULL), + const char** real_name CV_DEFAULT(NULL) ); + +/*********************************** Measuring Execution Time ***************************/ + +/* helper functions for RNG initialization and accurate time measurement: + uses internal clock counter on x86 */ +CVAPI(int64) cvGetTickCount( void ); +CVAPI(double) cvGetTickFrequency( void ); + +/*********************************** CPU capabilities ***********************************/ + +#define CV_CPU_NONE 0 +#define CV_CPU_MMX 1 +#define CV_CPU_SSE 2 +#define CV_CPU_SSE2 3 +#define CV_CPU_SSE3 4 +#define CV_CPU_SSSE3 5 +#define CV_CPU_SSE4_1 6 +#define CV_CPU_SSE4_2 7 +#define CV_CPU_POPCNT 8 +#define CV_CPU_AVX 10 +#define CV_HARDWARE_MAX_FEATURE 255 + +CVAPI(int) cvCheckHardwareSupport(int feature); + +/*********************************** Multi-Threading ************************************/ + +/* retrieve/set the number of threads used in OpenMP implementations */ +CVAPI(int) cvGetNumThreads( void ); +CVAPI(void) cvSetNumThreads( int threads CV_DEFAULT(0) ); +/* get index of the thread being executed */ +CVAPI(int) cvGetThreadNum( void ); + + +/********************************** Error Handling 
**************************************/ + +/* Get current OpenCV error status */ +CVAPI(int) cvGetErrStatus( void ); + +/* Sets error status silently */ +CVAPI(void) cvSetErrStatus( int status ); + +#define CV_ErrModeLeaf 0 /* Print error and exit program */ +#define CV_ErrModeParent 1 /* Print error and continue */ +#define CV_ErrModeSilent 2 /* Don't print and continue */ + +/* Retrives current error processing mode */ +CVAPI(int) cvGetErrMode( void ); + +/* Sets error processing mode, returns previously used mode */ +CVAPI(int) cvSetErrMode( int mode ); + +/* Sets error status and performs some additonal actions (displaying message box, + writing message to stderr, terminating application etc.) + depending on the current error mode */ +CVAPI(void) cvError( int status, const char* func_name, + const char* err_msg, const char* file_name, int line ); + +/* Retrieves textual description of the error given its code */ +CVAPI(const char*) cvErrorStr( int status ); + +/* Retrieves detailed information about the last error occured */ +CVAPI(int) cvGetErrInfo( const char** errcode_desc, const char** description, + const char** filename, int* line ); + +/* Maps IPP error codes to the counterparts from OpenCV */ +CVAPI(int) cvErrorFromIppStatus( int ipp_status ); + +typedef int (CV_CDECL *CvErrorCallback)( int status, const char* func_name, + const char* err_msg, const char* file_name, int line, void* userdata ); + +/* Assigns a new error-handling function */ +CVAPI(CvErrorCallback) cvRedirectError( CvErrorCallback error_handler, + void* userdata CV_DEFAULT(NULL), + void** prev_userdata CV_DEFAULT(NULL) ); + +/* + Output to: + cvNulDevReport - nothing + cvStdErrReport - console(fprintf(stderr,...)) + cvGuiBoxReport - MessageBox(WIN32) + */ +CVAPI(int) cvNulDevReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +CVAPI(int) cvStdErrReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +CVAPI(int) cvGuiBoxReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +#define OPENCV_ERROR(status,func,context) \ +cvError((status),(func),(context),__FILE__,__LINE__) + +#define OPENCV_ERRCHK(func,context) \ +{if (cvGetErrStatus() >= 0) \ +{OPENCV_ERROR(CV_StsBackTrace,(func),(context));}} + +#define OPENCV_ASSERT(expr,func,context) \ +{if (! (expr)) \ +{OPENCV_ERROR(CV_StsInternal,(func),(context));}} + +#define OPENCV_RSTERR() (cvSetErrStatus(CV_StsOk)) + +#define OPENCV_CALL( Func ) \ +{ \ +Func; \ +} + + +/* CV_FUNCNAME macro defines icvFuncName constant which is used by CV_ERROR macro */ +#ifdef CV_NO_FUNC_NAMES +#define CV_FUNCNAME( Name ) +#define cvFuncName "" +#else +#define CV_FUNCNAME( Name ) \ +static char cvFuncName[] = Name +#endif + + +/* + CV_ERROR macro unconditionally raises error with passed code and message. + After raising error, control will be transferred to the exit label. + */ +#define CV_ERROR( Code, Msg ) \ +{ \ + cvError( (Code), cvFuncName, Msg, __FILE__, __LINE__ ); \ + __CV_EXIT__; \ +} + +/* Simplified form of CV_ERROR */ +#define CV_ERROR_FROM_CODE( code ) \ + CV_ERROR( code, "" ) + +/* + CV_CHECK macro checks error status after CV (or IPL) + function call. If error detected, control will be transferred to the exit + label. + */ +#define CV_CHECK() \ +{ \ + if( cvGetErrStatus() < 0 ) \ + CV_ERROR( CV_StsBackTrace, "Inner function failed." 
); \ +} + + +/* + CV_CALL macro calls CV (or IPL) function, checks error status and + signals a error if the function failed. Useful in "parent node" + error procesing mode + */ +#define CV_CALL( Func ) \ +{ \ + Func; \ + CV_CHECK(); \ +} + + +/* Runtime assertion macro */ +#define CV_ASSERT( Condition ) \ +{ \ + if( !(Condition) ) \ + CV_ERROR( CV_StsInternal, "Assertion: " #Condition " failed" ); \ +} + +#define __CV_BEGIN__ { +#define __CV_END__ goto exit; exit: ; } +#define __CV_EXIT__ goto exit + +#ifdef __cplusplus +} + +// classes for automatic module/RTTI data registration/unregistration +struct CV_EXPORTS CvModule +{ + CvModule( CvModuleInfo* _info ); + ~CvModule(); + CvModuleInfo* info; + + static CvModuleInfo* first; + static CvModuleInfo* last; +}; + +struct CV_EXPORTS CvType +{ + CvType( const char* type_name, + CvIsInstanceFunc is_instance, CvReleaseFunc release=0, + CvReadFunc read=0, CvWriteFunc write=0, CvCloneFunc clone=0 ); + ~CvType(); + CvTypeInfo* info; + + static CvTypeInfo* first; + static CvTypeInfo* last; +}; + +#endif + +#endif diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/cuda_devptrs.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/cuda_devptrs.hpp new file mode 100644 index 0000000..1534045 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/cuda_devptrs.hpp @@ -0,0 +1,199 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_DEVPTRS_HPP__ +#define __OPENCV_CORE_DEVPTRS_HPP__ + +#ifdef __cplusplus + +#ifdef __CUDACC__ + #define __CV_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__ +#else + #define __CV_GPU_HOST_DEVICE__ +#endif + +namespace cv +{ + namespace gpu + { + // Simple lightweight structures that encapsulates information about an image on device. + // It is intended to pass to nvcc-compiled code. GpuMat depends on headers that nvcc can't compile + + template struct StaticAssert; + template <> struct StaticAssert {static __CV_GPU_HOST_DEVICE__ void check(){}}; + + template struct DevPtr + { + typedef T elem_type; + typedef int index_type; + + enum { elem_size = sizeof(elem_type) }; + + T* data; + + __CV_GPU_HOST_DEVICE__ DevPtr() : data(0) {} + __CV_GPU_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {} + + __CV_GPU_HOST_DEVICE__ size_t elemSize() const { return elem_size; } + __CV_GPU_HOST_DEVICE__ operator T*() { return data; } + __CV_GPU_HOST_DEVICE__ operator const T*() const { return data; } + }; + + template struct PtrSz : public DevPtr + { + __CV_GPU_HOST_DEVICE__ PtrSz() : size(0) {} + __CV_GPU_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr(data_), size(size_) {} + + size_t size; + }; + + template struct PtrStep : public DevPtr + { + __CV_GPU_HOST_DEVICE__ PtrStep() : step(0) {} + __CV_GPU_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr(data_), step(step_) {} + + /** \brief stride between two consecutive rows in bytes. Step is stored always and everywhere in bytes!!! 
*/ + size_t step; + + __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return ( T*)( ( char*)DevPtr::data + y * step); } + __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)DevPtr::data + y * step); } + + __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; } + __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; } + }; + + template struct PtrStepSz : public PtrStep + { + __CV_GPU_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {} + __CV_GPU_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_) + : PtrStep(data_, step_), cols(cols_), rows(rows_) {} + + template + explicit PtrStepSz(const PtrStepSz& d) : PtrStep((T*)d.data, d.step), cols(d.cols), rows(d.rows){} + + int cols; + int rows; + }; + + typedef PtrStepSz PtrStepSzb; + typedef PtrStepSz PtrStepSzf; + typedef PtrStepSz PtrStepSzi; + + typedef PtrStep PtrStepb; + typedef PtrStep PtrStepf; + typedef PtrStep PtrStepi; + + +#if defined __GNUC__ + #define __CV_GPU_DEPR_BEFORE__ + #define __CV_GPU_DEPR_AFTER__ __attribute__ ((deprecated)) +#elif defined(__MSVC__) //|| defined(__CUDACC__) + #pragma deprecated(DevMem2D_) + #define __CV_GPU_DEPR_BEFORE__ __declspec(deprecated) + #define __CV_GPU_DEPR_AFTER__ +#else + #define __CV_GPU_DEPR_BEFORE__ + #define __CV_GPU_DEPR_AFTER__ +#endif + + template struct __CV_GPU_DEPR_BEFORE__ DevMem2D_ : public PtrStepSz + { + DevMem2D_() {} + DevMem2D_(int rows_, int cols_, T* data_, size_t step_) : PtrStepSz(rows_, cols_, data_, step_) {} + + template + explicit __CV_GPU_DEPR_BEFORE__ DevMem2D_(const DevMem2D_& d) : PtrStepSz(d.rows, d.cols, (T*)d.data, d.step) {} + } __CV_GPU_DEPR_AFTER__ ; + + typedef DevMem2D_ DevMem2Db; + typedef DevMem2Db DevMem2D; + typedef DevMem2D_ DevMem2Df; + typedef DevMem2D_ DevMem2Di; + + template struct PtrElemStep_ : public PtrStep + { + PtrElemStep_(const DevMem2D_& mem) : PtrStep(mem.data, mem.step) + { + StaticAssert<256 % sizeof(T) == 0>::check(); + + PtrStep::step /= PtrStep::elem_size; + } + __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return PtrStep::data + y * PtrStep::step; } + __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return PtrStep::data + y * PtrStep::step; } + + __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; } + __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; } + }; + + template struct PtrStep_ : public PtrStep + { + PtrStep_() {} + PtrStep_(const DevMem2D_& mem) : PtrStep(mem.data, mem.step) {} + }; + + typedef PtrElemStep_ PtrElemStep; + typedef PtrElemStep_ PtrElemStepf; + typedef PtrElemStep_ PtrElemStepi; + +//#undef __CV_GPU_DEPR_BEFORE__ +//#undef __CV_GPU_DEPR_AFTER__ + + namespace device + { + using cv::gpu::PtrSz; + using cv::gpu::PtrStep; + using cv::gpu::PtrStepSz; + + using cv::gpu::PtrStepSzb; + using cv::gpu::PtrStepSzf; + using cv::gpu::PtrStepSzi; + + using cv::gpu::PtrStepb; + using cv::gpu::PtrStepf; + using cv::gpu::PtrStepi; + } + } +} + +#endif // __cplusplus + +#endif /* __OPENCV_CORE_DEVPTRS_HPP__ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/devmem2d.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/devmem2d.hpp new file mode 100644 index 0000000..18dfcd8 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/devmem2d.hpp @@ -0,0 +1,43 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: 
READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/core/cuda_devptrs.hpp" diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/eigen.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/eigen.hpp new file mode 100644 index 0000000..a7b237f --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/eigen.hpp @@ -0,0 +1,280 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_EIGEN_HPP__ +#define __OPENCV_CORE_EIGEN_HPP__ + +#ifdef __cplusplus + +#include "opencv2/core/core_c.h" +#include "opencv2/core/core.hpp" + +#if defined _MSC_VER && _MSC_VER >= 1200 +#pragma warning( disable: 4714 ) //__forceinline is not inlined +#pragma warning( disable: 4127 ) //conditional expression is constant +#pragma warning( disable: 4244 ) //conversion from '__int64' to 'int', possible loss of data +#endif + +namespace cv +{ + +template +void eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& src, Mat& dst ) +{ + if( !(src.Flags & Eigen::RowMajorBit) ) + { + Mat _src(src.cols(), src.rows(), DataType<_Tp>::type, + (void*)src.data(), src.stride()*sizeof(_Tp)); + transpose(_src, dst); + } + else + { + Mat _src(src.rows(), src.cols(), DataType<_Tp>::type, + (void*)src.data(), src.stride()*sizeof(_Tp)); + _src.copyTo(dst); + } +} + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst ) +{ + CV_DbgAssert(src.rows == _rows && src.cols == _cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else if( src.cols == src.rows ) + { + src.convertTo(_dst, _dst.type()); + transpose(_dst, _dst); + } + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +// Matx case +template +void cv2eigen( const Matx<_Tp, _rows, _cols>& src, + Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst ) +{ + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(_cols, _rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + transpose(src, _dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(_rows, _cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + Mat(src).copyTo(_dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, 
Eigen::Dynamic>& dst ) +{ + dst.resize(src.rows, src.cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else if( src.cols == src.rows ) + { + src.convertTo(_dst, _dst.type()); + transpose(_dst, _dst); + } + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +// Matx case +template +void cv2eigen( const Matx<_Tp, _rows, _cols>& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst ) +{ + dst.resize(_rows, _cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(_cols, _rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + transpose(src, _dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(_rows, _cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + Mat(src).copyTo(_dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst ) +{ + CV_Assert(src.cols == 1); + dst.resize(src.rows); + + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +// Matx case +template +void cv2eigen( const Matx<_Tp, _rows, 1>& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst ) +{ + dst.resize(_rows); + + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(1, _rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + transpose(src, _dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(_rows, 1, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.copyTo(_dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst ) +{ + CV_Assert(src.rows == 1); + dst.resize(src.cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +//Matx +template +void cv2eigen( const Matx<_Tp, 1, _cols>& src, + Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst ) +{ + dst.resize(_cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(_cols, 1, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + transpose(src, _dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(1, _cols, DataType<_Tp>::type, + 
dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + Mat(src).copyTo(_dst); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + + +} + +#endif + +#endif diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/gpumat.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/gpumat.hpp new file mode 100644 index 0000000..193c9aa --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/gpumat.hpp @@ -0,0 +1,562 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_GPUMAT_HPP__ +#define __OPENCV_GPUMAT_HPP__ + +#ifdef __cplusplus + +#include "opencv2/core/core.hpp" +#include "opencv2/core/cuda_devptrs.hpp" + +namespace cv { namespace gpu +{ + //////////////////////////////// Initialization & Info //////////////////////// + + //! This is the only function that do not throw exceptions if the library is compiled without Cuda. + CV_EXPORTS int getCudaEnabledDeviceCount(); + + //! Functions below throw cv::Expception if the library is compiled without Cuda. + + CV_EXPORTS void setDevice(int device); + CV_EXPORTS int getDevice(); + + //! Explicitly destroys and cleans up all resources associated with the current device in the current process. + //! Any subsequent API call to this device will reinitialize the device. 
+ CV_EXPORTS void resetDevice(); + + enum FeatureSet + { + FEATURE_SET_COMPUTE_10 = 10, + FEATURE_SET_COMPUTE_11 = 11, + FEATURE_SET_COMPUTE_12 = 12, + FEATURE_SET_COMPUTE_13 = 13, + FEATURE_SET_COMPUTE_20 = 20, + FEATURE_SET_COMPUTE_21 = 21, + FEATURE_SET_COMPUTE_30 = 30, + FEATURE_SET_COMPUTE_35 = 35, + + GLOBAL_ATOMICS = FEATURE_SET_COMPUTE_11, + SHARED_ATOMICS = FEATURE_SET_COMPUTE_12, + NATIVE_DOUBLE = FEATURE_SET_COMPUTE_13, + WARP_SHUFFLE_FUNCTIONS = FEATURE_SET_COMPUTE_30, + DYNAMIC_PARALLELISM = FEATURE_SET_COMPUTE_35 + }; + + // Checks whether current device supports the given feature + CV_EXPORTS bool deviceSupports(FeatureSet feature_set); + + // Gives information about what GPU archs this OpenCV GPU module was + // compiled for + class CV_EXPORTS TargetArchs + { + public: + static bool builtWith(FeatureSet feature_set); + static bool has(int major, int minor); + static bool hasPtx(int major, int minor); + static bool hasBin(int major, int minor); + static bool hasEqualOrLessPtx(int major, int minor); + static bool hasEqualOrGreater(int major, int minor); + static bool hasEqualOrGreaterPtx(int major, int minor); + static bool hasEqualOrGreaterBin(int major, int minor); + private: + TargetArchs(); + }; + + // Gives information about the given GPU + class CV_EXPORTS DeviceInfo + { + public: + // Creates DeviceInfo object for the current GPU + DeviceInfo() : device_id_(getDevice()) { query(); } + + // Creates DeviceInfo object for the given GPU + DeviceInfo(int device_id) : device_id_(device_id) { query(); } + + std::string name() const { return name_; } + + // Return compute capability versions + int majorVersion() const { return majorVersion_; } + int minorVersion() const { return minorVersion_; } + + int multiProcessorCount() const { return multi_processor_count_; } + + size_t sharedMemPerBlock() const; + + void queryMemory(size_t& totalMemory, size_t& freeMemory) const; + size_t freeMemory() const; + size_t totalMemory() const; + + // Checks whether device supports the given feature + bool supports(FeatureSet feature_set) const; + + // Checks whether the GPU module can be run on the given device + bool isCompatible() const; + + int deviceID() const { return device_id_; } + + private: + void query(); + + int device_id_; + + std::string name_; + int multi_processor_count_; + int majorVersion_; + int minorVersion_; + }; + + CV_EXPORTS void printCudaDeviceInfo(int device); + CV_EXPORTS void printShortCudaDeviceInfo(int device); + + //////////////////////////////// GpuMat /////////////////////////////// + + //! Smart pointer for GPU memory with reference counting. Its interface is mostly similar with cv::Mat. + class CV_EXPORTS GpuMat + { + public: + //! default constructor + GpuMat(); + + //! constructs GpuMatrix of the specified size and type (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.) + GpuMat(int rows, int cols, int type); + GpuMat(Size size, int type); + + //! constucts GpuMatrix and fills it with the specified value _s. + GpuMat(int rows, int cols, int type, Scalar s); + GpuMat(Size size, int type, Scalar s); + + //! copy constructor + GpuMat(const GpuMat& m); + + //! constructor for GpuMatrix headers pointing to user-allocated data + GpuMat(int rows, int cols, int type, void* data, size_t step = Mat::AUTO_STEP); + GpuMat(Size size, int type, void* data, size_t step = Mat::AUTO_STEP); + + //! creates a matrix header for a part of the bigger matrix + GpuMat(const GpuMat& m, Range rowRange, Range colRange); + GpuMat(const GpuMat& m, Rect roi); + + //! builds GpuMat from Mat. 
Perfom blocking upload to device. + explicit GpuMat(const Mat& m); + + //! destructor - calls release() + ~GpuMat(); + + //! assignment operators + GpuMat& operator = (const GpuMat& m); + + //! pefroms blocking upload data to GpuMat. + void upload(const Mat& m); + + //! downloads data from device to host memory. Blocking calls. + void download(Mat& m) const; + + //! returns a new GpuMatrix header for the specified row + GpuMat row(int y) const; + //! returns a new GpuMatrix header for the specified column + GpuMat col(int x) const; + //! ... for the specified row span + GpuMat rowRange(int startrow, int endrow) const; + GpuMat rowRange(Range r) const; + //! ... for the specified column span + GpuMat colRange(int startcol, int endcol) const; + GpuMat colRange(Range r) const; + + //! returns deep copy of the GpuMatrix, i.e. the data is copied + GpuMat clone() const; + //! copies the GpuMatrix content to "m". + // It calls m.create(this->size(), this->type()). + void copyTo(GpuMat& m) const; + //! copies those GpuMatrix elements to "m" that are marked with non-zero mask elements. + void copyTo(GpuMat& m, const GpuMat& mask) const; + //! converts GpuMatrix to another datatype with optional scalng. See cvConvertScale. + void convertTo(GpuMat& m, int rtype, double alpha = 1, double beta = 0) const; + + void assignTo(GpuMat& m, int type=-1) const; + + //! sets every GpuMatrix element to s + GpuMat& operator = (Scalar s); + //! sets some of the GpuMatrix elements to s, according to the mask + GpuMat& setTo(Scalar s, const GpuMat& mask = GpuMat()); + //! creates alternative GpuMatrix header for the same data, with different + // number of channels and/or different number of rows. see cvReshape. + GpuMat reshape(int cn, int rows = 0) const; + + //! allocates new GpuMatrix data unless the GpuMatrix already has specified size and type. + // previous data is unreferenced if needed. + void create(int rows, int cols, int type); + void create(Size size, int type); + //! decreases reference counter; + // deallocate the data when reference counter reaches 0. + void release(); + + //! swaps with other smart pointer + void swap(GpuMat& mat); + + //! locates GpuMatrix header within a parent GpuMatrix. See below + void locateROI(Size& wholeSize, Point& ofs) const; + //! moves/resizes the current GpuMatrix ROI inside the parent GpuMatrix. + GpuMat& adjustROI(int dtop, int dbottom, int dleft, int dright); + //! extracts a rectangular sub-GpuMatrix + // (this is a generalized form of row, rowRange etc.) + GpuMat operator()(Range rowRange, Range colRange) const; + GpuMat operator()(Rect roi) const; + + //! returns true iff the GpuMatrix data is continuous + // (i.e. when there are no gaps between successive rows). + // similar to CV_IS_GpuMat_CONT(cvGpuMat->type) + bool isContinuous() const; + //! returns element size in bytes, + // similar to CV_ELEM_SIZE(cvMat->type) + size_t elemSize() const; + //! returns the size of element channel in bytes. + size_t elemSize1() const; + //! returns element type, similar to CV_MAT_TYPE(cvMat->type) + int type() const; + //! returns element type, similar to CV_MAT_DEPTH(cvMat->type) + int depth() const; + //! returns element type, similar to CV_MAT_CN(cvMat->type) + int channels() const; + //! returns step/elemSize1() + size_t step1() const; + //! returns GpuMatrix size: + // width == number of columns, height == number of rows + Size size() const; + //! returns true if GpuMatrix data is NULL + bool empty() const; + + //! 
returns pointer to y-th row + uchar* ptr(int y = 0); + const uchar* ptr(int y = 0) const; + + //! template version of the above method + template _Tp* ptr(int y = 0); + template const _Tp* ptr(int y = 0) const; + + template operator PtrStepSz<_Tp>() const; + template operator PtrStep<_Tp>() const; + + // Deprecated function + __CV_GPU_DEPR_BEFORE__ template operator DevMem2D_<_Tp>() const __CV_GPU_DEPR_AFTER__; + __CV_GPU_DEPR_BEFORE__ template operator PtrStep_<_Tp>() const __CV_GPU_DEPR_AFTER__; + #undef __CV_GPU_DEPR_BEFORE__ + #undef __CV_GPU_DEPR_AFTER__ + + /*! includes several bit-fields: + - the magic signature + - continuity flag + - depth + - number of channels + */ + int flags; + + //! the number of rows and columns + int rows, cols; + + //! a distance between successive rows in bytes; includes the gap if any + size_t step; + + //! pointer to the data + uchar* data; + + //! pointer to the reference counter; + // when GpuMatrix points to user-allocated data, the pointer is NULL + int* refcount; + + //! helper fields used in locateROI and adjustROI + uchar* datastart; + uchar* dataend; + }; + + //! Creates continuous GPU matrix + CV_EXPORTS void createContinuous(int rows, int cols, int type, GpuMat& m); + CV_EXPORTS GpuMat createContinuous(int rows, int cols, int type); + CV_EXPORTS void createContinuous(Size size, int type, GpuMat& m); + CV_EXPORTS GpuMat createContinuous(Size size, int type); + + //! Ensures that size of the given matrix is not less than (rows, cols) size + //! and matrix type is match specified one too + CV_EXPORTS void ensureSizeIsEnough(int rows, int cols, int type, GpuMat& m); + CV_EXPORTS void ensureSizeIsEnough(Size size, int type, GpuMat& m); + + CV_EXPORTS GpuMat allocMatFromBuf(int rows, int cols, int type, GpuMat &mat); + + //////////////////////////////////////////////////////////////////////// + // Error handling + + CV_EXPORTS void error(const char* error_string, const char* file, const int line, const char* func = ""); + + //////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////// + + inline GpuMat::GpuMat() + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) + { + } + + inline GpuMat::GpuMat(int rows_, int cols_, int type_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) + { + if (rows_ > 0 && cols_ > 0) + create(rows_, cols_, type_); + } + + inline GpuMat::GpuMat(Size size_, int type_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) + { + if (size_.height > 0 && size_.width > 0) + create(size_.height, size_.width, type_); + } + + inline GpuMat::GpuMat(int rows_, int cols_, int type_, Scalar s_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) + { + if (rows_ > 0 && cols_ > 0) + { + create(rows_, cols_, type_); + setTo(s_); + } + } + + inline GpuMat::GpuMat(Size size_, int type_, Scalar s_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) + { + if (size_.height > 0 && size_.width > 0) + { + create(size_.height, size_.width, type_); + setTo(s_); + } + } + + inline GpuMat::~GpuMat() + { + release(); + } + + inline GpuMat GpuMat::clone() const + { + GpuMat m; + copyTo(m); + return m; + } + + inline void GpuMat::assignTo(GpuMat& m, int _type) const + { + if (_type < 0) + m = *this; + else + 
convertTo(m, _type); + } + + inline size_t GpuMat::step1() const + { + return step / elemSize1(); + } + + inline bool GpuMat::empty() const + { + return data == 0; + } + + template inline _Tp* GpuMat::ptr(int y) + { + return (_Tp*)ptr(y); + } + + template inline const _Tp* GpuMat::ptr(int y) const + { + return (const _Tp*)ptr(y); + } + + inline void swap(GpuMat& a, GpuMat& b) + { + a.swap(b); + } + + inline GpuMat GpuMat::row(int y) const + { + return GpuMat(*this, Range(y, y+1), Range::all()); + } + + inline GpuMat GpuMat::col(int x) const + { + return GpuMat(*this, Range::all(), Range(x, x+1)); + } + + inline GpuMat GpuMat::rowRange(int startrow, int endrow) const + { + return GpuMat(*this, Range(startrow, endrow), Range::all()); + } + + inline GpuMat GpuMat::rowRange(Range r) const + { + return GpuMat(*this, r, Range::all()); + } + + inline GpuMat GpuMat::colRange(int startcol, int endcol) const + { + return GpuMat(*this, Range::all(), Range(startcol, endcol)); + } + + inline GpuMat GpuMat::colRange(Range r) const + { + return GpuMat(*this, Range::all(), r); + } + + inline void GpuMat::create(Size size_, int type_) + { + create(size_.height, size_.width, type_); + } + + inline GpuMat GpuMat::operator()(Range _rowRange, Range _colRange) const + { + return GpuMat(*this, _rowRange, _colRange); + } + + inline GpuMat GpuMat::operator()(Rect roi) const + { + return GpuMat(*this, roi); + } + + inline bool GpuMat::isContinuous() const + { + return (flags & Mat::CONTINUOUS_FLAG) != 0; + } + + inline size_t GpuMat::elemSize() const + { + return CV_ELEM_SIZE(flags); + } + + inline size_t GpuMat::elemSize1() const + { + return CV_ELEM_SIZE1(flags); + } + + inline int GpuMat::type() const + { + return CV_MAT_TYPE(flags); + } + + inline int GpuMat::depth() const + { + return CV_MAT_DEPTH(flags); + } + + inline int GpuMat::channels() const + { + return CV_MAT_CN(flags); + } + + inline Size GpuMat::size() const + { + return Size(cols, rows); + } + + inline uchar* GpuMat::ptr(int y) + { + CV_DbgAssert((unsigned)y < (unsigned)rows); + return data + step * y; + } + + inline const uchar* GpuMat::ptr(int y) const + { + CV_DbgAssert((unsigned)y < (unsigned)rows); + return data + step * y; + } + + inline GpuMat& GpuMat::operator = (Scalar s) + { + setTo(s); + return *this; + } + + template inline GpuMat::operator PtrStepSz() const + { + return PtrStepSz(rows, cols, (T*)data, step); + } + + template inline GpuMat::operator PtrStep() const + { + return PtrStep((T*)data, step); + } + + template inline GpuMat::operator DevMem2D_() const + { + return DevMem2D_(rows, cols, (T*)data, step); + } + + template inline GpuMat::operator PtrStep_() const + { + return PtrStep_(static_cast< DevMem2D_ >(*this)); + } + + inline GpuMat createContinuous(int rows, int cols, int type) + { + GpuMat m; + createContinuous(rows, cols, type, m); + return m; + } + + inline void createContinuous(Size size, int type, GpuMat& m) + { + createContinuous(size.height, size.width, type, m); + } + + inline GpuMat createContinuous(Size size, int type) + { + GpuMat m; + createContinuous(size, type, m); + return m; + } + + inline void ensureSizeIsEnough(Size size, int type, GpuMat& m) + { + ensureSizeIsEnough(size.height, size.width, type, m); + } +}} + +#endif // __cplusplus + +#endif // __OPENCV_GPUMAT_HPP__ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/internal.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/internal.hpp new file mode 100644 index 0000000..3cd2f90 
--- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/internal.hpp @@ -0,0 +1,781 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* The header is for internal use and it is likely to change. + It contains some macro definitions that are used in cxcore, cv, cvaux + and, probably, other libraries. If you need some of this functionality, + the safe way is to copy it into your code and rename the macros. 
+*/ +#ifndef __OPENCV_CORE_INTERNAL_HPP__ +#define __OPENCV_CORE_INTERNAL_HPP__ + +#include + +#include "opencv2/core/core.hpp" +#include "opencv2/core/types_c.h" + +#if defined WIN32 || defined _WIN32 +# ifndef WIN32 +# define WIN32 +# endif +# ifndef _WIN32 +# define _WIN32 +# endif +#endif + +#if !defined WIN32 && !defined WINCE +# include +#endif + +#ifdef __BORLANDC__ +# ifndef WIN32 +# define WIN32 +# endif +# ifndef _WIN32 +# define _WIN32 +# endif +# define CV_DLL +# undef _CV_ALWAYS_PROFILE_ +# define _CV_ALWAYS_NO_PROFILE_ +#endif + +#ifndef FALSE +# define FALSE 0 +#endif +#ifndef TRUE +# define TRUE 1 +#endif + +#define __BEGIN__ __CV_BEGIN__ +#define __END__ __CV_END__ +#define EXIT __CV_EXIT__ + +#ifdef HAVE_IPP +# include "ipp.h" + +CV_INLINE IppiSize ippiSize(int width, int height) +{ + IppiSize size = { width, height }; + return size; +} +#endif + +#ifndef IPPI_CALL +# define IPPI_CALL(func) CV_Assert((func) >= 0) +#endif + +#if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2) +# include "emmintrin.h" +# define CV_SSE 1 +# define CV_SSE2 1 +# if defined __SSE3__ || (defined _MSC_VER && _MSC_VER >= 1500) +# include "pmmintrin.h" +# define CV_SSE3 1 +# endif +# if defined __SSSE3__ || (defined _MSC_VER && _MSC_VER >= 1500) +# include "tmmintrin.h" +# define CV_SSSE3 1 +# endif +# if defined __SSE4_1__ || (defined _MSC_VER && _MSC_VER >= 1500) +# include +# define CV_SSE4_1 1 +# endif +# if defined __SSE4_2__ || (defined _MSC_VER && _MSC_VER >= 1500) +# include +# define CV_SSE4_2 1 +# endif +# if defined __AVX__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219) +// MS Visual Studio 2010 (2012?) has no macro pre-defined to identify the use of /arch:AVX +// See: http://connect.microsoft.com/VisualStudio/feedback/details/605858/arch-avx-should-define-a-predefined-macro-in-x64-and-set-a-unique-value-for-m-ix86-fp-in-win32 +# include +# define CV_AVX 1 +# if defined(_XCR_XFEATURE_ENABLED_MASK) +# define __xgetbv() _xgetbv(_XCR_XFEATURE_ENABLED_MASK) +# else +# define __xgetbv() 0 +# endif +# endif +#endif + + +#if (defined WIN32 || defined _WIN32) && defined(_M_ARM) +# include +# include "arm_neon.h" +# define CV_NEON 1 +# define CPU_HAS_NEON_FEATURE (true) +#elif defined(__ARM_NEON__) +# include +# define CV_NEON 1 +# define CPU_HAS_NEON_FEATURE (true) +#endif + +#ifndef CV_SSE +# define CV_SSE 0 +#endif +#ifndef CV_SSE2 +# define CV_SSE2 0 +#endif +#ifndef CV_SSE3 +# define CV_SSE3 0 +#endif +#ifndef CV_SSSE3 +# define CV_SSSE3 0 +#endif +#ifndef CV_SSE4_1 +# define CV_SSE4_1 0 +#endif +#ifndef CV_SSE4_2 +# define CV_SSE4_2 0 +#endif +#ifndef CV_AVX +# define CV_AVX 0 +#endif +#ifndef CV_NEON +# define CV_NEON 0 +#endif + +#ifdef HAVE_TBB +# include "tbb/tbb_stddef.h" +# if TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202 +# include "tbb/tbb.h" +# include "tbb/task.h" +# undef min +# undef max +# else +# undef HAVE_TBB +# endif +#endif + +#ifdef HAVE_EIGEN +# if defined __GNUC__ && defined __APPLE__ +# pragma GCC diagnostic ignored "-Wshadow" +# endif +# include +# include "opencv2/core/eigen.hpp" +#endif + +#ifdef __cplusplus + +namespace cv +{ +#ifdef HAVE_TBB + + typedef tbb::blocked_range BlockedRange; + + template static inline + void parallel_for( const BlockedRange& range, const Body& body ) + { + tbb::parallel_for(range, body); + } + + template static inline + void parallel_do( Iterator first, Iterator last, const Body& body ) + { + tbb::parallel_do(first, last, body); + } + + typedef tbb::split Split; + + template static inline + 
void parallel_reduce( const BlockedRange& range, Body& body ) + { + tbb::parallel_reduce(range, body); + } + + typedef tbb::concurrent_vector ConcurrentRectVector; + typedef tbb::concurrent_vector ConcurrentDoubleVector; +#else + class BlockedRange + { + public: + BlockedRange() : _begin(0), _end(0), _grainsize(0) {} + BlockedRange(int b, int e, int g=1) : _begin(b), _end(e), _grainsize(g) {} + int begin() const { return _begin; } + int end() const { return _end; } + int grainsize() const { return _grainsize; } + + protected: + int _begin, _end, _grainsize; + }; + + template static inline + void parallel_for( const BlockedRange& range, const Body& body ) + { + body(range); + } + typedef std::vector ConcurrentRectVector; + typedef std::vector ConcurrentDoubleVector; + + template static inline + void parallel_do( Iterator first, Iterator last, const Body& body ) + { + for( ; first != last; ++first ) + body(*first); + } + + class Split {}; + + template static inline + void parallel_reduce( const BlockedRange& range, Body& body ) + { + body(range); + } +#endif + + // Returns a static string if there is a parallel framework, + // NULL otherwise. + CV_EXPORTS const char* currentParallelFramework(); +} //namespace cv + +#define CV_INIT_ALGORITHM(classname, algname, memberinit) \ + static ::cv::Algorithm* create##classname() \ + { \ + return new classname; \ + } \ + \ + static ::cv::AlgorithmInfo& classname##_info() \ + { \ + static ::cv::AlgorithmInfo classname##_info_var(algname, create##classname); \ + return classname##_info_var; \ + } \ + \ + static ::cv::AlgorithmInfo& classname##_info_auto = classname##_info(); \ + \ + ::cv::AlgorithmInfo* classname::info() const \ + { \ + static volatile bool initialized = false; \ + \ + if( !initialized ) \ + { \ + initialized = true; \ + classname obj; \ + memberinit; \ + } \ + return &classname##_info(); \ + } + +#endif //__cplusplus + +/* maximal size of vector to run matrix operations on it inline (i.e. w/o ipp calls) */ +#define CV_MAX_INLINE_MAT_OP_SIZE 10 + +/* maximal linear size of matrix to allocate it on stack. */ +#define CV_MAX_LOCAL_MAT_SIZE 32 + +/* maximal size of local memory storage */ +#define CV_MAX_LOCAL_SIZE \ + (CV_MAX_LOCAL_MAT_SIZE*CV_MAX_LOCAL_MAT_SIZE*(int)sizeof(double)) + +/* default image row align (in bytes) */ +#define CV_DEFAULT_IMAGE_ROW_ALIGN 4 + +/* matrices are continuous by default */ +#define CV_DEFAULT_MAT_ROW_ALIGN 1 + +/* maximum size of dynamic memory buffer. + cvAlloc reports an error if a larger block is requested. */ +#define CV_MAX_ALLOC_SIZE (((size_t)1 << (sizeof(size_t)*8-2))) + +/* the alignment of all the allocated buffers */ +#define CV_MALLOC_ALIGN 16 + +/* default alignment for dynamic data strucutures, resided in storages. 
*/ +#define CV_STRUCT_ALIGN ((int)sizeof(double)) + +/* default storage block size */ +#define CV_STORAGE_BLOCK_SIZE ((1<<16) - 128) + +/* default memory block for sparse array elements */ +#define CV_SPARSE_MAT_BLOCK (1<<12) + +/* initial hash table size */ +#define CV_SPARSE_HASH_SIZE0 (1<<10) + +/* maximal average node_count/hash_size ratio beyond which hash table is resized */ +#define CV_SPARSE_HASH_RATIO 3 + +/* max length of strings */ +#define CV_MAX_STRLEN 1024 + +#if 0 /*def CV_CHECK_FOR_NANS*/ +# define CV_CHECK_NANS( arr ) cvCheckArray((arr)) +#else +# define CV_CHECK_NANS( arr ) +#endif + +/****************************************************************************************\ +* Common declarations * +\****************************************************************************************/ + +#ifdef __GNUC__ +# define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x))) +#elif defined _MSC_VER +# define CV_DECL_ALIGNED(x) __declspec(align(x)) +#else +# define CV_DECL_ALIGNED(x) +#endif + +#ifndef CV_IMPL +# define CV_IMPL CV_EXTERN_C +#endif + +#define CV_DBG_BREAK() { volatile int* crashMe = 0; *crashMe = 0; } + +/* default step, set in case of continuous data + to work around checks for valid step in some ipp functions */ +#define CV_STUB_STEP (1 << 30) + +#define CV_SIZEOF_FLOAT ((int)sizeof(float)) +#define CV_SIZEOF_SHORT ((int)sizeof(short)) + +#define CV_ORIGIN_TL 0 +#define CV_ORIGIN_BL 1 + +/* IEEE754 constants and macros */ +#define CV_POS_INF 0x7f800000 +#define CV_NEG_INF 0x807fffff /* CV_TOGGLE_FLT(0xff800000) */ +#define CV_1F 0x3f800000 +#define CV_TOGGLE_FLT(x) ((x)^((int)(x) < 0 ? 0x7fffffff : 0)) +#define CV_TOGGLE_DBL(x) \ + ((x)^((int64)(x) < 0 ? CV_BIG_INT(0x7fffffffffffffff) : 0)) + +#define CV_NOP(a) (a) +#define CV_ADD(a, b) ((a) + (b)) +#define CV_SUB(a, b) ((a) - (b)) +#define CV_MUL(a, b) ((a) * (b)) +#define CV_AND(a, b) ((a) & (b)) +#define CV_OR(a, b) ((a) | (b)) +#define CV_XOR(a, b) ((a) ^ (b)) +#define CV_ANDN(a, b) (~(a) & (b)) +#define CV_ORN(a, b) (~(a) | (b)) +#define CV_SQR(a) ((a) * (a)) + +#define CV_LT(a, b) ((a) < (b)) +#define CV_LE(a, b) ((a) <= (b)) +#define CV_EQ(a, b) ((a) == (b)) +#define CV_NE(a, b) ((a) != (b)) +#define CV_GT(a, b) ((a) > (b)) +#define CV_GE(a, b) ((a) >= (b)) + +#define CV_NONZERO(a) ((a) != 0) +#define CV_NONZERO_FLT(a) (((a)+(a)) != 0) + +/* general-purpose saturation macros */ +#define CV_CAST_8U(t) (uchar)(!((t) & ~255) ? (t) : (t) > 0 ? 255 : 0) +#define CV_CAST_8S(t) (schar)(!(((t)+128) & ~255) ? (t) : (t) > 0 ? 127 : -128) +#define CV_CAST_16U(t) (ushort)(!((t) & ~65535) ? (t) : (t) > 0 ? 65535 : 0) +#define CV_CAST_16S(t) (short)(!(((t)+32768) & ~65535) ? (t) : (t) > 0 ? 
32767 : -32768) +#define CV_CAST_32S(t) (int)(t) +#define CV_CAST_64S(t) (int64)(t) +#define CV_CAST_32F(t) (float)(t) +#define CV_CAST_64F(t) (double)(t) + +#define CV_PASTE2(a,b) a##b +#define CV_PASTE(a,b) CV_PASTE2(a,b) + +#define CV_EMPTY +#define CV_MAKE_STR(a) #a + +#define CV_ZERO_OBJ(x) memset((x), 0, sizeof(*(x))) + +#define CV_DIM(static_array) ((int)(sizeof(static_array)/sizeof((static_array)[0]))) + +#define cvUnsupportedFormat "Unsupported format" + +CV_INLINE void* cvAlignPtr( const void* ptr, int align CV_DEFAULT(32) ) +{ + assert( (align & (align-1)) == 0 ); + return (void*)( ((size_t)ptr + align - 1) & ~(size_t)(align-1) ); +} + +CV_INLINE int cvAlign( int size, int align ) +{ + assert( (align & (align-1)) == 0 && size < INT_MAX ); + return (size + align - 1) & -align; +} + +CV_INLINE CvSize cvGetMatSize( const CvMat* mat ) +{ + CvSize size; + size.width = mat->cols; + size.height = mat->rows; + return size; +} + +#define CV_DESCALE(x,n) (((x) + (1 << ((n)-1))) >> (n)) +#define CV_FLT_TO_FIX(x,n) cvRound((x)*(1<<(n))) + +/****************************************************************************************\ + + Generic implementation of QuickSort algorithm. + ---------------------------------------------- + Using this macro user can declare customized sort function that can be much faster + than built-in qsort function because of lower overhead on elements + comparison and exchange. The macro takes less_than (or LT) argument - a macro or function + that takes 2 arguments returns non-zero if the first argument should be before the second + one in the sorted sequence and zero otherwise. + + Example: + + Suppose that the task is to sort points by ascending of y coordinates and if + y's are equal x's should ascend. + + The code is: + ------------------------------------------------------------------------------ + #define cmp_pts( pt1, pt2 ) \ + ((pt1).y < (pt2).y || ((pt1).y < (pt2).y && (pt1).x < (pt2).x)) + + [static] CV_IMPLEMENT_QSORT( icvSortPoints, CvPoint, cmp_pts ) + ------------------------------------------------------------------------------ + + After that the function "void icvSortPoints( CvPoint* array, size_t total, int aux );" + is available to user. + + aux is an additional parameter, which can be used when comparing elements. + The current implementation was derived from *BSD system qsort(): + + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + +\****************************************************************************************/ + +#define CV_IMPLEMENT_QSORT_EX( func_name, T, LT, user_data_type ) \ +void func_name( T *array, size_t total, user_data_type aux ) \ +{ \ + int isort_thresh = 7; \ + T t; \ + int sp = 0; \ + \ + struct \ + { \ + T *lb; \ + T *ub; \ + } \ + stack[48]; \ + \ + aux = aux; \ + \ + if( total <= 1 ) \ + return; \ + \ + stack[0].lb = array; \ + stack[0].ub = array + (total - 1); \ + \ + while( sp >= 0 ) \ + { \ + T* left = stack[sp].lb; \ + T* right = stack[sp--].ub; \ + \ + for(;;) \ + { \ + int i, n = (int)(right - left) + 1, m; \ + T* ptr; \ + T* ptr2; \ + \ + if( n <= isort_thresh ) \ + { \ + insert_sort: \ + for( ptr = left + 1; ptr <= right; ptr++ ) \ + { \ + for( ptr2 = ptr; ptr2 > left && LT(ptr2[0],ptr2[-1]); ptr2--) \ + CV_SWAP( ptr2[0], ptr2[-1], t ); \ + } \ + break; \ + } \ + else \ + { \ + T* left0; \ + T* left1; \ + T* right0; \ + T* right1; \ + T* pivot; \ + T* a; \ + T* b; \ + T* c; \ + int swap_cnt = 0; \ + \ + left0 = left; \ + right0 = right; \ + pivot = left + (n/2); \ + \ + if( n > 40 ) \ + { \ + int d = n / 8; \ + a = left, b = left + d, c = left + 2*d; \ + left = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + \ + a = pivot - d, b = pivot, c = pivot + d; \ + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + \ + a = right - 2*d, b = right - d, c = right; \ + right = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + } \ + \ + a = left, b = pivot, c = right; \ + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? 
a : c)); \ + if( pivot != left0 ) \ + { \ + CV_SWAP( *pivot, *left0, t ); \ + pivot = left0; \ + } \ + left = left1 = left0 + 1; \ + right = right1 = right0; \ + \ + for(;;) \ + { \ + while( left <= right && !LT(*pivot, *left) ) \ + { \ + if( !LT(*left, *pivot) ) \ + { \ + if( left > left1 ) \ + CV_SWAP( *left1, *left, t ); \ + swap_cnt = 1; \ + left1++; \ + } \ + left++; \ + } \ + \ + while( left <= right && !LT(*right, *pivot) ) \ + { \ + if( !LT(*pivot, *right) ) \ + { \ + if( right < right1 ) \ + CV_SWAP( *right1, *right, t ); \ + swap_cnt = 1; \ + right1--; \ + } \ + right--; \ + } \ + \ + if( left > right ) \ + break; \ + CV_SWAP( *left, *right, t ); \ + swap_cnt = 1; \ + left++; \ + right--; \ + } \ + \ + if( swap_cnt == 0 ) \ + { \ + left = left0, right = right0; \ + goto insert_sort; \ + } \ + \ + n = MIN( (int)(left1 - left0), (int)(left - left1) ); \ + for( i = 0; i < n; i++ ) \ + CV_SWAP( left0[i], left[i-n], t ); \ + \ + n = MIN( (int)(right0 - right1), (int)(right1 - right) ); \ + for( i = 0; i < n; i++ ) \ + CV_SWAP( left[i], right0[i-n+1], t ); \ + n = (int)(left - left1); \ + m = (int)(right1 - right); \ + if( n > 1 ) \ + { \ + if( m > 1 ) \ + { \ + if( n > m ) \ + { \ + stack[++sp].lb = left0; \ + stack[sp].ub = left0 + n - 1; \ + left = right0 - m + 1, right = right0; \ + } \ + else \ + { \ + stack[++sp].lb = right0 - m + 1; \ + stack[sp].ub = right0; \ + left = left0, right = left0 + n - 1; \ + } \ + } \ + else \ + left = left0, right = left0 + n - 1; \ + } \ + else if( m > 1 ) \ + left = right0 - m + 1, right = right0; \ + else \ + break; \ + } \ + } \ + } \ +} + +#define CV_IMPLEMENT_QSORT( func_name, T, cmp ) \ + CV_IMPLEMENT_QSORT_EX( func_name, T, cmp, int ) + +/****************************************************************************************\ +* Structures and macros for integration with IPP * +\****************************************************************************************/ + +/* IPP-compatible return codes */ +typedef enum CvStatus +{ + CV_BADMEMBLOCK_ERR = -113, + CV_INPLACE_NOT_SUPPORTED_ERR= -112, + CV_UNMATCHED_ROI_ERR = -111, + CV_NOTFOUND_ERR = -110, + CV_BADCONVERGENCE_ERR = -109, + + CV_BADDEPTH_ERR = -107, + CV_BADROI_ERR = -106, + CV_BADHEADER_ERR = -105, + CV_UNMATCHED_FORMATS_ERR = -104, + CV_UNSUPPORTED_COI_ERR = -103, + CV_UNSUPPORTED_CHANNELS_ERR = -102, + CV_UNSUPPORTED_DEPTH_ERR = -101, + CV_UNSUPPORTED_FORMAT_ERR = -100, + + CV_BADARG_ERR = -49, //ipp comp + CV_NOTDEFINED_ERR = -48, //ipp comp + + CV_BADCHANNELS_ERR = -47, //ipp comp + CV_BADRANGE_ERR = -44, //ipp comp + CV_BADSTEP_ERR = -29, //ipp comp + + CV_BADFLAG_ERR = -12, + CV_DIV_BY_ZERO_ERR = -11, //ipp comp + CV_BADCOEF_ERR = -10, + + CV_BADFACTOR_ERR = -7, + CV_BADPOINT_ERR = -6, + CV_BADSCALE_ERR = -4, + CV_OUTOFMEM_ERR = -3, + CV_NULLPTR_ERR = -2, + CV_BADSIZE_ERR = -1, + CV_NO_ERR = 0, + CV_OK = CV_NO_ERR +} +CvStatus; + +#define CV_NOTHROW throw() + +typedef struct CvFuncTable +{ + void* fn_2d[CV_DEPTH_MAX]; +} +CvFuncTable; + +typedef struct CvBigFuncTable +{ + void* fn_2d[CV_DEPTH_MAX*4]; +} CvBigFuncTable; + +#define CV_INIT_FUNC_TAB( tab, FUNCNAME, FLAG ) \ + (tab).fn_2d[CV_8U] = (void*)FUNCNAME##_8u##FLAG; \ + (tab).fn_2d[CV_8S] = 0; \ + (tab).fn_2d[CV_16U] = (void*)FUNCNAME##_16u##FLAG; \ + (tab).fn_2d[CV_16S] = (void*)FUNCNAME##_16s##FLAG; \ + (tab).fn_2d[CV_32S] = (void*)FUNCNAME##_32s##FLAG; \ + (tab).fn_2d[CV_32F] = (void*)FUNCNAME##_32f##FLAG; \ + (tab).fn_2d[CV_64F] = (void*)FUNCNAME##_64f##FLAG + +#ifdef __cplusplus + +// < Deprecated + +class 
CV_EXPORTS CvOpenGlFuncTab +{ +public: + virtual ~CvOpenGlFuncTab(); + + virtual void genBuffers(int n, unsigned int* buffers) const = 0; + virtual void deleteBuffers(int n, const unsigned int* buffers) const = 0; + + virtual void bufferData(unsigned int target, ptrdiff_t size, const void* data, unsigned int usage) const = 0; + virtual void bufferSubData(unsigned int target, ptrdiff_t offset, ptrdiff_t size, const void* data) const = 0; + + virtual void bindBuffer(unsigned int target, unsigned int buffer) const = 0; + + virtual void* mapBuffer(unsigned int target, unsigned int access) const = 0; + virtual void unmapBuffer(unsigned int target) const = 0; + + virtual void generateBitmapFont(const std::string& family, int height, int weight, bool italic, bool underline, int start, int count, int base) const = 0; + + virtual bool isGlContextInitialized() const = 0; +}; + +CV_EXPORTS void icvSetOpenGlFuncTab(const CvOpenGlFuncTab* tab); + +CV_EXPORTS bool icvCheckGlError(const char* file, const int line, const char* func = ""); + +// > + +namespace cv { namespace ogl { +CV_EXPORTS bool checkError(const char* file, const int line, const char* func = ""); +}} + +#define CV_CheckGlError() CV_DbgAssert( (cv::ogl::checkError(__FILE__, __LINE__, CV_Func)) ) + +#endif //__cplusplus + +#endif // __OPENCV_CORE_INTERNAL_HPP__ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/mat.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/mat.hpp new file mode 100644 index 0000000..45c2590 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/mat.hpp @@ -0,0 +1,2619 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_MATRIX_OPERATIONS_HPP__ +#define __OPENCV_CORE_MATRIX_OPERATIONS_HPP__ + +#ifndef SKIP_INCLUDES +#include +#include +#endif // SKIP_INCLUDES + +#ifdef __cplusplus + +namespace cv +{ + +//////////////////////////////// Mat //////////////////////////////// + +inline void Mat::initEmpty() +{ + flags = MAGIC_VAL; + dims = rows = cols = 0; + data = datastart = dataend = datalimit = 0; + refcount = 0; + allocator = 0; +} + +inline Mat::Mat() : size(&rows) +{ + initEmpty(); +} + +inline Mat::Mat(int _rows, int _cols, int _type) : size(&rows) +{ + initEmpty(); + create(_rows, _cols, _type); +} + +inline Mat::Mat(int _rows, int _cols, int _type, const Scalar& _s) : size(&rows) +{ + initEmpty(); + create(_rows, _cols, _type); + *this = _s; +} + +inline Mat::Mat(Size _sz, int _type) : size(&rows) +{ + initEmpty(); + create( _sz.height, _sz.width, _type ); +} + +inline Mat::Mat(Size _sz, int _type, const Scalar& _s) : size(&rows) +{ + initEmpty(); + create(_sz.height, _sz.width, _type); + *this = _s; +} + +inline Mat::Mat(int _dims, const int* _sz, int _type) : size(&rows) +{ + initEmpty(); + create(_dims, _sz, _type); +} + +inline Mat::Mat(int _dims, const int* _sz, int _type, const Scalar& _s) : size(&rows) +{ + initEmpty(); + create(_dims, _sz, _type); + *this = _s; +} + +inline Mat::Mat(const Mat& m) + : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data), + refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), + datalimit(m.datalimit), allocator(m.allocator), size(&rows) +{ + if( refcount ) + CV_XADD(refcount, 1); + if( m.dims <= 2 ) + { + step[0] = m.step[0]; step[1] = m.step[1]; + } + else + { + dims = 0; + copySize(m); + } +} + +inline Mat::Mat(int _rows, int _cols, int _type, void* _data, size_t _step) + : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_rows), cols(_cols), + data((uchar*)_data), refcount(0), datastart((uchar*)_data), dataend(0), + datalimit(0), allocator(0), size(&rows) +{ + size_t esz = CV_ELEM_SIZE(_type), minstep = cols*esz; + if( _step == AUTO_STEP ) + { + _step = minstep; + flags |= CONTINUOUS_FLAG; + } + else + { + if( rows == 1 ) _step = minstep; + CV_DbgAssert( _step >= minstep ); + flags |= _step == minstep ? CONTINUOUS_FLAG : 0; + } + step[0] = _step; step[1] = esz; + datalimit = datastart + _step*rows; + dataend = datalimit - _step + minstep; +} + +inline Mat::Mat(Size _sz, int _type, void* _data, size_t _step) + : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_sz.height), cols(_sz.width), + data((uchar*)_data), refcount(0), datastart((uchar*)_data), dataend(0), + datalimit(0), allocator(0), size(&rows) +{ + size_t esz = CV_ELEM_SIZE(_type), minstep = cols*esz; + if( _step == AUTO_STEP ) + { + _step = minstep; + flags |= CONTINUOUS_FLAG; + } + else + { + if( rows == 1 ) _step = minstep; + CV_DbgAssert( _step >= minstep ); + flags |= _step == minstep ? 
CONTINUOUS_FLAG : 0; + } + step[0] = _step; step[1] = esz; + datalimit = datastart + _step*rows; + dataend = datalimit - _step + minstep; +} + + +template inline Mat::Mat(const vector<_Tp>& vec, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows((int)vec.size()), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if(vec.empty()) + return; + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&vec[0]; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat((int)vec.size(), 1, DataType<_Tp>::type, (uchar*)&vec[0]).copyTo(*this); +} + + +template inline Mat::Mat(const Vec<_Tp, n>& vec, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(n), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)vec.val; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat(n, 1, DataType<_Tp>::type, (void*)vec.val).copyTo(*this); +} + + +template inline Mat::Mat(const Matx<_Tp,m,n>& M, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(m), cols(n), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = cols*sizeof(_Tp); + step[1] = sizeof(_Tp); + data = datastart = (uchar*)M.val; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat(m, n, DataType<_Tp>::type, (uchar*)M.val).copyTo(*this); +} + + +template inline Mat::Mat(const Point_<_Tp>& pt, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(2), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&pt.x; + datalimit = dataend = datastart + rows*step[0]; + } + else + { + create(2, 1, DataType<_Tp>::type); + ((_Tp*)data)[0] = pt.x; + ((_Tp*)data)[1] = pt.y; + } +} + + +template inline Mat::Mat(const Point3_<_Tp>& pt, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(3), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&pt.x; + datalimit = dataend = datastart + rows*step[0]; + } + else + { + create(3, 1, DataType<_Tp>::type); + ((_Tp*)data)[0] = pt.x; + ((_Tp*)data)[1] = pt.y; + ((_Tp*)data)[2] = pt.z; + } +} + + +template inline Mat::Mat(const MatCommaInitializer_<_Tp>& commaInitializer) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + *this = *commaInitializer; +} + +inline Mat::~Mat() +{ + release(); + if( step.p != step.buf ) + fastFree(step.p); +} + +inline Mat& Mat::operator = (const Mat& m) +{ + if( this != &m ) + { + if( m.refcount ) + CV_XADD(m.refcount, 1); + release(); + flags = m.flags; + if( dims <= 2 && m.dims <= 2 ) + { + dims = m.dims; + rows = m.rows; + cols = m.cols; + step[0] = m.step[0]; + step[1] = m.step[1]; + } + else + copySize(m); + data = m.data; + datastart = m.datastart; + dataend = m.dataend; + datalimit = m.datalimit; + refcount = m.refcount; + allocator = m.allocator; + } + return *this; +} + +inline Mat Mat::row(int y) const { return Mat(*this, Range(y, y+1), Range::all()); } 
+inline Mat Mat::col(int x) const { return Mat(*this, Range::all(), Range(x, x+1)); } +inline Mat Mat::rowRange(int startrow, int endrow) const + { return Mat(*this, Range(startrow, endrow), Range::all()); } +inline Mat Mat::rowRange(const Range& r) const + { return Mat(*this, r, Range::all()); } +inline Mat Mat::colRange(int startcol, int endcol) const + { return Mat(*this, Range::all(), Range(startcol, endcol)); } +inline Mat Mat::colRange(const Range& r) const + { return Mat(*this, Range::all(), r); } + +inline Mat Mat::diag(const Mat& d) +{ + CV_Assert( d.cols == 1 || d.rows == 1 ); + int len = d.rows + d.cols - 1; + Mat m(len, len, d.type(), Scalar(0)), md = m.diag(); + if( d.cols == 1 ) + d.copyTo(md); + else + transpose(d, md); + return m; +} + +inline Mat Mat::clone() const +{ + Mat m; + copyTo(m); + return m; +} + +inline void Mat::assignTo( Mat& m, int _type ) const +{ + if( _type < 0 ) + m = *this; + else + convertTo(m, _type); +} + +inline void Mat::create(int _rows, int _cols, int _type) +{ + _type &= TYPE_MASK; + if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && data ) + return; + int sz[] = {_rows, _cols}; + create(2, sz, _type); +} + +inline void Mat::create(Size _sz, int _type) +{ + create(_sz.height, _sz.width, _type); +} + +inline void Mat::addref() +{ if( refcount ) CV_XADD(refcount, 1); } + +inline void Mat::release() +{ + if( refcount && CV_XADD(refcount, -1) == 1 ) + deallocate(); + data = datastart = dataend = datalimit = 0; + size.p[0] = 0; + refcount = 0; +} + +inline Mat Mat::operator()( Range _rowRange, Range _colRange ) const +{ + return Mat(*this, _rowRange, _colRange); +} + +inline Mat Mat::operator()( const Rect& roi ) const +{ return Mat(*this, roi); } + +inline Mat Mat::operator()(const Range* ranges) const +{ + return Mat(*this, ranges); +} + +inline Mat::operator CvMat() const +{ + CV_DbgAssert(dims <= 2); + CvMat m = cvMat(rows, dims == 1 ? 1 : cols, type(), data); + m.step = (int)step[0]; + m.type = (m.type & ~CONTINUOUS_FLAG) | (flags & CONTINUOUS_FLAG); + return m; +} + +inline bool Mat::isContinuous() const { return (flags & CONTINUOUS_FLAG) != 0; } +inline bool Mat::isSubmatrix() const { return (flags & SUBMATRIX_FLAG) != 0; } +inline size_t Mat::elemSize() const { return dims > 0 ? 
step.p[dims-1] : 0; } +inline size_t Mat::elemSize1() const { return CV_ELEM_SIZE1(flags); } +inline int Mat::type() const { return CV_MAT_TYPE(flags); } +inline int Mat::depth() const { return CV_MAT_DEPTH(flags); } +inline int Mat::channels() const { return CV_MAT_CN(flags); } +inline size_t Mat::step1(int i) const { return step.p[i]/elemSize1(); } +inline bool Mat::empty() const { return data == 0 || total() == 0; } +inline size_t Mat::total() const +{ + if( dims <= 2 ) + return (size_t)rows*cols; + size_t p = 1; + for( int i = 0; i < dims; i++ ) + p *= size[i]; + return p; +} + +inline uchar* Mat::ptr(int y) +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return data + step.p[0]*y; +} + +inline const uchar* Mat::ptr(int y) const +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return data + step.p[0]*y; +} + +template inline _Tp* Mat::ptr(int y) +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return (_Tp*)(data + step.p[0]*y); +} + +template inline const _Tp* Mat::ptr(int y) const +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return (const _Tp*)(data + step.p[0]*y); +} + + +inline uchar* Mat::ptr(int i0, int i1) +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return data + i0*step.p[0] + i1*step.p[1]; +} + +inline const uchar* Mat::ptr(int i0, int i1) const +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return data + i0*step.p[0] + i1*step.p[1]; +} + +template inline _Tp* Mat::ptr(int i0, int i1) +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return (_Tp*)(data + i0*step.p[0] + i1*step.p[1]); +} + +template inline const _Tp* Mat::ptr(int i0, int i1) const +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return (const _Tp*)(data + i0*step.p[0] + i1*step.p[1]); +} + +inline uchar* Mat::ptr(int i0, int i1, int i2) +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]; +} + +inline const uchar* Mat::ptr(int i0, int i1, int i2) const +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]; +} + +template inline _Tp* Mat::ptr(int i0, int i1, int i2) +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return (_Tp*)(data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]); +} + +template inline const _Tp* Mat::ptr(int i0, int i1, int i2) const +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return (const _Tp*)(data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]); +} + +inline uchar* Mat::ptr(const int* idx) +{ + int i, d = dims; + uchar* p = data; + CV_DbgAssert( d >= 1 && p ); + for( i = 0; i < d; i++ ) + { + CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] ); + p += idx[i]*step.p[i]; + } + 
return p; +} + +inline const uchar* Mat::ptr(const int* idx) const +{ + int i, d = dims; + uchar* p = data; + CV_DbgAssert( d >= 1 && p ); + for( i = 0; i < d; i++ ) + { + CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] ); + p += idx[i]*step.p[i]; + } + return p; +} + +template inline _Tp& Mat::at(int i0, int i1) +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((_Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline const _Tp& Mat::at(int i0, int i1) const +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((const _Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline _Tp& Mat::at(Point pt) +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((_Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline const _Tp& Mat::at(Point pt) const +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((const _Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline _Tp& Mat::at(int i0) +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)(size.p[0]*size.p[1]) && + elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + if( isContinuous() || size.p[0] == 1 ) + return ((_Tp*)data)[i0]; + if( size.p[1] == 1 ) + return *(_Tp*)(data + step.p[0]*i0); + int i = i0/cols, j = i0 - i*cols; + return ((_Tp*)(data + step.p[0]*i))[j]; +} + +template inline const _Tp& Mat::at(int i0) const +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)(size.p[0]*size.p[1]) && + elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + if( isContinuous() || size.p[0] == 1 ) + return ((const _Tp*)data)[i0]; + if( size.p[1] == 1 ) + return *(const _Tp*)(data + step.p[0]*i0); + int i = i0/cols, j = i0 - i*cols; + return ((const _Tp*)(data + step.p[0]*i))[j]; +} + +template inline _Tp& Mat::at(int i0, int i1, int i2) +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)ptr(i0, i1, i2); +} +template inline const _Tp& Mat::at(int i0, int i1, int i2) const +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(const _Tp*)ptr(i0, i1, i2); +} +template inline _Tp& Mat::at(const int* idx) +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)ptr(idx); +} +template inline const _Tp& Mat::at(const int* idx) const +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(const _Tp*)ptr(idx); +} +template _Tp& Mat::at(const Vec& idx) +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)ptr(idx.val); +} +template inline const _Tp& Mat::at(const Vec& idx) const +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(const _Tp*)ptr(idx.val); +} + + +template inline MatConstIterator_<_Tp> Mat::begin() const +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return MatConstIterator_<_Tp>((const Mat_<_Tp>*)this); +} + +template inline MatConstIterator_<_Tp> Mat::end() const +{ + 
CV_DbgAssert( elemSize() == sizeof(_Tp) ); + MatConstIterator_<_Tp> it((const Mat_<_Tp>*)this); + it += total(); + return it; +} + +template inline MatIterator_<_Tp> Mat::begin() +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return MatIterator_<_Tp>((Mat_<_Tp>*)this); +} + +template inline MatIterator_<_Tp> Mat::end() +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + MatIterator_<_Tp> it((Mat_<_Tp>*)this); + it += total(); + return it; +} + +template inline Mat::operator vector<_Tp>() const +{ + vector<_Tp> v; + copyTo(v); + return v; +} + +template inline Mat::operator Vec<_Tp, n>() const +{ + CV_Assert( data && dims <= 2 && (rows == 1 || cols == 1) && + rows + cols - 1 == n && channels() == 1 ); + + if( isContinuous() && type() == DataType<_Tp>::type ) + return Vec<_Tp, n>((_Tp*)data); + Vec<_Tp, n> v; Mat tmp(rows, cols, DataType<_Tp>::type, v.val); + convertTo(tmp, tmp.type()); + return v; +} + +template inline Mat::operator Matx<_Tp, m, n>() const +{ + CV_Assert( data && dims <= 2 && rows == m && cols == n && channels() == 1 ); + + if( isContinuous() && type() == DataType<_Tp>::type ) + return Matx<_Tp, m, n>((_Tp*)data); + Matx<_Tp, m, n> mtx; Mat tmp(rows, cols, DataType<_Tp>::type, mtx.val); + convertTo(tmp, tmp.type()); + return mtx; +} + + +template inline void Mat::push_back(const _Tp& elem) +{ + if( !data ) + { + *this = Mat(1, 1, DataType<_Tp>::type, (void*)&elem).clone(); + return; + } + CV_Assert(DataType<_Tp>::type == type() && cols == 1 + /* && dims == 2 (cols == 1 implies dims == 2) */); + uchar* tmp = dataend + step[0]; + if( !isSubmatrix() && isContinuous() && tmp <= datalimit ) + { + *(_Tp*)(data + (size.p[0]++)*step.p[0]) = elem; + dataend = tmp; + } + else + push_back_(&elem); +} + +template inline void Mat::push_back(const Mat_<_Tp>& m) +{ + push_back((const Mat&)m); +} + +inline Mat::MSize::MSize(int* _p) : p(_p) {} +inline Size Mat::MSize::operator()() const +{ + CV_DbgAssert(p[-1] <= 2); + return Size(p[1], p[0]); +} +inline const int& Mat::MSize::operator[](int i) const { return p[i]; } +inline int& Mat::MSize::operator[](int i) { return p[i]; } +inline Mat::MSize::operator const int*() const { return p; } + +inline bool Mat::MSize::operator == (const MSize& sz) const +{ + int d = p[-1], dsz = sz.p[-1]; + if( d != dsz ) + return false; + if( d == 2 ) + return p[0] == sz.p[0] && p[1] == sz.p[1]; + + for( int i = 0; i < d; i++ ) + if( p[i] != sz.p[i] ) + return false; + return true; +} + +inline bool Mat::MSize::operator != (const MSize& sz) const +{ + return !(*this == sz); +} + +inline Mat::MStep::MStep() { p = buf; p[0] = p[1] = 0; } +inline Mat::MStep::MStep(size_t s) { p = buf; p[0] = s; p[1] = 0; } +inline const size_t& Mat::MStep::operator[](int i) const { return p[i]; } +inline size_t& Mat::MStep::operator[](int i) { return p[i]; } +inline Mat::MStep::operator size_t() const +{ + CV_DbgAssert( p == buf ); + return buf[0]; +} +inline Mat::MStep& Mat::MStep::operator = (size_t s) +{ + CV_DbgAssert( p == buf ); + buf[0] = s; + return *this; +} + +static inline Mat cvarrToMatND(const CvArr* arr, bool copyData=false, int coiMode=0) +{ + return cvarrToMat(arr, copyData, true, coiMode); +} + +///////////////////////////////////////////// SVD ////////////////////////////////////////////////////// + +inline SVD::SVD() {} +inline SVD::SVD( InputArray m, int flags ) { operator ()(m, flags); } +inline void SVD::solveZ( InputArray m, OutputArray _dst ) +{ + Mat mtx = m.getMat(); + SVD svd(mtx, (mtx.rows >= mtx.cols ? 
0 : SVD::FULL_UV)); + _dst.create(svd.vt.cols, 1, svd.vt.type()); + Mat dst = _dst.getMat(); + svd.vt.row(svd.vt.rows-1).reshape(1,svd.vt.cols).copyTo(dst); +} + +template inline void + SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt ) +{ + assert( nm == MIN(m, n)); + Mat _a(a, false), _u(u, false), _w(w, false), _vt(vt, false); + SVD::compute(_a, _w, _u, _vt); + CV_Assert(_w.data == (uchar*)&w.val[0] && _u.data == (uchar*)&u.val[0] && _vt.data == (uchar*)&vt.val[0]); +} + +template inline void +SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w ) +{ + assert( nm == MIN(m, n)); + Mat _a(a, false), _w(w, false); + SVD::compute(_a, _w); + CV_Assert(_w.data == (uchar*)&w.val[0]); +} + +template inline void +SVD::backSubst( const Matx<_Tp, nm, 1>& w, const Matx<_Tp, m, nm>& u, + const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, + Matx<_Tp, n, nb>& dst ) +{ + assert( nm == MIN(m, n)); + Mat _u(u, false), _w(w, false), _vt(vt, false), _rhs(rhs, false), _dst(dst, false); + SVD::backSubst(_w, _u, _vt, _rhs, _dst); + CV_Assert(_dst.data == (uchar*)&dst.val[0]); +} + +///////////////////////////////// Mat_<_Tp> //////////////////////////////////// + +template inline Mat_<_Tp>::Mat_() + : Mat() { flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type; } + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols) + : Mat(_rows, _cols, DataType<_Tp>::type) {} + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols, const _Tp& value) + : Mat(_rows, _cols, DataType<_Tp>::type) { *this = value; } + +template inline Mat_<_Tp>::Mat_(Size _sz) + : Mat(_sz.height, _sz.width, DataType<_Tp>::type) {} + +template inline Mat_<_Tp>::Mat_(Size _sz, const _Tp& value) + : Mat(_sz.height, _sz.width, DataType<_Tp>::type) { *this = value; } + +template inline Mat_<_Tp>::Mat_(int _dims, const int* _sz) + : Mat(_dims, _sz, DataType<_Tp>::type) {} + +template inline Mat_<_Tp>::Mat_(int _dims, const int* _sz, const _Tp& _s) + : Mat(_dims, _sz, DataType<_Tp>::type, Scalar(_s)) {} + +template inline Mat_<_Tp>::Mat_(const Mat_<_Tp>& m, const Range* ranges) + : Mat(m, ranges) {} + +template inline Mat_<_Tp>::Mat_(const Mat& m) + : Mat() { flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type; *this = m; } + +template inline Mat_<_Tp>::Mat_(const Mat_& m) + : Mat(m) {} + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols, _Tp* _data, size_t steps) + : Mat(_rows, _cols, DataType<_Tp>::type, _data, steps) {} + +template inline Mat_<_Tp>::Mat_(const Mat_& m, const Range& _rowRange, const Range& _colRange) + : Mat(m, _rowRange, _colRange) {} + +template inline Mat_<_Tp>::Mat_(const Mat_& m, const Rect& roi) + : Mat(m, roi) {} + +template template inline + Mat_<_Tp>::Mat_(const Vec::channel_type, n>& vec, bool copyData) + : Mat(n/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&vec) +{ + CV_Assert(n%DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template template inline + Mat_<_Tp>::Mat_(const Matx::channel_type,m,n>& M, bool copyData) + : Mat(m, n/DataType<_Tp>::channels, DataType<_Tp>::type, (void*)&M) +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const Point_::channel_type>& pt, bool copyData) + : Mat(2/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&pt) +{ + CV_Assert(2 % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const Point3_::channel_type>& pt, bool copyData) + : 
Mat(3/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&pt) +{ + CV_Assert(3 % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const MatCommaInitializer_<_Tp>& commaInitializer) + : Mat(commaInitializer) {} + +template inline Mat_<_Tp>::Mat_(const vector<_Tp>& vec, bool copyData) + : Mat(vec, copyData) {} + +template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat& m) +{ + if( DataType<_Tp>::type == m.type() ) + { + Mat::operator = (m); + return *this; + } + if( DataType<_Tp>::depth == m.depth() ) + { + return (*this = m.reshape(DataType<_Tp>::channels, m.dims, 0)); + } + CV_DbgAssert(DataType<_Tp>::channels == m.channels()); + m.convertTo(*this, type()); + return *this; +} + +template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat_& m) +{ + Mat::operator=(m); + return *this; +} + +template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const _Tp& s) +{ + typedef typename DataType<_Tp>::vec_type VT; + Mat::operator=(Scalar((const VT&)s)); + return *this; +} + +template inline void Mat_<_Tp>::create(int _rows, int _cols) +{ + Mat::create(_rows, _cols, DataType<_Tp>::type); +} + +template inline void Mat_<_Tp>::create(Size _sz) +{ + Mat::create(_sz, DataType<_Tp>::type); +} + +template inline void Mat_<_Tp>::create(int _dims, const int* _sz) +{ + Mat::create(_dims, _sz, DataType<_Tp>::type); +} + + +template inline Mat_<_Tp> Mat_<_Tp>::cross(const Mat_& m) const +{ return Mat_<_Tp>(Mat::cross(m)); } + +template template inline Mat_<_Tp>::operator Mat_() const +{ return Mat_(*this); } + +template inline Mat_<_Tp> Mat_<_Tp>::row(int y) const +{ return Mat_(*this, Range(y, y+1), Range::all()); } +template inline Mat_<_Tp> Mat_<_Tp>::col(int x) const +{ return Mat_(*this, Range::all(), Range(x, x+1)); } +template inline Mat_<_Tp> Mat_<_Tp>::diag(int d) const +{ return Mat_(Mat::diag(d)); } +template inline Mat_<_Tp> Mat_<_Tp>::clone() const +{ return Mat_(Mat::clone()); } + +template inline size_t Mat_<_Tp>::elemSize() const +{ + CV_DbgAssert( Mat::elemSize() == sizeof(_Tp) ); + return sizeof(_Tp); +} + +template inline size_t Mat_<_Tp>::elemSize1() const +{ + CV_DbgAssert( Mat::elemSize1() == sizeof(_Tp)/DataType<_Tp>::channels ); + return sizeof(_Tp)/DataType<_Tp>::channels; +} +template inline int Mat_<_Tp>::type() const +{ + CV_DbgAssert( Mat::type() == DataType<_Tp>::type ); + return DataType<_Tp>::type; +} +template inline int Mat_<_Tp>::depth() const +{ + CV_DbgAssert( Mat::depth() == DataType<_Tp>::depth ); + return DataType<_Tp>::depth; +} +template inline int Mat_<_Tp>::channels() const +{ + CV_DbgAssert( Mat::channels() == DataType<_Tp>::channels ); + return DataType<_Tp>::channels; +} +template inline size_t Mat_<_Tp>::stepT(int i) const { return step.p[i]/elemSize(); } +template inline size_t Mat_<_Tp>::step1(int i) const { return step.p[i]/elemSize1(); } + +template inline Mat_<_Tp>& Mat_<_Tp>::adjustROI( int dtop, int dbottom, int dleft, int dright ) +{ return (Mat_<_Tp>&)(Mat::adjustROI(dtop, dbottom, dleft, dright)); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Range& _rowRange, const Range& _colRange ) const +{ return Mat_<_Tp>(*this, _rowRange, _colRange); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Rect& roi ) const +{ return Mat_<_Tp>(*this, roi); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Range* ranges ) const +{ return Mat_<_Tp>(*this, ranges); } + +template inline _Tp* Mat_<_Tp>::operator [](int y) +{ return (_Tp*)ptr(y); } +template inline const _Tp* 
Mat_<_Tp>::operator [](int y) const +{ return (const _Tp*)ptr(y); } + +template inline _Tp& Mat_<_Tp>::operator ()(int i0, int i1) +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((_Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0, int i1) const +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((const _Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline _Tp& Mat_<_Tp>::operator ()(Point pt) +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)pt.x < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((_Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline const _Tp& Mat_<_Tp>::operator ()(Point pt) const +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)pt.x < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((const _Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline _Tp& Mat_<_Tp>::operator ()(const int* idx) +{ + return Mat::at<_Tp>(idx); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(const int* idx) const +{ + return Mat::at<_Tp>(idx); +} + +template template inline _Tp& Mat_<_Tp>::operator ()(const Vec& idx) +{ + return Mat::at<_Tp>(idx); +} + +template template inline const _Tp& Mat_<_Tp>::operator ()(const Vec& idx) const +{ + return Mat::at<_Tp>(idx); +} + +template inline _Tp& Mat_<_Tp>::operator ()(int i0) +{ + return this->at<_Tp>(i0); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0) const +{ + return this->at<_Tp>(i0); +} + +template inline _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) +{ + return this->at<_Tp>(i0, i1, i2); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) const +{ + return this->at<_Tp>(i0, i1, i2); +} + + +template inline Mat_<_Tp>::operator vector<_Tp>() const +{ + vector<_Tp> v; + copyTo(v); + return v; +} + +template template inline Mat_<_Tp>::operator Vec::channel_type, n>() const +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + return this->Mat::operator Vec::channel_type, n>(); +} + +template template inline Mat_<_Tp>::operator Matx::channel_type, m, n>() const +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + + Matx::channel_type, m, n> res = this->Mat::operator Matx::channel_type, m, n>(); + return res; +} + +template inline void +process( const Mat_& m1, Mat_& m2, Op op ) +{ + int y, x, rows = m1.rows, cols = m1.cols; + + CV_DbgAssert( m1.size() == m2.size() ); + + for( y = 0; y < rows; y++ ) + { + const T1* src = m1[y]; + T2* dst = m2[y]; + + for( x = 0; x < cols; x++ ) + dst[x] = op(src[x]); + } +} + +template inline void +process( const Mat_& m1, const Mat_& m2, Mat_& m3, Op op ) +{ + int y, x, rows = m1.rows, cols = m1.cols; + + CV_DbgAssert( m1.size() == m2.size() ); + + for( y = 0; y < rows; y++ ) + { + const T1* src1 = m1[y]; + const T2* src2 = m2[y]; + T3* dst = m3[y]; + + for( x = 0; x < cols; x++ ) + dst[x] = op( src1[x], src2[x] ); + } +} + + +/////////////////////////////// Input/Output Arrays ///////////////////////////////// + +template inline _InputArray::_InputArray(const vector<_Tp>& vec) + : flags(FIXED_TYPE + STD_VECTOR + DataType<_Tp>::type), obj((void*)&vec) {} + +template inline _InputArray::_InputArray(const vector >& vec) + : flags(FIXED_TYPE + STD_VECTOR_VECTOR 
+ DataType<_Tp>::type), obj((void*)&vec) {} + +template inline _InputArray::_InputArray(const vector >& vec) + : flags(FIXED_TYPE + STD_VECTOR_MAT + DataType<_Tp>::type), obj((void*)&vec) {} + +template inline _InputArray::_InputArray(const Matx<_Tp, m, n>& mtx) + : flags(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type), obj((void*)&mtx), sz(n, m) {} + +template inline _InputArray::_InputArray(const _Tp* vec, int n) + : flags(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type), obj((void*)vec), sz(n, 1) {} + +inline _InputArray::_InputArray(const Scalar& s) + : flags(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F), obj((void*)&s), sz(1, 4) {} + +template inline _InputArray::_InputArray(const Mat_<_Tp>& m) + : flags(FIXED_TYPE + MAT + DataType<_Tp>::type), obj((void*)&m) {} + +template inline _OutputArray::_OutputArray(vector<_Tp>& vec) + : _InputArray(vec) {} +template inline _OutputArray::_OutputArray(vector >& vec) + : _InputArray(vec) {} +template inline _OutputArray::_OutputArray(vector >& vec) + : _InputArray(vec) {} +template inline _OutputArray::_OutputArray(Mat_<_Tp>& m) + : _InputArray(m) {} +template inline _OutputArray::_OutputArray(Matx<_Tp, m, n>& mtx) + : _InputArray(mtx) {} +template inline _OutputArray::_OutputArray(_Tp* vec, int n) + : _InputArray(vec, n) {} + +template inline _OutputArray::_OutputArray(const vector<_Tp>& vec) + : _InputArray(vec) {flags |= FIXED_SIZE;} +template inline _OutputArray::_OutputArray(const vector >& vec) + : _InputArray(vec) {flags |= FIXED_SIZE;} +template inline _OutputArray::_OutputArray(const vector >& vec) + : _InputArray(vec) {flags |= FIXED_SIZE;} + +template inline _OutputArray::_OutputArray(const Mat_<_Tp>& m) + : _InputArray(m) {flags |= FIXED_SIZE;} +template inline _OutputArray::_OutputArray(const Matx<_Tp, m, n>& mtx) + : _InputArray(mtx) {} +template inline _OutputArray::_OutputArray(const _Tp* vec, int n) + : _InputArray(vec, n) {} + +//////////////////////////////////// Matrix Expressions ///////////////////////////////////////// + +class CV_EXPORTS MatOp +{ +public: + MatOp() {}; + virtual ~MatOp() {}; + + virtual bool elementWise(const MatExpr& expr) const; + virtual void assign(const MatExpr& expr, Mat& m, int type=-1) const = 0; + virtual void roi(const MatExpr& expr, const Range& rowRange, + const Range& colRange, MatExpr& res) const; + virtual void diag(const MatExpr& expr, int d, MatExpr& res) const; + virtual void augAssignAdd(const MatExpr& expr, Mat& m) const; + virtual void augAssignSubtract(const MatExpr& expr, Mat& m) const; + virtual void augAssignMultiply(const MatExpr& expr, Mat& m) const; + virtual void augAssignDivide(const MatExpr& expr, Mat& m) const; + virtual void augAssignAnd(const MatExpr& expr, Mat& m) const; + virtual void augAssignOr(const MatExpr& expr, Mat& m) const; + virtual void augAssignXor(const MatExpr& expr, Mat& m) const; + + virtual void add(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void add(const MatExpr& expr1, const Scalar& s, MatExpr& res) const; + + virtual void subtract(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void subtract(const Scalar& s, const MatExpr& expr, MatExpr& res) const; + + virtual void multiply(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const; + virtual void multiply(const MatExpr& expr1, double s, MatExpr& res) const; + + virtual void divide(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const; + virtual void divide(double s, const MatExpr& expr, 
MatExpr& res) const; + + virtual void abs(const MatExpr& expr, MatExpr& res) const; + + virtual void transpose(const MatExpr& expr, MatExpr& res) const; + virtual void matmul(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void invert(const MatExpr& expr, int method, MatExpr& res) const; + + virtual Size size(const MatExpr& expr) const; + virtual int type(const MatExpr& expr) const; +}; + + +class CV_EXPORTS MatExpr +{ +public: + MatExpr() : op(0), flags(0), a(Mat()), b(Mat()), c(Mat()), alpha(0), beta(0), s(Scalar()) {} + MatExpr(const MatOp* _op, int _flags, const Mat& _a=Mat(), const Mat& _b=Mat(), + const Mat& _c=Mat(), double _alpha=1, double _beta=1, const Scalar& _s=Scalar()) + : op(_op), flags(_flags), a(_a), b(_b), c(_c), alpha(_alpha), beta(_beta), s(_s) {} + explicit MatExpr(const Mat& m); + operator Mat() const + { + Mat m; + op->assign(*this, m); + return m; + } + + template operator Mat_<_Tp>() const + { + Mat_<_Tp> m; + op->assign(*this, m, DataType<_Tp>::type); + return m; + } + + MatExpr row(int y) const; + MatExpr col(int x) const; + MatExpr diag(int d=0) const; + MatExpr operator()( const Range& rowRange, const Range& colRange ) const; + MatExpr operator()( const Rect& roi ) const; + + Mat cross(const Mat& m) const; + double dot(const Mat& m) const; + + MatExpr t() const; + MatExpr inv(int method = DECOMP_LU) const; + MatExpr mul(const MatExpr& e, double scale=1) const; + MatExpr mul(const Mat& m, double scale=1) const; + + Size size() const; + int type() const; + + const MatOp* op; + int flags; + + Mat a, b, c; + double alpha, beta; + Scalar s; +}; + + +CV_EXPORTS MatExpr operator + (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator + (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator + (const Scalar& s, const Mat& a); +CV_EXPORTS MatExpr operator + (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator + (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator + (const MatExpr& e, const Scalar& s); +CV_EXPORTS MatExpr operator + (const Scalar& s, const MatExpr& e); +CV_EXPORTS MatExpr operator + (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator - (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator - (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator - (const Scalar& s, const Mat& a); +CV_EXPORTS MatExpr operator - (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator - (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator - (const MatExpr& e, const Scalar& s); +CV_EXPORTS MatExpr operator - (const Scalar& s, const MatExpr& e); +CV_EXPORTS MatExpr operator - (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator - (const Mat& m); +CV_EXPORTS MatExpr operator - (const MatExpr& e); + +CV_EXPORTS MatExpr operator * (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator * (const Mat& a, double s); +CV_EXPORTS MatExpr operator * (double s, const Mat& a); +CV_EXPORTS MatExpr operator * (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator * (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator * (const MatExpr& e, double s); +CV_EXPORTS MatExpr operator * (double s, const MatExpr& e); +CV_EXPORTS MatExpr operator * (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator / (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator / (const Mat& a, double s); +CV_EXPORTS MatExpr operator / (double s, const Mat& a); +CV_EXPORTS MatExpr operator / (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator / 
(const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator / (const MatExpr& e, double s); +CV_EXPORTS MatExpr operator / (double s, const MatExpr& e); +CV_EXPORTS MatExpr operator / (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator < (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator < (const Mat& a, double s); +CV_EXPORTS MatExpr operator < (double s, const Mat& a); + +CV_EXPORTS MatExpr operator <= (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator <= (const Mat& a, double s); +CV_EXPORTS MatExpr operator <= (double s, const Mat& a); + +CV_EXPORTS MatExpr operator == (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator == (const Mat& a, double s); +CV_EXPORTS MatExpr operator == (double s, const Mat& a); + +CV_EXPORTS MatExpr operator != (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator != (const Mat& a, double s); +CV_EXPORTS MatExpr operator != (double s, const Mat& a); + +CV_EXPORTS MatExpr operator >= (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator >= (const Mat& a, double s); +CV_EXPORTS MatExpr operator >= (double s, const Mat& a); + +CV_EXPORTS MatExpr operator > (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator > (const Mat& a, double s); +CV_EXPORTS MatExpr operator > (double s, const Mat& a); + +CV_EXPORTS MatExpr min(const Mat& a, const Mat& b); +CV_EXPORTS MatExpr min(const Mat& a, double s); +CV_EXPORTS MatExpr min(double s, const Mat& a); + +CV_EXPORTS MatExpr max(const Mat& a, const Mat& b); +CV_EXPORTS MatExpr max(const Mat& a, double s); +CV_EXPORTS MatExpr max(double s, const Mat& a); + +template static inline MatExpr min(const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + return cv::min((const Mat&)a, (const Mat&)b); +} + +template static inline MatExpr min(const Mat_<_Tp>& a, double s) +{ + return cv::min((const Mat&)a, s); +} + +template static inline MatExpr min(double s, const Mat_<_Tp>& a) +{ + return cv::min((const Mat&)a, s); +} + +template static inline MatExpr max(const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + return cv::max((const Mat&)a, (const Mat&)b); +} + +template static inline MatExpr max(const Mat_<_Tp>& a, double s) +{ + return cv::max((const Mat&)a, s); +} + +template static inline MatExpr max(double s, const Mat_<_Tp>& a) +{ + return cv::max((const Mat&)a, s); +} + +template static inline void min(const Mat_<_Tp>& a, const Mat_<_Tp>& b, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, (const Mat&)b, (Mat&)c); +} + +template static inline void min(const Mat_<_Tp>& a, double s, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + +template static inline void min(double s, const Mat_<_Tp>& a, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + +template static inline void max(const Mat_<_Tp>& a, const Mat_<_Tp>& b, Mat_<_Tp>& c) +{ + cv::max((const Mat&)a, (const Mat&)b, (Mat&)c); +} + +template static inline void max(const Mat_<_Tp>& a, double s, Mat_<_Tp>& c) +{ + cv::max((const Mat&)a, s, (Mat&)c); +} + +template static inline void max(double s, const Mat_<_Tp>& a, Mat_<_Tp>& c) +{ + cv::max((const Mat&)a, s, (Mat&)c); +} + + +CV_EXPORTS MatExpr operator & (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator & (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator & (const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator | (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator | (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator | (const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator ^ (const Mat& a, const Mat& b); +CV_EXPORTS 
MatExpr operator ^ (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator ^ (const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator ~(const Mat& m); + +CV_EXPORTS MatExpr abs(const Mat& m); +CV_EXPORTS MatExpr abs(const MatExpr& e); + +template static inline MatExpr abs(const Mat_<_Tp>& m) +{ + return cv::abs((const Mat&)m); +} + +////////////////////////////// Augmenting algebraic operations ////////////////////////////////// + +inline Mat& Mat::operator = (const MatExpr& e) +{ + e.op->assign(e, *this); + return *this; +} + +template inline Mat_<_Tp>::Mat_(const MatExpr& e) +{ + e.op->assign(e, *this, DataType<_Tp>::type); +} + +template Mat_<_Tp>& Mat_<_Tp>::operator = (const MatExpr& e) +{ + e.op->assign(e, *this, DataType<_Tp>::type); + return *this; +} + +static inline Mat& operator += (const Mat& a, const Mat& b) +{ + add(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator += (const Mat& a, const Scalar& s) +{ + add(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + add(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const Scalar& s) +{ + add(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator += (const Mat& a, const MatExpr& b) +{ + b.op->augAssignAdd(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignAdd(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator -= (const Mat& a, const Mat& b) +{ + subtract(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator -= (const Mat& a, const Scalar& s) +{ + subtract(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + subtract(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const Scalar& s) +{ + subtract(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator -= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignSubtract(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignSubtract(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator *= (const Mat& a, const Mat& b) +{ + gemm(a, b, 1, Mat(), 0, (Mat&)a, 0); + return (Mat&)a; +} + +static inline Mat& operator *= (const Mat& a, double s) +{ + a.convertTo((Mat&)a, -1, s); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + gemm(a, b, 1, Mat(), 0, (Mat&)a, 0); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, double s) +{ + a.convertTo((Mat&)a, -1, s); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator *= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignMultiply(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignMultiply(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator /= (const Mat& a, const Mat& b) +{ + divide(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator /= (const Mat& a, double s) +{ + a.convertTo((Mat&)a, -1, 1./s); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + divide(a, b, (Mat&)a); 
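// --- Editor's note: illustrative sketch, not part of the vendored OpenCV header.
// The MatExpr operators and the augmenting assignment overloads defined in this
// section allow expression-style arithmetic on cv::Mat; expr_example is a
// hypothetical name.
#include <opencv2/core/core.hpp>

static void expr_example()
{
    cv::Mat a = cv::Mat::eye(3, 3, CV_32F);
    cv::Mat b = cv::Mat::ones(3, 3, CV_32F);
    cv::Mat c = 2.0 * a + b;     // built lazily as a MatExpr, evaluated on assignment
    c += a;                      // augmenting add, forwarded to cv::add
    c *= 0.5;                    // in-place scaling via convertTo
    cv::Mat mask = c > 0.75;     // comparison operators produce 8-bit masks
    (void)mask;
}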
+ return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, double s) +{ + a.convertTo((Mat&)a, -1, 1./s); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator /= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignDivide(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignDivide(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +////////////////////////////// Logical operations /////////////////////////////// + +static inline Mat& operator &= (const Mat& a, const Mat& b) +{ + bitwise_and(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator &= (const Mat& a, const Scalar& s) +{ + bitwise_and(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator &= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_and(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator &= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_and(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator |= (const Mat& a, const Mat& b) +{ + bitwise_or(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator |= (const Mat& a, const Scalar& s) +{ + bitwise_or(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator |= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_or(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator |= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_or(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator ^= (const Mat& a, const Mat& b) +{ + bitwise_xor(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator ^= (const Mat& a, const Scalar& s) +{ + bitwise_xor(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator ^= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_xor(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator ^= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_xor(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +/////////////////////////////// Miscellaneous operations ////////////////////////////// + +template void split(const Mat& src, vector >& mv) +{ split(src, (vector&)mv ); } + +////////////////////////////////////////////////////////////// + +template inline MatExpr Mat_<_Tp>::zeros(int rows, int cols) +{ + return Mat::zeros(rows, cols, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::zeros(Size sz) +{ + return Mat::zeros(sz, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::ones(int rows, int cols) +{ + return Mat::ones(rows, cols, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::ones(Size sz) +{ + return Mat::ones(sz, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::eye(int rows, int cols) +{ + return Mat::eye(rows, cols, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::eye(Size sz) +{ + return Mat::eye(sz, DataType<_Tp>::type); +} + +//////////////////////////////// Iterators & Comma initializers ////////////////////////////////// + +inline MatConstIterator::MatConstIterator() + : m(0), elemSize(0), ptr(0), sliceStart(0), sliceEnd(0) {} + +inline MatConstIterator::MatConstIterator(const Mat* _m) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + if( m && m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + seek((const int*)0); +} + +inline 
MatConstIterator::MatConstIterator(const Mat* _m, int _row, int _col) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + CV_Assert(m && m->dims <= 2); + if( m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + int idx[]={_row, _col}; + seek(idx); +} + +inline MatConstIterator::MatConstIterator(const Mat* _m, Point _pt) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + CV_Assert(m && m->dims <= 2); + if( m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + int idx[]={_pt.y, _pt.x}; + seek(idx); +} + +inline MatConstIterator::MatConstIterator(const MatConstIterator& it) + : m(it.m), elemSize(it.elemSize), ptr(it.ptr), sliceStart(it.sliceStart), sliceEnd(it.sliceEnd) +{} + +inline MatConstIterator& MatConstIterator::operator = (const MatConstIterator& it ) +{ + m = it.m; elemSize = it.elemSize; ptr = it.ptr; + sliceStart = it.sliceStart; sliceEnd = it.sliceEnd; + return *this; +} + +inline uchar* MatConstIterator::operator *() const { return ptr; } + +inline MatConstIterator& MatConstIterator::operator += (ptrdiff_t ofs) +{ + if( !m || ofs == 0 ) + return *this; + ptrdiff_t ofsb = ofs*elemSize; + ptr += ofsb; + if( ptr < sliceStart || sliceEnd <= ptr ) + { + ptr -= ofsb; + seek(ofs, true); + } + return *this; +} + +inline MatConstIterator& MatConstIterator::operator -= (ptrdiff_t ofs) +{ return (*this += -ofs); } + +inline MatConstIterator& MatConstIterator::operator --() +{ + if( m && (ptr -= elemSize) < sliceStart ) + { + ptr += elemSize; + seek(-1, true); + } + return *this; +} + +inline MatConstIterator MatConstIterator::operator --(int) +{ + MatConstIterator b = *this; + *this += -1; + return b; +} + +inline MatConstIterator& MatConstIterator::operator ++() +{ + if( m && (ptr += elemSize) >= sliceEnd ) + { + ptr -= elemSize; + seek(1, true); + } + return *this; +} + +inline MatConstIterator MatConstIterator::operator ++(int) +{ + MatConstIterator b = *this; + *this += 1; + return b; +} + +template inline MatConstIterator_<_Tp>::MatConstIterator_() {} + +template inline MatConstIterator_<_Tp>::MatConstIterator_(const Mat_<_Tp>* _m) + : MatConstIterator(_m) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col) + : MatConstIterator(_m, _row, _col) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const Mat_<_Tp>* _m, Point _pt) + : MatConstIterator(_m, _pt) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const MatConstIterator_& it) + : MatConstIterator(it) {} + +template inline MatConstIterator_<_Tp>& + MatConstIterator_<_Tp>::operator = (const MatConstIterator_& it ) +{ + MatConstIterator::operator = (it); + return *this; +} + +template inline _Tp MatConstIterator_<_Tp>::operator *() const { return *(_Tp*)(this->ptr); } + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator += (ptrdiff_t ofs) +{ + MatConstIterator::operator += (ofs); + return *this; +} + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator -= (ptrdiff_t ofs) +{ return (*this += -ofs); } + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator --() +{ + MatConstIterator::operator --(); + return *this; +} + +template inline MatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator --(int) +{ + MatConstIterator_ b = *this; + MatConstIterator::operator --(); + return b; +} + +template inline MatConstIterator_<_Tp>& 
MatConstIterator_<_Tp>::operator ++() +{ + MatConstIterator::operator ++(); + return *this; +} + +template inline MatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator ++(int) +{ + MatConstIterator_ b = *this; + MatConstIterator::operator ++(); + return b; +} + +template inline MatIterator_<_Tp>::MatIterator_() : MatConstIterator_<_Tp>() {} + +template inline MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m) + : MatConstIterator_<_Tp>(_m) {} + +template inline MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m, int _row, int _col) + : MatConstIterator_<_Tp>(_m, _row, _col) {} + +template inline MatIterator_<_Tp>::MatIterator_(const Mat_<_Tp>* _m, Point _pt) + : MatConstIterator_<_Tp>(_m, _pt) {} + +template inline MatIterator_<_Tp>::MatIterator_(const Mat_<_Tp>* _m, const int* _idx) + : MatConstIterator_<_Tp>(_m, _idx) {} + +template inline MatIterator_<_Tp>::MatIterator_(const MatIterator_& it) + : MatConstIterator_<_Tp>(it) {} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator = (const MatIterator_<_Tp>& it ) +{ + MatConstIterator::operator = (it); + return *this; +} + +template inline _Tp& MatIterator_<_Tp>::operator *() const { return *(_Tp*)(this->ptr); } + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator += (ptrdiff_t ofs) +{ + MatConstIterator::operator += (ofs); + return *this; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator -= (ptrdiff_t ofs) +{ + MatConstIterator::operator += (-ofs); + return *this; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator --() +{ + MatConstIterator::operator --(); + return *this; +} + +template inline MatIterator_<_Tp> MatIterator_<_Tp>::operator --(int) +{ + MatIterator_ b = *this; + MatConstIterator::operator --(); + return b; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator ++() +{ + MatConstIterator::operator ++(); + return *this; +} + +template inline MatIterator_<_Tp> MatIterator_<_Tp>::operator ++(int) +{ + MatIterator_ b = *this; + MatConstIterator::operator ++(); + return b; +} + +template inline Point MatConstIterator_<_Tp>::pos() const +{ + if( !m ) + return Point(); + CV_DbgAssert( m->dims <= 2 ); + if( m->isContinuous() ) + { + ptrdiff_t ofs = (const _Tp*)ptr - (const _Tp*)m->data; + int y = (int)(ofs / m->cols), x = (int)(ofs - (ptrdiff_t)y*m->cols); + return Point(x, y); + } + else + { + ptrdiff_t ofs = (uchar*)ptr - m->data; + int y = (int)(ofs / m->step), x = (int)((ofs - y*m->step)/sizeof(_Tp)); + return Point(x, y); + } +} + +static inline bool +operator == (const MatConstIterator& a, const MatConstIterator& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatConstIterator& a, const MatConstIterator& b) +{ return !(a == b); } + +template static inline bool +operator == (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b) +{ return a.m != b.m || a.ptr != b.ptr; } + +template static inline bool +operator == (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b) +{ return a.m != b.m || a.ptr != b.ptr; } + +static inline bool +operator < (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr < b.ptr; } + +static inline bool +operator > (const MatConstIterator& a, const MatConstIterator& 
b) +{ return a.ptr > b.ptr; } + +static inline bool +operator <= (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr <= b.ptr; } + +static inline bool +operator >= (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr >= b.ptr; } + +CV_EXPORTS ptrdiff_t operator - (const MatConstIterator& b, const MatConstIterator& a); + +static inline MatConstIterator operator + (const MatConstIterator& a, ptrdiff_t ofs) +{ MatConstIterator b = a; return b += ofs; } + +static inline MatConstIterator operator + (ptrdiff_t ofs, const MatConstIterator& a) +{ MatConstIterator b = a; return b += ofs; } + +static inline MatConstIterator operator - (const MatConstIterator& a, ptrdiff_t ofs) +{ MatConstIterator b = a; return b += -ofs; } + +template static inline MatConstIterator_<_Tp> +operator + (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; } + +template static inline MatConstIterator_<_Tp> +operator + (ptrdiff_t ofs, const MatConstIterator_<_Tp>& a) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; } + +template static inline MatConstIterator_<_Tp> +operator - (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatConstIterator_<_Tp>&)t; } + +inline uchar* MatConstIterator::operator [](ptrdiff_t i) const +{ return *(*this + i); } + +template inline _Tp MatConstIterator_<_Tp>::operator [](ptrdiff_t i) const +{ return *(_Tp*)MatConstIterator::operator [](i); } + +template static inline MatIterator_<_Tp> +operator + (const MatIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatIterator_<_Tp>&)t; } + +template static inline MatIterator_<_Tp> +operator + (ptrdiff_t ofs, const MatIterator_<_Tp>& a) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatIterator_<_Tp>&)t; } + +template static inline MatIterator_<_Tp> +operator - (const MatIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatIterator_<_Tp>&)t; } + +template inline _Tp& MatIterator_<_Tp>::operator [](ptrdiff_t i) const +{ return *(*this + i); } + +template inline MatConstIterator_<_Tp> Mat_<_Tp>::begin() const +{ return Mat::begin<_Tp>(); } + +template inline MatConstIterator_<_Tp> Mat_<_Tp>::end() const +{ return Mat::end<_Tp>(); } + +template inline MatIterator_<_Tp> Mat_<_Tp>::begin() +{ return Mat::begin<_Tp>(); } + +template inline MatIterator_<_Tp> Mat_<_Tp>::end() +{ return Mat::end<_Tp>(); } + +template inline MatCommaInitializer_<_Tp>::MatCommaInitializer_(Mat_<_Tp>* _m) : it(_m) {} + +template template inline MatCommaInitializer_<_Tp>& +MatCommaInitializer_<_Tp>::operator , (T2 v) +{ + CV_DbgAssert( this->it < ((const Mat_<_Tp>*)this->it.m)->end() ); + *this->it = _Tp(v); ++this->it; + return *this; +} + +template inline Mat_<_Tp> MatCommaInitializer_<_Tp>::operator *() const +{ + CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() ); + return Mat_<_Tp>(*this->it.m); +} + +template inline MatCommaInitializer_<_Tp>::operator Mat_<_Tp>() const +{ + CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() ); + return Mat_<_Tp>(*this->it.m); +} + +template static inline MatCommaInitializer_<_Tp> +operator << (const Mat_<_Tp>& m, T2 val) +{ + MatCommaInitializer_<_Tp> commaInitializer((Mat_<_Tp>*)&m); + return (commaInitializer, val); +} + +//////////////////////////////// SparseMat 
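// --- Editor's note: illustrative sketch, not part of the vendored OpenCV header.
// The comma initializer (operator <<) and the typed iterators defined above are
// typically used together as follows; iter_example is a hypothetical name.
#include <opencv2/core/core.hpp>

static void iter_example()
{
    cv::Mat_<float> k(3, 3);
    k << 0, -1,  0,
        -1,  4, -1,
         0, -1,  0;              // MatCommaInitializer_ fills k element by element
    float sum = 0.f;
    for( cv::MatIterator_<float> it = k.begin(); it != k.end(); ++it )
        sum += *it;              // iterators traverse the matrix row by row
    (void)sum;
}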
//////////////////////////////// + +inline SparseMat::SparseMat() +: flags(MAGIC_VAL), hdr(0) +{ +} + +inline SparseMat::SparseMat(int _dims, const int* _sizes, int _type) +: flags(MAGIC_VAL), hdr(0) +{ + create(_dims, _sizes, _type); +} + +inline SparseMat::SparseMat(const SparseMat& m) +: flags(m.flags), hdr(m.hdr) +{ + addref(); +} + +inline SparseMat::~SparseMat() +{ + release(); +} + +inline SparseMat& SparseMat::operator = (const SparseMat& m) +{ + if( this != &m ) + { + if( m.hdr ) + CV_XADD(&m.hdr->refcount, 1); + release(); + flags = m.flags; + hdr = m.hdr; + } + return *this; +} + +inline SparseMat& SparseMat::operator = (const Mat& m) +{ return (*this = SparseMat(m)); } + +inline SparseMat SparseMat::clone() const +{ + SparseMat temp; + this->copyTo(temp); + return temp; +} + + +inline void SparseMat::assignTo( SparseMat& m, int _type ) const +{ + if( _type < 0 ) + m = *this; + else + convertTo(m, _type); +} + +inline void SparseMat::addref() +{ if( hdr ) CV_XADD(&hdr->refcount, 1); } + +inline void SparseMat::release() +{ + if( hdr && CV_XADD(&hdr->refcount, -1) == 1 ) + delete hdr; + hdr = 0; +} + +inline size_t SparseMat::elemSize() const +{ return CV_ELEM_SIZE(flags); } + +inline size_t SparseMat::elemSize1() const +{ return CV_ELEM_SIZE1(flags); } + +inline int SparseMat::type() const +{ return CV_MAT_TYPE(flags); } + +inline int SparseMat::depth() const +{ return CV_MAT_DEPTH(flags); } + +inline int SparseMat::channels() const +{ return CV_MAT_CN(flags); } + +inline const int* SparseMat::size() const +{ + return hdr ? hdr->size : 0; +} + +inline int SparseMat::size(int i) const +{ + if( hdr ) + { + CV_DbgAssert((unsigned)i < (unsigned)hdr->dims); + return hdr->size[i]; + } + return 0; +} + +inline int SparseMat::dims() const +{ + return hdr ? hdr->dims : 0; +} + +inline size_t SparseMat::nzcount() const +{ + return hdr ? hdr->nodeCount : 0; +} + +inline size_t SparseMat::hash(int i0) const +{ + return (size_t)i0; +} + +inline size_t SparseMat::hash(int i0, int i1) const +{ + return (size_t)(unsigned)i0*HASH_SCALE + (unsigned)i1; +} + +inline size_t SparseMat::hash(int i0, int i1, int i2) const +{ + return ((size_t)(unsigned)i0*HASH_SCALE + (unsigned)i1)*HASH_SCALE + (unsigned)i2; +} + +inline size_t SparseMat::hash(const int* idx) const +{ + size_t h = (unsigned)idx[0]; + if( !hdr ) + return 0; + int i, d = hdr->dims; + for( i = 1; i < d; i++ ) + h = h*HASH_SCALE + (unsigned)idx[i]; + return h; +} + +template inline _Tp& SparseMat::ref(int i0, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, true, hashval); } + +template inline _Tp& SparseMat::ref(int i0, int i1, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, true, hashval); } + +template inline _Tp& SparseMat::ref(int i0, int i1, int i2, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, i2, true, hashval); } + +template inline _Tp& SparseMat::ref(const int* idx, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(idx, true, hashval); } + +template inline _Tp SparseMat::value(int i0, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval); + return p ? *p : _Tp(); +} + +template inline _Tp SparseMat::value(int i0, int i1, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval); + return p ? 
*p : _Tp(); +} + +template inline _Tp SparseMat::value(int i0, int i1, int i2, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval); + return p ? *p : _Tp(); +} + +template inline _Tp SparseMat::value(const int* idx, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval); + return p ? *p : _Tp(); +} + +template inline const _Tp* SparseMat::find(int i0, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval); } + +template inline const _Tp* SparseMat::find(int i0, int i1, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval); } + +template inline const _Tp* SparseMat::find(int i0, int i1, int i2, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval); } + +template inline const _Tp* SparseMat::find(const int* idx, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval); } + +template inline _Tp& SparseMat::value(Node* n) +{ return *(_Tp*)((uchar*)n + hdr->valueOffset); } + +template inline const _Tp& SparseMat::value(const Node* n) const +{ return *(const _Tp*)((const uchar*)n + hdr->valueOffset); } + +inline SparseMat::Node* SparseMat::node(size_t nidx) +{ return (Node*)(void*)&hdr->pool[nidx]; } + +inline const SparseMat::Node* SparseMat::node(size_t nidx) const +{ return (const Node*)(void*)&hdr->pool[nidx]; } + +inline SparseMatIterator SparseMat::begin() +{ return SparseMatIterator(this); } + +inline SparseMatConstIterator SparseMat::begin() const +{ return SparseMatConstIterator(this); } + +inline SparseMatIterator SparseMat::end() +{ SparseMatIterator it(this); it.seekEnd(); return it; } + +inline SparseMatConstIterator SparseMat::end() const +{ SparseMatConstIterator it(this); it.seekEnd(); return it; } + +template inline SparseMatIterator_<_Tp> SparseMat::begin() +{ return SparseMatIterator_<_Tp>(this); } + +template inline SparseMatConstIterator_<_Tp> SparseMat::begin() const +{ return SparseMatConstIterator_<_Tp>(this); } + +template inline SparseMatIterator_<_Tp> SparseMat::end() +{ SparseMatIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline SparseMatConstIterator_<_Tp> SparseMat::end() const +{ SparseMatConstIterator_<_Tp> it(this); it.seekEnd(); return it; } + + +inline SparseMatConstIterator::SparseMatConstIterator() +: m(0), hashidx(0), ptr(0) +{ +} + +inline SparseMatConstIterator::SparseMatConstIterator(const SparseMatConstIterator& it) +: m(it.m), hashidx(it.hashidx), ptr(it.ptr) +{ +} + +static inline bool operator == (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2) +{ return it1.m == it2.m && it1.ptr == it2.ptr; } + +static inline bool operator != (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2) +{ return !(it1 == it2); } + + +inline SparseMatConstIterator& SparseMatConstIterator::operator = (const SparseMatConstIterator& it) +{ + if( this != &it ) + { + m = it.m; + hashidx = it.hashidx; + ptr = it.ptr; + } + return *this; +} + +template inline const _Tp& SparseMatConstIterator::value() const +{ return *(_Tp*)ptr; } + +inline const SparseMat::Node* SparseMatConstIterator::node() const +{ + return ptr && m && m->hdr ? 
+ (const SparseMat::Node*)(void*)(ptr - m->hdr->valueOffset) : 0; +} + +inline SparseMatConstIterator SparseMatConstIterator::operator ++(int) +{ + SparseMatConstIterator it = *this; + ++*this; + return it; +} + + +inline void SparseMatConstIterator::seekEnd() +{ + if( m && m->hdr ) + { + hashidx = m->hdr->hashtab.size(); + ptr = 0; + } +} + +inline SparseMatIterator::SparseMatIterator() +{} + +inline SparseMatIterator::SparseMatIterator(SparseMat* _m) +: SparseMatConstIterator(_m) +{} + +inline SparseMatIterator::SparseMatIterator(const SparseMatIterator& it) +: SparseMatConstIterator(it) +{ +} + +inline SparseMatIterator& SparseMatIterator::operator = (const SparseMatIterator& it) +{ + (SparseMatConstIterator&)*this = it; + return *this; +} + +template inline _Tp& SparseMatIterator::value() const +{ return *(_Tp*)ptr; } + +inline SparseMat::Node* SparseMatIterator::node() const +{ + return (SparseMat::Node*)SparseMatConstIterator::node(); +} + +inline SparseMatIterator& SparseMatIterator::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +inline SparseMatIterator SparseMatIterator::operator ++(int) +{ + SparseMatIterator it = *this; + ++*this; + return it; +} + + +template inline SparseMat_<_Tp>::SparseMat_() +{ flags = MAGIC_VAL | DataType<_Tp>::type; } + +template inline SparseMat_<_Tp>::SparseMat_(int _dims, const int* _sizes) +: SparseMat(_dims, _sizes, DataType<_Tp>::type) +{} + +template inline SparseMat_<_Tp>::SparseMat_(const SparseMat& m) +{ + if( m.type() == DataType<_Tp>::type ) + *this = (const SparseMat_<_Tp>&)m; + else + m.convertTo(*this, DataType<_Tp>::type); +} + +template inline SparseMat_<_Tp>::SparseMat_(const SparseMat_<_Tp>& m) +{ + this->flags = m.flags; + this->hdr = m.hdr; + if( this->hdr ) + CV_XADD(&this->hdr->refcount, 1); +} + +template inline SparseMat_<_Tp>::SparseMat_(const Mat& m) +{ + SparseMat sm(m); + *this = sm; +} + +template inline SparseMat_<_Tp>::SparseMat_(const CvSparseMat* m) +{ + SparseMat sm(m); + *this = sm; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const SparseMat_<_Tp>& m) +{ + if( this != &m ) + { + if( m.hdr ) CV_XADD(&m.hdr->refcount, 1); + release(); + flags = m.flags; + hdr = m.hdr; + } + return *this; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const SparseMat& m) +{ + if( m.type() == DataType<_Tp>::type ) + return (*this = (const SparseMat_<_Tp>&)m); + m.convertTo(*this, DataType<_Tp>::type); + return *this; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const Mat& m) +{ return (*this = SparseMat(m)); } + +template inline SparseMat_<_Tp> +SparseMat_<_Tp>::clone() const +{ + SparseMat_<_Tp> m; + this->copyTo(m); + return m; +} + +template inline void +SparseMat_<_Tp>::create(int _dims, const int* _sizes) +{ + SparseMat::create(_dims, _sizes, DataType<_Tp>::type); +} + +template inline +SparseMat_<_Tp>::operator CvSparseMat*() const +{ + return SparseMat::operator CvSparseMat*(); +} + +template inline int SparseMat_<_Tp>::type() const +{ return DataType<_Tp>::type; } + +template inline int SparseMat_<_Tp>::depth() const +{ return DataType<_Tp>::depth; } + +template inline int SparseMat_<_Tp>::channels() const +{ return DataType<_Tp>::channels; } + +template inline _Tp& +SparseMat_<_Tp>::ref(int i0, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, hashval); } + +template inline _Tp& 
+SparseMat_<_Tp>::ref(int i0, int i1, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, i1, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, int i1, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, i1, hashval); } + +template inline _Tp& +SparseMat_<_Tp>::ref(int i0, int i1, int i2, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, i1, i2, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, int i1, int i2, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, i1, i2, hashval); } + +template inline _Tp& +SparseMat_<_Tp>::ref(const int* idx, size_t* hashval) +{ return SparseMat::ref<_Tp>(idx, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(const int* idx, size_t* hashval) const +{ return SparseMat::value<_Tp>(idx, hashval); } + +template inline SparseMatIterator_<_Tp> SparseMat_<_Tp>::begin() +{ return SparseMatIterator_<_Tp>(this); } + +template inline SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::begin() const +{ return SparseMatConstIterator_<_Tp>(this); } + +template inline SparseMatIterator_<_Tp> SparseMat_<_Tp>::end() +{ SparseMatIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::end() const +{ SparseMatConstIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_() +{} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMat_<_Tp>* _m) +: SparseMatConstIterator(_m) +{} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMat* _m) +: SparseMatConstIterator(_m) +{ + CV_Assert( _m->type() == DataType<_Tp>::type ); +} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMatConstIterator_<_Tp>& it) +: SparseMatConstIterator(it) +{} + +template inline SparseMatConstIterator_<_Tp>& +SparseMatConstIterator_<_Tp>::operator = (const SparseMatConstIterator_<_Tp>& it) +{ return reinterpret_cast&> + (*reinterpret_cast(this) = + reinterpret_cast(it)); } + +template inline const _Tp& +SparseMatConstIterator_<_Tp>::operator *() const +{ return *(const _Tp*)this->ptr; } + +template inline SparseMatConstIterator_<_Tp>& +SparseMatConstIterator_<_Tp>::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +template inline SparseMatConstIterator_<_Tp> +SparseMatConstIterator_<_Tp>::operator ++(int) +{ + SparseMatConstIterator it = *this; + SparseMatConstIterator::operator ++(); + return it; +} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_() +{} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_(SparseMat_<_Tp>* _m) +: SparseMatConstIterator_<_Tp>(_m) +{} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_(SparseMat* _m) +: SparseMatConstIterator_<_Tp>(_m) +{} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_(const SparseMatIterator_<_Tp>& it) +: SparseMatConstIterator_<_Tp>(it) +{} + +template inline SparseMatIterator_<_Tp>& +SparseMatIterator_<_Tp>::operator = (const SparseMatIterator_<_Tp>& it) +{ return reinterpret_cast&> + (*reinterpret_cast(this) = + reinterpret_cast(it)); } + +template inline _Tp& +SparseMatIterator_<_Tp>::operator *() const +{ return *(_Tp*)this->ptr; } + +template inline SparseMatIterator_<_Tp>& +SparseMatIterator_<_Tp>::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +template inline SparseMatIterator_<_Tp> +SparseMatIterator_<_Tp>::operator ++(int) +{ + 
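// --- Editor's note: illustrative sketch, not part of the vendored OpenCV header.
// SparseMat/SparseMat_ store only non-zero elements in a hash table: ref() inserts
// an element if it is missing, value()/operator() only look it up, and the sparse
// iterators visit stored nodes only; sparse_example is a hypothetical name.
#include <opencv2/core/core.hpp>

static void sparse_example()
{
    const int sizes[] = { 1000, 1000 };
    cv::SparseMat_<float> s(2, sizes);
    s.ref(3, 7) = 2.5f;          // creates the (3,7) element on first access
    float v = s(3, 7);           // read access; absent elements read as 0
    for( cv::SparseMatIterator_<float> it = s.begin(); it != s.end(); ++it )
        v += *it;                // iterate over stored (non-zero) entries only
    (void)v;
}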
SparseMatIterator it = *this; + SparseMatConstIterator::operator ++(); + return it; +} + +} + +#endif +#endif diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/opengl_interop.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/opengl_interop.hpp new file mode 100644 index 0000000..7ecaa8e --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/opengl_interop.hpp @@ -0,0 +1,284 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_OPENGL_INTEROP_HPP__ +#define __OPENCV_OPENGL_INTEROP_HPP__ + +#ifdef __cplusplus + +#include "opencv2/core/core.hpp" +#include "opencv2/core/opengl_interop_deprecated.hpp" + +namespace cv { namespace ogl { + +/////////////////// OpenGL Objects /////////////////// + +//! Smart pointer for OpenGL buffer memory with reference counting. +class CV_EXPORTS Buffer +{ +public: + enum Target + { + ARRAY_BUFFER = 0x8892, //!< The buffer will be used as a source for vertex data + ELEMENT_ARRAY_BUFFER = 0x8893, //!< The buffer will be used for indices (in glDrawElements, for example) + PIXEL_PACK_BUFFER = 0x88EB, //!< The buffer will be used for reading from OpenGL textures + PIXEL_UNPACK_BUFFER = 0x88EC //!< The buffer will be used for writing to OpenGL textures + }; + + enum Access + { + READ_ONLY = 0x88B8, + WRITE_ONLY = 0x88B9, + READ_WRITE = 0x88BA + }; + + //! 
create empty buffer + Buffer(); + + //! create buffer from existed buffer id + Buffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease = false); + Buffer(Size asize, int atype, unsigned int abufId, bool autoRelease = false); + + //! create buffer + Buffer(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false); + Buffer(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false); + + //! copy from host/device memory + explicit Buffer(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false); + + //! create buffer + void create(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false); + void create(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false) { create(asize.height, asize.width, atype, target, autoRelease); } + + //! release memory and delete buffer object + void release(); + + //! set auto release mode (if true, release will be called in object's destructor) + void setAutoRelease(bool flag); + + //! copy from host/device memory + void copyFrom(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false); + + //! copy to host/device memory + void copyTo(OutputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false) const; + + //! create copy of current buffer + Buffer clone(Target target = ARRAY_BUFFER, bool autoRelease = false) const; + + //! bind buffer for specified target + void bind(Target target) const; + + //! unbind any buffers from specified target + static void unbind(Target target); + + //! map to host memory + Mat mapHost(Access access); + void unmapHost(); + + //! map to device memory + gpu::GpuMat mapDevice(); + void unmapDevice(); + + int rows() const { return rows_; } + int cols() const { return cols_; } + Size size() const { return Size(cols_, rows_); } + bool empty() const { return rows_ == 0 || cols_ == 0; } + + int type() const { return type_; } + int depth() const { return CV_MAT_DEPTH(type_); } + int channels() const { return CV_MAT_CN(type_); } + int elemSize() const { return CV_ELEM_SIZE(type_); } + int elemSize1() const { return CV_ELEM_SIZE1(type_); } + + unsigned int bufId() const; + + class Impl; + +private: + Ptr impl_; + int rows_; + int cols_; + int type_; +}; + +//! Smart pointer for OpenGL 2D texture memory with reference counting. +class CV_EXPORTS Texture2D +{ +public: + enum Format + { + NONE = 0, + DEPTH_COMPONENT = 0x1902, //!< Depth + RGB = 0x1907, //!< Red, Green, Blue + RGBA = 0x1908 //!< Red, Green, Blue, Alpha + }; + + //! create empty texture + Texture2D(); + + //! create texture from existed texture id + Texture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease = false); + Texture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease = false); + + //! create texture + Texture2D(int arows, int acols, Format aformat, bool autoRelease = false); + Texture2D(Size asize, Format aformat, bool autoRelease = false); + + //! copy from host/device memory + explicit Texture2D(InputArray arr, bool autoRelease = false); + + //! create texture + void create(int arows, int acols, Format aformat, bool autoRelease = false); + void create(Size asize, Format aformat, bool autoRelease = false) { create(asize.height, asize.width, aformat, autoRelease); } + + //! release memory and delete texture object + void release(); + + //! set auto release mode (if true, release will be called in object's destructor) + void setAutoRelease(bool flag); + + //! 
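// --- Editor's note: illustrative sketch, not part of the vendored OpenCV header.
// ogl::Buffer wraps an OpenGL buffer object and can be filled directly from a
// cv::Mat; a current OpenGL context is assumed, and buffer_example is hypothetical.
#include <opencv2/core/core.hpp>
#include <opencv2/core/opengl_interop.hpp>

static void buffer_example()
{
    cv::Mat vertices(100, 3, CV_32F);                               // 100 xyz vertices
    cv::randu(vertices, cv::Scalar::all(-1), cv::Scalar::all(1));
    cv::ogl::Buffer vbo(vertices, cv::ogl::Buffer::ARRAY_BUFFER);   // upload to GL
    cv::Mat mapped = vbo.mapHost(cv::ogl::Buffer::READ_WRITE);      // map back to host memory
    (void)mapped;
    vbo.unmapHost();
    vbo.release();
}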
copy from host/device memory + void copyFrom(InputArray arr, bool autoRelease = false); + + //! copy to host/device memory + void copyTo(OutputArray arr, int ddepth = CV_32F, bool autoRelease = false) const; + + //! bind texture to current active texture unit for GL_TEXTURE_2D target + void bind() const; + + int rows() const { return rows_; } + int cols() const { return cols_; } + Size size() const { return Size(cols_, rows_); } + bool empty() const { return rows_ == 0 || cols_ == 0; } + + Format format() const { return format_; } + + unsigned int texId() const; + + class Impl; + +private: + Ptr impl_; + int rows_; + int cols_; + Format format_; +}; + +//! OpenGL Arrays +class CV_EXPORTS Arrays +{ +public: + Arrays(); + + void setVertexArray(InputArray vertex); + void resetVertexArray(); + + void setColorArray(InputArray color); + void resetColorArray(); + + void setNormalArray(InputArray normal); + void resetNormalArray(); + + void setTexCoordArray(InputArray texCoord); + void resetTexCoordArray(); + + void release(); + + void setAutoRelease(bool flag); + + void bind() const; + + int size() const { return size_; } + bool empty() const { return size_ == 0; } + +private: + int size_; + Buffer vertex_; + Buffer color_; + Buffer normal_; + Buffer texCoord_; +}; + +/////////////////// Render Functions /////////////////// + +//! render texture rectangle in window +CV_EXPORTS void render(const Texture2D& tex, + Rect_ wndRect = Rect_(0.0, 0.0, 1.0, 1.0), + Rect_ texRect = Rect_(0.0, 0.0, 1.0, 1.0)); + +//! render mode +enum { + POINTS = 0x0000, + LINES = 0x0001, + LINE_LOOP = 0x0002, + LINE_STRIP = 0x0003, + TRIANGLES = 0x0004, + TRIANGLE_STRIP = 0x0005, + TRIANGLE_FAN = 0x0006, + QUADS = 0x0007, + QUAD_STRIP = 0x0008, + POLYGON = 0x0009 +}; + +//! render OpenGL arrays +CV_EXPORTS void render(const Arrays& arr, int mode = POINTS, Scalar color = Scalar::all(255)); +CV_EXPORTS void render(const Arrays& arr, InputArray indices, int mode = POINTS, Scalar color = Scalar::all(255)); + +}} // namespace cv::gl + +namespace cv { namespace gpu { + +//! set a CUDA device to use OpenGL interoperability +CV_EXPORTS void setGlDevice(int device = 0); + +}} + +namespace cv { + +template <> CV_EXPORTS void Ptr::delete_obj(); +template <> CV_EXPORTS void Ptr::delete_obj(); + +} + +#endif // __cplusplus + +#endif // __OPENCV_OPENGL_INTEROP_HPP__ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/opengl_interop_deprecated.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/opengl_interop_deprecated.hpp new file mode 100644 index 0000000..5bcc5ad --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/opengl_interop_deprecated.hpp @@ -0,0 +1,330 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_OPENGL_INTEROP_DEPRECATED_HPP__ +#define __OPENCV_OPENGL_INTEROP_DEPRECATED_HPP__ + +#ifdef __cplusplus + +#include "opencv2/core/core.hpp" + +namespace cv +{ +//! Smart pointer for OpenGL buffer memory with reference counting. +class CV_EXPORTS GlBuffer +{ +public: + enum Usage + { + ARRAY_BUFFER = 0x8892, // buffer will use for OpenGL arrays (vertices, colors, normals, etc) + TEXTURE_BUFFER = 0x88EC // buffer will ise for OpenGL textures + }; + + //! create empty buffer + explicit GlBuffer(Usage usage); + + //! create buffer + GlBuffer(int rows, int cols, int type, Usage usage); + GlBuffer(Size size, int type, Usage usage); + + //! copy from host/device memory + GlBuffer(InputArray mat, Usage usage); + + void create(int rows, int cols, int type, Usage usage); + void create(Size size, int type, Usage usage); + void create(int rows, int cols, int type); + void create(Size size, int type); + + void release(); + + //! copy from host/device memory + void copyFrom(InputArray mat); + + void bind() const; + void unbind() const; + + //! map to host memory + Mat mapHost(); + void unmapHost(); + + //! map to device memory + gpu::GpuMat mapDevice(); + void unmapDevice(); + + inline int rows() const { return rows_; } + inline int cols() const { return cols_; } + inline Size size() const { return Size(cols_, rows_); } + inline bool empty() const { return rows_ == 0 || cols_ == 0; } + + inline int type() const { return type_; } + inline int depth() const { return CV_MAT_DEPTH(type_); } + inline int channels() const { return CV_MAT_CN(type_); } + inline int elemSize() const { return CV_ELEM_SIZE(type_); } + inline int elemSize1() const { return CV_ELEM_SIZE1(type_); } + + inline Usage usage() const { return usage_; } + + class Impl; +private: + int rows_; + int cols_; + int type_; + Usage usage_; + + Ptr impl_; +}; + +template <> CV_EXPORTS void Ptr::delete_obj(); + +//! Smart pointer for OpenGL 2d texture memory with reference counting. +class CV_EXPORTS GlTexture +{ +public: + //! 
create empty texture + GlTexture(); + + //! create texture + GlTexture(int rows, int cols, int type); + GlTexture(Size size, int type); + + //! copy from host/device memory + explicit GlTexture(InputArray mat, bool bgra = true); + + void create(int rows, int cols, int type); + void create(Size size, int type); + void release(); + + //! copy from host/device memory + void copyFrom(InputArray mat, bool bgra = true); + + void bind() const; + void unbind() const; + + inline int rows() const { return rows_; } + inline int cols() const { return cols_; } + inline Size size() const { return Size(cols_, rows_); } + inline bool empty() const { return rows_ == 0 || cols_ == 0; } + + inline int type() const { return type_; } + inline int depth() const { return CV_MAT_DEPTH(type_); } + inline int channels() const { return CV_MAT_CN(type_); } + inline int elemSize() const { return CV_ELEM_SIZE(type_); } + inline int elemSize1() const { return CV_ELEM_SIZE1(type_); } + + class Impl; +private: + int rows_; + int cols_; + int type_; + + Ptr impl_; + GlBuffer buf_; +}; + +template <> CV_EXPORTS void Ptr::delete_obj(); + +//! OpenGL Arrays +class CV_EXPORTS GlArrays +{ +public: + inline GlArrays() + : vertex_(GlBuffer::ARRAY_BUFFER), color_(GlBuffer::ARRAY_BUFFER), bgra_(true), normal_(GlBuffer::ARRAY_BUFFER), texCoord_(GlBuffer::ARRAY_BUFFER) + { + } + + void setVertexArray(InputArray vertex); + inline void resetVertexArray() { vertex_.release(); } + + void setColorArray(InputArray color, bool bgra = true); + inline void resetColorArray() { color_.release(); } + + void setNormalArray(InputArray normal); + inline void resetNormalArray() { normal_.release(); } + + void setTexCoordArray(InputArray texCoord); + inline void resetTexCoordArray() { texCoord_.release(); } + + void bind() const; + void unbind() const; + + inline int rows() const { return vertex_.rows(); } + inline int cols() const { return vertex_.cols(); } + inline Size size() const { return vertex_.size(); } + inline bool empty() const { return vertex_.empty(); } + +private: + GlBuffer vertex_; + GlBuffer color_; + bool bgra_; + GlBuffer normal_; + GlBuffer texCoord_; +}; + +//! OpenGL Font +class CV_EXPORTS GlFont +{ +public: + enum Weight + { + WEIGHT_LIGHT = 300, + WEIGHT_NORMAL = 400, + WEIGHT_SEMIBOLD = 600, + WEIGHT_BOLD = 700, + WEIGHT_BLACK = 900 + }; + + enum Style + { + STYLE_NORMAL = 0, + STYLE_ITALIC = 1, + STYLE_UNDERLINE = 2 + }; + + static Ptr get(const std::string& family, int height = 12, Weight weight = WEIGHT_NORMAL, Style style = STYLE_NORMAL); + + void draw(const char* str, int len) const; + + inline const std::string& family() const { return family_; } + inline int height() const { return height_; } + inline Weight weight() const { return weight_; } + inline Style style() const { return style_; } + +private: + GlFont(const std::string& family, int height, Weight weight, Style style); + + std::string family_; + int height_; + Weight weight_; + Style style_; + + unsigned int base_; + + GlFont(const GlFont&); + GlFont& operator =(const GlFont&); +}; + +//! render functions + +//! render texture rectangle in window +CV_EXPORTS void render(const GlTexture& tex, + Rect_ wndRect = Rect_(0.0, 0.0, 1.0, 1.0), + Rect_ texRect = Rect_(0.0, 0.0, 1.0, 1.0)); + +//! render mode +namespace RenderMode { + enum { + POINTS = 0x0000, + LINES = 0x0001, + LINE_LOOP = 0x0002, + LINE_STRIP = 0x0003, + TRIANGLES = 0x0004, + TRIANGLE_STRIP = 0x0005, + TRIANGLE_FAN = 0x0006, + QUADS = 0x0007, + QUAD_STRIP = 0x0008, + POLYGON = 0x0009 + }; +} + +//! 
render OpenGL arrays +CV_EXPORTS void render(const GlArrays& arr, int mode = RenderMode::POINTS, Scalar color = Scalar::all(255)); + +CV_EXPORTS void render(const std::string& str, const Ptr& font, Scalar color, Point2d pos); + +//! OpenGL camera +class CV_EXPORTS GlCamera +{ +public: + GlCamera(); + + void lookAt(Point3d eye, Point3d center, Point3d up); + void setCameraPos(Point3d pos, double yaw, double pitch, double roll); + + void setScale(Point3d scale); + + void setProjectionMatrix(const Mat& projectionMatrix, bool transpose = true); + void setPerspectiveProjection(double fov, double aspect, double zNear, double zFar); + void setOrthoProjection(double left, double right, double bottom, double top, double zNear, double zFar); + + void setupProjectionMatrix() const; + void setupModelViewMatrix() const; + +private: + Point3d eye_; + Point3d center_; + Point3d up_; + + Point3d pos_; + double yaw_; + double pitch_; + double roll_; + + bool useLookAtParams_; + + Point3d scale_; + + Mat projectionMatrix_; + + double fov_; + double aspect_; + + double left_; + double right_; + double bottom_; + double top_; + + double zNear_; + double zFar_; + + bool perspectiveProjection_; +}; + +inline void GlBuffer::create(Size _size, int _type, Usage _usage) { create(_size.height, _size.width, _type, _usage); } +inline void GlBuffer::create(int _rows, int _cols, int _type) { create(_rows, _cols, _type, usage()); } +inline void GlBuffer::create(Size _size, int _type) { create(_size.height, _size.width, _type, usage()); } +inline void GlTexture::create(Size _size, int _type) { create(_size.height, _size.width, _type); } + +} // namespace cv + +#endif // __cplusplus + +#endif // __OPENCV_OPENGL_INTEROP_DEPRECATED_HPP__ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/operations.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/operations.hpp new file mode 100644 index 0000000..4ab7e35 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/operations.hpp @@ -0,0 +1,4046 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_OPERATIONS_HPP__ +#define __OPENCV_CORE_OPERATIONS_HPP__ + +#ifndef SKIP_INCLUDES + #include + #include +#endif // SKIP_INCLUDES + + +#ifdef __cplusplus + +/////// exchange-add operation for atomic operations on reference counters /////// +#if defined __INTEL_COMPILER && !(defined WIN32 || defined _WIN32) // atomic increment on the linux version of the Intel(tm) compiler + #define CV_XADD(addr,delta) _InterlockedExchangeAdd(const_cast(reinterpret_cast(addr)), delta) +#elif defined __GNUC__ + + #if defined __clang__ && __clang_major__ >= 3 && !defined __ANDROID__ && !defined __EMSCRIPTEN__ + #ifdef __ATOMIC_SEQ_CST + #define CV_XADD(addr, delta) __c11_atomic_fetch_add((_Atomic(int)*)(addr), (delta), __ATOMIC_SEQ_CST) + #else + #define CV_XADD(addr, delta) __atomic_fetch_add((_Atomic(int)*)(addr), (delta), 5) + #endif + #elif __GNUC__*10 + __GNUC_MINOR__ >= 42 + + #if !(defined WIN32 || defined _WIN32) && (defined __i486__ || defined __i586__ || \ + defined __i686__ || defined __MMX__ || defined __SSE__ || defined __ppc__) || \ + (defined __GNUC__ && defined _STLPORT_MAJOR) || \ + defined __EMSCRIPTEN__ + + #define CV_XADD __sync_fetch_and_add + #else + #include + #define CV_XADD __gnu_cxx::__exchange_and_add + #endif + + #else + #include + #if __GNUC__*10 + __GNUC_MINOR__ >= 34 + #define CV_XADD __gnu_cxx::__exchange_and_add + #else + #define CV_XADD __exchange_and_add + #endif + #endif + +#elif defined WIN32 || defined _WIN32 || defined WINCE + namespace cv { CV_EXPORTS int _interlockedExchangeAdd(int* addr, int delta); } + #define CV_XADD cv::_interlockedExchangeAdd + +#else + static inline int CV_XADD(int* addr, int delta) + { int tmp = *addr; *addr += delta; return tmp; } +#endif + +#include + +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable:4127) //conditional expression is constant +#endif + +namespace cv +{ + +using std::cos; +using std::sin; +using std::max; +using std::min; +using std::exp; +using std::log; +using std::pow; +using std::sqrt; + + +/////////////// saturate_cast (used in image & signal processing) /////////////////// + +template static inline _Tp saturate_cast(uchar v) { return _Tp(v); } +template static inline _Tp saturate_cast(schar v) { return _Tp(v); } +template static inline _Tp saturate_cast(ushort v) { return _Tp(v); } +template static inline _Tp saturate_cast(short v) { return _Tp(v); } +template static inline _Tp saturate_cast(unsigned v) { return _Tp(v); } +template static inline _Tp saturate_cast(int v) { return _Tp(v); } +template static inline _Tp saturate_cast(float v) { return _Tp(v); } +template static inline _Tp saturate_cast(double v) { return _Tp(v); } + +template<> inline uchar saturate_cast(schar v) +{ return 
(uchar)std::max((int)v, 0); }
+template<> inline uchar saturate_cast<uchar>(ushort v)
+{ return (uchar)std::min((unsigned)v, (unsigned)UCHAR_MAX); }
+template<> inline uchar saturate_cast<uchar>(int v)
+{ return (uchar)((unsigned)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); }
+template<> inline uchar saturate_cast<uchar>(short v)
+{ return saturate_cast<uchar>((int)v); }
+template<> inline uchar saturate_cast<uchar>(unsigned v)
+{ return (uchar)std::min(v, (unsigned)UCHAR_MAX); }
+template<> inline uchar saturate_cast<uchar>(float v)
+{ int iv = cvRound(v); return saturate_cast<uchar>(iv); }
+template<> inline uchar saturate_cast<uchar>(double v)
+{ int iv = cvRound(v); return saturate_cast<uchar>(iv); }
+
+template<> inline schar saturate_cast<schar>(uchar v)
+{ return (schar)std::min((int)v, SCHAR_MAX); }
+template<> inline schar saturate_cast<schar>(ushort v)
+{ return (schar)std::min((unsigned)v, (unsigned)SCHAR_MAX); }
+template<> inline schar saturate_cast<schar>(int v)
+{
+    return (schar)((unsigned)(v-SCHAR_MIN) <= (unsigned)UCHAR_MAX ?
+            v : v > 0 ? SCHAR_MAX : SCHAR_MIN);
+}
+template<> inline schar saturate_cast<schar>(short v)
+{ return saturate_cast<schar>((int)v); }
+template<> inline schar saturate_cast<schar>(unsigned v)
+{ return (schar)std::min(v, (unsigned)SCHAR_MAX); }
+
+template<> inline schar saturate_cast<schar>(float v)
+{ int iv = cvRound(v); return saturate_cast<schar>(iv); }
+template<> inline schar saturate_cast<schar>(double v)
+{ int iv = cvRound(v); return saturate_cast<schar>(iv); }
+
+template<> inline ushort saturate_cast<ushort>(schar v)
+{ return (ushort)std::max((int)v, 0); }
+template<> inline ushort saturate_cast<ushort>(short v)
+{ return (ushort)std::max((int)v, 0); }
+template<> inline ushort saturate_cast<ushort>(int v)
+{ return (ushort)((unsigned)v <= (unsigned)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); }
+template<> inline ushort saturate_cast<ushort>(unsigned v)
+{ return (ushort)std::min(v, (unsigned)USHRT_MAX); }
+template<> inline ushort saturate_cast<ushort>(float v)
+{ int iv = cvRound(v); return saturate_cast<ushort>(iv); }
+template<> inline ushort saturate_cast<ushort>(double v)
+{ int iv = cvRound(v); return saturate_cast<ushort>(iv); }
+
+template<> inline short saturate_cast<short>(ushort v)
+{ return (short)std::min((int)v, SHRT_MAX); }
+template<> inline short saturate_cast<short>(int v)
+{
+    return (short)((unsigned)(v - SHRT_MIN) <= (unsigned)USHRT_MAX ?
+            v : v > 0 ? SHRT_MAX : SHRT_MIN);
+}
+template<> inline short saturate_cast<short>(unsigned v)
+{ return (short)std::min(v, (unsigned)SHRT_MAX); }
+template<> inline short saturate_cast<short>(float v)
+{ int iv = cvRound(v); return saturate_cast<short>(iv); }
+template<> inline short saturate_cast<short>(double v)
+{ int iv = cvRound(v); return saturate_cast<short>(iv); }
+
+template<> inline int saturate_cast<int>(float v) { return cvRound(v); }
+template<> inline int saturate_cast<int>(double v) { return cvRound(v); }
+
+// we intentionally do not clip negative numbers, to make -1 become 0xffffffff etc.
+template<> inline unsigned saturate_cast(float v){ return cvRound(v); } +template<> inline unsigned saturate_cast(double v) { return cvRound(v); } + +inline int fast_abs(uchar v) { return v; } +inline int fast_abs(schar v) { return std::abs((int)v); } +inline int fast_abs(ushort v) { return v; } +inline int fast_abs(short v) { return std::abs((int)v); } +inline int fast_abs(int v) { return std::abs(v); } +inline float fast_abs(float v) { return std::abs(v); } +inline double fast_abs(double v) { return std::abs(v); } + +//////////////////////////////// Matx ///////////////////////////////// + + +template inline Matx<_Tp, m, n>::Matx() +{ + for(int i = 0; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0) +{ + val[0] = v0; + for(int i = 1; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1) +{ + assert(channels >= 2); + val[0] = v0; val[1] = v1; + for(int i = 2; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2) +{ + assert(channels >= 3); + val[0] = v0; val[1] = v1; val[2] = v2; + for(int i = 3; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3) +{ + assert(channels >= 4); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + for(int i = 4; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4) +{ + assert(channels >= 5); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; val[4] = v4; + for(int i = 5; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5) +{ + assert(channels >= 6); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; + for(int i = 6; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6) +{ + assert(channels >= 7); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; + for(int i = 7; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7) +{ + assert(channels >= 8); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + for(int i = 8; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8) +{ + assert(channels >= 9); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; + for(int i = 9; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9) +{ + assert(channels >= 10); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; + for(int i = 10; i < channels; i++) val[i] = _Tp(0); +} + + +template +inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11) +{ + assert(channels == 12); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11; +} + +template +inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp 
v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11, + _Tp v12, _Tp v13, _Tp v14, _Tp v15) +{ + assert(channels == 16); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11; + val[12] = v12; val[13] = v13; val[14] = v14; val[15] = v15; +} + +template inline Matx<_Tp, m, n>::Matx(const _Tp* values) +{ + for( int i = 0; i < channels; i++ ) val[i] = values[i]; +} + +template inline Matx<_Tp, m, n> Matx<_Tp, m, n>::all(_Tp alpha) +{ + Matx<_Tp, m, n> M; + for( int i = 0; i < m*n; i++ ) M.val[i] = alpha; + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::zeros() +{ + return all(0); +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::ones() +{ + return all(1); +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::eye() +{ + Matx<_Tp,m,n> M; + for(int i = 0; i < MIN(m,n); i++) + M(i,i) = 1; + return M; +} + +template inline _Tp Matx<_Tp, m, n>::dot(const Matx<_Tp, m, n>& M) const +{ + _Tp s = 0; + for( int i = 0; i < m*n; i++ ) s += val[i]*M.val[i]; + return s; +} + + +template inline double Matx<_Tp, m, n>::ddot(const Matx<_Tp, m, n>& M) const +{ + double s = 0; + for( int i = 0; i < m*n; i++ ) s += (double)val[i]*M.val[i]; + return s; +} + + + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::diag(const typename Matx<_Tp,m,n>::diag_type& d) +{ + Matx<_Tp,m,n> M; + for(int i = 0; i < MIN(m,n); i++) + M(i,i) = d(i, 0); + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::randu(_Tp a, _Tp b) +{ + Matx<_Tp,m,n> M; + Mat matM(M, false); + cv::randu(matM, Scalar(a), Scalar(b)); + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::randn(_Tp a, _Tp b) +{ + Matx<_Tp,m,n> M; + Mat matM(M, false); + cv::randn(matM, Scalar(a), Scalar(b)); + return M; +} + +template template +inline Matx<_Tp, m, n>::operator Matx() const +{ + Matx M; + for( int i = 0; i < m*n; i++ ) M.val[i] = saturate_cast(val[i]); + return M; +} + + +template template inline +Matx<_Tp, m1, n1> Matx<_Tp, m, n>::reshape() const +{ + CV_DbgAssert(m1*n1 == m*n); + return (const Matx<_Tp, m1, n1>&)*this; +} + + +template +template inline +Matx<_Tp, m1, n1> Matx<_Tp, m, n>::get_minor(int i, int j) const +{ + CV_DbgAssert(0 <= i && i+m1 <= m && 0 <= j && j+n1 <= n); + Matx<_Tp, m1, n1> s; + for( int di = 0; di < m1; di++ ) + for( int dj = 0; dj < n1; dj++ ) + s(di, dj) = (*this)(i+di, j+dj); + return s; +} + + +template inline +Matx<_Tp, 1, n> Matx<_Tp, m, n>::row(int i) const +{ + CV_DbgAssert((unsigned)i < (unsigned)m); + return Matx<_Tp, 1, n>(&val[i*n]); +} + + +template inline +Matx<_Tp, m, 1> Matx<_Tp, m, n>::col(int j) const +{ + CV_DbgAssert((unsigned)j < (unsigned)n); + Matx<_Tp, m, 1> v; + for( int i = 0; i < m; i++ ) + v.val[i] = val[i*n + j]; + return v; +} + + +template inline +typename Matx<_Tp, m, n>::diag_type Matx<_Tp, m, n>::diag() const +{ + diag_type d; + for( int i = 0; i < MIN(m, n); i++ ) + d.val[i] = val[i*n + i]; + return d; +} + + +template inline +const _Tp& Matx<_Tp, m, n>::operator ()(int i, int j) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n ); + return this->val[i*n + j]; +} + + +template inline +_Tp& Matx<_Tp, m, n>::operator ()(int i, int j) +{ + CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n ); + return val[i*n + j]; +} + + +template inline +const _Tp& Matx<_Tp, m, n>::operator ()(int i) const +{ + CV_DbgAssert( (m == 1 || n == 1) && (unsigned)i < (unsigned)(m+n-1) ); + return val[i]; +} + + +template 
inline +_Tp& Matx<_Tp, m, n>::operator ()(int i) +{ + CV_DbgAssert( (m == 1 || n == 1) && (unsigned)i < (unsigned)(m+n-1) ); + return val[i]; +} + + +template static inline +Matx<_Tp1, m, n>& operator += (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]); + return a; +} + + +template static inline +Matx<_Tp1, m, n>& operator -= (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]); + return a; +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] + b.val[i]); +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] - b.val[i]); +} + + +template template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] * alpha); +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] * b.val[i]); +} + + +template template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp) +{ + for( int i = 0; i < m; i++ ) + for( int j = 0; j < n; j++ ) + { + _Tp s = 0; + for( int k = 0; k < l; k++ ) + s += a(i, k) * b(k, j); + val[i*n + j] = s; + } +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, n, m>& a, Matx_TOp) +{ + for( int i = 0; i < m; i++ ) + for( int j = 0; j < n; j++ ) + val[i*n + j] = a(j, i); +} + + +template static inline +Matx<_Tp, m, n> operator + (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_AddOp()); +} + + +template static inline +Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_SubOp()); +} + + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, int alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, float alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, double alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, int alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, float alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, double alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (int alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (float alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (double alpha, const Matx<_Tp, m, 
n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, -1, Matx_ScaleOp()); +} + + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_MatMulOp()); +} + + +template static inline +Vec<_Tp, m> operator * (const Matx<_Tp, m, n>& a, const Vec<_Tp, n>& b) +{ + Matx<_Tp, m, 1> c(a, b, Matx_MatMulOp()); + return reinterpret_cast&>(c); +} + + +template static inline +Point_<_Tp> operator * (const Matx<_Tp, 2, 2>& a, const Point_<_Tp>& b) +{ + Matx<_Tp, 2, 1> tmp = a*Vec<_Tp,2>(b.x, b.y); + return Point_<_Tp>(tmp.val[0], tmp.val[1]); +} + + +template static inline +Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point3_<_Tp>& b) +{ + Matx<_Tp, 3, 1> tmp = a*Vec<_Tp,3>(b.x, b.y, b.z); + return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]); +} + + +template static inline +Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point_<_Tp>& b) +{ + Matx<_Tp, 3, 1> tmp = a*Vec<_Tp,3>(b.x, b.y, 1); + return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]); +} + + +template static inline +Matx<_Tp, 4, 1> operator * (const Matx<_Tp, 4, 4>& a, const Point3_<_Tp>& b) +{ + return a*Matx<_Tp, 4, 1>(b.x, b.y, b.z, 1); +} + + +template static inline +Scalar operator * (const Matx<_Tp, 4, 4>& a, const Scalar& b) +{ + Matx c(Matx(a), b, Matx_MatMulOp()); + return static_cast(c); +} + + +static inline +Scalar operator * (const Matx& a, const Scalar& b) +{ + Matx c(a, b, Matx_MatMulOp()); + return static_cast(c); +} + + +template inline +Matx<_Tp, m, n> Matx<_Tp, m, n>::mul(const Matx<_Tp, m, n>& a) const +{ + return Matx<_Tp, m, n>(*this, a, Matx_MulOp()); +} + + +CV_EXPORTS int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n); +CV_EXPORTS int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n); +CV_EXPORTS bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n); +CV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n); + + +template struct Matx_DetOp +{ + double operator ()(const Matx<_Tp, m, m>& a) const + { + Matx<_Tp, m, m> temp = a; + double p = LU(temp.val, m*sizeof(_Tp), m, 0, 0, 0); + if( p == 0 ) + return p; + for( int i = 0; i < m; i++ ) + p *= temp(i, i); + return 1./p; + } +}; + + +template struct Matx_DetOp<_Tp, 1> +{ + double operator ()(const Matx<_Tp, 1, 1>& a) const + { + return a(0,0); + } +}; + + +template struct Matx_DetOp<_Tp, 2> +{ + double operator ()(const Matx<_Tp, 2, 2>& a) const + { + return a(0,0)*a(1,1) - a(0,1)*a(1,0); + } +}; + + +template struct Matx_DetOp<_Tp, 3> +{ + double operator ()(const Matx<_Tp, 3, 3>& a) const + { + return a(0,0)*(a(1,1)*a(2,2) - a(2,1)*a(1,2)) - + a(0,1)*(a(1,0)*a(2,2) - a(2,0)*a(1,2)) + + a(0,2)*(a(1,0)*a(2,1) - a(2,0)*a(1,1)); + } +}; + +template static inline +double determinant(const Matx<_Tp, m, m>& a) +{ + return Matx_DetOp<_Tp, m>()(a); +} + + +template static inline +double trace(const Matx<_Tp, m, n>& a) +{ + _Tp s = 0; + for( int i = 0; i < std::min(m, n); i++ ) + s += a(i,i); + return s; +} + + +template inline +Matx<_Tp, n, m> Matx<_Tp, m, n>::t() const +{ + return Matx<_Tp, n, m>(*this, Matx_TOp()); +} + + +template struct Matx_FastInvOp +{ + bool operator()(const Matx<_Tp, m, m>& a, Matx<_Tp, m, m>& b, int method) const + { + Matx<_Tp, m, m> temp = a; + + // assume that b is all 0's on input => make it a unity matrix + for( int i = 0; i 
< m; i++ ) + b(i, i) = (_Tp)1; + + if( method == DECOMP_CHOLESKY ) + return Cholesky(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m); + + return LU(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m) != 0; + } +}; + + +template struct Matx_FastInvOp<_Tp, 2> +{ + bool operator()(const Matx<_Tp, 2, 2>& a, Matx<_Tp, 2, 2>& b, int) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + b(1,1) = a(0,0)*d; + b(0,0) = a(1,1)*d; + b(0,1) = -a(0,1)*d; + b(1,0) = -a(1,0)*d; + return true; + } +}; + + +template struct Matx_FastInvOp<_Tp, 3> +{ + bool operator()(const Matx<_Tp, 3, 3>& a, Matx<_Tp, 3, 3>& b, int) const + { + _Tp d = (_Tp)determinant(a); + if( d == 0 ) + return false; + d = 1/d; + b(0,0) = (a(1,1) * a(2,2) - a(1,2) * a(2,1)) * d; + b(0,1) = (a(0,2) * a(2,1) - a(0,1) * a(2,2)) * d; + b(0,2) = (a(0,1) * a(1,2) - a(0,2) * a(1,1)) * d; + + b(1,0) = (a(1,2) * a(2,0) - a(1,0) * a(2,2)) * d; + b(1,1) = (a(0,0) * a(2,2) - a(0,2) * a(2,0)) * d; + b(1,2) = (a(0,2) * a(1,0) - a(0,0) * a(1,2)) * d; + + b(2,0) = (a(1,0) * a(2,1) - a(1,1) * a(2,0)) * d; + b(2,1) = (a(0,1) * a(2,0) - a(0,0) * a(2,1)) * d; + b(2,2) = (a(0,0) * a(1,1) - a(0,1) * a(1,0)) * d; + return true; + } +}; + + +template inline +Matx<_Tp, n, m> Matx<_Tp, m, n>::inv(int method) const +{ + Matx<_Tp, n, m> b; + bool ok; + if( method == DECOMP_LU || method == DECOMP_CHOLESKY ) + ok = Matx_FastInvOp<_Tp, m>()(*this, b, method); + else + { + Mat A(*this, false), B(b, false); + ok = (invert(A, B, method) != 0); + } + return ok ? b : Matx<_Tp, n, m>::zeros(); +} + + +template struct Matx_FastSolveOp +{ + bool operator()(const Matx<_Tp, m, m>& a, const Matx<_Tp, m, n>& b, + Matx<_Tp, m, n>& x, int method) const + { + Matx<_Tp, m, m> temp = a; + x = b; + if( method == DECOMP_CHOLESKY ) + return Cholesky(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n); + + return LU(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n) != 0; + } +}; + + +template struct Matx_FastSolveOp<_Tp, 2, 1> +{ + bool operator()(const Matx<_Tp, 2, 2>& a, const Matx<_Tp, 2, 1>& b, + Matx<_Tp, 2, 1>& x, int) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + x(0) = (b(0)*a(1,1) - b(1)*a(0,1))*d; + x(1) = (b(1)*a(0,0) - b(0)*a(1,0))*d; + return true; + } +}; + + +template struct Matx_FastSolveOp<_Tp, 3, 1> +{ + bool operator()(const Matx<_Tp, 3, 3>& a, const Matx<_Tp, 3, 1>& b, + Matx<_Tp, 3, 1>& x, int) const + { + _Tp d = (_Tp)determinant(a); + if( d == 0 ) + return false; + d = 1/d; + x(0) = d*(b(0)*(a(1,1)*a(2,2) - a(1,2)*a(2,1)) - + a(0,1)*(b(1)*a(2,2) - a(1,2)*b(2)) + + a(0,2)*(b(1)*a(2,1) - a(1,1)*b(2))); + + x(1) = d*(a(0,0)*(b(1)*a(2,2) - a(1,2)*b(2)) - + b(0)*(a(1,0)*a(2,2) - a(1,2)*a(2,0)) + + a(0,2)*(a(1,0)*b(2) - b(1)*a(2,0))); + + x(2) = d*(a(0,0)*(a(1,1)*b(2) - b(1)*a(2,1)) - + a(0,1)*(a(1,0)*b(2) - b(1)*a(2,0)) + + b(0)*(a(1,0)*a(2,1) - a(1,1)*a(2,0))); + return true; + } +}; + + +template template inline +Matx<_Tp, n, l> Matx<_Tp, m, n>::solve(const Matx<_Tp, m, l>& rhs, int method) const +{ + Matx<_Tp, n, l> x; + bool ok; + if( method == DECOMP_LU || method == DECOMP_CHOLESKY ) + ok = Matx_FastSolveOp<_Tp, m, l>()(*this, rhs, x, method); + else + { + Mat A(*this, false), B(rhs, false), X(x, false); + ok = cv::solve(A, B, X, method); + } + + return ok ? 
x : Matx<_Tp, n, l>::zeros(); +} + +template inline +Vec<_Tp, n> Matx<_Tp, m, n>::solve(const Vec<_Tp, m>& rhs, int method) const +{ + Matx<_Tp, n, 1> x = solve(reinterpret_cast&>(rhs), method); + return reinterpret_cast&>(x); +} + +template static inline +_AccTp normL2Sqr(const _Tp* a, int n) +{ + _AccTp s = 0; + int i=0; + #if CV_ENABLE_UNROLLED + for( ; i <= n - 4; i += 4 ) + { + _AccTp v0 = a[i], v1 = a[i+1], v2 = a[i+2], v3 = a[i+3]; + s += v0*v0 + v1*v1 + v2*v2 + v3*v3; + } +#endif + for( ; i < n; i++ ) + { + _AccTp v = a[i]; + s += v*v; + } + return s; +} + + +template static inline +_AccTp normL1(const _Tp* a, int n) +{ + _AccTp s = 0; + int i = 0; +#if CV_ENABLE_UNROLLED + for(; i <= n - 4; i += 4 ) + { + s += (_AccTp)fast_abs(a[i]) + (_AccTp)fast_abs(a[i+1]) + + (_AccTp)fast_abs(a[i+2]) + (_AccTp)fast_abs(a[i+3]); + } +#endif + for( ; i < n; i++ ) + s += fast_abs(a[i]); + return s; +} + + +template static inline +_AccTp normInf(const _Tp* a, int n) +{ + _AccTp s = 0; + for( int i = 0; i < n; i++ ) + s = std::max(s, (_AccTp)fast_abs(a[i])); + return s; +} + + +template static inline +_AccTp normL2Sqr(const _Tp* a, const _Tp* b, int n) +{ + _AccTp s = 0; + int i= 0; +#if CV_ENABLE_UNROLLED + for(; i <= n - 4; i += 4 ) + { + _AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]); + s += v0*v0 + v1*v1 + v2*v2 + v3*v3; + } +#endif + for( ; i < n; i++ ) + { + _AccTp v = _AccTp(a[i] - b[i]); + s += v*v; + } + return s; +} + +CV_EXPORTS float normL2Sqr_(const float* a, const float* b, int n); +CV_EXPORTS float normL1_(const float* a, const float* b, int n); +CV_EXPORTS int normL1_(const uchar* a, const uchar* b, int n); +CV_EXPORTS int normHamming(const uchar* a, const uchar* b, int n); +CV_EXPORTS int normHamming(const uchar* a, const uchar* b, int n, int cellSize); + +template<> inline float normL2Sqr(const float* a, const float* b, int n) +{ + if( n >= 8 ) + return normL2Sqr_(a, b, n); + float s = 0; + for( int i = 0; i < n; i++ ) + { + float v = a[i] - b[i]; + s += v*v; + } + return s; +} + + +template static inline +_AccTp normL1(const _Tp* a, const _Tp* b, int n) +{ + _AccTp s = 0; + int i= 0; +#if CV_ENABLE_UNROLLED + for(; i <= n - 4; i += 4 ) + { + _AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]); + s += std::abs(v0) + std::abs(v1) + std::abs(v2) + std::abs(v3); + } +#endif + for( ; i < n; i++ ) + { + _AccTp v = _AccTp(a[i] - b[i]); + s += std::abs(v); + } + return s; +} + +template<> inline float normL1(const float* a, const float* b, int n) +{ + if( n >= 8 ) + return normL1_(a, b, n); + float s = 0; + for( int i = 0; i < n; i++ ) + { + float v = a[i] - b[i]; + s += std::abs(v); + } + return s; +} + +template<> inline int normL1(const uchar* a, const uchar* b, int n) +{ + return normL1_(a, b, n); +} + +template static inline +_AccTp normInf(const _Tp* a, const _Tp* b, int n) +{ + _AccTp s = 0; + for( int i = 0; i < n; i++ ) + { + _AccTp v0 = a[i] - b[i]; + s = std::max(s, std::abs(v0)); + } + return s; +} + + +template static inline +double norm(const Matx<_Tp, m, n>& M) +{ + return std::sqrt(normL2Sqr<_Tp, double>(M.val, m*n)); +} + + +template static inline +double norm(const Matx<_Tp, m, n>& M, int normType) +{ + return normType == NORM_INF ? (double)normInf<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n) : + normType == NORM_L1 ? 
(double)normL1<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n) : + std::sqrt((double)normL2Sqr<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n)); +} + + +template static inline +bool operator == (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + if( a.val[i] != b.val[i] ) return false; + return true; +} + +template static inline +bool operator != (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return !(a == b); +} + + +template static inline +MatxCommaInitializer<_Tp, m, n> operator << (const Matx<_Tp, m, n>& mtx, _T2 val) +{ + MatxCommaInitializer<_Tp, m, n> commaInitializer((Matx<_Tp, m, n>*)&mtx); + return (commaInitializer, val); +} + +template inline +MatxCommaInitializer<_Tp, m, n>::MatxCommaInitializer(Matx<_Tp, m, n>* _mtx) + : dst(_mtx), idx(0) +{} + +template template inline +MatxCommaInitializer<_Tp, m, n>& MatxCommaInitializer<_Tp, m, n>::operator , (_T2 value) +{ + CV_DbgAssert( idx < m*n ); + dst->val[idx++] = saturate_cast<_Tp>(value); + return *this; +} + +template inline +Matx<_Tp, m, n> MatxCommaInitializer<_Tp, m, n>::operator *() const +{ + CV_DbgAssert( idx == n*m ); + return *dst; +} + +/////////////////////////// short vector (Vec) ///////////////////////////// + +template inline Vec<_Tp, cn>::Vec() +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0) + : Matx<_Tp, cn, 1>(v0) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1) + : Matx<_Tp, cn, 1>(v0, v1) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2) + : Matx<_Tp, cn, 1>(v0, v1, v2) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9) +{} + +template inline Vec<_Tp, cn>::Vec(const _Tp* values) + : Matx<_Tp, cn, 1>(values) +{} + + +template inline Vec<_Tp, cn>::Vec(const Vec<_Tp, cn>& m) + : Matx<_Tp, cn, 1>(m.val) +{} + +template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp op) +: Matx<_Tp, cn, 1>(a, b, op) +{} + +template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp op) +: Matx<_Tp, cn, 1>(a, b, op) +{} + +template template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp op) +: Matx<_Tp, cn, 1>(a, alpha, op) +{} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::all(_Tp alpha) +{ + Vec v; + for( int i = 0; i < cn; i++ ) v.val[i] = alpha; + return v; +} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::mul(const Vec<_Tp, cn>& v) const +{ + Vec<_Tp, cn> w; + for( int i = 0; i < cn; i++ ) w.val[i] = saturate_cast<_Tp>(this->val[i]*v.val[i]); + return w; +} + +template 
Vec<_Tp, 2> conjugate(const Vec<_Tp, 2>& v) +{ + return Vec<_Tp, 2>(v[0], -v[1]); +} + +template Vec<_Tp, 4> conjugate(const Vec<_Tp, 4>& v) +{ + return Vec<_Tp, 4>(v[0], -v[1], -v[2], -v[3]); +} + +template<> inline Vec Vec::conj() const +{ + return conjugate(*this); +} + +template<> inline Vec Vec::conj() const +{ + return conjugate(*this); +} + +template<> inline Vec Vec::conj() const +{ + return conjugate(*this); +} + +template<> inline Vec Vec::conj() const +{ + return conjugate(*this); +} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::cross(const Vec<_Tp, cn>&) const +{ + CV_Error(CV_StsError, "for arbitrary-size vector there is no cross-product defined"); + return Vec<_Tp, cn>(); +} + +template template +inline Vec<_Tp, cn>::operator Vec() const +{ + Vec v; + for( int i = 0; i < cn; i++ ) v.val[i] = saturate_cast(this->val[i]); + return v; +} + +template inline Vec<_Tp, cn>::operator CvScalar() const +{ + CvScalar s = {{0,0,0,0}}; + int i; + for( i = 0; i < std::min(cn, 4); i++ ) s.val[i] = this->val[i]; + for( ; i < 4; i++ ) s.val[i] = 0; + return s; +} + +template inline const _Tp& Vec<_Tp, cn>::operator [](int i) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline _Tp& Vec<_Tp, cn>::operator [](int i) +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline const _Tp& Vec<_Tp, cn>::operator ()(int i) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline _Tp& Vec<_Tp, cn>::operator ()(int i) +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template static inline Vec<_Tp1, cn>& +operator += (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b) +{ + for( int i = 0; i < cn; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]); + return a; +} + +template static inline Vec<_Tp1, cn>& +operator -= (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b) +{ + for( int i = 0; i < cn; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]); + return a; +} + +template static inline Vec<_Tp, cn> +operator + (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b) +{ + return Vec<_Tp, cn>(a, b, Matx_AddOp()); +} + +template static inline Vec<_Tp, cn> +operator - (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b) +{ + return Vec<_Tp, cn>(a, b, Matx_SubOp()); +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, int alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, float alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, double alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, int alpha) +{ + double ialpha = 1./alpha; + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*ialpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, float alpha) +{ + float ialpha = 1.f/alpha; + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*ialpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, double alpha) +{ + double ialpha = 1./alpha; + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*ialpha); + return a; +} + +template static inline Vec<_Tp, cn> +operator * (const Vec<_Tp, cn>& a, int alpha) +{ + 
return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator * (int alpha, const Vec<_Tp, cn>& a)
+{
+    return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator * (const Vec<_Tp, cn>& a, float alpha)
+{
+    return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator * (float alpha, const Vec<_Tp, cn>& a)
+{
+    return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator * (const Vec<_Tp, cn>& a, double alpha)
+{
+    return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator * (double alpha, const Vec<_Tp, cn>& a)
+{
+    return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator / (const Vec<_Tp, cn>& a, int alpha)
+{
+    return Vec<_Tp, cn>(a, 1./alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator / (const Vec<_Tp, cn>& a, float alpha)
+{
+    return Vec<_Tp, cn>(a, 1.f/alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator / (const Vec<_Tp, cn>& a, double alpha)
+{
+    return Vec<_Tp, cn>(a, 1./alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator - (const Vec<_Tp, cn>& a)
+{
+    Vec<_Tp,cn> t;
+    for( int i = 0; i < cn; i++ ) t.val[i] = saturate_cast<_Tp>(-a.val[i]);
+    return t;
+}
+
+template<typename _Tp> inline Vec<_Tp, 4> operator * (const Vec<_Tp, 4>& v1, const Vec<_Tp, 4>& v2)
+{
+    return Vec<_Tp, 4>(saturate_cast<_Tp>(v1[0]*v2[0] - v1[1]*v2[1] - v1[2]*v2[2] - v1[3]*v2[3]),
+                       saturate_cast<_Tp>(v1[0]*v2[1] + v1[1]*v2[0] + v1[2]*v2[3] - v1[3]*v2[2]),
+                       saturate_cast<_Tp>(v1[0]*v2[2] - v1[1]*v2[3] + v1[2]*v2[0] + v1[3]*v2[1]),
+                       saturate_cast<_Tp>(v1[0]*v2[3] + v1[1]*v2[2] - v1[2]*v2[1] + v1[3]*v2[0]));
+}
+
+template<typename _Tp> inline Vec<_Tp, 4>& operator *= (Vec<_Tp, 4>& v1, const Vec<_Tp, 4>& v2)
+{
+    v1 = v1 * v2;
+    return v1;
+}
+
+template<> inline Vec<float, 3> Vec<float, 3>::cross(const Vec<float, 3>& v) const
+{
+    return Vec<float,3>(val[1]*v.val[2] - val[2]*v.val[1],
+                        val[2]*v.val[0] - val[0]*v.val[2],
+                        val[0]*v.val[1] - val[1]*v.val[0]);
+}
+
+template<> inline Vec<double, 3> Vec<double, 3>::cross(const Vec<double, 3>& v) const
+{
+    return Vec<double,3>(val[1]*v.val[2] - val[2]*v.val[1],
+                         val[2]*v.val[0] - val[0]*v.val[2],
+                         val[0]*v.val[1] - val[1]*v.val[0]);
+}
+
+template<typename _Tp, int cn> inline Vec<_Tp, cn> normalize(const Vec<_Tp, cn>& v)
+{
+    double nv = norm(v);
+    return v * (nv ?
1./nv : 0.); +} + +template static inline +VecCommaInitializer<_Tp, cn> operator << (const Vec<_Tp, cn>& vec, _T2 val) +{ + VecCommaInitializer<_Tp, cn> commaInitializer((Vec<_Tp, cn>*)&vec); + return (commaInitializer, val); +} + +template inline +VecCommaInitializer<_Tp, cn>::VecCommaInitializer(Vec<_Tp, cn>* _vec) + : MatxCommaInitializer<_Tp, cn, 1>(_vec) +{} + +template template inline +VecCommaInitializer<_Tp, cn>& VecCommaInitializer<_Tp, cn>::operator , (_T2 value) +{ + CV_DbgAssert( this->idx < cn ); + this->dst->val[this->idx++] = saturate_cast<_Tp>(value); + return *this; +} + +template inline +Vec<_Tp, cn> VecCommaInitializer<_Tp, cn>::operator *() const +{ + CV_DbgAssert( this->idx == cn ); + return *this->dst; +} + +//////////////////////////////// Complex ////////////////////////////// + +template inline Complex<_Tp>::Complex() : re(0), im(0) {} +template inline Complex<_Tp>::Complex( _Tp _re, _Tp _im ) : re(_re), im(_im) {} +template template inline Complex<_Tp>::operator Complex() const +{ return Complex(saturate_cast(re), saturate_cast(im)); } +template inline Complex<_Tp> Complex<_Tp>::conj() const +{ return Complex<_Tp>(re, -im); } + +template static inline +bool operator == (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return a.re == b.re && a.im == b.im; } + +template static inline +bool operator != (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return a.re != b.re || a.im != b.im; } + +template static inline +Complex<_Tp> operator + (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re + b.re, a.im + b.im ); } + +template static inline +Complex<_Tp>& operator += (Complex<_Tp>& a, const Complex<_Tp>& b) +{ a.re += b.re; a.im += b.im; return a; } + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re - b.re, a.im - b.im ); } + +template static inline +Complex<_Tp>& operator -= (Complex<_Tp>& a, const Complex<_Tp>& b) +{ a.re -= b.re; a.im -= b.im; return a; } + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a) +{ return Complex<_Tp>(-a.re, -a.im); } + +template static inline +Complex<_Tp> operator * (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re*b.re - a.im*b.im, a.re*b.im + a.im*b.re ); } + +template static inline +Complex<_Tp> operator * (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re*b, a.im*b ); } + +template static inline +Complex<_Tp> operator * (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( a.re*b, a.im*b ); } + +template static inline +Complex<_Tp> operator + (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re + b, a.im ); } + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re - b, a.im ); } + +template static inline +Complex<_Tp> operator + (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( a.re + b, a.im ); } + +template static inline +Complex<_Tp> operator - (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( b - a.re, -a.im ); } + +template static inline +Complex<_Tp>& operator += (Complex<_Tp>& a, _Tp b) +{ a.re += b; return a; } + +template static inline +Complex<_Tp>& operator -= (Complex<_Tp>& a, _Tp b) +{ a.re -= b; return a; } + +template static inline +Complex<_Tp>& operator *= (Complex<_Tp>& a, _Tp b) +{ a.re *= b; a.im *= b; return a; } + +template static inline +double abs(const Complex<_Tp>& a) +{ return std::sqrt( (double)a.re*a.re + (double)a.im*a.im); } + +template static inline +Complex<_Tp> operator 
/ (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ + double t = 1./((double)b.re*b.re + (double)b.im*b.im); + return Complex<_Tp>( (_Tp)((a.re*b.re + a.im*b.im)*t), + (_Tp)((-a.re*b.im + a.im*b.re)*t) ); +} + +template static inline +Complex<_Tp>& operator /= (Complex<_Tp>& a, const Complex<_Tp>& b) +{ + return (a = a / b); +} + +template static inline +Complex<_Tp> operator / (const Complex<_Tp>& a, _Tp b) +{ + _Tp t = (_Tp)1/b; + return Complex<_Tp>( a.re*t, a.im*t ); +} + +template static inline +Complex<_Tp> operator / (_Tp b, const Complex<_Tp>& a) +{ + return Complex<_Tp>(b)/a; +} + +template static inline +Complex<_Tp> operator /= (const Complex<_Tp>& a, _Tp b) +{ + _Tp t = (_Tp)1/b; + a.re *= t; a.im *= t; return a; +} + +//////////////////////////////// 2D Point //////////////////////////////// + +template inline Point_<_Tp>::Point_() : x(0), y(0) {} +template inline Point_<_Tp>::Point_(_Tp _x, _Tp _y) : x(_x), y(_y) {} +template inline Point_<_Tp>::Point_(const Point_& pt) : x(pt.x), y(pt.y) {} +template inline Point_<_Tp>::Point_(const CvPoint& pt) : x((_Tp)pt.x), y((_Tp)pt.y) {} +template inline Point_<_Tp>::Point_(const CvPoint2D32f& pt) + : x(saturate_cast<_Tp>(pt.x)), y(saturate_cast<_Tp>(pt.y)) {} +template inline Point_<_Tp>::Point_(const Size_<_Tp>& sz) : x(sz.width), y(sz.height) {} +template inline Point_<_Tp>::Point_(const Vec<_Tp,2>& v) : x(v[0]), y(v[1]) {} +template inline Point_<_Tp>& Point_<_Tp>::operator = (const Point_& pt) +{ x = pt.x; y = pt.y; return *this; } + +template template inline Point_<_Tp>::operator Point_<_Tp2>() const +{ return Point_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y)); } +template inline Point_<_Tp>::operator CvPoint() const +{ return cvPoint(saturate_cast(x), saturate_cast(y)); } +template inline Point_<_Tp>::operator CvPoint2D32f() const +{ return cvPoint2D32f((float)x, (float)y); } +template inline Point_<_Tp>::operator Vec<_Tp, 2>() const +{ return Vec<_Tp, 2>(x, y); } + +template inline _Tp Point_<_Tp>::dot(const Point_& pt) const +{ return saturate_cast<_Tp>(x*pt.x + y*pt.y); } +template inline double Point_<_Tp>::ddot(const Point_& pt) const +{ return (double)x*pt.x + (double)y*pt.y; } + +template inline double Point_<_Tp>::cross(const Point_& pt) const +{ return (double)x*pt.y - (double)y*pt.x; } + +template static inline Point_<_Tp>& +operator += (Point_<_Tp>& a, const Point_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x + b.x); + a.y = saturate_cast<_Tp>(a.y + b.y); + return a; +} + +template static inline Point_<_Tp>& +operator -= (Point_<_Tp>& a, const Point_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x - b.x); + a.y = saturate_cast<_Tp>(a.y - b.y); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, int b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, float b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, double b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline double norm(const Point_<_Tp>& pt) +{ return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y); } + +template static inline bool operator == (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return a.x == b.x && a.y == b.y; } + +template static inline bool operator != (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return a.x != b.x || a.y != b.y; } + 
+template static inline Point_<_Tp> operator + (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x + b.x), saturate_cast<_Tp>(a.y + b.y) ); } + +template static inline Point_<_Tp> operator - (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x - b.x), saturate_cast<_Tp>(a.y - b.y) ); } + +template static inline Point_<_Tp> operator - (const Point_<_Tp>& a) +{ return Point_<_Tp>( saturate_cast<_Tp>(-a.x), saturate_cast<_Tp>(-a.y) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, int b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (int a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, float b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (float a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, double b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (double a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +//////////////////////////////// 3D Point //////////////////////////////// + +template inline Point3_<_Tp>::Point3_() : x(0), y(0), z(0) {} +template inline Point3_<_Tp>::Point3_(_Tp _x, _Tp _y, _Tp _z) : x(_x), y(_y), z(_z) {} +template inline Point3_<_Tp>::Point3_(const Point3_& pt) : x(pt.x), y(pt.y), z(pt.z) {} +template inline Point3_<_Tp>::Point3_(const Point_<_Tp>& pt) : x(pt.x), y(pt.y), z(_Tp()) {} +template inline Point3_<_Tp>::Point3_(const CvPoint3D32f& pt) : + x(saturate_cast<_Tp>(pt.x)), y(saturate_cast<_Tp>(pt.y)), z(saturate_cast<_Tp>(pt.z)) {} +template inline Point3_<_Tp>::Point3_(const Vec<_Tp, 3>& v) : x(v[0]), y(v[1]), z(v[2]) {} + +template template inline Point3_<_Tp>::operator Point3_<_Tp2>() const +{ return Point3_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), saturate_cast<_Tp2>(z)); } + +template inline Point3_<_Tp>::operator CvPoint3D32f() const +{ return cvPoint3D32f((float)x, (float)y, (float)z); } + +template inline Point3_<_Tp>::operator Vec<_Tp, 3>() const +{ return Vec<_Tp, 3>(x, y, z); } + +template inline Point3_<_Tp>& Point3_<_Tp>::operator = (const Point3_& pt) +{ x = pt.x; y = pt.y; z = pt.z; return *this; } + +template inline _Tp Point3_<_Tp>::dot(const Point3_& pt) const +{ return saturate_cast<_Tp>(x*pt.x + y*pt.y + z*pt.z); } +template inline double Point3_<_Tp>::ddot(const Point3_& pt) const +{ return (double)x*pt.x + (double)y*pt.y + (double)z*pt.z; } + +template inline Point3_<_Tp> Point3_<_Tp>::cross(const Point3_<_Tp>& pt) const +{ + return Point3_<_Tp>(y*pt.z - z*pt.y, z*pt.x - x*pt.z, x*pt.y - y*pt.x); +} + +template static inline Point3_<_Tp>& +operator += (Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x + b.x); + a.y = saturate_cast<_Tp>(a.y + b.y); + a.z = saturate_cast<_Tp>(a.z + b.z); + return a; +} + +template static inline Point3_<_Tp>& +operator -= (Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x - b.x); + a.y = saturate_cast<_Tp>(a.y - b.y); + a.z = saturate_cast<_Tp>(a.z - b.z); + return a; +} + +template static 
inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, int b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z = saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, float b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z = saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, double b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z = saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline double norm(const Point3_<_Tp>& pt) +{ return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y + (double)pt.z*pt.z); } + +template static inline bool operator == (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return a.x == b.x && a.y == b.y && a.z == b.z; } + +template static inline bool operator != (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return a.x != b.x || a.y != b.y || a.z != b.z; } + +template static inline Point3_<_Tp> operator + (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x + b.x), + saturate_cast<_Tp>(a.y + b.y), + saturate_cast<_Tp>(a.z + b.z)); } + +template static inline Point3_<_Tp> operator - (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x - b.x), + saturate_cast<_Tp>(a.y - b.y), + saturate_cast<_Tp>(a.z - b.z)); } + +template static inline Point3_<_Tp> operator - (const Point3_<_Tp>& a) +{ return Point3_<_Tp>( saturate_cast<_Tp>(-a.x), + saturate_cast<_Tp>(-a.y), + saturate_cast<_Tp>(-a.z) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, int b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (int a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, float b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (float a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, double b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (double a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +//////////////////////////////// Size //////////////////////////////// + +template inline Size_<_Tp>::Size_() + : width(0), height(0) {} +template inline Size_<_Tp>::Size_(_Tp _width, _Tp _height) + : width(_width), height(_height) {} +template inline Size_<_Tp>::Size_(const Size_& sz) + : width(sz.width), height(sz.height) {} +template inline Size_<_Tp>::Size_(const CvSize& sz) + : width(saturate_cast<_Tp>(sz.width)), height(saturate_cast<_Tp>(sz.height)) {} +template inline Size_<_Tp>::Size_(const CvSize2D32f& sz) + : width(saturate_cast<_Tp>(sz.width)), height(saturate_cast<_Tp>(sz.height)) {} +template inline Size_<_Tp>::Size_(const Point_<_Tp>& pt) : width(pt.x), height(pt.y) {} + +template template inline 
Size_<_Tp>::operator Size_<_Tp2>() const +{ return Size_<_Tp2>(saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); } +template inline Size_<_Tp>::operator CvSize() const +{ return cvSize(saturate_cast(width), saturate_cast(height)); } +template inline Size_<_Tp>::operator CvSize2D32f() const +{ return cvSize2D32f((float)width, (float)height); } + +template inline Size_<_Tp>& Size_<_Tp>::operator = (const Size_<_Tp>& sz) +{ width = sz.width; height = sz.height; return *this; } +template static inline Size_<_Tp> operator * (const Size_<_Tp>& a, _Tp b) +{ return Size_<_Tp>(a.width * b, a.height * b); } +template static inline Size_<_Tp> operator + (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return Size_<_Tp>(a.width + b.width, a.height + b.height); } +template static inline Size_<_Tp> operator - (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return Size_<_Tp>(a.width - b.width, a.height - b.height); } +template inline _Tp Size_<_Tp>::area() const { return width*height; } + +template static inline Size_<_Tp>& operator += (Size_<_Tp>& a, const Size_<_Tp>& b) +{ a.width += b.width; a.height += b.height; return a; } +template static inline Size_<_Tp>& operator -= (Size_<_Tp>& a, const Size_<_Tp>& b) +{ a.width -= b.width; a.height -= b.height; return a; } + +template static inline bool operator == (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return a.width == b.width && a.height == b.height; } +template static inline bool operator != (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return a.width != b.width || a.height != b.height; } + +//////////////////////////////// Rect //////////////////////////////// + + +template inline Rect_<_Tp>::Rect_() : x(0), y(0), width(0), height(0) {} +template inline Rect_<_Tp>::Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height) : x(_x), y(_y), width(_width), height(_height) {} +template inline Rect_<_Tp>::Rect_(const Rect_<_Tp>& r) : x(r.x), y(r.y), width(r.width), height(r.height) {} +template inline Rect_<_Tp>::Rect_(const CvRect& r) : x((_Tp)r.x), y((_Tp)r.y), width((_Tp)r.width), height((_Tp)r.height) {} +template inline Rect_<_Tp>::Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz) : + x(org.x), y(org.y), width(sz.width), height(sz.height) {} +template inline Rect_<_Tp>::Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2) +{ + x = std::min(pt1.x, pt2.x); y = std::min(pt1.y, pt2.y); + width = std::max(pt1.x, pt2.x) - x; height = std::max(pt1.y, pt2.y) - y; +} +template inline Rect_<_Tp>& Rect_<_Tp>::operator = ( const Rect_<_Tp>& r ) +{ x = r.x; y = r.y; width = r.width; height = r.height; return *this; } + +template inline Point_<_Tp> Rect_<_Tp>::tl() const { return Point_<_Tp>(x,y); } +template inline Point_<_Tp> Rect_<_Tp>::br() const { return Point_<_Tp>(x+width, y+height); } + +template static inline Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Point_<_Tp>& b ) +{ a.x += b.x; a.y += b.y; return a; } +template static inline Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Point_<_Tp>& b ) +{ a.x -= b.x; a.y -= b.y; return a; } + +template static inline Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Size_<_Tp>& b ) +{ a.width += b.width; a.height += b.height; return a; } + +template static inline Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Size_<_Tp>& b ) +{ a.width -= b.width; a.height -= b.height; return a; } + +template static inline Rect_<_Tp>& operator &= ( Rect_<_Tp>& a, const Rect_<_Tp>& b ) +{ + _Tp x1 = std::max(a.x, b.x), y1 = std::max(a.y, b.y); + a.width = std::min(a.x + a.width, b.x + b.width) - x1; + a.height = std::min(a.y + a.height, 
b.y + b.height) - y1; + a.x = x1; a.y = y1; + if( a.width <= 0 || a.height <= 0 ) + a = Rect(); + return a; +} + +template static inline Rect_<_Tp>& operator |= ( Rect_<_Tp>& a, const Rect_<_Tp>& b ) +{ + _Tp x1 = std::min(a.x, b.x), y1 = std::min(a.y, b.y); + a.width = std::max(a.x + a.width, b.x + b.width) - x1; + a.height = std::max(a.y + a.height, b.y + b.height) - y1; + a.x = x1; a.y = y1; + return a; +} + +template inline Size_<_Tp> Rect_<_Tp>::size() const { return Size_<_Tp>(width, height); } +template inline _Tp Rect_<_Tp>::area() const { return width*height; } + +template template inline Rect_<_Tp>::operator Rect_<_Tp2>() const +{ return Rect_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), + saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); } +template inline Rect_<_Tp>::operator CvRect() const +{ return cvRect(saturate_cast(x), saturate_cast(y), + saturate_cast(width), saturate_cast(height)); } + +template inline bool Rect_<_Tp>::contains(const Point_<_Tp>& pt) const +{ return x <= pt.x && pt.x < x + width && y <= pt.y && pt.y < y + height; } + +template static inline bool operator == (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + return a.x == b.x && a.y == b.y && a.width == b.width && a.height == b.height; +} + +template static inline bool operator != (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + return a.x != b.x || a.y != b.y || a.width != b.width || a.height != b.height; +} + +template static inline Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Point_<_Tp>& b) +{ + return Rect_<_Tp>( a.x + b.x, a.y + b.y, a.width, a.height ); +} + +template static inline Rect_<_Tp> operator - (const Rect_<_Tp>& a, const Point_<_Tp>& b) +{ + return Rect_<_Tp>( a.x - b.x, a.y - b.y, a.width, a.height ); +} + +template static inline Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Size_<_Tp>& b) +{ + return Rect_<_Tp>( a.x, a.y, a.width + b.width, a.height + b.height ); +} + +template static inline Rect_<_Tp> operator & (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + Rect_<_Tp> c = a; + return c &= b; +} + +template static inline Rect_<_Tp> operator | (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + Rect_<_Tp> c = a; + return c |= b; +} + +template inline bool Point_<_Tp>::inside( const Rect_<_Tp>& r ) const +{ + return r.contains(*this); +} + +inline RotatedRect::RotatedRect() { angle = 0; } +inline RotatedRect::RotatedRect(const Point2f& _center, const Size2f& _size, float _angle) + : center(_center), size(_size), angle(_angle) {} +inline RotatedRect::RotatedRect(const CvBox2D& box) + : center(box.center), size(box.size), angle(box.angle) {} +inline RotatedRect::operator CvBox2D() const +{ + CvBox2D box; box.center = center; box.size = size; box.angle = angle; + return box; +} + +//////////////////////////////// Scalar_ /////////////////////////////// + +template inline Scalar_<_Tp>::Scalar_() +{ this->val[0] = this->val[1] = this->val[2] = this->val[3] = 0; } + +template inline Scalar_<_Tp>::Scalar_(_Tp v0, _Tp v1, _Tp v2, _Tp v3) +{ this->val[0] = v0; this->val[1] = v1; this->val[2] = v2; this->val[3] = v3; } + +template inline Scalar_<_Tp>::Scalar_(const CvScalar& s) +{ + this->val[0] = saturate_cast<_Tp>(s.val[0]); + this->val[1] = saturate_cast<_Tp>(s.val[1]); + this->val[2] = saturate_cast<_Tp>(s.val[2]); + this->val[3] = saturate_cast<_Tp>(s.val[3]); +} + +template inline Scalar_<_Tp>::Scalar_(_Tp v0) +{ this->val[0] = v0; this->val[1] = this->val[2] = this->val[3] = 0; } + +template inline Scalar_<_Tp> Scalar_<_Tp>::all(_Tp v0) +{ return Scalar_<_Tp>(v0, v0, v0, 
v0); } +template inline Scalar_<_Tp>::operator CvScalar() const +{ return cvScalar(this->val[0], this->val[1], this->val[2], this->val[3]); } + +template template inline Scalar_<_Tp>::operator Scalar_() const +{ + return Scalar_(saturate_cast(this->val[0]), + saturate_cast(this->val[1]), + saturate_cast(this->val[2]), + saturate_cast(this->val[3])); +} + +template static inline Scalar_<_Tp>& operator += (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] + b.val[0]); + a.val[1] = saturate_cast<_Tp>(a.val[1] + b.val[1]); + a.val[2] = saturate_cast<_Tp>(a.val[2] + b.val[2]); + a.val[3] = saturate_cast<_Tp>(a.val[3] + b.val[3]); + return a; +} + +template static inline Scalar_<_Tp>& operator -= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] - b.val[0]); + a.val[1] = saturate_cast<_Tp>(a.val[1] - b.val[1]); + a.val[2] = saturate_cast<_Tp>(a.val[2] - b.val[2]); + a.val[3] = saturate_cast<_Tp>(a.val[3] - b.val[3]); + return a; +} + +template static inline Scalar_<_Tp>& operator *= ( Scalar_<_Tp>& a, _Tp v ) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] * v); + a.val[1] = saturate_cast<_Tp>(a.val[1] * v); + a.val[2] = saturate_cast<_Tp>(a.val[2] * v); + a.val[3] = saturate_cast<_Tp>(a.val[3] * v); + return a; +} + +template inline Scalar_<_Tp> Scalar_<_Tp>::mul(const Scalar_<_Tp>& t, double scale ) const +{ + return Scalar_<_Tp>( saturate_cast<_Tp>(this->val[0]*t.val[0]*scale), + saturate_cast<_Tp>(this->val[1]*t.val[1]*scale), + saturate_cast<_Tp>(this->val[2]*t.val[2]*scale), + saturate_cast<_Tp>(this->val[3]*t.val[3]*scale)); +} + +template static inline bool operator == ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b ) +{ + return a.val[0] == b.val[0] && a.val[1] == b.val[1] && + a.val[2] == b.val[2] && a.val[3] == b.val[3]; +} + +template static inline bool operator != ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b ) +{ + return a.val[0] != b.val[0] || a.val[1] != b.val[1] || + a.val[2] != b.val[2] || a.val[3] != b.val[3]; +} + +template static inline Scalar_<_Tp> operator + (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] + b.val[0]), + saturate_cast<_Tp>(a.val[1] + b.val[1]), + saturate_cast<_Tp>(a.val[2] + b.val[2]), + saturate_cast<_Tp>(a.val[3] + b.val[3])); +} + +template static inline Scalar_<_Tp> operator - (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] - b.val[0]), + saturate_cast<_Tp>(a.val[1] - b.val[1]), + saturate_cast<_Tp>(a.val[2] - b.val[2]), + saturate_cast<_Tp>(a.val[3] - b.val[3])); +} + +template static inline Scalar_<_Tp> operator * (const Scalar_<_Tp>& a, _Tp alpha) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] * alpha), + saturate_cast<_Tp>(a.val[1] * alpha), + saturate_cast<_Tp>(a.val[2] * alpha), + saturate_cast<_Tp>(a.val[3] * alpha)); +} + +template static inline Scalar_<_Tp> operator * (_Tp alpha, const Scalar_<_Tp>& a) +{ + return a*alpha; +} + +template static inline Scalar_<_Tp> operator - (const Scalar_<_Tp>& a) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(-a.val[0]), saturate_cast<_Tp>(-a.val[1]), + saturate_cast<_Tp>(-a.val[2]), saturate_cast<_Tp>(-a.val[3])); +} + + +template static inline Scalar_<_Tp> +operator * (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a[0]*b[0] - a[1]*b[1] - a[2]*b[2] - a[3]*b[3]), + saturate_cast<_Tp>(a[0]*b[1] + a[1]*b[0] + a[2]*b[3] - a[3]*b[2]), + saturate_cast<_Tp>(a[0]*b[2] - a[1]*b[3] + a[2]*b[0] + 
a[3]*b[1]), + saturate_cast<_Tp>(a[0]*b[3] + a[1]*b[2] - a[2]*b[1] + a[3]*b[0])); +} + +template static inline Scalar_<_Tp>& +operator *= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a = a*b; + return a; +} + +template inline Scalar_<_Tp> Scalar_<_Tp>::conj() const +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(this->val[0]), + saturate_cast<_Tp>(-this->val[1]), + saturate_cast<_Tp>(-this->val[2]), + saturate_cast<_Tp>(-this->val[3])); +} + +template inline bool Scalar_<_Tp>::isReal() const +{ + return this->val[1] == 0 && this->val[2] == 0 && this->val[3] == 0; +} + +template static inline +Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, _Tp alpha) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] / alpha), + saturate_cast<_Tp>(a.val[1] / alpha), + saturate_cast<_Tp>(a.val[2] / alpha), + saturate_cast<_Tp>(a.val[3] / alpha)); +} + +template static inline +Scalar_ operator / (const Scalar_& a, float alpha) +{ + float s = 1/alpha; + return Scalar_(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s); +} + +template static inline +Scalar_ operator / (const Scalar_& a, double alpha) +{ + double s = 1/alpha; + return Scalar_(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s); +} + +template static inline +Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, _Tp alpha) +{ + a = a/alpha; + return a; +} + +template static inline +Scalar_<_Tp> operator / (_Tp a, const Scalar_<_Tp>& b) +{ + _Tp s = a/(b[0]*b[0] + b[1]*b[1] + b[2]*b[2] + b[3]*b[3]); + return b.conj()*s; +} + +template static inline +Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return a*((_Tp)1/b); +} + +template static inline +Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a = a/b; + return a; +} + +//////////////////////////////// Range ///////////////////////////////// + +inline Range::Range() : start(0), end(0) {} +inline Range::Range(int _start, int _end) : start(_start), end(_end) {} +inline Range::Range(const CvSlice& slice) : start(slice.start_index), end(slice.end_index) +{ + if( start == 0 && end == CV_WHOLE_SEQ_END_INDEX ) + *this = Range::all(); +} + +inline int Range::size() const { return end - start; } +inline bool Range::empty() const { return start == end; } +inline Range Range::all() { return Range(INT_MIN, INT_MAX); } + +static inline bool operator == (const Range& r1, const Range& r2) +{ return r1.start == r2.start && r1.end == r2.end; } + +static inline bool operator != (const Range& r1, const Range& r2) +{ return !(r1 == r2); } + +static inline bool operator !(const Range& r) +{ return r.start == r.end; } + +static inline Range operator & (const Range& r1, const Range& r2) +{ + Range r(std::max(r1.start, r2.start), std::min(r1.end, r2.end)); + r.end = std::max(r.end, r.start); + return r; +} + +static inline Range& operator &= (Range& r1, const Range& r2) +{ + r1 = r1 & r2; + return r1; +} + +static inline Range operator + (const Range& r1, int delta) +{ + return Range(r1.start + delta, r1.end + delta); +} + +static inline Range operator + (int delta, const Range& r1) +{ + return Range(r1.start + delta, r1.end + delta); +} + +static inline Range operator - (const Range& r1, int delta) +{ + return r1 + (-delta); +} + +inline Range::operator CvSlice() const +{ return *this != Range::all() ? cvSlice(start, end) : CV_WHOLE_SEQ; } + + + +//////////////////////////////// Vector //////////////////////////////// + +// template vector class. 
It is similar to STL's vector, +// with a few important differences: +// 1) it can be created on top of user-allocated data w/o copying it +// 2) vector b = a means copying the header, +// not the underlying data (use clone() to make a deep copy) +template class Vector +{ +public: + typedef _Tp value_type; + typedef _Tp* iterator; + typedef const _Tp* const_iterator; + typedef _Tp& reference; + typedef const _Tp& const_reference; + + struct Hdr + { + Hdr() : data(0), datastart(0), refcount(0), size(0), capacity(0) {}; + _Tp* data; + _Tp* datastart; + int* refcount; + size_t size; + size_t capacity; + }; + + Vector() {} + Vector(size_t _size) { resize(_size); } + Vector(size_t _size, const _Tp& val) + { + resize(_size); + for(size_t i = 0; i < _size; i++) + hdr.data[i] = val; + } + Vector(_Tp* _data, size_t _size, bool _copyData=false) + { set(_data, _size, _copyData); } + + template Vector(const Vec<_Tp, n>& vec) + { set((_Tp*)&vec.val[0], n, true); } + + Vector(const std::vector<_Tp>& vec, bool _copyData=false) + { set(!vec.empty() ? (_Tp*)&vec[0] : 0, vec.size(), _copyData); } + + Vector(const Vector& d) { *this = d; } + + Vector(const Vector& d, const Range& r_) + { + Range r = r_ == Range::all() ? Range(0, d.size()) : r_; + /*if( r == Range::all() ) + r = Range(0, d.size());*/ + if( r.size() > 0 && r.start >= 0 && r.end <= d.size() ) + { + if( d.hdr.refcount ) + CV_XADD(d.hdr.refcount, 1); + hdr.refcount = d.hdr.refcount; + hdr.datastart = d.hdr.datastart; + hdr.data = d.hdr.data + r.start; + hdr.capacity = hdr.size = r.size(); + } + } + + Vector<_Tp>& operator = (const Vector& d) + { + if( this != &d ) + { + if( d.hdr.refcount ) + CV_XADD(d.hdr.refcount, 1); + release(); + hdr = d.hdr; + } + return *this; + } + + ~Vector() { release(); } + + Vector<_Tp> clone() const + { return hdr.data ? Vector<_Tp>(hdr.data, hdr.size, true) : Vector<_Tp>(); } + + void copyTo(Vector<_Tp>& vec) const + { + size_t i, sz = size(); + vec.resize(sz); + const _Tp* src = hdr.data; + _Tp* dst = vec.hdr.data; + for( i = 0; i < sz; i++ ) + dst[i] = src[i]; + } + + void copyTo(std::vector<_Tp>& vec) const + { + size_t i, sz = size(); + vec.resize(sz); + const _Tp* src = hdr.data; + _Tp* dst = sz ? 
&vec[0] : 0; + for( i = 0; i < sz; i++ ) + dst[i] = src[i]; + } + + operator CvMat() const + { return cvMat((int)size(), 1, type(), (void*)hdr.data); } + + _Tp& operator [] (size_t i) { CV_DbgAssert( i < size() ); return hdr.data[i]; } + const _Tp& operator [] (size_t i) const { CV_DbgAssert( i < size() ); return hdr.data[i]; } + Vector operator() (const Range& r) const { return Vector(*this, r); } + _Tp& back() { CV_DbgAssert(!empty()); return hdr.data[hdr.size-1]; } + const _Tp& back() const { CV_DbgAssert(!empty()); return hdr.data[hdr.size-1]; } + _Tp& front() { CV_DbgAssert(!empty()); return hdr.data[0]; } + const _Tp& front() const { CV_DbgAssert(!empty()); return hdr.data[0]; } + + _Tp* begin() { return hdr.data; } + _Tp* end() { return hdr.data + hdr.size; } + const _Tp* begin() const { return hdr.data; } + const _Tp* end() const { return hdr.data + hdr.size; } + + void addref() { if( hdr.refcount ) CV_XADD(hdr.refcount, 1); } + void release() + { + if( hdr.refcount && CV_XADD(hdr.refcount, -1) == 1 ) + { + delete[] hdr.datastart; + delete hdr.refcount; + } + hdr = Hdr(); + } + + void set(_Tp* _data, size_t _size, bool _copyData=false) + { + if( !_copyData ) + { + release(); + hdr.data = hdr.datastart = _data; + hdr.size = hdr.capacity = _size; + hdr.refcount = 0; + } + else + { + reserve(_size); + for( size_t i = 0; i < _size; i++ ) + hdr.data[i] = _data[i]; + hdr.size = _size; + } + } + + void reserve(size_t newCapacity) + { + _Tp* newData; + int* newRefcount; + size_t i, oldSize = hdr.size; + if( (!hdr.refcount || *hdr.refcount == 1) && hdr.capacity >= newCapacity ) + return; + newCapacity = std::max(newCapacity, oldSize); + newData = new _Tp[newCapacity]; + newRefcount = new int(1); + for( i = 0; i < oldSize; i++ ) + newData[i] = hdr.data[i]; + release(); + hdr.data = hdr.datastart = newData; + hdr.capacity = newCapacity; + hdr.size = oldSize; + hdr.refcount = newRefcount; + } + + void resize(size_t newSize) + { + size_t i; + newSize = std::max(newSize, (size_t)0); + if( (!hdr.refcount || *hdr.refcount == 1) && hdr.size == newSize ) + return; + if( newSize > hdr.capacity ) + reserve(std::max(newSize, std::max((size_t)4, hdr.capacity*2))); + for( i = hdr.size; i < newSize; i++ ) + hdr.data[i] = _Tp(); + hdr.size = newSize; + } + + Vector<_Tp>& push_back(const _Tp& elem) + { + if( hdr.size == hdr.capacity ) + reserve( std::max((size_t)4, hdr.capacity*2) ); + hdr.data[hdr.size++] = elem; + return *this; + } + + Vector<_Tp>& pop_back() + { + if( hdr.size > 0 ) + --hdr.size; + return *this; + } + + size_t size() const { return hdr.size; } + size_t capacity() const { return hdr.capacity; } + bool empty() const { return hdr.size == 0; } + void clear() { resize(0); } + int type() const { return DataType<_Tp>::type; } + +protected: + Hdr hdr; +}; + + +template inline typename DataType<_Tp>::work_type +dot(const Vector<_Tp>& v1, const Vector<_Tp>& v2) +{ + typedef typename DataType<_Tp>::work_type _Tw; + size_t i = 0, n = v1.size(); + assert(v1.size() == v2.size()); + + _Tw s = 0; + const _Tp *ptr1 = &v1[0], *ptr2 = &v2[0]; + for( ; i < n; i++ ) + s += (_Tw)ptr1[i]*ptr2[i]; + + return s; +} + +// Multiply-with-Carry RNG +inline RNG::RNG() { state = 0xffffffff; } +inline RNG::RNG(uint64 _state) { state = _state ? 
_state : 0xffffffff; } +inline unsigned RNG::next() +{ + state = (uint64)(unsigned)state*CV_RNG_COEFF + (unsigned)(state >> 32); + return (unsigned)state; +} + +inline RNG::operator uchar() { return (uchar)next(); } +inline RNG::operator schar() { return (schar)next(); } +inline RNG::operator ushort() { return (ushort)next(); } +inline RNG::operator short() { return (short)next(); } +inline RNG::operator unsigned() { return next(); } +inline unsigned RNG::operator ()(unsigned N) {return (unsigned)uniform(0,N);} +inline unsigned RNG::operator ()() {return next();} +inline RNG::operator int() { return (int)next(); } +// * (2^32-1)^-1 +inline RNG::operator float() { return next()*2.3283064365386962890625e-10f; } +inline RNG::operator double() +{ + unsigned t = next(); + return (((uint64)t << 32) | next())*5.4210108624275221700372640043497e-20; +} +inline int RNG::uniform(int a, int b) { return a == b ? a : (int)(next()%(b - a) + a); } +inline float RNG::uniform(float a, float b) { return ((float)*this)*(b - a) + a; } +inline double RNG::uniform(double a, double b) { return ((double)*this)*(b - a) + a; } + +inline TermCriteria::TermCriteria() : type(0), maxCount(0), epsilon(0) {} +inline TermCriteria::TermCriteria(int _type, int _maxCount, double _epsilon) + : type(_type), maxCount(_maxCount), epsilon(_epsilon) {} +inline TermCriteria::TermCriteria(const CvTermCriteria& criteria) + : type(criteria.type), maxCount(criteria.max_iter), epsilon(criteria.epsilon) {} +inline TermCriteria::operator CvTermCriteria() const +{ return cvTermCriteria(type, maxCount, epsilon); } + +inline uchar* LineIterator::operator *() { return ptr; } +inline LineIterator& LineIterator::operator ++() +{ + int mask = err < 0 ? -1 : 0; + err += minusDelta + (plusDelta & mask); + ptr += minusStep + (plusStep & mask); + return *this; +} +inline LineIterator LineIterator::operator ++(int) +{ + LineIterator it = *this; + ++(*this); + return it; +} +inline Point LineIterator::pos() const +{ + Point p; + p.y = (int)((ptr - ptr0)/step); + p.x = (int)(((ptr - ptr0) - p.y*step)/elemSize); + return p; +} + +/////////////////////////////// AutoBuffer //////////////////////////////////////// + +template inline AutoBuffer<_Tp, fixed_size>::AutoBuffer() +{ + ptr = buf; + size = fixed_size; +} + +template inline AutoBuffer<_Tp, fixed_size>::AutoBuffer(size_t _size) +{ + ptr = buf; + size = fixed_size; + allocate(_size); +} + +template inline AutoBuffer<_Tp, fixed_size>::~AutoBuffer() +{ deallocate(); } + +template inline void AutoBuffer<_Tp, fixed_size>::allocate(size_t _size) +{ + if(_size <= size) + return; + deallocate(); + if(_size > fixed_size) + { + ptr = cv::allocate<_Tp>(_size); + size = _size; + } +} + +template inline void AutoBuffer<_Tp, fixed_size>::deallocate() +{ + if( ptr != buf ) + { + cv::deallocate<_Tp>(ptr, size); + ptr = buf; + size = fixed_size; + } +} + +template inline AutoBuffer<_Tp, fixed_size>::operator _Tp* () +{ return ptr; } + +template inline AutoBuffer<_Tp, fixed_size>::operator const _Tp* () const +{ return ptr; } + + +/////////////////////////////////// Ptr //////////////////////////////////////// + +template inline Ptr<_Tp>::Ptr() : obj(0), refcount(0) {} +template inline Ptr<_Tp>::Ptr(_Tp* _obj) : obj(_obj) +{ + if(obj) + { + refcount = (int*)fastMalloc(sizeof(*refcount)); + *refcount = 1; + } + else + refcount = 0; +} + +template inline void Ptr<_Tp>::addref() +{ if( refcount ) CV_XADD(refcount, 1); } + +template inline void Ptr<_Tp>::release() +{ + if( refcount && CV_XADD(refcount, -1) == 1 ) + { + 
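// (descriptive note, added editorially) CV_XADD returns the counter's previous
// value, so this branch runs exactly when the reference count has just dropped
// to zero: delete_obj() destroys the owned object (it is specialized later in
// this header for several classic C structures), then the counter itself is freed.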
delete_obj(); + fastFree(refcount); + } + refcount = 0; + obj = 0; +} + +template inline void Ptr<_Tp>::delete_obj() +{ + if( obj ) delete obj; +} + +template inline Ptr<_Tp>::~Ptr() { release(); } + +template inline Ptr<_Tp>::Ptr(const Ptr<_Tp>& _ptr) +{ + obj = _ptr.obj; + refcount = _ptr.refcount; + addref(); +} + +template inline Ptr<_Tp>& Ptr<_Tp>::operator = (const Ptr<_Tp>& _ptr) +{ + int* _refcount = _ptr.refcount; + if( _refcount ) + CV_XADD(_refcount, 1); + release(); + obj = _ptr.obj; + refcount = _refcount; + return *this; +} + +template inline _Tp* Ptr<_Tp>::operator -> () { return obj; } +template inline const _Tp* Ptr<_Tp>::operator -> () const { return obj; } + +template inline Ptr<_Tp>::operator _Tp* () { return obj; } +template inline Ptr<_Tp>::operator const _Tp*() const { return obj; } + +template inline bool Ptr<_Tp>::empty() const { return obj == 0; } + +template template Ptr<_Tp>::Ptr(const Ptr<_Tp2>& p) + : obj(0), refcount(0) +{ + if (p.empty()) + return; + + _Tp* p_casted = dynamic_cast<_Tp*>(p.obj); + if (!p_casted) + return; + + obj = p_casted; + refcount = p.refcount; + addref(); +} + +template template inline Ptr<_Tp2> Ptr<_Tp>::ptr() +{ + Ptr<_Tp2> p; + if( !obj ) + return p; + + _Tp2* obj_casted = dynamic_cast<_Tp2*>(obj); + if (!obj_casted) + return p; + + if( refcount ) + CV_XADD(refcount, 1); + + p.obj = obj_casted; + p.refcount = refcount; + return p; +} + +template template inline const Ptr<_Tp2> Ptr<_Tp>::ptr() const +{ + Ptr<_Tp2> p; + if( !obj ) + return p; + + _Tp2* obj_casted = dynamic_cast<_Tp2*>(obj); + if (!obj_casted) + return p; + + if( refcount ) + CV_XADD(refcount, 1); + + p.obj = obj_casted; + p.refcount = refcount; + return p; +} + +//// specializied implementations of Ptr::delete_obj() for classic OpenCV types + +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); + +//////////////////////////////////////// XML & YAML I/O //////////////////////////////////// + +CV_EXPORTS_W void write( FileStorage& fs, const string& name, int value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, float value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, double value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, const string& value ); + +template inline void write(FileStorage& fs, const _Tp& value) +{ write(fs, string(), value); } + +CV_EXPORTS void writeScalar( FileStorage& fs, int value ); +CV_EXPORTS void writeScalar( FileStorage& fs, float value ); +CV_EXPORTS void writeScalar( FileStorage& fs, double value ); +CV_EXPORTS void writeScalar( FileStorage& fs, const string& value ); + +template<> inline void write( FileStorage& fs, const int& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& fs, const float& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& fs, const double& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& fs, const string& value ) +{ + writeScalar(fs, value); +} + +template inline void write(FileStorage& fs, const Point_<_Tp>& pt ) +{ + write(fs, pt.x); + write(fs, pt.y); +} + +template inline void write(FileStorage& fs, const Point3_<_Tp>& pt ) +{ + write(fs, pt.x); + write(fs, pt.y); + write(fs, pt.z); +} + +template inline void write(FileStorage& 
fs, const Size_<_Tp>& sz ) +{ + write(fs, sz.width); + write(fs, sz.height); +} + +template inline void write(FileStorage& fs, const Complex<_Tp>& c ) +{ + write(fs, c.re); + write(fs, c.im); +} + +template inline void write(FileStorage& fs, const Rect_<_Tp>& r ) +{ + write(fs, r.x); + write(fs, r.y); + write(fs, r.width); + write(fs, r.height); +} + +template inline void write(FileStorage& fs, const Vec<_Tp, cn>& v ) +{ + for(int i = 0; i < cn; i++) + write(fs, v.val[i]); +} + +template inline void write(FileStorage& fs, const Scalar_<_Tp>& s ) +{ + write(fs, s.val[0]); + write(fs, s.val[1]); + write(fs, s.val[2]); + write(fs, s.val[3]); +} + +inline void write(FileStorage& fs, const Range& r ) +{ + write(fs, r.start); + write(fs, r.end); +} + +class CV_EXPORTS WriteStructContext +{ +public: + WriteStructContext(FileStorage& _fs, const string& name, + int flags, const string& typeName=string()); + ~WriteStructContext(); + FileStorage* fs; +}; + +template inline void write(FileStorage& fs, const string& name, const Point_<_Tp>& pt ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, pt.x); + write(fs, pt.y); +} + +template inline void write(FileStorage& fs, const string& name, const Point3_<_Tp>& pt ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, pt.x); + write(fs, pt.y); + write(fs, pt.z); +} + +template inline void write(FileStorage& fs, const string& name, const Size_<_Tp>& sz ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, sz.width); + write(fs, sz.height); +} + +template inline void write(FileStorage& fs, const string& name, const Complex<_Tp>& c ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, c.re); + write(fs, c.im); +} + +template inline void write(FileStorage& fs, const string& name, const Rect_<_Tp>& r ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, r.x); + write(fs, r.y); + write(fs, r.width); + write(fs, r.height); +} + +template inline void write(FileStorage& fs, const string& name, const Vec<_Tp, cn>& v ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + for(int i = 0; i < cn; i++) + write(fs, v.val[i]); +} + +template inline void write(FileStorage& fs, const string& name, const Scalar_<_Tp>& s ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, s.val[0]); + write(fs, s.val[1]); + write(fs, s.val[2]); + write(fs, s.val[3]); +} + +inline void write(FileStorage& fs, const string& name, const Range& r ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, r.start); + write(fs, r.end); +} + +template class VecWriterProxy +{ +public: + VecWriterProxy( FileStorage* _fs ) : fs(_fs) {} + void operator()(const vector<_Tp>& vec) const + { + size_t i, count = vec.size(); + for( i = 0; i < count; i++ ) + write( *fs, vec[i] ); + } + FileStorage* fs; +}; + +template class VecWriterProxy<_Tp,1> +{ +public: + VecWriterProxy( FileStorage* _fs ) : fs(_fs) {} + void operator()(const vector<_Tp>& vec) const + { + int _fmt = DataType<_Tp>::fmt; + char fmt[] = { (char)((_fmt>>8)+'1'), (char)_fmt, '\0' }; + fs->writeRaw( string(fmt), !vec.empty() ? 
(uchar*)&vec[0] : 0, vec.size()*sizeof(_Tp) ); + } + FileStorage* fs; +}; + +template static inline void write( FileStorage& fs, const vector<_Tp>& vec ) +{ + VecWriterProxy<_Tp, DataType<_Tp>::fmt != 0> w(&fs); + w(vec); +} + +template static inline void write( FileStorage& fs, const string& name, + const vector<_Tp>& vec ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+(DataType<_Tp>::fmt != 0 ? CV_NODE_FLOW : 0)); + write(fs, vec); +} + +CV_EXPORTS_W void write( FileStorage& fs, const string& name, const Mat& value ); +CV_EXPORTS void write( FileStorage& fs, const string& name, const SparseMat& value ); + +template static inline FileStorage& operator << (FileStorage& fs, const _Tp& value) +{ + if( !fs.isOpened() ) + return fs; + if( fs.state == FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP ) + CV_Error( CV_StsError, "No element name has been given" ); + write( fs, fs.elname, value ); + if( fs.state & FileStorage::INSIDE_MAP ) + fs.state = FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP; + return fs; +} + +CV_EXPORTS FileStorage& operator << (FileStorage& fs, const string& str); + +static inline FileStorage& operator << (FileStorage& fs, const char* str) +{ return (fs << string(str)); } + +static inline FileStorage& operator << (FileStorage& fs, char* value) +{ return (fs << string(value)); } + +inline FileNode::FileNode() : fs(0), node(0) {} +inline FileNode::FileNode(const CvFileStorage* _fs, const CvFileNode* _node) + : fs(_fs), node(_node) {} + +inline FileNode::FileNode(const FileNode& _node) : fs(_node.fs), node(_node.node) {} + +inline int FileNode::type() const { return !node ? NONE : (node->tag & TYPE_MASK); } +inline bool FileNode::empty() const { return node == 0; } +inline bool FileNode::isNone() const { return type() == NONE; } +inline bool FileNode::isSeq() const { return type() == SEQ; } +inline bool FileNode::isMap() const { return type() == MAP; } +inline bool FileNode::isInt() const { return type() == INT; } +inline bool FileNode::isReal() const { return type() == REAL; } +inline bool FileNode::isString() const { return type() == STR; } +inline bool FileNode::isNamed() const { return !node ? false : (node->tag & NAMED) != 0; } +inline size_t FileNode::size() const +{ + int t = type(); + return t == MAP ? (size_t)((CvSet*)node->data.map)->active_count : + t == SEQ ? (size_t)node->data.seq->total : (size_t)!isNone(); +} + +inline CvFileNode* FileNode::operator *() { return (CvFileNode*)node; } +inline const CvFileNode* FileNode::operator* () const { return node; } + +static inline void read(const FileNode& node, int& value, int default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? 
cvRound(node.node->data.f) : 0x7fffffff; +} + +static inline void read(const FileNode& node, bool& value, bool default_value) +{ + int temp; read(node, temp, (int)default_value); + value = temp != 0; +} + +static inline void read(const FileNode& node, uchar& value, uchar default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, schar& value, schar default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, ushort& value, ushort default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, short& value, short default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, float& value, float default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? (float)node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? (float)node.node->data.f : 1e30f; +} + +static inline void read(const FileNode& node, double& value, double default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? (double)node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? node.node->data.f : 1e300; +} + +static inline void read(const FileNode& node, string& value, const string& default_value) +{ + value = !node.node ? default_value : CV_NODE_IS_STRING(node.node->tag) ? string(node.node->data.str.ptr) : string(""); +} + +template static inline void read(const FileNode& node, Point_<_Tp>& value, const Point_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 2 ? default_value : Point_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); +} + +template static inline void read(const FileNode& node, Point3_<_Tp>& value, const Point3_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 3 ? default_value : Point3_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), + saturate_cast<_Tp>(temp[2])); +} + +template static inline void read(const FileNode& node, Size_<_Tp>& value, const Size_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 2 ? default_value : Size_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); +} + +template static inline void read(const FileNode& node, Complex<_Tp>& value, const Complex<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 2 ? default_value : Complex<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); +} + +template static inline void read(const FileNode& node, Rect_<_Tp>& value, const Rect_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 4 ? default_value : Rect_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), + saturate_cast<_Tp>(temp[2]), saturate_cast<_Tp>(temp[3])); +} + +template static inline void read(const FileNode& node, Vec<_Tp, cn>& value, const Vec<_Tp, cn>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != cn ? 
default_value : Vec<_Tp, cn>(&temp[0]); +} + +template static inline void read(const FileNode& node, Scalar_<_Tp>& value, const Scalar_<_Tp>& default_value) +{ + vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 4 ? default_value : Scalar_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), + saturate_cast<_Tp>(temp[2]), saturate_cast<_Tp>(temp[3])); +} + +static inline void read(const FileNode& node, Range& value, const Range& default_value) +{ + Point2i temp(value.start, value.end); const Point2i default_temp = Point2i(default_value.start, default_value.end); + read(node, temp, default_temp); + value.start = temp.x; value.end = temp.y; +} + +CV_EXPORTS_W void read(const FileNode& node, Mat& mat, const Mat& default_mat=Mat() ); +CV_EXPORTS void read(const FileNode& node, SparseMat& mat, const SparseMat& default_mat=SparseMat() ); + +inline FileNode::operator int() const +{ + int value; + read(*this, value, 0); + return value; +} +inline FileNode::operator float() const +{ + float value; + read(*this, value, 0.f); + return value; +} +inline FileNode::operator double() const +{ + double value; + read(*this, value, 0.); + return value; +} +inline FileNode::operator string() const +{ + string value; + read(*this, value, value); + return value; +} + +inline void FileNode::readRaw( const string& fmt, uchar* vec, size_t len ) const +{ + begin().readRaw( fmt, vec, len ); +} + +template class VecReaderProxy +{ +public: + VecReaderProxy( FileNodeIterator* _it ) : it(_it) {} + void operator()(vector<_Tp>& vec, size_t count) const + { + count = std::min(count, it->remaining); + vec.resize(count); + for( size_t i = 0; i < count; i++, ++(*it) ) + read(**it, vec[i], _Tp()); + } + FileNodeIterator* it; +}; + +template class VecReaderProxy<_Tp,1> +{ +public: + VecReaderProxy( FileNodeIterator* _it ) : it(_it) {} + void operator()(vector<_Tp>& vec, size_t count) const + { + size_t remaining = it->remaining, cn = DataType<_Tp>::channels; + int _fmt = DataType<_Tp>::fmt; + char fmt[] = { (char)((_fmt>>8)+'1'), (char)_fmt, '\0' }; + size_t remaining1 = remaining/cn; + count = count < remaining1 ? count : remaining1; + vec.resize(count); + it->readRaw( string(fmt), !vec.empty() ? 
(uchar*)&vec[0] : 0, count*sizeof(_Tp) ); + } + FileNodeIterator* it; +}; + +template static inline void +read( FileNodeIterator& it, vector<_Tp>& vec, size_t maxCount=(size_t)INT_MAX ) +{ + VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it); + r(vec, maxCount); +} + +template static inline void +read( const FileNode& node, vector<_Tp>& vec, const vector<_Tp>& default_value=vector<_Tp>() ) +{ + if(!node.node) + vec = default_value; + else + { + FileNodeIterator it = node.begin(); + read( it, vec ); + } +} + +inline FileNodeIterator FileNode::begin() const +{ + return FileNodeIterator(fs, node); +} + +inline FileNodeIterator FileNode::end() const +{ + return FileNodeIterator(fs, node, size()); +} + +inline FileNode FileNodeIterator::operator *() const +{ return FileNode(fs, (const CvFileNode*)(void*)reader.ptr); } + +inline FileNode FileNodeIterator::operator ->() const +{ return FileNode(fs, (const CvFileNode*)(void*)reader.ptr); } + +template static inline FileNodeIterator& operator >> (FileNodeIterator& it, _Tp& value) +{ read( *it, value, _Tp()); return ++it; } + +template static inline +FileNodeIterator& operator >> (FileNodeIterator& it, vector<_Tp>& vec) +{ + VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it); + r(vec, (size_t)INT_MAX); + return it; +} + +template static inline void operator >> (const FileNode& n, _Tp& value) +{ read( n, value, _Tp()); } + +template static inline void operator >> (const FileNode& n, vector<_Tp>& vec) +{ FileNodeIterator it = n.begin(); it >> vec; } + +static inline bool operator == (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it1.fs == it2.fs && it1.container == it2.container && + it1.reader.ptr == it2.reader.ptr && it1.remaining == it2.remaining; +} + +static inline bool operator != (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return !(it1 == it2); +} + +static inline ptrdiff_t operator - (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it2.remaining - it1.remaining; +} + +static inline bool operator < (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it1.remaining > it2.remaining; +} + +inline FileNode FileStorage::getFirstTopLevelNode() const +{ + FileNode r = root(); + FileNodeIterator it = r.begin(); + return it != r.end() ? *it : FileNode(); +} + +//////////////////////////////////////// Various algorithms //////////////////////////////////// + +template static inline _Tp gcd(_Tp a, _Tp b) +{ + if( a < b ) + std::swap(a, b); + while( b > 0 ) + { + _Tp r = a % b; + a = b; + b = r; + } + return a; +} + +/****************************************************************************************\ + + Generic implementation of QuickSort algorithm + Use it as: vector<_Tp> a; ... sort(a,); + + The current implementation was derived from *BSD system qsort(): + + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + +\****************************************************************************************/ + +template void sort( vector<_Tp>& vec, _LT LT=_LT() ) +{ + int isort_thresh = 7; + int sp = 0; + + struct + { + _Tp *lb; + _Tp *ub; + } stack[48]; + + size_t total = vec.size(); + + if( total <= 1 ) + return; + + _Tp* arr = &vec[0]; + stack[0].lb = arr; + stack[0].ub = arr + (total - 1); + + while( sp >= 0 ) + { + _Tp* left = stack[sp].lb; + _Tp* right = stack[sp--].ub; + + for(;;) + { + int i, n = (int)(right - left) + 1, m; + _Tp* ptr; + _Tp* ptr2; + + if( n <= isort_thresh ) + { + insert_sort: + for( ptr = left + 1; ptr <= right; ptr++ ) + { + for( ptr2 = ptr; ptr2 > left && LT(ptr2[0],ptr2[-1]); ptr2--) + std::swap( ptr2[0], ptr2[-1] ); + } + break; + } + else + { + _Tp* left0; + _Tp* left1; + _Tp* right0; + _Tp* right1; + _Tp* pivot; + _Tp* a; + _Tp* b; + _Tp* c; + int swap_cnt = 0; + + left0 = left; + right0 = right; + pivot = left + (n/2); + + if( n > 40 ) + { + int d = n / 8; + a = left, b = left + d, c = left + 2*d; + left = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + + a = pivot - d, b = pivot, c = pivot + d; + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + + a = right - 2*d, b = right - d, c = right; + right = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + } + + a = left, b = pivot, c = right; + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? 
a : c)); + if( pivot != left0 ) + { + std::swap( *pivot, *left0 ); + pivot = left0; + } + left = left1 = left0 + 1; + right = right1 = right0; + + for(;;) + { + while( left <= right && !LT(*pivot, *left) ) + { + if( !LT(*left, *pivot) ) + { + if( left > left1 ) + std::swap( *left1, *left ); + swap_cnt = 1; + left1++; + } + left++; + } + + while( left <= right && !LT(*right, *pivot) ) + { + if( !LT(*pivot, *right) ) + { + if( right < right1 ) + std::swap( *right1, *right ); + swap_cnt = 1; + right1--; + } + right--; + } + + if( left > right ) + break; + std::swap( *left, *right ); + swap_cnt = 1; + left++; + right--; + } + + if( swap_cnt == 0 ) + { + left = left0, right = right0; + goto insert_sort; + } + + n = std::min( (int)(left1 - left0), (int)(left - left1) ); + for( i = 0; i < n; i++ ) + std::swap( left0[i], left[i-n] ); + + n = std::min( (int)(right0 - right1), (int)(right1 - right) ); + for( i = 0; i < n; i++ ) + std::swap( left[i], right0[i-n+1] ); + n = (int)(left - left1); + m = (int)(right1 - right); + if( n > 1 ) + { + if( m > 1 ) + { + if( n > m ) + { + stack[++sp].lb = left0; + stack[sp].ub = left0 + n - 1; + left = right0 - m + 1, right = right0; + } + else + { + stack[++sp].lb = right0 - m + 1; + stack[sp].ub = right0; + left = left0, right = left0 + n - 1; + } + } + else + left = left0, right = left0 + n - 1; + } + else if( m > 1 ) + left = right0 - m + 1, right = right0; + else + break; + } + } + } +} + +template class LessThan +{ +public: + bool operator()(const _Tp& a, const _Tp& b) const { return a < b; } +}; + +template class GreaterEq +{ +public: + bool operator()(const _Tp& a, const _Tp& b) const { return a >= b; } +}; + +template class LessThanIdx +{ +public: + LessThanIdx( const _Tp* _arr ) : arr(_arr) {} + bool operator()(int a, int b) const { return arr[a] < arr[b]; } + const _Tp* arr; +}; + +template class GreaterEqIdx +{ +public: + GreaterEqIdx( const _Tp* _arr ) : arr(_arr) {} + bool operator()(int a, int b) const { return arr[a] >= arr[b]; } + const _Tp* arr; +}; + + +// This function splits the input sequence or set into one or more equivalence classes and +// returns the vector of labels - 0-based class indexes for each element. +// predicate(a,b) returns true if the two sequence elements certainly belong to the same class. 
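// A brief usage sketch (editorial addition, not in the upstream header): with
// std::equal_to<int> as the predicate, equal values end up in the same class.
//     std::vector<int> vals;   // e.g. 1, 5, 1, 5, 7
//     std::vector<int> labels;
//     int nclasses = cv::partition(vals, labels, std::equal_to<int>());
//     // for the values above, nclasses == 3 and labels comes back as {0, 1, 0, 1, 2}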
+// +// The algorithm is described in "Introduction to Algorithms" +// by Cormen, Leiserson and Rivest, the chapter "Data structures for disjoint sets" +template int +partition( const vector<_Tp>& _vec, vector& labels, + _EqPredicate predicate=_EqPredicate()) +{ + int i, j, N = (int)_vec.size(); + const _Tp* vec = &_vec[0]; + + const int PARENT=0; + const int RANK=1; + + vector _nodes(N*2); + int (*nodes)[2] = (int(*)[2])&_nodes[0]; + + // The first O(N) pass: create N single-vertex trees + for(i = 0; i < N; i++) + { + nodes[i][PARENT]=-1; + nodes[i][RANK] = 0; + } + + // The main O(N^2) pass: merge connected components + for( i = 0; i < N; i++ ) + { + int root = i; + + // find root + while( nodes[root][PARENT] >= 0 ) + root = nodes[root][PARENT]; + + for( j = 0; j < N; j++ ) + { + if( i == j || !predicate(vec[i], vec[j])) + continue; + int root2 = j; + + while( nodes[root2][PARENT] >= 0 ) + root2 = nodes[root2][PARENT]; + + if( root2 != root ) + { + // unite both trees + int rank = nodes[root][RANK], rank2 = nodes[root2][RANK]; + if( rank > rank2 ) + nodes[root2][PARENT] = root; + else + { + nodes[root][PARENT] = root2; + nodes[root2][RANK] += rank == rank2; + root = root2; + } + assert( nodes[root][PARENT] < 0 ); + + int k = j, parent; + + // compress the path from node2 to root + while( (parent = nodes[k][PARENT]) >= 0 ) + { + nodes[k][PARENT] = root; + k = parent; + } + + // compress the path from node to root + k = i; + while( (parent = nodes[k][PARENT]) >= 0 ) + { + nodes[k][PARENT] = root; + k = parent; + } + } + } + } + + // Final O(N) pass: enumerate classes + labels.resize(N); + int nclasses = 0; + + for( i = 0; i < N; i++ ) + { + int root = i; + while( nodes[root][PARENT] >= 0 ) + root = nodes[root][PARENT]; + // re-use the rank as the class label + if( nodes[root][RANK] >= 0 ) + nodes[root][RANK] = ~nclasses++; + labels[i] = ~nodes[root][RANK]; + } + + return nclasses; +} + + +////////////////////////////////////////////////////////////////////////////// + +// bridge C++ => C Seq API +CV_EXPORTS schar* seqPush( CvSeq* seq, const void* element=0); +CV_EXPORTS schar* seqPushFront( CvSeq* seq, const void* element=0); +CV_EXPORTS void seqPop( CvSeq* seq, void* element=0); +CV_EXPORTS void seqPopFront( CvSeq* seq, void* element=0); +CV_EXPORTS void seqPopMulti( CvSeq* seq, void* elements, + int count, int in_front=0 ); +CV_EXPORTS void seqRemove( CvSeq* seq, int index ); +CV_EXPORTS void clearSeq( CvSeq* seq ); +CV_EXPORTS schar* getSeqElem( const CvSeq* seq, int index ); +CV_EXPORTS void seqRemoveSlice( CvSeq* seq, CvSlice slice ); +CV_EXPORTS void seqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr ); + +template inline Seq<_Tp>::Seq() : seq(0) {} +template inline Seq<_Tp>::Seq( const CvSeq* _seq ) : seq((CvSeq*)_seq) +{ + CV_Assert(!_seq || _seq->elem_size == sizeof(_Tp)); +} + +template inline Seq<_Tp>::Seq( MemStorage& storage, + int headerSize ) +{ + CV_Assert(headerSize >= (int)sizeof(CvSeq)); + seq = cvCreateSeq(DataType<_Tp>::type, headerSize, sizeof(_Tp), storage); +} + +template inline _Tp& Seq<_Tp>::operator [](int idx) +{ return *(_Tp*)getSeqElem(seq, idx); } + +template inline const _Tp& Seq<_Tp>::operator [](int idx) const +{ return *(_Tp*)getSeqElem(seq, idx); } + +template inline SeqIterator<_Tp> Seq<_Tp>::begin() const +{ return SeqIterator<_Tp>(*this); } + +template inline SeqIterator<_Tp> Seq<_Tp>::end() const +{ return SeqIterator<_Tp>(*this, true); } + +template inline size_t Seq<_Tp>::size() const +{ return seq ? 
seq->total : 0; } + +template inline int Seq<_Tp>::type() const +{ return seq ? CV_MAT_TYPE(seq->flags) : 0; } + +template inline int Seq<_Tp>::depth() const +{ return seq ? CV_MAT_DEPTH(seq->flags) : 0; } + +template inline int Seq<_Tp>::channels() const +{ return seq ? CV_MAT_CN(seq->flags) : 0; } + +template inline size_t Seq<_Tp>::elemSize() const +{ return seq ? seq->elem_size : 0; } + +template inline size_t Seq<_Tp>::index(const _Tp& elem) const +{ return cvSeqElemIdx(seq, &elem); } + +template inline void Seq<_Tp>::push_back(const _Tp& elem) +{ cvSeqPush(seq, &elem); } + +template inline void Seq<_Tp>::push_front(const _Tp& elem) +{ cvSeqPushFront(seq, &elem); } + +template inline void Seq<_Tp>::push_back(const _Tp* elem, size_t count) +{ cvSeqPushMulti(seq, elem, (int)count, 0); } + +template inline void Seq<_Tp>::push_front(const _Tp* elem, size_t count) +{ cvSeqPushMulti(seq, elem, (int)count, 1); } + +template inline _Tp& Seq<_Tp>::back() +{ return *(_Tp*)getSeqElem(seq, -1); } + +template inline const _Tp& Seq<_Tp>::back() const +{ return *(const _Tp*)getSeqElem(seq, -1); } + +template inline _Tp& Seq<_Tp>::front() +{ return *(_Tp*)getSeqElem(seq, 0); } + +template inline const _Tp& Seq<_Tp>::front() const +{ return *(const _Tp*)getSeqElem(seq, 0); } + +template inline bool Seq<_Tp>::empty() const +{ return !seq || seq->total == 0; } + +template inline void Seq<_Tp>::clear() +{ if(seq) clearSeq(seq); } + +template inline void Seq<_Tp>::pop_back() +{ seqPop(seq); } + +template inline void Seq<_Tp>::pop_front() +{ seqPopFront(seq); } + +template inline void Seq<_Tp>::pop_back(_Tp* elem, size_t count) +{ seqPopMulti(seq, elem, (int)count, 0); } + +template inline void Seq<_Tp>::pop_front(_Tp* elem, size_t count) +{ seqPopMulti(seq, elem, (int)count, 1); } + +template inline void Seq<_Tp>::insert(int idx, const _Tp& elem) +{ seqInsert(seq, idx, &elem); } + +template inline void Seq<_Tp>::insert(int idx, const _Tp* elems, size_t count) +{ + CvMat m = cvMat(1, count, DataType<_Tp>::type, elems); + seqInsertSlice(seq, idx, &m); +} + +template inline void Seq<_Tp>::remove(int idx) +{ seqRemove(seq, idx); } + +template inline void Seq<_Tp>::remove(const Range& r) +{ seqRemoveSlice(seq, r); } + +template inline void Seq<_Tp>::copyTo(vector<_Tp>& vec, const Range& range) const +{ + size_t len = !seq ? 0 : range == Range::all() ? seq->total : range.end - range.start; + vec.resize(len); + if( seq && len ) + cvCvtSeqToArray(seq, &vec[0], range); +} + +template inline Seq<_Tp>::operator vector<_Tp>() const +{ + vector<_Tp> vec; + copyTo(vec); + return vec; +} + +template inline SeqIterator<_Tp>::SeqIterator() +{ memset(this, 0, sizeof(*this)); } + +template inline SeqIterator<_Tp>::SeqIterator(const Seq<_Tp>& _seq, bool seekEnd) +{ + cvStartReadSeq(_seq.seq, this); + index = seekEnd ? 
_seq.seq->total : 0; +} + +template inline void SeqIterator<_Tp>::seek(size_t pos) +{ + cvSetSeqReaderPos(this, (int)pos, false); + index = pos; +} + +template inline size_t SeqIterator<_Tp>::tell() const +{ return index; } + +template inline _Tp& SeqIterator<_Tp>::operator *() +{ return *(_Tp*)ptr; } + +template inline const _Tp& SeqIterator<_Tp>::operator *() const +{ return *(const _Tp*)ptr; } + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator ++() +{ + CV_NEXT_SEQ_ELEM(sizeof(_Tp), *this); + if( ++index >= seq->total*2 ) + index = 0; + return *this; +} + +template inline SeqIterator<_Tp> SeqIterator<_Tp>::operator ++(int) const +{ + SeqIterator<_Tp> it = *this; + ++*this; + return it; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator --() +{ + CV_PREV_SEQ_ELEM(sizeof(_Tp), *this); + if( --index < 0 ) + index = seq->total*2-1; + return *this; +} + +template inline SeqIterator<_Tp> SeqIterator<_Tp>::operator --(int) const +{ + SeqIterator<_Tp> it = *this; + --*this; + return it; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator +=(int delta) +{ + cvSetSeqReaderPos(this, delta, 1); + index += delta; + int n = seq->total*2; + if( index < 0 ) + index += n; + if( index >= n ) + index -= n; + return *this; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator -=(int delta) +{ + return (*this += -delta); +} + +template inline ptrdiff_t operator - (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + ptrdiff_t delta = a.index - b.index, n = a.seq->total; + if( std::abs(static_cast(delta)) > n ) + delta += delta < 0 ? n : -n; + return delta; +} + +template inline bool operator == (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + return a.seq == b.seq && a.index == b.index; +} + +template inline bool operator != (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + return !(a == b); +} + + +template struct RTTIImpl +{ +public: + static int isInstance(const void* ptr) + { + static _ClsName dummy; + static void* dummyp = &dummy; + union + { + const void* p; + const void** pp; + } a, b; + a.p = dummyp; + b.p = ptr; + return *a.pp == *b.pp; + } + static void release(void** dbptr) + { + if(dbptr && *dbptr) + { + delete (_ClsName*)*dbptr; + *dbptr = 0; + } + } + static void* read(CvFileStorage* fs, CvFileNode* n) + { + FileNode fn(fs, n); + _ClsName* obj = new _ClsName; + if(obj->read(fn)) + return obj; + delete obj; + return 0; + } + + static void write(CvFileStorage* _fs, const char* name, const void* ptr, CvAttrList) + { + if(ptr && _fs) + { + FileStorage fs(_fs); + fs.fs.addref(); + ((const _ClsName*)ptr)->write(fs, string(name)); + } + } + + static void* clone(const void* ptr) + { + if(!ptr) + return 0; + return new _ClsName(*(const _ClsName*)ptr); + } +}; + + +class CV_EXPORTS Formatter +{ +public: + virtual ~Formatter() {} + virtual void write(std::ostream& out, const Mat& m, const int* params=0, int nparams=0) const = 0; + virtual void write(std::ostream& out, const void* data, int nelems, int type, + const int* params=0, int nparams=0) const = 0; + static const Formatter* get(const char* fmt=""); + static const Formatter* setDefault(const Formatter* fmt); +}; + + +struct CV_EXPORTS Formatted +{ + Formatted(const Mat& m, const Formatter* fmt, + const vector& params); + Formatted(const Mat& m, const Formatter* fmt, + const int* params=0); + Mat mtx; + const Formatter* fmt; + vector params; +}; + +static inline Formatted format(const Mat& mtx, const char* fmt, + const vector& params=vector()) +{ + return 
Formatted(mtx, Formatter::get(fmt), params); +} + +template static inline Formatted format(const vector >& vec, + const char* fmt, const vector& params=vector()) +{ + return Formatted(Mat(vec), Formatter::get(fmt), params); +} + +template static inline Formatted format(const vector >& vec, + const char* fmt, const vector& params=vector()) +{ + return Formatted(Mat(vec), Formatter::get(fmt), params); +} + +/** \brief prints Mat to the output stream in Matlab notation + * use like + @verbatim + Mat my_mat = Mat::eye(3,3,CV_32F); + std::cout << my_mat; + @endverbatim + */ +static inline std::ostream& operator << (std::ostream& out, const Mat& mtx) +{ + Formatter::get()->write(out, mtx); + return out; +} + +/** \brief prints Mat to the output stream allows in the specified notation (see format) + * use like + @verbatim + Mat my_mat = Mat::eye(3,3,CV_32F); + std::cout << my_mat; + @endverbatim + */ +static inline std::ostream& operator << (std::ostream& out, const Formatted& fmtd) +{ + fmtd.fmt->write(out, fmtd.mtx); + return out; +} + + +template static inline std::ostream& operator << (std::ostream& out, + const vector >& vec) +{ + Formatter::get()->write(out, Mat(vec)); + return out; +} + + +template static inline std::ostream& operator << (std::ostream& out, + const vector >& vec) +{ + Formatter::get()->write(out, Mat(vec)); + return out; +} + + +/** Writes a Matx to an output stream. + */ +template inline std::ostream& operator<<(std::ostream& out, const Matx<_Tp, m, n>& matx) +{ + out << cv::Mat(matx); + return out; +} + +/** Writes a point to an output stream in Matlab notation + */ +template inline std::ostream& operator<<(std::ostream& out, const Point_<_Tp>& p) +{ + out << "[" << p.x << ", " << p.y << "]"; + return out; +} + +/** Writes a point to an output stream in Matlab notation + */ +template inline std::ostream& operator<<(std::ostream& out, const Point3_<_Tp>& p) +{ + out << "[" << p.x << ", " << p.y << ", " << p.z << "]"; + return out; +} + +/** Writes a Vec to an output stream. Format example : [10, 20, 30] + */ +template inline std::ostream& operator<<(std::ostream& out, const Vec<_Tp, n>& vec) +{ + out << "["; + + if(Vec<_Tp, n>::depth < CV_32F) + { + for (int i = 0; i < n - 1; ++i) { + out << (int)vec[i] << ", "; + } + out << (int)vec[n-1] << "]"; + } + else + { + for (int i = 0; i < n - 1; ++i) { + out << vec[i] << ", "; + } + out << vec[n-1] << "]"; + } + + return out; +} + +/** Writes a Size_ to an output stream. Format example : [640 x 480] + */ +template inline std::ostream& operator<<(std::ostream& out, const Size_<_Tp>& size) +{ + out << "[" << size.width << " x " << size.height << "]"; + return out; +} + +/** Writes a Rect_ to an output stream. Format example : [640 x 480 from (10, 20)] + */ +template inline std::ostream& operator<<(std::ostream& out, const Rect_<_Tp>& rect) +{ + out << "[" << rect.width << " x " << rect.height << " from (" << rect.x << ", " << rect.y << ")]"; + return out; +} + + +template inline Ptr<_Tp> Algorithm::create(const string& name) +{ + return _create(name).ptr<_Tp>(); +} + +template +inline void Algorithm::set(const char* _name, const Ptr<_Tp>& value) +{ + Ptr algo_ptr = value. 
template ptr(); + if (algo_ptr.empty()) { + CV_Error( CV_StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set"); + } + info()->set(this, _name, ParamType::type, &algo_ptr); +} + +template +inline void Algorithm::set(const string& _name, const Ptr<_Tp>& value) +{ + this->set<_Tp>(_name.c_str(), value); +} + +template +inline void Algorithm::setAlgorithm(const char* _name, const Ptr<_Tp>& value) +{ + Ptr algo_ptr = value. template ptr(); + if (algo_ptr.empty()) { + CV_Error( CV_StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set"); + } + info()->set(this, _name, ParamType::type, &algo_ptr); +} + +template +inline void Algorithm::setAlgorithm(const string& _name, const Ptr<_Tp>& value) +{ + this->set<_Tp>(_name.c_str(), value); +} + +template inline typename ParamType<_Tp>::member_type Algorithm::get(const string& _name) const +{ + typename ParamType<_Tp>::member_type value; + info()->get(this, _name.c_str(), ParamType<_Tp>::type, &value); + return value; +} + +template inline typename ParamType<_Tp>::member_type Algorithm::get(const char* _name) const +{ + typename ParamType<_Tp>::member_type value; + info()->get(this, _name, ParamType<_Tp>::type, &value); + return value; +} + +template inline void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter, + Ptr<_Tp>& value, bool readOnly, Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&), + const string& help) +{ + //TODO: static assert: _Tp inherits from _Base + addParam_(algo, parameter, ParamType<_Base>::type, &value, readOnly, + (Algorithm::Getter)getter, (Algorithm::Setter)setter, help); +} + +template inline void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter, + Ptr<_Tp>& value, bool readOnly, Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&), + const string& help) +{ + //TODO: static assert: _Tp inherits from Algorithm + addParam_(algo, parameter, ParamType::type, &value, readOnly, + (Algorithm::Getter)getter, (Algorithm::Setter)setter, help); +} + +} + +#ifdef _MSC_VER +# pragma warning(pop) +#endif + +#endif // __cplusplus +#endif diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/types_c.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/types_c.h new file mode 100644 index 0000000..99ac0d2 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/types_c.h @@ -0,0 +1,1896 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_TYPES_H__ +#define __OPENCV_CORE_TYPES_H__ + +#if !defined _CRT_SECURE_NO_DEPRECATE && defined _MSC_VER +# if _MSC_VER > 1300 +# define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio 2005 warnings */ +# endif +#endif + + +#ifndef SKIP_INCLUDES + +#include +#include +#include +#include + +#if !defined _MSC_VER && !defined __BORLANDC__ +# include +#endif + +#if defined __ICL +# define CV_ICC __ICL +#elif defined __ICC +# define CV_ICC __ICC +#elif defined __ECL +# define CV_ICC __ECL +#elif defined __ECC +# define CV_ICC __ECC +#elif defined __INTEL_COMPILER +# define CV_ICC __INTEL_COMPILER +#endif + +#if defined CV_ICC && !defined CV_ENABLE_UNROLLED +# define CV_ENABLE_UNROLLED 0 +#else +# define CV_ENABLE_UNROLLED 1 +#endif + +#if (defined _M_X64 && defined _MSC_VER && _MSC_VER >= 1400) || (__GNUC__ >= 4 && defined __x86_64__) +# if defined WIN32 +# include +# endif +# if defined __SSE2__ || !defined __GNUC__ +# include +# endif +#endif + +#if defined __BORLANDC__ +# include +#else +# include +#endif + +#ifdef HAVE_IPL +# ifndef __IPL_H__ +# if defined WIN32 || defined _WIN32 +# include +# else +# include +# endif +# endif +#elif defined __IPL_H__ +# define HAVE_IPL +#endif + +#endif // SKIP_INCLUDES + +#if defined WIN32 || defined _WIN32 +# define CV_CDECL __cdecl +# define CV_STDCALL __stdcall +#else +# define CV_CDECL +# define CV_STDCALL +#endif + +#ifndef CV_EXTERN_C +# ifdef __cplusplus +# define CV_EXTERN_C extern "C" +# define CV_DEFAULT(val) = val +# else +# define CV_EXTERN_C +# define CV_DEFAULT(val) +# endif +#endif + +#ifndef CV_EXTERN_C_FUNCPTR +# ifdef __cplusplus +# define CV_EXTERN_C_FUNCPTR(x) extern "C" { typedef x; } +# else +# define CV_EXTERN_C_FUNCPTR(x) typedef x +# endif +#endif + +#ifndef CV_INLINE +# if defined __cplusplus +# define CV_INLINE inline +# elif defined _MSC_VER +# define CV_INLINE __inline +# else +# define CV_INLINE static +# endif +#endif /* CV_INLINE */ + +#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS +# define CV_EXPORTS __declspec(dllexport) +#else +# define CV_EXPORTS +#endif + +#ifndef CVAPI +# define CVAPI(rettype) CV_EXTERN_C CV_EXPORTS rettype CV_CDECL +#endif + +#if defined _MSC_VER || defined __BORLANDC__ + typedef __int64 int64; + typedef unsigned __int64 uint64; +# define CV_BIG_INT(n) 
n##I64 +# define CV_BIG_UINT(n) n##UI64 +#else + typedef int64_t int64; + typedef uint64_t uint64; +# define CV_BIG_INT(n) n##LL +# define CV_BIG_UINT(n) n##ULL +#endif + +#ifndef HAVE_IPL + typedef unsigned char uchar; + typedef unsigned short ushort; +#endif + +typedef signed char schar; + +/* special informative macros for wrapper generators */ +#define CV_CARRAY(counter) +#define CV_CUSTOM_CARRAY(args) +#define CV_EXPORTS_W CV_EXPORTS +#define CV_EXPORTS_W_SIMPLE CV_EXPORTS +#define CV_EXPORTS_AS(synonym) CV_EXPORTS +#define CV_EXPORTS_W_MAP CV_EXPORTS +#define CV_IN_OUT +#define CV_OUT +#define CV_PROP +#define CV_PROP_RW +#define CV_WRAP +#define CV_WRAP_AS(synonym) +#define CV_WRAP_DEFAULT(value) + +/* CvArr* is used to pass arbitrary + * array-like data structures + * into functions where the particular + * array type is recognized at runtime: + */ +typedef void CvArr; + +typedef union Cv32suf +{ + int i; + unsigned u; + float f; +} +Cv32suf; + +typedef union Cv64suf +{ + int64 i; + uint64 u; + double f; +} +Cv64suf; + +typedef int CVStatus; + +enum { + CV_StsOk= 0, /* everithing is ok */ + CV_StsBackTrace= -1, /* pseudo error for back trace */ + CV_StsError= -2, /* unknown /unspecified error */ + CV_StsInternal= -3, /* internal error (bad state) */ + CV_StsNoMem= -4, /* insufficient memory */ + CV_StsBadArg= -5, /* function arg/param is bad */ + CV_StsBadFunc= -6, /* unsupported function */ + CV_StsNoConv= -7, /* iter. didn't converge */ + CV_StsAutoTrace= -8, /* tracing */ + CV_HeaderIsNull= -9, /* image header is NULL */ + CV_BadImageSize= -10, /* image size is invalid */ + CV_BadOffset= -11, /* offset is invalid */ + CV_BadDataPtr= -12, /**/ + CV_BadStep= -13, /**/ + CV_BadModelOrChSeq= -14, /**/ + CV_BadNumChannels= -15, /**/ + CV_BadNumChannel1U= -16, /**/ + CV_BadDepth= -17, /**/ + CV_BadAlphaChannel= -18, /**/ + CV_BadOrder= -19, /**/ + CV_BadOrigin= -20, /**/ + CV_BadAlign= -21, /**/ + CV_BadCallBack= -22, /**/ + CV_BadTileSize= -23, /**/ + CV_BadCOI= -24, /**/ + CV_BadROISize= -25, /**/ + CV_MaskIsTiled= -26, /**/ + CV_StsNullPtr= -27, /* null pointer */ + CV_StsVecLengthErr= -28, /* incorrect vector length */ + CV_StsFilterStructContentErr= -29, /* incorr. filter structure content */ + CV_StsKernelStructContentErr= -30, /* incorr. 
transform kernel content */ + CV_StsFilterOffsetErr= -31, /* incorrect filter offset value */ + CV_StsBadSize= -201, /* the input/output structure size is incorrect */ + CV_StsDivByZero= -202, /* division by zero */ + CV_StsInplaceNotSupported= -203, /* in-place operation is not supported */ + CV_StsObjectNotFound= -204, /* request can't be completed */ + CV_StsUnmatchedFormats= -205, /* formats of input/output arrays differ */ + CV_StsBadFlag= -206, /* flag is wrong or not supported */ + CV_StsBadPoint= -207, /* bad CvPoint */ + CV_StsBadMask= -208, /* bad format of mask (neither 8uC1 nor 8sC1)*/ + CV_StsUnmatchedSizes= -209, /* sizes of input/output structures do not match */ + CV_StsUnsupportedFormat= -210, /* the data format/type is not supported by the function*/ + CV_StsOutOfRange= -211, /* some of parameters are out of range */ + CV_StsParseError= -212, /* invalid syntax/structure of the parsed file */ + CV_StsNotImplemented= -213, /* the requested function/feature is not implemented */ + CV_StsBadMemBlock= -214, /* an allocated block has been corrupted */ + CV_StsAssert= -215, /* assertion failed */ + CV_GpuNotSupported= -216, + CV_GpuApiCallError= -217, + CV_OpenGlNotSupported= -218, + CV_OpenGlApiCallError= -219, + CV_OpenCLDoubleNotSupported= -220, + CV_OpenCLInitError= -221, + CV_OpenCLNoAMDBlasFft= -222 +}; + +/****************************************************************************************\ +* Common macros and inline functions * +\****************************************************************************************/ + +#ifdef HAVE_TEGRA_OPTIMIZATION +# include "tegra_round.hpp" +#endif + +#define CV_PI 3.1415926535897932384626433832795 +#define CV_LOG2 0.69314718055994530941723212145818 + +#define CV_SWAP(a,b,t) ((t) = (a), (a) = (b), (b) = (t)) + +#ifndef MIN +# define MIN(a,b) ((a) > (b) ? (b) : (a)) +#endif + +#ifndef MAX +# define MAX(a,b) ((a) < (b) ? (b) : (a)) +#endif + +/* min & max without jumps */ +#define CV_IMIN(a, b) ((a) ^ (((a)^(b)) & (((a) < (b)) - 1))) + +#define CV_IMAX(a, b) ((a) ^ (((a)^(b)) & (((a) > (b)) - 1))) + +/* absolute value without jumps */ +#ifndef __cplusplus +# define CV_IABS(a) (((a) ^ ((a) < 0 ? -1 : 0)) - ((a) < 0 ? -1 : 0)) +#else +# define CV_IABS(a) abs(a) +#endif +#define CV_CMP(a,b) (((a) > (b)) - ((a) < (b))) +#define CV_SIGN(a) CV_CMP((a),0) + +CV_INLINE int cvRound( double value ) +{ +#if (defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__ && defined __SSE2__ && !defined __APPLE__) + __m128d t = _mm_set_sd( value ); + return _mm_cvtsd_si32(t); +#elif defined _MSC_VER && defined _M_IX86 + int t; + __asm + { + fld value; + fistp t; + } + return t; +#elif defined _MSC_VER && defined _M_ARM && defined HAVE_TEGRA_OPTIMIZATION + TEGRA_ROUND(value); +#elif defined CV_ICC || defined __GNUC__ +# ifdef HAVE_TEGRA_OPTIMIZATION + TEGRA_ROUND(value); +# else + return (int)lrint(value); +# endif +#else + double intpart, fractpart; + fractpart = modf(value, &intpart); + if ((fabs(fractpart) != 0.5) || ((((int)intpart) % 2) != 0)) + return (int)(value + (value >= 0 ? 
0.5 : -0.5)); + else + return (int)intpart; +#endif +} + +#if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP) +# include "emmintrin.h" +#endif + +CV_INLINE int cvFloor( double value ) +{ +#if defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__ && !defined __APPLE__) + __m128d t = _mm_set_sd( value ); + int i = _mm_cvtsd_si32(t); + return i - _mm_movemask_pd(_mm_cmplt_sd(t, _mm_cvtsi32_sd(t,i))); +#elif defined __GNUC__ + int i = (int)value; + return i - (i > value); +#else + int i = cvRound(value); + float diff = (float)(value - i); + return i - (diff < 0); +#endif +} + + +CV_INLINE int cvCeil( double value ) +{ +#if defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__&& !defined __APPLE__) + __m128d t = _mm_set_sd( value ); + int i = _mm_cvtsd_si32(t); + return i + _mm_movemask_pd(_mm_cmplt_sd(_mm_cvtsi32_sd(t,i), t)); +#elif defined __GNUC__ + int i = (int)value; + return i + (i < value); +#else + int i = cvRound(value); + float diff = (float)(i - value); + return i + (diff < 0); +#endif +} + +#define cvInvSqrt(value) ((float)(1./sqrt(value))) +#define cvSqrt(value) ((float)sqrt(value)) + +CV_INLINE int cvIsNaN( double value ) +{ + Cv64suf ieee754; + ieee754.f = value; + return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) + + ((unsigned)ieee754.u != 0) > 0x7ff00000; +} + + +CV_INLINE int cvIsInf( double value ) +{ + Cv64suf ieee754; + ieee754.f = value; + return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) == 0x7ff00000 && + (unsigned)ieee754.u == 0; +} + + +/*************** Random number generation *******************/ + +typedef uint64 CvRNG; + +#define CV_RNG_COEFF 4164903690U + +CV_INLINE CvRNG cvRNG( int64 seed CV_DEFAULT(-1)) +{ + CvRNG rng = seed ? (uint64)seed : (uint64)(int64)-1; + return rng; +} + +/* Return random 32-bit unsigned integer: */ +CV_INLINE unsigned cvRandInt( CvRNG* rng ) +{ + uint64 temp = *rng; + temp = (uint64)(unsigned)temp*CV_RNG_COEFF + (temp >> 32); + *rng = temp; + return (unsigned)temp; +} + +/* Returns random floating-point number between 0 and 1: */ +CV_INLINE double cvRandReal( CvRNG* rng ) +{ + return cvRandInt(rng)*2.3283064365386962890625e-10 /* 2^-32 */; +} + +/****************************************************************************************\ +* Image type (IplImage) * +\****************************************************************************************/ + +#ifndef HAVE_IPL + +/* + * The following definitions (until #endif) + * is an extract from IPL headers. + * Copyright (c) 1995 Intel Corporation. 
+ */ +#define IPL_DEPTH_SIGN 0x80000000 + +#define IPL_DEPTH_1U 1 +#define IPL_DEPTH_8U 8 +#define IPL_DEPTH_16U 16 +#define IPL_DEPTH_32F 32 + +#define IPL_DEPTH_8S (IPL_DEPTH_SIGN| 8) +#define IPL_DEPTH_16S (IPL_DEPTH_SIGN|16) +#define IPL_DEPTH_32S (IPL_DEPTH_SIGN|32) + +#define IPL_DATA_ORDER_PIXEL 0 +#define IPL_DATA_ORDER_PLANE 1 + +#define IPL_ORIGIN_TL 0 +#define IPL_ORIGIN_BL 1 + +#define IPL_ALIGN_4BYTES 4 +#define IPL_ALIGN_8BYTES 8 +#define IPL_ALIGN_16BYTES 16 +#define IPL_ALIGN_32BYTES 32 + +#define IPL_ALIGN_DWORD IPL_ALIGN_4BYTES +#define IPL_ALIGN_QWORD IPL_ALIGN_8BYTES + +#define IPL_BORDER_CONSTANT 0 +#define IPL_BORDER_REPLICATE 1 +#define IPL_BORDER_REFLECT 2 +#define IPL_BORDER_WRAP 3 + +typedef struct _IplImage +{ + int nSize; /* sizeof(IplImage) */ + int ID; /* version (=0)*/ + int nChannels; /* Most of OpenCV functions support 1,2,3 or 4 channels */ + int alphaChannel; /* Ignored by OpenCV */ + int depth; /* Pixel depth in bits: IPL_DEPTH_8U, IPL_DEPTH_8S, IPL_DEPTH_16S, + IPL_DEPTH_32S, IPL_DEPTH_32F and IPL_DEPTH_64F are supported. */ + char colorModel[4]; /* Ignored by OpenCV */ + char channelSeq[4]; /* ditto */ + int dataOrder; /* 0 - interleaved color channels, 1 - separate color channels. + cvCreateImage can only create interleaved images */ + int origin; /* 0 - top-left origin, + 1 - bottom-left origin (Windows bitmaps style). */ + int align; /* Alignment of image rows (4 or 8). + OpenCV ignores it and uses widthStep instead. */ + int width; /* Image width in pixels. */ + int height; /* Image height in pixels. */ + struct _IplROI *roi; /* Image ROI. If NULL, the whole image is selected. */ + struct _IplImage *maskROI; /* Must be NULL. */ + void *imageId; /* " " */ + struct _IplTileInfo *tileInfo; /* " " */ + int imageSize; /* Image data size in bytes + (==image->height*image->widthStep + in case of interleaved data)*/ + char *imageData; /* Pointer to aligned image data. */ + int widthStep; /* Size of aligned image row in bytes. */ + int BorderMode[4]; /* Ignored by OpenCV. */ + int BorderConst[4]; /* Ditto. 
*/ + char *imageDataOrigin; /* Pointer to very origin of image data + (not necessarily aligned) - + needed for correct deallocation */ +} +IplImage; + +typedef struct _IplTileInfo IplTileInfo; + +typedef struct _IplROI +{ + int coi; /* 0 - no COI (all channels are selected), 1 - 0th channel is selected ...*/ + int xOffset; + int yOffset; + int width; + int height; +} +IplROI; + +typedef struct _IplConvKernel +{ + int nCols; + int nRows; + int anchorX; + int anchorY; + int *values; + int nShiftR; +} +IplConvKernel; + +typedef struct _IplConvKernelFP +{ + int nCols; + int nRows; + int anchorX; + int anchorY; + float *values; +} +IplConvKernelFP; + +#define IPL_IMAGE_HEADER 1 +#define IPL_IMAGE_DATA 2 +#define IPL_IMAGE_ROI 4 + +#endif/*HAVE_IPL*/ + +/* extra border mode */ +#define IPL_BORDER_REFLECT_101 4 +#define IPL_BORDER_TRANSPARENT 5 + +#define IPL_IMAGE_MAGIC_VAL ((int)sizeof(IplImage)) +#define CV_TYPE_NAME_IMAGE "opencv-image" + +#define CV_IS_IMAGE_HDR(img) \ + ((img) != NULL && ((const IplImage*)(img))->nSize == sizeof(IplImage)) + +#define CV_IS_IMAGE(img) \ + (CV_IS_IMAGE_HDR(img) && ((IplImage*)img)->imageData != NULL) + +/* for storing double-precision + floating point data in IplImage's */ +#define IPL_DEPTH_64F 64 + +/* get reference to pixel at (col,row), + for multi-channel images (col) should be multiplied by number of channels */ +#define CV_IMAGE_ELEM( image, elemtype, row, col ) \ + (((elemtype*)((image)->imageData + (image)->widthStep*(row)))[(col)]) + +/****************************************************************************************\ +* Matrix type (CvMat) * +\****************************************************************************************/ + +#define CV_CN_MAX 512 +#define CV_CN_SHIFT 3 +#define CV_DEPTH_MAX (1 << CV_CN_SHIFT) + +#define CV_8U 0 +#define CV_8S 1 +#define CV_16U 2 +#define CV_16S 3 +#define CV_32S 4 +#define CV_32F 5 +#define CV_64F 6 +#define CV_USRTYPE1 7 + +#define CV_MAT_DEPTH_MASK (CV_DEPTH_MAX - 1) +#define CV_MAT_DEPTH(flags) ((flags) & CV_MAT_DEPTH_MASK) + +#define CV_MAKETYPE(depth,cn) (CV_MAT_DEPTH(depth) + (((cn)-1) << CV_CN_SHIFT)) +#define CV_MAKE_TYPE CV_MAKETYPE + +#define CV_8UC1 CV_MAKETYPE(CV_8U,1) +#define CV_8UC2 CV_MAKETYPE(CV_8U,2) +#define CV_8UC3 CV_MAKETYPE(CV_8U,3) +#define CV_8UC4 CV_MAKETYPE(CV_8U,4) +#define CV_8UC(n) CV_MAKETYPE(CV_8U,(n)) + +#define CV_8SC1 CV_MAKETYPE(CV_8S,1) +#define CV_8SC2 CV_MAKETYPE(CV_8S,2) +#define CV_8SC3 CV_MAKETYPE(CV_8S,3) +#define CV_8SC4 CV_MAKETYPE(CV_8S,4) +#define CV_8SC(n) CV_MAKETYPE(CV_8S,(n)) + +#define CV_16UC1 CV_MAKETYPE(CV_16U,1) +#define CV_16UC2 CV_MAKETYPE(CV_16U,2) +#define CV_16UC3 CV_MAKETYPE(CV_16U,3) +#define CV_16UC4 CV_MAKETYPE(CV_16U,4) +#define CV_16UC(n) CV_MAKETYPE(CV_16U,(n)) + +#define CV_16SC1 CV_MAKETYPE(CV_16S,1) +#define CV_16SC2 CV_MAKETYPE(CV_16S,2) +#define CV_16SC3 CV_MAKETYPE(CV_16S,3) +#define CV_16SC4 CV_MAKETYPE(CV_16S,4) +#define CV_16SC(n) CV_MAKETYPE(CV_16S,(n)) + +#define CV_32SC1 CV_MAKETYPE(CV_32S,1) +#define CV_32SC2 CV_MAKETYPE(CV_32S,2) +#define CV_32SC3 CV_MAKETYPE(CV_32S,3) +#define CV_32SC4 CV_MAKETYPE(CV_32S,4) +#define CV_32SC(n) CV_MAKETYPE(CV_32S,(n)) + +#define CV_32FC1 CV_MAKETYPE(CV_32F,1) +#define CV_32FC2 CV_MAKETYPE(CV_32F,2) +#define CV_32FC3 CV_MAKETYPE(CV_32F,3) +#define CV_32FC4 CV_MAKETYPE(CV_32F,4) +#define CV_32FC(n) CV_MAKETYPE(CV_32F,(n)) + +#define CV_64FC1 CV_MAKETYPE(CV_64F,1) +#define CV_64FC2 CV_MAKETYPE(CV_64F,2) +#define CV_64FC3 CV_MAKETYPE(CV_64F,3) +#define CV_64FC4 CV_MAKETYPE(CV_64F,4) 
+#define CV_64FC(n) CV_MAKETYPE(CV_64F,(n)) + +#define CV_AUTO_STEP 0x7fffffff +#define CV_WHOLE_ARR cvSlice( 0, 0x3fffffff ) + +#define CV_MAT_CN_MASK ((CV_CN_MAX - 1) << CV_CN_SHIFT) +#define CV_MAT_CN(flags) ((((flags) & CV_MAT_CN_MASK) >> CV_CN_SHIFT) + 1) +#define CV_MAT_TYPE_MASK (CV_DEPTH_MAX*CV_CN_MAX - 1) +#define CV_MAT_TYPE(flags) ((flags) & CV_MAT_TYPE_MASK) +#define CV_MAT_CONT_FLAG_SHIFT 14 +#define CV_MAT_CONT_FLAG (1 << CV_MAT_CONT_FLAG_SHIFT) +#define CV_IS_MAT_CONT(flags) ((flags) & CV_MAT_CONT_FLAG) +#define CV_IS_CONT_MAT CV_IS_MAT_CONT +#define CV_SUBMAT_FLAG_SHIFT 15 +#define CV_SUBMAT_FLAG (1 << CV_SUBMAT_FLAG_SHIFT) +#define CV_IS_SUBMAT(flags) ((flags) & CV_MAT_SUBMAT_FLAG) + +#define CV_MAGIC_MASK 0xFFFF0000 +#define CV_MAT_MAGIC_VAL 0x42420000 +#define CV_TYPE_NAME_MAT "opencv-matrix" + +typedef struct CvMat +{ + int type; + int step; + + /* for internal use only */ + int* refcount; + int hdr_refcount; + + union + { + uchar* ptr; + short* s; + int* i; + float* fl; + double* db; + } data; + +#ifdef __cplusplus + union + { + int rows; + int height; + }; + + union + { + int cols; + int width; + }; +#else + int rows; + int cols; +#endif + +} +CvMat; + + +#define CV_IS_MAT_HDR(mat) \ + ((mat) != NULL && \ + (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \ + ((const CvMat*)(mat))->cols > 0 && ((const CvMat*)(mat))->rows > 0) + +#define CV_IS_MAT_HDR_Z(mat) \ + ((mat) != NULL && \ + (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \ + ((const CvMat*)(mat))->cols >= 0 && ((const CvMat*)(mat))->rows >= 0) + +#define CV_IS_MAT(mat) \ + (CV_IS_MAT_HDR(mat) && ((const CvMat*)(mat))->data.ptr != NULL) + +#define CV_IS_MASK_ARR(mat) \ + (((mat)->type & (CV_MAT_TYPE_MASK & ~CV_8SC1)) == 0) + +#define CV_ARE_TYPES_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_TYPE_MASK) == 0) + +#define CV_ARE_CNS_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_CN_MASK) == 0) + +#define CV_ARE_DEPTHS_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_DEPTH_MASK) == 0) + +#define CV_ARE_SIZES_EQ(mat1, mat2) \ + ((mat1)->rows == (mat2)->rows && (mat1)->cols == (mat2)->cols) + +#define CV_IS_MAT_CONST(mat) \ + (((mat)->rows|(mat)->cols) == 1) + +/* Size of each channel item, + 0x124489 = 1000 0100 0100 0010 0010 0001 0001 ~ array of sizeof(arr_type_elem) */ +#define CV_ELEM_SIZE1(type) \ + ((((sizeof(size_t)<<28)|0x8442211) >> CV_MAT_DEPTH(type)*4) & 15) + +/* 0x3a50 = 11 10 10 01 01 00 00 ~ array of log2(sizeof(arr_type_elem)) */ +#define CV_ELEM_SIZE(type) \ + (CV_MAT_CN(type) << ((((sizeof(size_t)/4+1)*16384|0x3a50) >> CV_MAT_DEPTH(type)*2) & 3)) + +#define IPL2CV_DEPTH(depth) \ + ((((CV_8U)+(CV_16U<<4)+(CV_32F<<8)+(CV_64F<<16)+(CV_8S<<20)+ \ + (CV_16S<<24)+(CV_32S<<28)) >> ((((depth) & 0xF0) >> 2) + \ + (((depth) & IPL_DEPTH_SIGN) ? 20 : 0))) & 15) + +/* Inline constructor. No data is allocated internally!!! 
+ * (Use together with cvCreateData, or use cvCreateMat instead to + * get a matrix with allocated data): + */ +CV_INLINE CvMat cvMat( int rows, int cols, int type, void* data CV_DEFAULT(NULL)) +{ + CvMat m; + + assert( (unsigned)CV_MAT_DEPTH(type) <= CV_64F ); + type = CV_MAT_TYPE(type); + m.type = CV_MAT_MAGIC_VAL | CV_MAT_CONT_FLAG | type; + m.cols = cols; + m.rows = rows; + m.step = m.cols*CV_ELEM_SIZE(type); + m.data.ptr = (uchar*)data; + m.refcount = NULL; + m.hdr_refcount = 0; + + return m; +} + + +#define CV_MAT_ELEM_PTR_FAST( mat, row, col, pix_size ) \ + (assert( (unsigned)(row) < (unsigned)(mat).rows && \ + (unsigned)(col) < (unsigned)(mat).cols ), \ + (mat).data.ptr + (size_t)(mat).step*(row) + (pix_size)*(col)) + +#define CV_MAT_ELEM_PTR( mat, row, col ) \ + CV_MAT_ELEM_PTR_FAST( mat, row, col, CV_ELEM_SIZE((mat).type) ) + +#define CV_MAT_ELEM( mat, elemtype, row, col ) \ + (*(elemtype*)CV_MAT_ELEM_PTR_FAST( mat, row, col, sizeof(elemtype))) + + +CV_INLINE double cvmGet( const CvMat* mat, int row, int col ) +{ + int type; + + type = CV_MAT_TYPE(mat->type); + assert( (unsigned)row < (unsigned)mat->rows && + (unsigned)col < (unsigned)mat->cols ); + + if( type == CV_32FC1 ) + return ((float*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col]; + else + { + assert( type == CV_64FC1 ); + return ((double*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col]; + } +} + + +CV_INLINE void cvmSet( CvMat* mat, int row, int col, double value ) +{ + int type; + type = CV_MAT_TYPE(mat->type); + assert( (unsigned)row < (unsigned)mat->rows && + (unsigned)col < (unsigned)mat->cols ); + + if( type == CV_32FC1 ) + ((float*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col] = (float)value; + else + { + assert( type == CV_64FC1 ); + ((double*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col] = (double)value; + } +} + + +CV_INLINE int cvIplDepth( int type ) +{ + int depth = CV_MAT_DEPTH(type); + return CV_ELEM_SIZE1(depth)*8 | (depth == CV_8S || depth == CV_16S || + depth == CV_32S ? 
IPL_DEPTH_SIGN : 0); +} + + +/****************************************************************************************\ +* Multi-dimensional dense array (CvMatND) * +\****************************************************************************************/ + +#define CV_MATND_MAGIC_VAL 0x42430000 +#define CV_TYPE_NAME_MATND "opencv-nd-matrix" + +#define CV_MAX_DIM 32 +#define CV_MAX_DIM_HEAP 1024 + +typedef struct CvMatND +{ + int type; + int dims; + + int* refcount; + int hdr_refcount; + + union + { + uchar* ptr; + float* fl; + double* db; + int* i; + short* s; + } data; + + struct + { + int size; + int step; + } + dim[CV_MAX_DIM]; +} +CvMatND; + +#define CV_IS_MATND_HDR(mat) \ + ((mat) != NULL && (((const CvMatND*)(mat))->type & CV_MAGIC_MASK) == CV_MATND_MAGIC_VAL) + +#define CV_IS_MATND(mat) \ + (CV_IS_MATND_HDR(mat) && ((const CvMatND*)(mat))->data.ptr != NULL) + + +/****************************************************************************************\ +* Multi-dimensional sparse array (CvSparseMat) * +\****************************************************************************************/ + +#define CV_SPARSE_MAT_MAGIC_VAL 0x42440000 +#define CV_TYPE_NAME_SPARSE_MAT "opencv-sparse-matrix" + +struct CvSet; + +typedef struct CvSparseMat +{ + int type; + int dims; + int* refcount; + int hdr_refcount; + + struct CvSet* heap; + void** hashtable; + int hashsize; + int valoffset; + int idxoffset; + int size[CV_MAX_DIM]; +} +CvSparseMat; + +#define CV_IS_SPARSE_MAT_HDR(mat) \ + ((mat) != NULL && \ + (((const CvSparseMat*)(mat))->type & CV_MAGIC_MASK) == CV_SPARSE_MAT_MAGIC_VAL) + +#define CV_IS_SPARSE_MAT(mat) \ + CV_IS_SPARSE_MAT_HDR(mat) + +/**************** iteration through a sparse array *****************/ + +typedef struct CvSparseNode +{ + unsigned hashval; + struct CvSparseNode* next; +} +CvSparseNode; + +typedef struct CvSparseMatIterator +{ + CvSparseMat* mat; + CvSparseNode* node; + int curidx; +} +CvSparseMatIterator; + +#define CV_NODE_VAL(mat,node) ((void*)((uchar*)(node) + (mat)->valoffset)) +#define CV_NODE_IDX(mat,node) ((int*)((uchar*)(node) + (mat)->idxoffset)) + +/****************************************************************************************\ +* Histogram * +\****************************************************************************************/ + +typedef int CvHistType; + +#define CV_HIST_MAGIC_VAL 0x42450000 +#define CV_HIST_UNIFORM_FLAG (1 << 10) + +/* indicates whether bin ranges are set already or not */ +#define CV_HIST_RANGES_FLAG (1 << 11) + +#define CV_HIST_ARRAY 0 +#define CV_HIST_SPARSE 1 +#define CV_HIST_TREE CV_HIST_SPARSE + +/* should be used as a parameter only, + it turns to CV_HIST_UNIFORM_FLAG of hist->type */ +#define CV_HIST_UNIFORM 1 + +typedef struct CvHistogram +{ + int type; + CvArr* bins; + float thresh[CV_MAX_DIM][2]; /* For uniform histograms. */ + float** thresh2; /* For non-uniform histograms. */ + CvMatND mat; /* Embedded matrix header for array histograms. 
*/ +} +CvHistogram; + +#define CV_IS_HIST( hist ) \ + ((hist) != NULL && \ + (((CvHistogram*)(hist))->type & CV_MAGIC_MASK) == CV_HIST_MAGIC_VAL && \ + (hist)->bins != NULL) + +#define CV_IS_UNIFORM_HIST( hist ) \ + (((hist)->type & CV_HIST_UNIFORM_FLAG) != 0) + +#define CV_IS_SPARSE_HIST( hist ) \ + CV_IS_SPARSE_MAT((hist)->bins) + +#define CV_HIST_HAS_RANGES( hist ) \ + (((hist)->type & CV_HIST_RANGES_FLAG) != 0) + +/****************************************************************************************\ +* Other supplementary data type definitions * +\****************************************************************************************/ + +/*************************************** CvRect *****************************************/ + +typedef struct CvRect +{ + int x; + int y; + int width; + int height; +} +CvRect; + +CV_INLINE CvRect cvRect( int x, int y, int width, int height ) +{ + CvRect r; + + r.x = x; + r.y = y; + r.width = width; + r.height = height; + + return r; +} + + +CV_INLINE IplROI cvRectToROI( CvRect rect, int coi ) +{ + IplROI roi; + roi.xOffset = rect.x; + roi.yOffset = rect.y; + roi.width = rect.width; + roi.height = rect.height; + roi.coi = coi; + + return roi; +} + + +CV_INLINE CvRect cvROIToRect( IplROI roi ) +{ + return cvRect( roi.xOffset, roi.yOffset, roi.width, roi.height ); +} + +/*********************************** CvTermCriteria *************************************/ + +#define CV_TERMCRIT_ITER 1 +#define CV_TERMCRIT_NUMBER CV_TERMCRIT_ITER +#define CV_TERMCRIT_EPS 2 + +typedef struct CvTermCriteria +{ + int type; /* may be combination of + CV_TERMCRIT_ITER + CV_TERMCRIT_EPS */ + int max_iter; + double epsilon; +} +CvTermCriteria; + +CV_INLINE CvTermCriteria cvTermCriteria( int type, int max_iter, double epsilon ) +{ + CvTermCriteria t; + + t.type = type; + t.max_iter = max_iter; + t.epsilon = (float)epsilon; + + return t; +} + + +/******************************* CvPoint and variants ***********************************/ + +typedef struct CvPoint +{ + int x; + int y; +} +CvPoint; + + +CV_INLINE CvPoint cvPoint( int x, int y ) +{ + CvPoint p; + + p.x = x; + p.y = y; + + return p; +} + + +typedef struct CvPoint2D32f +{ + float x; + float y; +} +CvPoint2D32f; + + +CV_INLINE CvPoint2D32f cvPoint2D32f( double x, double y ) +{ + CvPoint2D32f p; + + p.x = (float)x; + p.y = (float)y; + + return p; +} + + +CV_INLINE CvPoint2D32f cvPointTo32f( CvPoint point ) +{ + return cvPoint2D32f( (float)point.x, (float)point.y ); +} + + +CV_INLINE CvPoint cvPointFrom32f( CvPoint2D32f point ) +{ + CvPoint ipt; + ipt.x = cvRound(point.x); + ipt.y = cvRound(point.y); + + return ipt; +} + + +typedef struct CvPoint3D32f +{ + float x; + float y; + float z; +} +CvPoint3D32f; + + +CV_INLINE CvPoint3D32f cvPoint3D32f( double x, double y, double z ) +{ + CvPoint3D32f p; + + p.x = (float)x; + p.y = (float)y; + p.z = (float)z; + + return p; +} + + +typedef struct CvPoint2D64f +{ + double x; + double y; +} +CvPoint2D64f; + + +CV_INLINE CvPoint2D64f cvPoint2D64f( double x, double y ) +{ + CvPoint2D64f p; + + p.x = x; + p.y = y; + + return p; +} + + +typedef struct CvPoint3D64f +{ + double x; + double y; + double z; +} +CvPoint3D64f; + + +CV_INLINE CvPoint3D64f cvPoint3D64f( double x, double y, double z ) +{ + CvPoint3D64f p; + + p.x = x; + p.y = y; + p.z = z; + + return p; +} + + +/******************************** CvSize's & CvBox **************************************/ + +typedef struct CvSize +{ + int width; + int height; +} +CvSize; + +CV_INLINE CvSize cvSize( int width, int height ) +{ + 
CvSize s; + + s.width = width; + s.height = height; + + return s; +} + +typedef struct CvSize2D32f +{ + float width; + float height; +} +CvSize2D32f; + + +CV_INLINE CvSize2D32f cvSize2D32f( double width, double height ) +{ + CvSize2D32f s; + + s.width = (float)width; + s.height = (float)height; + + return s; +} + +typedef struct CvBox2D +{ + CvPoint2D32f center; /* Center of the box. */ + CvSize2D32f size; /* Box width and length. */ + float angle; /* Angle between the horizontal axis */ + /* and the first side (i.e. length) in degrees */ +} +CvBox2D; + + +/* Line iterator state: */ +typedef struct CvLineIterator +{ + /* Pointer to the current point: */ + uchar* ptr; + + /* Bresenham algorithm state: */ + int err; + int plus_delta; + int minus_delta; + int plus_step; + int minus_step; +} +CvLineIterator; + + + +/************************************* CvSlice ******************************************/ + +typedef struct CvSlice +{ + int start_index, end_index; +} +CvSlice; + +CV_INLINE CvSlice cvSlice( int start, int end ) +{ + CvSlice slice; + slice.start_index = start; + slice.end_index = end; + + return slice; +} + +#define CV_WHOLE_SEQ_END_INDEX 0x3fffffff +#define CV_WHOLE_SEQ cvSlice(0, CV_WHOLE_SEQ_END_INDEX) + + +/************************************* CvScalar *****************************************/ + +typedef struct CvScalar +{ + double val[4]; +} +CvScalar; + +CV_INLINE CvScalar cvScalar( double val0, double val1 CV_DEFAULT(0), + double val2 CV_DEFAULT(0), double val3 CV_DEFAULT(0)) +{ + CvScalar scalar; + scalar.val[0] = val0; scalar.val[1] = val1; + scalar.val[2] = val2; scalar.val[3] = val3; + return scalar; +} + + +CV_INLINE CvScalar cvRealScalar( double val0 ) +{ + CvScalar scalar; + scalar.val[0] = val0; + scalar.val[1] = scalar.val[2] = scalar.val[3] = 0; + return scalar; +} + +CV_INLINE CvScalar cvScalarAll( double val0123 ) +{ + CvScalar scalar; + scalar.val[0] = val0123; + scalar.val[1] = val0123; + scalar.val[2] = val0123; + scalar.val[3] = val0123; + return scalar; +} + +/****************************************************************************************\ +* Dynamic Data structures * +\****************************************************************************************/ + +/******************************** Memory storage ****************************************/ + +typedef struct CvMemBlock +{ + struct CvMemBlock* prev; + struct CvMemBlock* next; +} +CvMemBlock; + +#define CV_STORAGE_MAGIC_VAL 0x42890000 + +typedef struct CvMemStorage +{ + int signature; + CvMemBlock* bottom; /* First allocated block. */ + CvMemBlock* top; /* Current memory block - top of the stack. */ + struct CvMemStorage* parent; /* We get new blocks from parent as needed. */ + int block_size; /* Block size. */ + int free_space; /* Remaining free space in current block. */ +} +CvMemStorage; + +#define CV_IS_STORAGE(storage) \ + ((storage) != NULL && \ + (((CvMemStorage*)(storage))->signature & CV_MAGIC_MASK) == CV_STORAGE_MAGIC_VAL) + + +typedef struct CvMemStoragePos +{ + CvMemBlock* top; + int free_space; +} +CvMemStoragePos; + + +/*********************************** Sequence *******************************************/ + +typedef struct CvSeqBlock +{ + struct CvSeqBlock* prev; /* Previous sequence block. */ + struct CvSeqBlock* next; /* Next sequence block. */ + int start_index; /* Index of the first element in the block + */ + /* sequence->first->start_index. */ + int count; /* Number of elements in the block. */ + schar* data; /* Pointer to the first element of the block. 
*/ +} +CvSeqBlock; + + +#define CV_TREE_NODE_FIELDS(node_type) \ + int flags; /* Miscellaneous flags. */ \ + int header_size; /* Size of sequence header. */ \ + struct node_type* h_prev; /* Previous sequence. */ \ + struct node_type* h_next; /* Next sequence. */ \ + struct node_type* v_prev; /* 2nd previous sequence. */ \ + struct node_type* v_next /* 2nd next sequence. */ + +/* + Read/Write sequence. + Elements can be dynamically inserted to or deleted from the sequence. +*/ +#define CV_SEQUENCE_FIELDS() \ + CV_TREE_NODE_FIELDS(CvSeq); \ + int total; /* Total number of elements. */ \ + int elem_size; /* Size of sequence element in bytes. */ \ + schar* block_max; /* Maximal bound of the last block. */ \ + schar* ptr; /* Current write pointer. */ \ + int delta_elems; /* Grow seq this many at a time. */ \ + CvMemStorage* storage; /* Where the seq is stored. */ \ + CvSeqBlock* free_blocks; /* Free blocks list. */ \ + CvSeqBlock* first; /* Pointer to the first sequence block. */ + +typedef struct CvSeq +{ + CV_SEQUENCE_FIELDS() +} +CvSeq; + +#define CV_TYPE_NAME_SEQ "opencv-sequence" +#define CV_TYPE_NAME_SEQ_TREE "opencv-sequence-tree" + +/*************************************** Set ********************************************/ +/* + Set. + Order is not preserved. There can be gaps between sequence elements. + After the element has been inserted it stays in the same place all the time. + The MSB(most-significant or sign bit) of the first field (flags) is 0 iff the element exists. +*/ +#define CV_SET_ELEM_FIELDS(elem_type) \ + int flags; \ + struct elem_type* next_free; + +typedef struct CvSetElem +{ + CV_SET_ELEM_FIELDS(CvSetElem) +} +CvSetElem; + +#define CV_SET_FIELDS() \ + CV_SEQUENCE_FIELDS() \ + CvSetElem* free_elems; \ + int active_count; + +typedef struct CvSet +{ + CV_SET_FIELDS() +} +CvSet; + + +#define CV_SET_ELEM_IDX_MASK ((1 << 26) - 1) +#define CV_SET_ELEM_FREE_FLAG (1 << (sizeof(int)*8-1)) + +/* Checks whether the element pointed by ptr belongs to a set or not */ +#define CV_IS_SET_ELEM( ptr ) (((CvSetElem*)(ptr))->flags >= 0) + +/************************************* Graph ********************************************/ + +/* + We represent a graph as a set of vertices. + Vertices contain their adjacency lists (more exactly, pointers to first incoming or + outcoming edge (or 0 if isolated vertex)). Edges are stored in another set. + There is a singly-linked list of incoming/outcoming edges for each vertex. + + Each edge consists of + + o Two pointers to the starting and ending vertices + (vtx[0] and vtx[1] respectively). + + A graph may be oriented or not. In the latter case, edges between + vertex i to vertex j are not distinguished during search operations. + + o Two pointers to next edges for the starting and ending vertices, where + next[0] points to the next edge in the vtx[0] adjacency list and + next[1] points to the next edge in the vtx[1] adjacency list. 
+*/ +#define CV_GRAPH_EDGE_FIELDS() \ + int flags; \ + float weight; \ + struct CvGraphEdge* next[2]; \ + struct CvGraphVtx* vtx[2]; + + +#define CV_GRAPH_VERTEX_FIELDS() \ + int flags; \ + struct CvGraphEdge* first; + + +typedef struct CvGraphEdge +{ + CV_GRAPH_EDGE_FIELDS() +} +CvGraphEdge; + +typedef struct CvGraphVtx +{ + CV_GRAPH_VERTEX_FIELDS() +} +CvGraphVtx; + +typedef struct CvGraphVtx2D +{ + CV_GRAPH_VERTEX_FIELDS() + CvPoint2D32f* ptr; +} +CvGraphVtx2D; + +/* + Graph is "derived" from the set (this is set a of vertices) + and includes another set (edges) +*/ +#define CV_GRAPH_FIELDS() \ + CV_SET_FIELDS() \ + CvSet* edges; + +typedef struct CvGraph +{ + CV_GRAPH_FIELDS() +} +CvGraph; + +#define CV_TYPE_NAME_GRAPH "opencv-graph" + +/*********************************** Chain/Countour *************************************/ + +typedef struct CvChain +{ + CV_SEQUENCE_FIELDS() + CvPoint origin; +} +CvChain; + +#define CV_CONTOUR_FIELDS() \ + CV_SEQUENCE_FIELDS() \ + CvRect rect; \ + int color; \ + int reserved[3]; + +typedef struct CvContour +{ + CV_CONTOUR_FIELDS() +} +CvContour; + +typedef CvContour CvPoint2DSeq; + +/****************************************************************************************\ +* Sequence types * +\****************************************************************************************/ + +#define CV_SEQ_MAGIC_VAL 0x42990000 + +#define CV_IS_SEQ(seq) \ + ((seq) != NULL && (((CvSeq*)(seq))->flags & CV_MAGIC_MASK) == CV_SEQ_MAGIC_VAL) + +#define CV_SET_MAGIC_VAL 0x42980000 +#define CV_IS_SET(set) \ + ((set) != NULL && (((CvSeq*)(set))->flags & CV_MAGIC_MASK) == CV_SET_MAGIC_VAL) + +#define CV_SEQ_ELTYPE_BITS 12 +#define CV_SEQ_ELTYPE_MASK ((1 << CV_SEQ_ELTYPE_BITS) - 1) + +#define CV_SEQ_ELTYPE_POINT CV_32SC2 /* (x,y) */ +#define CV_SEQ_ELTYPE_CODE CV_8UC1 /* freeman code: 0..7 */ +#define CV_SEQ_ELTYPE_GENERIC 0 +#define CV_SEQ_ELTYPE_PTR CV_USRTYPE1 +#define CV_SEQ_ELTYPE_PPOINT CV_SEQ_ELTYPE_PTR /* &(x,y) */ +#define CV_SEQ_ELTYPE_INDEX CV_32SC1 /* #(x,y) */ +#define CV_SEQ_ELTYPE_GRAPH_EDGE 0 /* &next_o, &next_d, &vtx_o, &vtx_d */ +#define CV_SEQ_ELTYPE_GRAPH_VERTEX 0 /* first_edge, &(x,y) */ +#define CV_SEQ_ELTYPE_TRIAN_ATR 0 /* vertex of the binary tree */ +#define CV_SEQ_ELTYPE_CONNECTED_COMP 0 /* connected component */ +#define CV_SEQ_ELTYPE_POINT3D CV_32FC3 /* (x,y,z) */ + +#define CV_SEQ_KIND_BITS 2 +#define CV_SEQ_KIND_MASK (((1 << CV_SEQ_KIND_BITS) - 1)<flags & CV_SEQ_ELTYPE_MASK) +#define CV_SEQ_KIND( seq ) ((seq)->flags & CV_SEQ_KIND_MASK ) + +/* flag checking */ +#define CV_IS_SEQ_INDEX( seq ) ((CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_INDEX) && \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_GENERIC)) + +#define CV_IS_SEQ_CURVE( seq ) (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE) +#define CV_IS_SEQ_CLOSED( seq ) (((seq)->flags & CV_SEQ_FLAG_CLOSED) != 0) +#define CV_IS_SEQ_CONVEX( seq ) 0 +#define CV_IS_SEQ_HOLE( seq ) (((seq)->flags & CV_SEQ_FLAG_HOLE) != 0) +#define CV_IS_SEQ_SIMPLE( seq ) 1 + +/* type checking macros */ +#define CV_IS_SEQ_POINT_SET( seq ) \ + ((CV_SEQ_ELTYPE(seq) == CV_32SC2 || CV_SEQ_ELTYPE(seq) == CV_32FC2)) + +#define CV_IS_SEQ_POINT_SUBSET( seq ) \ + (CV_IS_SEQ_INDEX( seq ) || CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_PPOINT) + +#define CV_IS_SEQ_POLYLINE( seq ) \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && CV_IS_SEQ_POINT_SET(seq)) + +#define CV_IS_SEQ_POLYGON( seq ) \ + (CV_IS_SEQ_POLYLINE(seq) && CV_IS_SEQ_CLOSED(seq)) + +#define CV_IS_SEQ_CHAIN( seq ) \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && (seq)->elem_size == 1) + +#define 
CV_IS_SEQ_CONTOUR( seq ) \ + (CV_IS_SEQ_CLOSED(seq) && (CV_IS_SEQ_POLYLINE(seq) || CV_IS_SEQ_CHAIN(seq))) + +#define CV_IS_SEQ_CHAIN_CONTOUR( seq ) \ + (CV_IS_SEQ_CHAIN( seq ) && CV_IS_SEQ_CLOSED( seq )) + +#define CV_IS_SEQ_POLYGON_TREE( seq ) \ + (CV_SEQ_ELTYPE (seq) == CV_SEQ_ELTYPE_TRIAN_ATR && \ + CV_SEQ_KIND( seq ) == CV_SEQ_KIND_BIN_TREE ) + +#define CV_IS_GRAPH( seq ) \ + (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_GRAPH) + +#define CV_IS_GRAPH_ORIENTED( seq ) \ + (((seq)->flags & CV_GRAPH_FLAG_ORIENTED) != 0) + +#define CV_IS_SUBDIV2D( seq ) \ + (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_SUBDIV2D) + +/****************************************************************************************/ +/* Sequence writer & reader */ +/****************************************************************************************/ + +#define CV_SEQ_WRITER_FIELDS() \ + int header_size; \ + CvSeq* seq; /* the sequence written */ \ + CvSeqBlock* block; /* current block */ \ + schar* ptr; /* pointer to free space */ \ + schar* block_min; /* pointer to the beginning of block*/\ + schar* block_max; /* pointer to the end of block */ + +typedef struct CvSeqWriter +{ + CV_SEQ_WRITER_FIELDS() +} +CvSeqWriter; + + +#define CV_SEQ_READER_FIELDS() \ + int header_size; \ + CvSeq* seq; /* sequence, beign read */ \ + CvSeqBlock* block; /* current block */ \ + schar* ptr; /* pointer to element be read next */ \ + schar* block_min; /* pointer to the beginning of block */\ + schar* block_max; /* pointer to the end of block */ \ + int delta_index;/* = seq->first->start_index */ \ + schar* prev_elem; /* pointer to previous element */ + + +typedef struct CvSeqReader +{ + CV_SEQ_READER_FIELDS() +} +CvSeqReader; + +/****************************************************************************************/ +/* Operations on sequences */ +/****************************************************************************************/ + +#define CV_SEQ_ELEM( seq, elem_type, index ) \ +/* assert gives some guarantee that parameter is valid */ \ +( assert(sizeof((seq)->first[0]) == sizeof(CvSeqBlock) && \ + (seq)->elem_size == sizeof(elem_type)), \ + (elem_type*)((seq)->first && (unsigned)index < \ + (unsigned)((seq)->first->count) ? 
\ + (seq)->first->data + (index) * sizeof(elem_type) : \ + cvGetSeqElem( (CvSeq*)(seq), (index) ))) +#define CV_GET_SEQ_ELEM( elem_type, seq, index ) CV_SEQ_ELEM( (seq), elem_type, (index) ) + +/* Add element to sequence: */ +#define CV_WRITE_SEQ_ELEM_VAR( elem_ptr, writer ) \ +{ \ + if( (writer).ptr >= (writer).block_max ) \ + { \ + cvCreateSeqBlock( &writer); \ + } \ + memcpy((writer).ptr, elem_ptr, (writer).seq->elem_size);\ + (writer).ptr += (writer).seq->elem_size; \ +} + +#define CV_WRITE_SEQ_ELEM( elem, writer ) \ +{ \ + assert( (writer).seq->elem_size == sizeof(elem)); \ + if( (writer).ptr >= (writer).block_max ) \ + { \ + cvCreateSeqBlock( &writer); \ + } \ + assert( (writer).ptr <= (writer).block_max - sizeof(elem));\ + memcpy((writer).ptr, &(elem), sizeof(elem)); \ + (writer).ptr += sizeof(elem); \ +} + + +/* Move reader position forward: */ +#define CV_NEXT_SEQ_ELEM( elem_size, reader ) \ +{ \ + if( ((reader).ptr += (elem_size)) >= (reader).block_max ) \ + { \ + cvChangeSeqBlock( &(reader), 1 ); \ + } \ +} + + +/* Move reader position backward: */ +#define CV_PREV_SEQ_ELEM( elem_size, reader ) \ +{ \ + if( ((reader).ptr -= (elem_size)) < (reader).block_min ) \ + { \ + cvChangeSeqBlock( &(reader), -1 ); \ + } \ +} + +/* Read element and move read position forward: */ +#define CV_READ_SEQ_ELEM( elem, reader ) \ +{ \ + assert( (reader).seq->elem_size == sizeof(elem)); \ + memcpy( &(elem), (reader).ptr, sizeof((elem))); \ + CV_NEXT_SEQ_ELEM( sizeof(elem), reader ) \ +} + +/* Read element and move read position backward: */ +#define CV_REV_READ_SEQ_ELEM( elem, reader ) \ +{ \ + assert( (reader).seq->elem_size == sizeof(elem)); \ + memcpy(&(elem), (reader).ptr, sizeof((elem))); \ + CV_PREV_SEQ_ELEM( sizeof(elem), reader ) \ +} + + +#define CV_READ_CHAIN_POINT( _pt, reader ) \ +{ \ + (_pt) = (reader).pt; \ + if( (reader).ptr ) \ + { \ + CV_READ_SEQ_ELEM( (reader).code, (reader)); \ + assert( ((reader).code & ~7) == 0 ); \ + (reader).pt.x += (reader).deltas[(int)(reader).code][0]; \ + (reader).pt.y += (reader).deltas[(int)(reader).code][1]; \ + } \ +} + +#define CV_CURRENT_POINT( reader ) (*((CvPoint*)((reader).ptr))) +#define CV_PREV_POINT( reader ) (*((CvPoint*)((reader).prev_elem))) + +#define CV_READ_EDGE( pt1, pt2, reader ) \ +{ \ + assert( sizeof(pt1) == sizeof(CvPoint) && \ + sizeof(pt2) == sizeof(CvPoint) && \ + reader.seq->elem_size == sizeof(CvPoint)); \ + (pt1) = CV_PREV_POINT( reader ); \ + (pt2) = CV_CURRENT_POINT( reader ); \ + (reader).prev_elem = (reader).ptr; \ + CV_NEXT_SEQ_ELEM( sizeof(CvPoint), (reader)); \ +} + +/************ Graph macros ************/ + +/* Return next graph edge for given vertex: */ +#define CV_NEXT_GRAPH_EDGE( edge, vertex ) \ + (assert((edge)->vtx[0] == (vertex) || (edge)->vtx[1] == (vertex)), \ + (edge)->next[(edge)->vtx[1] == (vertex)]) + + + +/****************************************************************************************\ +* Data structures for persistence (a.k.a serialization) functionality * +\****************************************************************************************/ + +/* "black box" file storage */ +typedef struct CvFileStorage CvFileStorage; + +/* Storage flags: */ +#define CV_STORAGE_READ 0 +#define CV_STORAGE_WRITE 1 +#define CV_STORAGE_WRITE_TEXT CV_STORAGE_WRITE +#define CV_STORAGE_WRITE_BINARY CV_STORAGE_WRITE +#define CV_STORAGE_APPEND 2 +#define CV_STORAGE_MEMORY 4 +#define CV_STORAGE_FORMAT_MASK (7<<3) +#define CV_STORAGE_FORMAT_AUTO 0 +#define CV_STORAGE_FORMAT_XML 8 +#define CV_STORAGE_FORMAT_YAML 16 + 
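The sequence writer/reader macros above pair with the dynamic-structure functions declared in core_c.h; the following is a minimal sketch of writing and then re-reading a sequence of ints, assuming cvCreateMemStorage, cvStartWriteSeq, cvEndWriteSeq, cvStartReadSeq and cvReleaseMemStorage from that header are available:

    CvMemStorage* storage = cvCreateMemStorage(0);

    CvSeqWriter writer;
    cvStartWriteSeq(CV_32SC1, sizeof(CvSeq), sizeof(int), storage, &writer);
    for (int i = 0; i < 10; i++)
    {
        CV_WRITE_SEQ_ELEM(i, writer);        // append one int, growing blocks as needed
    }
    CvSeq* seq = cvEndWriteSeq(&writer);

    CvSeqReader reader;
    cvStartReadSeq(seq, &reader, 0);
    for (int i = 0; i < seq->total; i++)
    {
        int value;
        CV_READ_SEQ_ELEM(value, reader);     // copy the current element and advance the reader
    }

    cvReleaseMemStorage(&storage);           // releases the sequence's blocks as well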
+/* List of attributes: */ +typedef struct CvAttrList +{ + const char** attr; /* NULL-terminated array of (attribute_name,attribute_value) pairs. */ + struct CvAttrList* next; /* Pointer to next chunk of the attributes list. */ +} +CvAttrList; + +CV_INLINE CvAttrList cvAttrList( const char** attr CV_DEFAULT(NULL), + CvAttrList* next CV_DEFAULT(NULL) ) +{ + CvAttrList l; + l.attr = attr; + l.next = next; + + return l; +} + +struct CvTypeInfo; + +#define CV_NODE_NONE 0 +#define CV_NODE_INT 1 +#define CV_NODE_INTEGER CV_NODE_INT +#define CV_NODE_REAL 2 +#define CV_NODE_FLOAT CV_NODE_REAL +#define CV_NODE_STR 3 +#define CV_NODE_STRING CV_NODE_STR +#define CV_NODE_REF 4 /* not used */ +#define CV_NODE_SEQ 5 +#define CV_NODE_MAP 6 +#define CV_NODE_TYPE_MASK 7 + +#define CV_NODE_TYPE(flags) ((flags) & CV_NODE_TYPE_MASK) + +/* file node flags */ +#define CV_NODE_FLOW 8 /* Used only for writing structures in YAML format. */ +#define CV_NODE_USER 16 +#define CV_NODE_EMPTY 32 +#define CV_NODE_NAMED 64 + +#define CV_NODE_IS_INT(flags) (CV_NODE_TYPE(flags) == CV_NODE_INT) +#define CV_NODE_IS_REAL(flags) (CV_NODE_TYPE(flags) == CV_NODE_REAL) +#define CV_NODE_IS_STRING(flags) (CV_NODE_TYPE(flags) == CV_NODE_STRING) +#define CV_NODE_IS_SEQ(flags) (CV_NODE_TYPE(flags) == CV_NODE_SEQ) +#define CV_NODE_IS_MAP(flags) (CV_NODE_TYPE(flags) == CV_NODE_MAP) +#define CV_NODE_IS_COLLECTION(flags) (CV_NODE_TYPE(flags) >= CV_NODE_SEQ) +#define CV_NODE_IS_FLOW(flags) (((flags) & CV_NODE_FLOW) != 0) +#define CV_NODE_IS_EMPTY(flags) (((flags) & CV_NODE_EMPTY) != 0) +#define CV_NODE_IS_USER(flags) (((flags) & CV_NODE_USER) != 0) +#define CV_NODE_HAS_NAME(flags) (((flags) & CV_NODE_NAMED) != 0) + +#define CV_NODE_SEQ_SIMPLE 256 +#define CV_NODE_SEQ_IS_SIMPLE(seq) (((seq)->flags & CV_NODE_SEQ_SIMPLE) != 0) + +typedef struct CvString +{ + int len; + char* ptr; +} +CvString; + +/* All the keys (names) of elements in the readed file storage + are stored in the hash to speed up the lookup operations: */ +typedef struct CvStringHashNode +{ + unsigned hashval; + CvString str; + struct CvStringHashNode* next; +} +CvStringHashNode; + +typedef struct CvGenericHash CvFileNodeHash; + +/* Basic element of the file storage - scalar or collection: */ +typedef struct CvFileNode +{ + int tag; + struct CvTypeInfo* info; /* type information + (only for user-defined object, for others it is 0) */ + union + { + double f; /* scalar floating-point number */ + int i; /* scalar integer number */ + CvString str; /* text string */ + CvSeq* seq; /* sequence (ordered collection of file nodes) */ + CvFileNodeHash* map; /* map (collection of named file nodes) */ + } data; +} +CvFileNode; + +#ifdef __cplusplus +extern "C" { +#endif +typedef int (CV_CDECL *CvIsInstanceFunc)( const void* struct_ptr ); +typedef void (CV_CDECL *CvReleaseFunc)( void** struct_dblptr ); +typedef void* (CV_CDECL *CvReadFunc)( CvFileStorage* storage, CvFileNode* node ); +typedef void (CV_CDECL *CvWriteFunc)( CvFileStorage* storage, const char* name, + const void* struct_ptr, CvAttrList attributes ); +typedef void* (CV_CDECL *CvCloneFunc)( const void* struct_ptr ); +#ifdef __cplusplus +} +#endif + +typedef struct CvTypeInfo +{ + int flags; + int header_size; + struct CvTypeInfo* prev; + struct CvTypeInfo* next; + const char* type_name; + CvIsInstanceFunc is_instance; + CvReleaseFunc release; + CvReadFunc read; + CvWriteFunc write; + CvCloneFunc clone; +} +CvTypeInfo; + + +/**** System data types ******/ + +typedef struct CvPluginFuncInfo +{ + void** func_addr; + void* 
default_func_addr; + const char* func_names; + int search_modules; + int loaded_from; +} +CvPluginFuncInfo; + +typedef struct CvModuleInfo +{ + struct CvModuleInfo* next; + const char* name; + const char* version; + CvPluginFuncInfo* func_tab; +} +CvModuleInfo; + +#endif /*__OPENCV_CORE_TYPES_H__*/ + +/* End of file. */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/version.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/version.hpp new file mode 100644 index 0000000..63c2935 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/version.hpp @@ -0,0 +1,72 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright( C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +//(including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort(including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* + definition of the current version of OpenCV + Usefull to test in user programs +*/ + +#ifndef __OPENCV_VERSION_HPP__ +#define __OPENCV_VERSION_HPP__ + +#define CV_VERSION_EPOCH 2 +#define CV_VERSION_MAJOR 4 +#define CV_VERSION_MINOR 9 +#define CV_VERSION_REVISION 0 + +#define CVAUX_STR_EXP(__A) #__A +#define CVAUX_STR(__A) CVAUX_STR_EXP(__A) + +#define CVAUX_STRW_EXP(__A) L#__A +#define CVAUX_STRW(__A) CVAUX_STRW_EXP(__A) + +#if CV_VERSION_REVISION +# define CV_VERSION CVAUX_STR(CV_VERSION_EPOCH) "." CVAUX_STR(CV_VERSION_MAJOR) "." CVAUX_STR(CV_VERSION_MINOR) "." CVAUX_STR(CV_VERSION_REVISION) +#else +# define CV_VERSION CVAUX_STR(CV_VERSION_EPOCH) "." CVAUX_STR(CV_VERSION_MAJOR) "." 
CVAUX_STR(CV_VERSION_MINOR) +#endif + +/* old style version constants*/ +#define CV_MAJOR_VERSION CV_VERSION_EPOCH +#define CV_MINOR_VERSION CV_VERSION_MAJOR +#define CV_SUBMINOR_VERSION CV_VERSION_MINOR + +#endif diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/wimage.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/wimage.hpp new file mode 100644 index 0000000..c7afa8c --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/core/wimage.hpp @@ -0,0 +1,621 @@ +/////////////////////////////////////////////////////////////////////////////// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to +// this license. If you do not agree to this license, do not download, +// install, copy or use the software. +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2008, Google, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation or contributors may not be used to endorse +// or promote products derived from this software without specific +// prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" +// and any express or implied warranties, including, but not limited to, the +// implied warranties of merchantability and fitness for a particular purpose +// are disclaimed. In no event shall the Intel Corporation or contributors be +// liable for any direct, indirect, incidental, special, exemplary, or +// consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. + + +///////////////////////////////////////////////////////////////////////////////// +// +// Image class which provides a thin layer around an IplImage. The goals +// of the class design are: +// 1. All the data has explicit ownership to avoid memory leaks +// 2. No hidden allocations or copies for performance. +// 3. Easy access to OpenCV methods (which will access IPP if available) +// 4. Can easily treat external data as an image +// 5. Easy to create images which are subsets of other images +// 6. Fast pixel access which can take advantage of number of channels +// if known at compile time. +// +// The WImage class is the image class which provides the data accessors. +// The 'W' comes from the fact that it is also a wrapper around the popular +// but inconvenient IplImage class. 
A WImage can be constructed either using a +// WImageBuffer class which allocates and frees the data, +// or using a WImageView class which constructs a subimage or a view into +// external data. The view class does no memory management. Each class +// actually has two versions, one when the number of channels is known at +// compile time and one when it isn't. Using the one with the number of +// channels specified can provide some compile time optimizations by using the +// fact that the number of channels is a constant. +// +// We use the convention (c,r) to refer to column c and row r with (0,0) being +// the upper left corner. This is similar to standard Euclidean coordinates +// with the first coordinate varying in the horizontal direction and the second +// coordinate varying in the vertical direction. +// Thus (c,r) is usually in the domain [0, width) X [0, height) +// +// Example usage: +// WImageBuffer3_b im(5,7); // Make a 5X7 3 channel image of type uchar +// WImageView3_b sub_im(im, 2,2, 3,3); // 3X3 submatrix +// vector vec(10, 3.0f); +// WImageView1_f user_im(&vec[0], 2, 5); // 2X5 image w/ supplied data +// +// im.SetZero(); // same as cvSetZero(im.Ipl()) +// *im(2, 3) = 15; // Modify the element at column 2, row 3 +// MySetRand(&sub_im); +// +// // Copy the second row into the first. This can be done with no memory +// // allocation and will use SSE if IPP is available. +// int w = im.Width(); +// im.View(0,0, w,1).CopyFrom(im.View(0,1, w,1)); +// +// // Doesn't care about source of data since using WImage +// void MySetRand(WImage_b* im) { // Works with any number of channels +// for (int r = 0; r < im->Height(); ++r) { +// float* row = im->Row(r); +// for (int c = 0; c < im->Width(); ++c) { +// for (int ch = 0; ch < im->Channels(); ++ch, ++row) { +// *row = uchar(rand() & 255); +// } +// } +// } +// } +// +// Functions that are not part of the basic image allocation, viewing, and +// access should come from OpenCV, except some useful functions that are not +// part of OpenCV can be found in wimage_util.h +#ifndef __OPENCV_CORE_WIMAGE_HPP__ +#define __OPENCV_CORE_WIMAGE_HPP__ + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus + +namespace cv { + +template class WImage; +template class WImageBuffer; +template class WImageView; + +template class WImageC; +template class WImageBufferC; +template class WImageViewC; + +// Commonly used typedefs. +typedef WImage WImage_b; +typedef WImageView WImageView_b; +typedef WImageBuffer WImageBuffer_b; + +typedef WImageC WImage1_b; +typedef WImageViewC WImageView1_b; +typedef WImageBufferC WImageBuffer1_b; + +typedef WImageC WImage3_b; +typedef WImageViewC WImageView3_b; +typedef WImageBufferC WImageBuffer3_b; + +typedef WImage WImage_f; +typedef WImageView WImageView_f; +typedef WImageBuffer WImageBuffer_f; + +typedef WImageC WImage1_f; +typedef WImageViewC WImageView1_f; +typedef WImageBufferC WImageBuffer1_f; + +typedef WImageC WImage3_f; +typedef WImageViewC WImageView3_f; +typedef WImageBufferC WImageBuffer3_f; + +// There isn't a standard for signed and unsigned short so be more +// explicit in the typename for these cases. 
+typedef WImage WImage_16s; +typedef WImageView WImageView_16s; +typedef WImageBuffer WImageBuffer_16s; + +typedef WImageC WImage1_16s; +typedef WImageViewC WImageView1_16s; +typedef WImageBufferC WImageBuffer1_16s; + +typedef WImageC WImage3_16s; +typedef WImageViewC WImageView3_16s; +typedef WImageBufferC WImageBuffer3_16s; + +typedef WImage WImage_16u; +typedef WImageView WImageView_16u; +typedef WImageBuffer WImageBuffer_16u; + +typedef WImageC WImage1_16u; +typedef WImageViewC WImageView1_16u; +typedef WImageBufferC WImageBuffer1_16u; + +typedef WImageC WImage3_16u; +typedef WImageViewC WImageView3_16u; +typedef WImageBufferC WImageBuffer3_16u; + +// +// WImage definitions +// +// This WImage class gives access to the data it refers to. It can be +// constructed either by allocating the data with a WImageBuffer class or +// using the WImageView class to refer to a subimage or outside data. +template +class WImage +{ +public: + typedef T BaseType; + + // WImage is an abstract class with no other virtual methods so make the + // destructor virtual. + virtual ~WImage() = 0; + + // Accessors + IplImage* Ipl() {return image_; } + const IplImage* Ipl() const {return image_; } + T* ImageData() { return reinterpret_cast(image_->imageData); } + const T* ImageData() const { + return reinterpret_cast(image_->imageData); + } + + int Width() const {return image_->width; } + int Height() const {return image_->height; } + + // WidthStep is the number of bytes to go to the pixel with the next y coord + int WidthStep() const {return image_->widthStep; } + + int Channels() const {return image_->nChannels; } + int ChannelSize() const {return sizeof(T); } // number of bytes per channel + + // Number of bytes per pixel + int PixelSize() const {return Channels() * ChannelSize(); } + + // Return depth type (e.g. IPL_DEPTH_8U, IPL_DEPTH_32F) which is the number + // of bits per channel and with the signed bit set. + // This is known at compile time using specializations. + int Depth() const; + + inline const T* Row(int r) const { + return reinterpret_cast(image_->imageData + r*image_->widthStep); + } + + inline T* Row(int r) { + return reinterpret_cast(image_->imageData + r*image_->widthStep); + } + + // Pixel accessors which returns a pointer to the start of the channel + inline T* operator() (int c, int r) { + return reinterpret_cast(image_->imageData + r*image_->widthStep) + + c*Channels(); + } + + inline const T* operator() (int c, int r) const { + return reinterpret_cast(image_->imageData + r*image_->widthStep) + + c*Channels(); + } + + // Copy the contents from another image which is just a convenience to cvCopy + void CopyFrom(const WImage& src) { cvCopy(src.Ipl(), image_); } + + // Set contents to zero which is just a convenient to cvSetZero + void SetZero() { cvSetZero(image_); } + + // Construct a view into a region of this image + WImageView View(int c, int r, int width, int height); + +protected: + // Disallow copy and assignment + WImage(const WImage&); + void operator=(const WImage&); + + explicit WImage(IplImage* img) : image_(img) { + assert(!img || img->depth == Depth()); + } + + void SetIpl(IplImage* image) { + assert(!image || image->depth == Depth()); + image_ = image; + } + + IplImage* image_; +}; + + + +// Image class when both the pixel type and number of channels +// are known at compile time. This wrapper will speed up some of the operations +// like accessing individual pixels using the () operator. 
+template +class WImageC : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + explicit WImageC(IplImage* img) : WImage(img) { + assert(!img || img->nChannels == Channels()); + } + + // Construct a view into a region of this image + WImageViewC View(int c, int r, int width, int height); + + // Copy the contents from another image which is just a convenience to cvCopy + void CopyFrom(const WImageC& src) { + cvCopy(src.Ipl(), WImage::image_); + } + + // WImageC is an abstract class with no other virtual methods so make the + // destructor virtual. + virtual ~WImageC() = 0; + + int Channels() const {return C; } + +protected: + // Disallow copy and assignment + WImageC(const WImageC&); + void operator=(const WImageC&); + + void SetIpl(IplImage* image) { + assert(!image || image->depth == WImage::Depth()); + WImage::SetIpl(image); + } +}; + +// +// WImageBuffer definitions +// +// Image class which owns the data, so it can be allocated and is always +// freed. It cannot be copied but can be explicity cloned. +// +template +class WImageBuffer : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + + // Default constructor which creates an object that can be + WImageBuffer() : WImage(0) {} + + WImageBuffer(int width, int height, int nchannels) : WImage(0) { + Allocate(width, height, nchannels); + } + + // Constructor which takes ownership of a given IplImage so releases + // the image on destruction. + explicit WImageBuffer(IplImage* img) : WImage(img) {} + + // Allocate an image. Does nothing if current size is the same as + // the new size. + void Allocate(int width, int height, int nchannels); + + // Set the data to point to an image, releasing the old data + void SetIpl(IplImage* img) { + ReleaseImage(); + WImage::SetIpl(img); + } + + // Clone an image which reallocates the image if of a different dimension. + void CloneFrom(const WImage& src) { + Allocate(src.Width(), src.Height(), src.Channels()); + CopyFrom(src); + } + + ~WImageBuffer() { + ReleaseImage(); + } + + // Release the image if it isn't null. + void ReleaseImage() { + if (WImage::image_) { + IplImage* image = WImage::image_; + cvReleaseImage(&image); + WImage::SetIpl(0); + } + } + + bool IsNull() const {return WImage::image_ == NULL; } + +private: + // Disallow copy and assignment + WImageBuffer(const WImageBuffer&); + void operator=(const WImageBuffer&); +}; + +// Like a WImageBuffer class but when the number of channels is known +// at compile time. +template +class WImageBufferC : public WImageC +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + // Default constructor which creates an object that can be + WImageBufferC() : WImageC(0) {} + + WImageBufferC(int width, int height) : WImageC(0) { + Allocate(width, height); + } + + // Constructor which takes ownership of a given IplImage so releases + // the image on destruction. + explicit WImageBufferC(IplImage* img) : WImageC(img) {} + + // Allocate an image. Does nothing if current size is the same as + // the new size. + void Allocate(int width, int height); + + // Set the data to point to an image, releasing the old data + void SetIpl(IplImage* img) { + ReleaseImage(); + WImageC::SetIpl(img); + } + + // Clone an image which reallocates the image if of a different dimension. + void CloneFrom(const WImageC& src) { + Allocate(src.Width(), src.Height()); + CopyFrom(src); + } + + ~WImageBufferC() { + ReleaseImage(); + } + + // Release the image if it isn't null. 
+ void ReleaseImage() { + if (WImage::image_) { + IplImage* image = WImage::image_; + cvReleaseImage(&image); + WImageC::SetIpl(0); + } + } + + bool IsNull() const {return WImage::image_ == NULL; } + +private: + // Disallow copy and assignment + WImageBufferC(const WImageBufferC&); + void operator=(const WImageBufferC&); +}; + +// +// WImageView definitions +// +// View into an image class which allows treating a subimage as an image +// or treating external data as an image +// +template +class WImageView : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + + // Construct a subimage. No checks are done that the subimage lies + // completely inside the original image. + WImageView(WImage* img, int c, int r, int width, int height); + + // Refer to external data. + // If not given width_step assumed to be same as width. + WImageView(T* data, int width, int height, int channels, int width_step = -1); + + // Refer to external data. This does NOT take ownership + // of the supplied IplImage. + WImageView(IplImage* img) : WImage(img) {} + + // Copy constructor + WImageView(const WImage& img) : WImage(0) { + header_ = *(img.Ipl()); + WImage::SetIpl(&header_); + } + + WImageView& operator=(const WImage& img) { + header_ = *(img.Ipl()); + WImage::SetIpl(&header_); + return *this; + } + +protected: + IplImage header_; +}; + + +template +class WImageViewC : public WImageC +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + // Default constructor needed for vectors of views. + WImageViewC(); + + virtual ~WImageViewC() {} + + // Construct a subimage. No checks are done that the subimage lies + // completely inside the original image. + WImageViewC(WImageC* img, + int c, int r, int width, int height); + + // Refer to external data + WImageViewC(T* data, int width, int height, int width_step = -1); + + // Refer to external data. This does NOT take ownership + // of the supplied IplImage. + WImageViewC(IplImage* img) : WImageC(img) {} + + // Copy constructor which does a shallow copy to allow multiple views + // of same data. gcc-4.1.1 gets confused if both versions of + // the constructor and assignment operator are not provided. + WImageViewC(const WImageC& img) : WImageC(0) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + } + WImageViewC(const WImageViewC& img) : WImageC(0) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + } + + WImageViewC& operator=(const WImageC& img) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + return *this; + } + WImageViewC& operator=(const WImageViewC& img) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + return *this; + } + +protected: + IplImage header_; +}; + + +// Specializations for depth +template<> +inline int WImage::Depth() const {return IPL_DEPTH_8U; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_8S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_16S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_16U; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_32S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_32F; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_64F; } + +// +// Pure virtual destructors still need to be defined. 
+// +template inline WImage::~WImage() {} +template inline WImageC::~WImageC() {} + +// +// Allocate ImageData +// +template +inline void WImageBuffer::Allocate(int width, int height, int nchannels) +{ + if (IsNull() || WImage::Width() != width || + WImage::Height() != height || WImage::Channels() != nchannels) { + ReleaseImage(); + WImage::image_ = cvCreateImage(cvSize(width, height), + WImage::Depth(), nchannels); + } +} + +template +inline void WImageBufferC::Allocate(int width, int height) +{ + if (IsNull() || WImage::Width() != width || WImage::Height() != height) { + ReleaseImage(); + WImageC::SetIpl(cvCreateImage(cvSize(width, height),WImage::Depth(), C)); + } +} + +// +// ImageView methods +// +template +WImageView::WImageView(WImage* img, int c, int r, int width, int height) + : WImage(0) +{ + header_ = *(img->Ipl()); + header_.imageData = reinterpret_cast((*img)(c, r)); + header_.width = width; + header_.height = height; + WImage::SetIpl(&header_); +} + +template +WImageView::WImageView(T* data, int width, int height, int nchannels, int width_step) + : WImage(0) +{ + cvInitImageHeader(&header_, cvSize(width, height), WImage::Depth(), nchannels); + header_.imageData = reinterpret_cast(data); + if (width_step > 0) { + header_.widthStep = width_step; + } + WImage::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC(WImageC* img, int c, int r, int width, int height) + : WImageC(0) +{ + header_ = *(img->Ipl()); + header_.imageData = reinterpret_cast((*img)(c, r)); + header_.width = width; + header_.height = height; + WImageC::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC() : WImageC(0) { + cvInitImageHeader(&header_, cvSize(0, 0), WImage::Depth(), C); + header_.imageData = reinterpret_cast(0); + WImageC::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC(T* data, int width, int height, int width_step) + : WImageC(0) +{ + cvInitImageHeader(&header_, cvSize(width, height), WImage::Depth(), C); + header_.imageData = reinterpret_cast(data); + if (width_step > 0) { + header_.widthStep = width_step; + } + WImageC::SetIpl(&header_); +} + +// Construct a view into a region of an image +template +WImageView WImage::View(int c, int r, int width, int height) { + return WImageView(this, c, r, width, height); +} + +template +WImageViewC WImageC::View(int c, int r, int width, int height) { + return WImageViewC(this, c, r, width, height); +} + +} // end of namespace + +#endif // __cplusplus + +#endif diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/features2d/features2d.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/features2d/features2d.hpp new file mode 100644 index 0000000..7536128 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/features2d/features2d.hpp @@ -0,0 +1,1611 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_FEATURES_2D_HPP__ +#define __OPENCV_FEATURES_2D_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/flann/miniflann.hpp" + +#ifdef __cplusplus +#include + +namespace cv +{ + +CV_EXPORTS bool initModule_features2d(); + +/*! + The Keypoint Class + + The class instance stores a keypoint, i.e. a point feature found by one of many available keypoint detectors, such as + Harris corner detector, cv::FAST, cv::StarDetector, cv::SURF, cv::SIFT, cv::LDetector etc. + + The keypoint is characterized by the 2D position, scale + (proportional to the diameter of the neighborhood that needs to be taken into account), + orientation and some other parameters. The keypoint neighborhood is then analyzed by another algorithm that builds a descriptor + (usually represented as a feature vector). The keypoints representing the same object in different images can then be matched using + cv::KDTree or another method. +*/ +class CV_EXPORTS_W_SIMPLE KeyPoint +{ +public: + //! the default constructor + CV_WRAP KeyPoint() : pt(0,0), size(0), angle(-1), response(0), octave(0), class_id(-1) {} + //! the full constructor + KeyPoint(Point2f _pt, float _size, float _angle=-1, + float _response=0, int _octave=0, int _class_id=-1) + : pt(_pt), size(_size), angle(_angle), + response(_response), octave(_octave), class_id(_class_id) {} + //! another form of the full constructor + CV_WRAP KeyPoint(float x, float y, float _size, float _angle=-1, + float _response=0, int _octave=0, int _class_id=-1) + : pt(x, y), size(_size), angle(_angle), + response(_response), octave(_octave), class_id(_class_id) {} + + size_t hash() const; + + //! converts vector of keypoints to vector of points + static void convert(const vector& keypoints, + CV_OUT vector& points2f, + const vector& keypointIndexes=vector()); + //! 
converts vector of points to the vector of keypoints, where each keypoint is assigned the same size and the same orientation + static void convert(const vector& points2f, + CV_OUT vector& keypoints, + float size=1, float response=1, int octave=0, int class_id=-1); + + //! computes overlap for pair of keypoints; + //! overlap is a ratio between area of keypoint regions intersection and + //! area of keypoint regions union (now keypoint region is circle) + static float overlap(const KeyPoint& kp1, const KeyPoint& kp2); + + CV_PROP_RW Point2f pt; //!< coordinates of the keypoints + CV_PROP_RW float size; //!< diameter of the meaningful keypoint neighborhood + CV_PROP_RW float angle; //!< computed orientation of the keypoint (-1 if not applicable); + //!< it's in [0,360) degrees and measured relative to + //!< image coordinate system, ie in clockwise. + CV_PROP_RW float response; //!< the response by which the most strong keypoints have been selected. Can be used for the further sorting or subsampling + CV_PROP_RW int octave; //!< octave (pyramid layer) from which the keypoint has been extracted + CV_PROP_RW int class_id; //!< object class (if the keypoints need to be clustered by an object they belong to) +}; + +//! writes vector of keypoints to the file storage +CV_EXPORTS void write(FileStorage& fs, const string& name, const vector& keypoints); +//! reads vector of keypoints from the specified file storage node +CV_EXPORTS void read(const FileNode& node, CV_OUT vector& keypoints); + +/* + * A class filters a vector of keypoints. + * Because now it is difficult to provide a convenient interface for all usage scenarios of the keypoints filter class, + * it has only several needed by now static methods. + */ +class CV_EXPORTS KeyPointsFilter +{ +public: + KeyPointsFilter(){} + + /* + * Remove keypoints within borderPixels of an image edge. + */ + static void runByImageBorder( vector& keypoints, Size imageSize, int borderSize ); + /* + * Remove keypoints of sizes out of range. + */ + static void runByKeypointSize( vector& keypoints, float minSize, + float maxSize=FLT_MAX ); + /* + * Remove keypoints from some image by mask for pixels of this image. + */ + static void runByPixelsMask( vector& keypoints, const Mat& mask ); + /* + * Remove duplicated keypoints. + */ + static void removeDuplicated( vector& keypoints ); + + /* + * Retain the specified number of the best keypoints (according to the response) + */ + static void retainBest( vector& keypoints, int npoints ); +}; + + +/************************************ Base Classes ************************************/ + +/* + * Abstract base class for 2D image feature detectors. + */ +class CV_EXPORTS_W FeatureDetector : public virtual Algorithm +{ +public: + virtual ~FeatureDetector(); + + /* + * Detect keypoints in an image. + * image The image. + * keypoints The detected keypoints. + * mask Mask specifying where to look for keypoints (optional). Must be a char + * matrix with non-zero values in the region of interest. + */ + CV_WRAP void detect( const Mat& image, CV_OUT vector& keypoints, const Mat& mask=Mat() ) const; + + /* + * Detect keypoints in an image set. + * images Image collection. + * keypoints Collection of keypoints detected in an input images. keypoints[i] is a set of keypoints detected in an images[i]. + * masks Masks for image set. masks[i] is a mask for images[i]. 
+ */ + void detect( const vector& images, vector >& keypoints, const vector& masks=vector() ) const; + + // Return true if detector object is empty + CV_WRAP virtual bool empty() const; + + // Create feature detector by detector name. + CV_WRAP static Ptr create( const string& detectorType ); + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const = 0; + + /* + * Remove keypoints that are not in the mask. + * Helper function, useful when wrapping a library call for keypoint detection that + * does not support a mask argument. + */ + static void removeInvalidPoints( const Mat& mask, vector& keypoints ); +}; + + +/* + * Abstract base class for computing descriptors for image keypoints. + * + * In this interface we assume a keypoint descriptor can be represented as a + * dense, fixed-dimensional vector of some basic type. Most descriptors used + * in practice follow this pattern, as it makes it very easy to compute + * distances between descriptors. Therefore we represent a collection of + * descriptors as a Mat, where each row is one keypoint descriptor. + */ +class CV_EXPORTS_W DescriptorExtractor : public virtual Algorithm +{ +public: + virtual ~DescriptorExtractor(); + + /* + * Compute the descriptors for a set of keypoints in an image. + * image The image. + * keypoints The input keypoints. Keypoints for which a descriptor cannot be computed are removed. + * descriptors Copmputed descriptors. Row i is the descriptor for keypoint i. + */ + CV_WRAP void compute( const Mat& image, CV_OUT CV_IN_OUT vector& keypoints, CV_OUT Mat& descriptors ) const; + + /* + * Compute the descriptors for a keypoints collection detected in image collection. + * images Image collection. + * keypoints Input keypoints collection. keypoints[i] is keypoints detected in images[i]. + * Keypoints for which a descriptor cannot be computed are removed. + * descriptors Descriptor collection. descriptors[i] are descriptors computed for set keypoints[i]. + */ + void compute( const vector& images, vector >& keypoints, vector& descriptors ) const; + + CV_WRAP virtual int descriptorSize() const = 0; + CV_WRAP virtual int descriptorType() const = 0; + + CV_WRAP virtual bool empty() const; + + CV_WRAP static Ptr create( const string& descriptorExtractorType ); + +protected: + virtual void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const = 0; + + /* + * Remove keypoints within borderPixels of an image edge. + */ + static void removeBorderKeypoints( vector& keypoints, + Size imageSize, int borderSize ); +}; + + + +/* + * Abstract base class for simultaneous 2D feature detection descriptor extraction. + */ +class CV_EXPORTS_W Feature2D : public FeatureDetector, public DescriptorExtractor +{ +public: + /* + * Detect keypoints in an image. + * image The image. + * keypoints The detected keypoints. + * mask Mask specifying where to look for keypoints (optional). Must be a char + * matrix with non-zero values in the region of interest. + * useProvidedKeypoints If true, the method will skip the detection phase and will compute + * descriptors for the provided keypoints + */ + CV_WRAP_AS(detectAndCompute) virtual void operator()( InputArray image, InputArray mask, + CV_OUT vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints=false ) const = 0; + + CV_WRAP void compute( const Mat& image, CV_OUT CV_IN_OUT std::vector& keypoints, CV_OUT Mat& descriptors ) const; + + // Create feature detector and descriptor extractor by name. 
+ CV_WRAP static Ptr create( const string& name ); +}; + +/*! + BRISK implementation +*/ +class CV_EXPORTS_W BRISK : public Feature2D +{ +public: + CV_WRAP explicit BRISK(int thresh=30, int octaves=3, float patternScale=1.0f); + + virtual ~BRISK(); + + // returns the descriptor size in bytes + int descriptorSize() const; + // returns the descriptor type + int descriptorType() const; + + // Compute the BRISK features on an image + void operator()(InputArray image, InputArray mask, vector& keypoints) const; + + // Compute the BRISK features and descriptors on an image + void operator()( InputArray image, InputArray mask, vector& keypoints, + OutputArray descriptors, bool useProvidedKeypoints=false ) const; + + AlgorithmInfo* info() const; + + // custom setup + CV_WRAP explicit BRISK(std::vector &radiusList, std::vector &numberList, + float dMax=5.85f, float dMin=8.2f, std::vector indexChange=std::vector()); + + // call this to generate the kernel: + // circle of radius r (pixels), with n points; + // short pairings with dMax, long pairings with dMin + CV_WRAP void generateKernel(std::vector &radiusList, + std::vector &numberList, float dMax=5.85f, float dMin=8.2f, + std::vector indexChange=std::vector()); + +protected: + + void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + void computeKeypointsNoOrientation(InputArray image, InputArray mask, vector& keypoints) const; + void computeDescriptorsAndOrOrientation(InputArray image, InputArray mask, vector& keypoints, + OutputArray descriptors, bool doDescriptors, bool doOrientation, + bool useProvidedKeypoints) const; + + // Feature parameters + CV_PROP_RW int threshold; + CV_PROP_RW int octaves; + + // some helper structures for the Brisk pattern representation + struct BriskPatternPoint{ + float x; // x coordinate relative to center + float y; // x coordinate relative to center + float sigma; // Gaussian smoothing sigma + }; + struct BriskShortPair{ + unsigned int i; // index of the first pattern point + unsigned int j; // index of other pattern point + }; + struct BriskLongPair{ + unsigned int i; // index of the first pattern point + unsigned int j; // index of other pattern point + int weighted_dx; // 1024.0/dx + int weighted_dy; // 1024.0/dy + }; + inline int smoothedIntensity(const cv::Mat& image, + const cv::Mat& integral,const float key_x, + const float key_y, const unsigned int scale, + const unsigned int rot, const unsigned int point) const; + // pattern properties + BriskPatternPoint* patternPoints_; //[i][rotation][scale] + unsigned int points_; // total number of collocation points + float* scaleList_; // lists the scaling per scale index [scale] + unsigned int* sizeList_; // lists the total pattern size per scale index [scale] + static const unsigned int scales_; // scales discretization + static const float scalerange_; // span of sizes 40->4 Octaves - else, this needs to be adjusted... + static const unsigned int n_rot_; // discretization of the rotation look-up + + // pairs + int strings_; // number of uchars the descriptor consists of + float dMax_; // short pair maximum distance + float dMin_; // long pair maximum distance + BriskShortPair* shortPairs_; // d<_dMax + BriskLongPair* longPairs_; // d>_dMin + unsigned int noShortPairs_; // number of shortParis + unsigned int noLongPairs_; // number of longParis + + // general + static const float basicSize_; +}; + + +/*! + ORB implementation. 
+*/ +class CV_EXPORTS_W ORB : public Feature2D +{ +public: + // the size of the signature in bytes + enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 }; + + CV_WRAP explicit ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, + int firstLevel = 0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31 ); + + // returns the descriptor size in bytes + int descriptorSize() const; + // returns the descriptor type + int descriptorType() const; + + // Compute the ORB features and descriptors on an image + void operator()(InputArray image, InputArray mask, vector& keypoints) const; + + // Compute the ORB features and descriptors on an image + void operator()( InputArray image, InputArray mask, vector& keypoints, + OutputArray descriptors, bool useProvidedKeypoints=false ) const; + + AlgorithmInfo* info() const; + +protected: + + void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + CV_PROP_RW int nfeatures; + CV_PROP_RW double scaleFactor; + CV_PROP_RW int nlevels; + CV_PROP_RW int edgeThreshold; + CV_PROP_RW int firstLevel; + CV_PROP_RW int WTA_K; + CV_PROP_RW int scoreType; + CV_PROP_RW int patchSize; +}; + +typedef ORB OrbFeatureDetector; +typedef ORB OrbDescriptorExtractor; + +/*! + FREAK implementation +*/ +class CV_EXPORTS FREAK : public DescriptorExtractor +{ +public: + /** Constructor + * @param orientationNormalized enable orientation normalization + * @param scaleNormalized enable scale normalization + * @param patternScale scaling of the description pattern + * @param nbOctave number of octaves covered by the detected keypoints + * @param selectedPairs (optional) user defined selected pairs + */ + explicit FREAK( bool orientationNormalized = true, + bool scaleNormalized = true, + float patternScale = 22.0f, + int nOctaves = 4, + const vector& selectedPairs = vector()); + FREAK( const FREAK& rhs ); + FREAK& operator=( const FREAK& ); + + virtual ~FREAK(); + + /** returns the descriptor length in bytes */ + virtual int descriptorSize() const; + + /** returns the descriptor type */ + virtual int descriptorType() const; + + /** select the 512 "best description pairs" + * @param images grayscale images set + * @param keypoints set of detected keypoints + * @param corrThresh correlation threshold + * @param verbose print construction information + * @return list of best pair indexes + */ + vector selectPairs( const vector& images, vector >& keypoints, + const double corrThresh = 0.7, bool verbose = true ); + + AlgorithmInfo* info() const; + + enum + { + NB_SCALES = 64, NB_PAIRS = 512, NB_ORIENPAIRS = 45 + }; + +protected: + virtual void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + void buildPattern(); + uchar meanIntensity( const Mat& image, const Mat& integral, const float kp_x, const float kp_y, + const unsigned int scale, const unsigned int rot, const unsigned int point ) const; + + bool orientationNormalized; //true if the orientation is normalized, false otherwise + bool scaleNormalized; //true if the scale is normalized, false otherwise + double patternScale; //scaling of the pattern + int nOctaves; //number of octaves + bool extAll; // true if all pairs need to be extracted for pairs selection + + double patternScale0; + int nOctaves0; + vector selectedPairs0; + + struct PatternPoint + { + float x; // x coordinate relative to center + float y; // x coordinate relative to center + float 
sigma; // Gaussian smoothing sigma + }; + + struct DescriptionPair + { + uchar i; // index of the first point + uchar j; // index of the second point + }; + + struct OrientationPair + { + uchar i; // index of the first point + uchar j; // index of the second point + int weight_dx; // dx/(norm_sq))*4096 + int weight_dy; // dy/(norm_sq))*4096 + }; + + vector patternLookup; // look-up table for the pattern points (position+sigma of all points at all scales and orientation) + int patternSizes[NB_SCALES]; // size of the pattern at a specific scale (used to check if a point is within image boundaries) + DescriptionPair descriptionPairs[NB_PAIRS]; + OrientationPair orientationPairs[NB_ORIENPAIRS]; +}; + + +/*! + Maximal Stable Extremal Regions class. + + The class implements MSER algorithm introduced by J. Matas. + Unlike SIFT, SURF and many other detectors in OpenCV, this is salient region detector, + not the salient point detector. + + It returns the regions, each of those is encoded as a contour. +*/ +class CV_EXPORTS_W MSER : public FeatureDetector +{ +public: + //! the full constructor + CV_WRAP explicit MSER( int _delta=5, int _min_area=60, int _max_area=14400, + double _max_variation=0.25, double _min_diversity=.2, + int _max_evolution=200, double _area_threshold=1.01, + double _min_margin=0.003, int _edge_blur_size=5 ); + + //! the operator that extracts the MSERs from the image or the specific part of it + CV_WRAP_AS(detect) void operator()( const Mat& image, CV_OUT vector >& msers, + const Mat& mask=Mat() ) const; + AlgorithmInfo* info() const; + +protected: + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + int delta; + int minArea; + int maxArea; + double maxVariation; + double minDiversity; + int maxEvolution; + double areaThreshold; + double minMargin; + int edgeBlurSize; +}; + +typedef MSER MserFeatureDetector; + +/*! + The "Star" Detector. + + The class implements the keypoint detector introduced by K. Konolige. +*/ +class CV_EXPORTS_W StarDetector : public FeatureDetector +{ +public: + //! the full constructor + CV_WRAP StarDetector(int _maxSize=45, int _responseThreshold=30, + int _lineThresholdProjected=10, + int _lineThresholdBinarized=8, + int _suppressNonmaxSize=5); + + //! finds the keypoints in the image + CV_WRAP_AS(detect) void operator()(const Mat& image, + CV_OUT vector& keypoints) const; + + AlgorithmInfo* info() const; + +protected: + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + int maxSize; + int responseThreshold; + int lineThresholdProjected; + int lineThresholdBinarized; + int suppressNonmaxSize; +}; + +//! detects corners using FAST algorithm by E. 
Rosten +CV_EXPORTS void FAST( InputArray image, CV_OUT vector& keypoints, + int threshold, bool nonmaxSuppression=true ); + +CV_EXPORTS void FASTX( InputArray image, CV_OUT vector& keypoints, + int threshold, bool nonmaxSuppression, int type ); + +class CV_EXPORTS_W FastFeatureDetector : public FeatureDetector +{ +public: + + enum + { // Define it in old class to simplify migration to 2.5 + TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2 + }; + + CV_WRAP FastFeatureDetector( int threshold=10, bool nonmaxSuppression=true ); + AlgorithmInfo* info() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + int threshold; + bool nonmaxSuppression; +}; + + +class CV_EXPORTS_W GFTTDetector : public FeatureDetector +{ +public: + CV_WRAP GFTTDetector( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1, + int blockSize=3, bool useHarrisDetector=false, double k=0.04 ); + AlgorithmInfo* info() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + int nfeatures; + double qualityLevel; + double minDistance; + int blockSize; + bool useHarrisDetector; + double k; +}; + +typedef GFTTDetector GoodFeaturesToTrackDetector; +typedef StarDetector StarFeatureDetector; + +class CV_EXPORTS_W SimpleBlobDetector : public FeatureDetector +{ +public: + struct CV_EXPORTS_W_SIMPLE Params + { + CV_WRAP Params(); + CV_PROP_RW float thresholdStep; + CV_PROP_RW float minThreshold; + CV_PROP_RW float maxThreshold; + CV_PROP_RW size_t minRepeatability; + CV_PROP_RW float minDistBetweenBlobs; + + CV_PROP_RW bool filterByColor; + CV_PROP_RW uchar blobColor; + + CV_PROP_RW bool filterByArea; + CV_PROP_RW float minArea, maxArea; + + CV_PROP_RW bool filterByCircularity; + CV_PROP_RW float minCircularity, maxCircularity; + + CV_PROP_RW bool filterByInertia; + CV_PROP_RW float minInertiaRatio, maxInertiaRatio; + + CV_PROP_RW bool filterByConvexity; + CV_PROP_RW float minConvexity, maxConvexity; + + void read( const FileNode& fn ); + void write( FileStorage& fs ) const; + }; + + CV_WRAP SimpleBlobDetector(const SimpleBlobDetector::Params ¶meters = SimpleBlobDetector::Params()); + + virtual void read( const FileNode& fn ); + virtual void write( FileStorage& fs ) const; + +protected: + struct CV_EXPORTS Center + { + Point2d location; + double radius; + double confidence; + }; + + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + virtual void findBlobs(const Mat &image, const Mat &binaryImage, vector
¢ers) const; + + Params params; + AlgorithmInfo* info() const; +}; + + +class CV_EXPORTS DenseFeatureDetector : public FeatureDetector +{ +public: + explicit DenseFeatureDetector( float initFeatureScale=1.f, int featureScaleLevels=1, + float featureScaleMul=0.1f, + int initXyStep=6, int initImgBound=0, + bool varyXyStepWithScale=true, + bool varyImgBoundWithScale=false ); + AlgorithmInfo* info() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + double initFeatureScale; + int featureScaleLevels; + double featureScaleMul; + + int initXyStep; + int initImgBound; + + bool varyXyStepWithScale; + bool varyImgBoundWithScale; +}; + +/* + * Adapts a detector to partition the source image into a grid and detect + * points in each cell. + */ +class CV_EXPORTS_W GridAdaptedFeatureDetector : public FeatureDetector +{ +public: + /* + * detector Detector that will be adapted. + * maxTotalKeypoints Maximum count of keypoints detected on the image. Only the strongest keypoints + * will be keeped. + * gridRows Grid rows count. + * gridCols Grid column count. + */ + CV_WRAP GridAdaptedFeatureDetector( const Ptr& detector=0, + int maxTotalKeypoints=1000, + int gridRows=4, int gridCols=4 ); + + // TODO implement read/write + virtual bool empty() const; + + AlgorithmInfo* info() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + Ptr detector; + int maxTotalKeypoints; + int gridRows; + int gridCols; +}; + +/* + * Adapts a detector to detect points over multiple levels of a Gaussian + * pyramid. Useful for detectors that are not inherently scaled. + */ +class CV_EXPORTS_W PyramidAdaptedFeatureDetector : public FeatureDetector +{ +public: + // maxLevel - The 0-based index of the last pyramid layer + CV_WRAP PyramidAdaptedFeatureDetector( const Ptr& detector, int maxLevel=2 ); + + // TODO implement read/write + virtual bool empty() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + Ptr detector; + int maxLevel; +}; + +/** \brief A feature detector parameter adjuster, this is used by the DynamicAdaptedFeatureDetector + * and is a wrapper for FeatureDetector that allow them to be adjusted after a detection + */ +class CV_EXPORTS AdjusterAdapter: public FeatureDetector +{ +public: + /** pure virtual interface + */ + virtual ~AdjusterAdapter() {} + /** too few features were detected so, adjust the detector params accordingly + * \param min the minimum number of desired features + * \param n_detected the number previously detected + */ + virtual void tooFew(int min, int n_detected) = 0; + /** too many features were detected so, adjust the detector params accordingly + * \param max the maximum number of desired features + * \param n_detected the number previously detected + */ + virtual void tooMany(int max, int n_detected) = 0; + /** are params maxed out or still valid? + * \return false if the parameters can't be adjusted any more + */ + virtual bool good() const = 0; + + virtual Ptr clone() const = 0; + + static Ptr create( const string& detectorType ); +}; +/** \brief an adaptively adjusting detector that iteratively detects until the desired number + * of features are detected. + * Beware that this is not thread safe - as the adjustment of parameters breaks the const + * of the detection routine... 
+ * /TODO Make this const correct and thread safe + * + * sample usage: + //will create a detector that attempts to find 100 - 110 FAST Keypoints, and will at most run + //FAST feature detection 10 times until that number of keypoints are found + Ptr detector(new DynamicAdaptedFeatureDetector(new FastAdjuster(20,true),100, 110, 10)); + + */ +class CV_EXPORTS DynamicAdaptedFeatureDetector: public FeatureDetector +{ +public: + + /** \param adjuster an AdjusterAdapter that will do the detection and parameter adjustment + * \param max_features the maximum desired number of features + * \param max_iters the maximum number of times to try to adjust the feature detector params + * for the FastAdjuster this can be high, but with Star or Surf this can get time consuming + * \param min_features the minimum desired features + */ + DynamicAdaptedFeatureDetector( const Ptr& adjuster, int min_features=400, int max_features=500, int max_iters=5 ); + + virtual bool empty() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + +private: + DynamicAdaptedFeatureDetector& operator=(const DynamicAdaptedFeatureDetector&); + DynamicAdaptedFeatureDetector(const DynamicAdaptedFeatureDetector&); + + int escape_iters_; + int min_features_, max_features_; + const Ptr adjuster_; +}; + +/**\brief an adjust for the FAST detector. This will basically decrement or increment the + * threshold by 1 + */ +class CV_EXPORTS FastAdjuster: public AdjusterAdapter +{ +public: + /**\param init_thresh the initial threshold to start with, default = 20 + * \param nonmax whether to use non max or not for fast feature detection + */ + FastAdjuster(int init_thresh=20, bool nonmax=true, int min_thresh=1, int max_thresh=200); + + virtual void tooFew(int minv, int n_detected); + virtual void tooMany(int maxv, int n_detected); + virtual bool good() const; + + virtual Ptr clone() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + int thresh_; + bool nonmax_; + int init_thresh_, min_thresh_, max_thresh_; +}; + + +/** An adjuster for StarFeatureDetector, this one adjusts the responseThreshold for now + * TODO find a faster way to converge the parameters for Star - use CvStarDetectorParams + */ +class CV_EXPORTS StarAdjuster: public AdjusterAdapter +{ +public: + StarAdjuster(double initial_thresh=30.0, double min_thresh=2., double max_thresh=200.); + + virtual void tooFew(int minv, int n_detected); + virtual void tooMany(int maxv, int n_detected); + virtual bool good() const; + + virtual Ptr clone() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + double thresh_, init_thresh_, min_thresh_, max_thresh_; +}; + +class CV_EXPORTS SurfAdjuster: public AdjusterAdapter +{ +public: + SurfAdjuster( double initial_thresh=400.f, double min_thresh=2, double max_thresh=1000 ); + + virtual void tooFew(int minv, int n_detected); + virtual void tooMany(int maxv, int n_detected); + virtual bool good() const; + + virtual Ptr clone() const; + +protected: + virtual void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + + double thresh_, init_thresh_, min_thresh_, max_thresh_; +}; + +CV_EXPORTS Mat windowedMatchingMask( const vector& keypoints1, const vector& keypoints2, + float maxDeltaX, float maxDeltaY ); + + + +/* + * OpponentColorDescriptorExtractor + * + * Adapts a descriptor extractor to compute descripors in Opponent Color 
Space + * (refer to van de Sande et al., CGIV 2008 "Color Descriptors for Object Category Recognition"). + * Input RGB image is transformed in Opponent Color Space. Then unadapted descriptor extractor + * (set in constructor) computes descriptors on each of the three channel and concatenate + * them into a single color descriptor. + */ +class CV_EXPORTS OpponentColorDescriptorExtractor : public DescriptorExtractor +{ +public: + OpponentColorDescriptorExtractor( const Ptr& descriptorExtractor ); + + virtual void read( const FileNode& ); + virtual void write( FileStorage& ) const; + + virtual int descriptorSize() const; + virtual int descriptorType() const; + + virtual bool empty() const; + +protected: + virtual void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + + Ptr descriptorExtractor; +}; + +/* + * BRIEF Descriptor + */ +class CV_EXPORTS BriefDescriptorExtractor : public DescriptorExtractor +{ +public: + static const int PATCH_SIZE = 48; + static const int KERNEL_SIZE = 9; + + // bytes is a length of descriptor in bytes. It can be equal 16, 32 or 64 bytes. + BriefDescriptorExtractor( int bytes = 32 ); + + virtual void read( const FileNode& ); + virtual void write( FileStorage& ) const; + + virtual int descriptorSize() const; + virtual int descriptorType() const; + + /// @todo read and write for brief + + AlgorithmInfo* info() const; + +protected: + virtual void computeImpl(const Mat& image, vector& keypoints, Mat& descriptors) const; + + typedef void(*PixelTestFn)(const Mat&, const vector&, Mat&); + + int bytes_; + PixelTestFn test_fn_; +}; + + +/****************************************************************************************\ +* Distance * +\****************************************************************************************/ + +template +struct CV_EXPORTS Accumulator +{ + typedef T Type; +}; + +template<> struct Accumulator { typedef float Type; }; +template<> struct Accumulator { typedef float Type; }; +template<> struct Accumulator { typedef float Type; }; +template<> struct Accumulator { typedef float Type; }; + +/* + * Squared Euclidean distance functor + */ +template +struct CV_EXPORTS SL2 +{ + enum { normType = NORM_L2SQR }; + typedef T ValueType; + typedef typename Accumulator::Type ResultType; + + ResultType operator()( const T* a, const T* b, int size ) const + { + return normL2Sqr(a, b, size); + } +}; + +/* + * Euclidean distance functor + */ +template +struct CV_EXPORTS L2 +{ + enum { normType = NORM_L2 }; + typedef T ValueType; + typedef typename Accumulator::Type ResultType; + + ResultType operator()( const T* a, const T* b, int size ) const + { + return (ResultType)sqrt((double)normL2Sqr(a, b, size)); + } +}; + +/* + * Manhattan distance (city block distance) functor + */ +template +struct CV_EXPORTS L1 +{ + enum { normType = NORM_L1 }; + typedef T ValueType; + typedef typename Accumulator::Type ResultType; + + ResultType operator()( const T* a, const T* b, int size ) const + { + return normL1(a, b, size); + } +}; + +/* + * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor + * bit count of A exclusive XOR'ed with B + */ +struct CV_EXPORTS Hamming +{ + enum { normType = NORM_HAMMING }; + typedef unsigned char ValueType; + typedef int ResultType; + + /** this will count the bits in a ^ b + */ + ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const + { + return normHamming(a, b, size); + } +}; + +typedef Hamming HammingLUT; + +template 
struct HammingMultilevel +{ + enum { normType = NORM_HAMMING + (cellsize>1) }; + typedef unsigned char ValueType; + typedef int ResultType; + + ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const + { + return normHamming(a, b, size, cellsize); + } +}; + +/****************************************************************************************\ +* DMatch * +\****************************************************************************************/ +/* + * Struct for matching: query descriptor index, train descriptor index, train image index and distance between descriptors. + */ +struct CV_EXPORTS_W_SIMPLE DMatch +{ + CV_WRAP DMatch() : queryIdx(-1), trainIdx(-1), imgIdx(-1), distance(FLT_MAX) {} + CV_WRAP DMatch( int _queryIdx, int _trainIdx, float _distance ) : + queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(-1), distance(_distance) {} + CV_WRAP DMatch( int _queryIdx, int _trainIdx, int _imgIdx, float _distance ) : + queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(_imgIdx), distance(_distance) {} + + CV_PROP_RW int queryIdx; // query descriptor index + CV_PROP_RW int trainIdx; // train descriptor index + CV_PROP_RW int imgIdx; // train image index + + CV_PROP_RW float distance; + + // less is better + bool operator<( const DMatch &m ) const + { + return distance < m.distance; + } +}; + +/****************************************************************************************\ +* DescriptorMatcher * +\****************************************************************************************/ +/* + * Abstract base class for matching two sets of descriptors. + */ +class CV_EXPORTS_W DescriptorMatcher : public Algorithm +{ +public: + virtual ~DescriptorMatcher(); + + /* + * Add descriptors to train descriptor collection. + * descriptors Descriptors to add. Each descriptors[i] is a descriptors set from one image. + */ + CV_WRAP virtual void add( const vector& descriptors ); + /* + * Get train descriptors collection. + */ + CV_WRAP const vector& getTrainDescriptors() const; + /* + * Clear train descriptors collection. + */ + CV_WRAP virtual void clear(); + + /* + * Return true if there are not train descriptors in collection. + */ + CV_WRAP virtual bool empty() const; + /* + * Return true if the matcher supports mask in match methods. + */ + CV_WRAP virtual bool isMaskSupported() const = 0; + + /* + * Train matcher (e.g. train flann index). + * In all methods to match the method train() is run every time before matching. + * Some descriptor matchers (e.g. BruteForceMatcher) have empty implementation + * of this method, other matchers really train their inner structures + * (e.g. FlannBasedMatcher trains flann::Index). So nonempty implementation + * of train() should check the class object state and do traing/retraining + * only if the state requires that (e.g. FlannBasedMatcher trains flann::Index + * if it has not trained yet or if new descriptors have been added to the train + * collection). + */ + CV_WRAP virtual void train(); + /* + * Group of methods to match descriptors from image pair. + * Method train() is run in this methods. + */ + // Find one best match for each query descriptor (if mask is empty). + CV_WRAP void match( const Mat& queryDescriptors, const Mat& trainDescriptors, + CV_OUT vector& matches, const Mat& mask=Mat() ) const; + // Find k best matches for each query descriptor (in increasing order of distances). + // compactResult is used when mask is not empty. 
If compactResult is false matches + // vector will have the same size as queryDescriptors rows. If compactResult is true + // matches vector will not contain matches for fully masked out query descriptors. + CV_WRAP void knnMatch( const Mat& queryDescriptors, const Mat& trainDescriptors, + CV_OUT vector >& matches, int k, + const Mat& mask=Mat(), bool compactResult=false ) const; + // Find best matches for each query descriptor which have distance less than + // maxDistance (in increasing order of distances). + void radiusMatch( const Mat& queryDescriptors, const Mat& trainDescriptors, + vector >& matches, float maxDistance, + const Mat& mask=Mat(), bool compactResult=false ) const; + /* + * Group of methods to match descriptors from one image to image set. + * See description of similar methods for matching image pair above. + */ + CV_WRAP void match( const Mat& queryDescriptors, CV_OUT vector& matches, + const vector& masks=vector() ); + CV_WRAP void knnMatch( const Mat& queryDescriptors, CV_OUT vector >& matches, int k, + const vector& masks=vector(), bool compactResult=false ); + void radiusMatch( const Mat& queryDescriptors, vector >& matches, float maxDistance, + const vector& masks=vector(), bool compactResult=false ); + + // Reads matcher object from a file node + virtual void read( const FileNode& ); + // Writes matcher object to a file storage + virtual void write( FileStorage& ) const; + + // Clone the matcher. If emptyTrainData is false the method create deep copy of the object, i.e. copies + // both parameters and train data. If emptyTrainData is true the method create object copy with current parameters + // but with empty train data. + virtual Ptr clone( bool emptyTrainData=false ) const = 0; + + CV_WRAP static Ptr create( const string& descriptorMatcherType ); +protected: + /* + * Class to work with descriptors from several images as with one merged matrix. + * It is used e.g. in FlannBasedMatcher. + */ + class CV_EXPORTS DescriptorCollection + { + public: + DescriptorCollection(); + DescriptorCollection( const DescriptorCollection& collection ); + virtual ~DescriptorCollection(); + + // Vector of matrices "descriptors" will be merged to one matrix "mergedDescriptors" here. + void set( const vector& descriptors ); + virtual void clear(); + + const Mat& getDescriptors() const; + const Mat getDescriptor( int imgIdx, int localDescIdx ) const; + const Mat getDescriptor( int globalDescIdx ) const; + void getLocalIdx( int globalDescIdx, int& imgIdx, int& localDescIdx ) const; + + int size() const; + + protected: + Mat mergedDescriptors; + vector startIdxs; + }; + + // In fact the matching is implemented only by the following two methods. These methods suppose + // that the class object has been trained already. Public match methods call these methods + // after calling train(). + virtual void knnMatchImpl( const Mat& queryDescriptors, vector >& matches, int k, + const vector& masks=vector(), bool compactResult=false ) = 0; + virtual void radiusMatchImpl( const Mat& queryDescriptors, vector >& matches, float maxDistance, + const vector& masks=vector(), bool compactResult=false ) = 0; + + static bool isPossibleMatch( const Mat& mask, int queryIdx, int trainIdx ); + static bool isMaskedOut( const vector& masks, int queryIdx ); + + static Mat clone_op( Mat m ) { return m.clone(); } + void checkMasks( const vector& masks, int queryDescriptorsCount ) const; + + // Collection of descriptors from train images. + vector trainDescCollection; +}; + +/* + * Brute-force descriptor matcher. 
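+ *
+ * Minimal usage sketch (illustrative only; queryDescriptors and trainDescriptors are
+ * hypothetical Mats of descriptors computed beforehand, e.g. by a DescriptorExtractor):
+ *
+ *     BFMatcher matcher(NORM_HAMMING);                              // Hamming distance suits binary descriptors
+ *     vector<DMatch> matches;
+ *     matcher.match(queryDescriptors, trainDescriptors, matches);   // one best match per query row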
+ * + * For each descriptor in the first set, this matcher finds the closest + * descriptor in the second set by trying each one. + * + * For efficiency, BruteForceMatcher is templated on the distance metric. + * For float descriptors, a common choice would be cv::L2. + */ +class CV_EXPORTS_W BFMatcher : public DescriptorMatcher +{ +public: + CV_WRAP BFMatcher( int normType=NORM_L2, bool crossCheck=false ); + virtual ~BFMatcher() {} + + virtual bool isMaskSupported() const { return true; } + + virtual Ptr clone( bool emptyTrainData=false ) const; + + AlgorithmInfo* info() const; +protected: + virtual void knnMatchImpl( const Mat& queryDescriptors, vector >& matches, int k, + const vector& masks=vector(), bool compactResult=false ); + virtual void radiusMatchImpl( const Mat& queryDescriptors, vector >& matches, float maxDistance, + const vector& masks=vector(), bool compactResult=false ); + + int normType; + bool crossCheck; +}; + + +/* + * Flann based matcher + */ +class CV_EXPORTS_W FlannBasedMatcher : public DescriptorMatcher +{ +public: + CV_WRAP FlannBasedMatcher( const Ptr& indexParams=new flann::KDTreeIndexParams(), + const Ptr& searchParams=new flann::SearchParams() ); + + virtual void add( const vector& descriptors ); + virtual void clear(); + + // Reads matcher object from a file node + virtual void read( const FileNode& ); + // Writes matcher object to a file storage + virtual void write( FileStorage& ) const; + + virtual void train(); + virtual bool isMaskSupported() const; + + virtual Ptr clone( bool emptyTrainData=false ) const; + + AlgorithmInfo* info() const; +protected: + static void convertToDMatches( const DescriptorCollection& descriptors, + const Mat& indices, const Mat& distances, + vector >& matches ); + + virtual void knnMatchImpl( const Mat& queryDescriptors, vector >& matches, int k, + const vector& masks=vector(), bool compactResult=false ); + virtual void radiusMatchImpl( const Mat& queryDescriptors, vector >& matches, float maxDistance, + const vector& masks=vector(), bool compactResult=false ); + + Ptr indexParams; + Ptr searchParams; + Ptr flannIndex; + + DescriptorCollection mergedDescriptors; + int addedDescCount; +}; + +/****************************************************************************************\ +* GenericDescriptorMatcher * +\****************************************************************************************/ +/* + * Abstract interface for a keypoint descriptor and matcher + */ +class GenericDescriptorMatcher; +typedef GenericDescriptorMatcher GenericDescriptorMatch; + +class CV_EXPORTS GenericDescriptorMatcher +{ +public: + GenericDescriptorMatcher(); + virtual ~GenericDescriptorMatcher(); + + /* + * Add train collection: images and keypoints from them. + * images A set of train images. + * ketpoints Keypoint collection that have been detected on train images. + * + * Keypoints for which a descriptor cannot be computed are removed. Such keypoints + * must be filtered in this method befor adding keypoints to train collection "trainPointCollection". + * If inheritor class need perform such prefiltering the method add() must be overloaded. + * In the other class methods programmer has access to the train keypoints by a constant link. + */ + virtual void add( const vector& images, + vector >& keypoints ); + + const vector& getTrainImages() const; + const vector >& getTrainKeypoints() const; + + /* + * Clear images and keypoints storing in train collection. 
+ */ + virtual void clear(); + /* + * Returns true if matcher supports mask to match descriptors. + */ + virtual bool isMaskSupported() = 0; + /* + * Train some inner structures (e.g. flann index or decision trees). + * train() methods is run every time in matching methods. So the method implementation + * should has a check whether these inner structures need be trained/retrained or not. + */ + virtual void train(); + + /* + * Classifies query keypoints. + * queryImage The query image + * queryKeypoints Keypoints from the query image + * trainImage The train image + * trainKeypoints Keypoints from the train image + */ + // Classify keypoints from query image under one train image. + void classify( const Mat& queryImage, vector& queryKeypoints, + const Mat& trainImage, vector& trainKeypoints ) const; + // Classify keypoints from query image under train image collection. + void classify( const Mat& queryImage, vector& queryKeypoints ); + + /* + * Group of methods to match keypoints from image pair. + * Keypoints for which a descriptor cannot be computed are removed. + * train() method is called here. + */ + // Find one best match for each query descriptor (if mask is empty). + void match( const Mat& queryImage, vector& queryKeypoints, + const Mat& trainImage, vector& trainKeypoints, + vector& matches, const Mat& mask=Mat() ) const; + // Find k best matches for each query keypoint (in increasing order of distances). + // compactResult is used when mask is not empty. If compactResult is false matches + // vector will have the same size as queryDescriptors rows. + // If compactResult is true matches vector will not contain matches for fully masked out query descriptors. + void knnMatch( const Mat& queryImage, vector& queryKeypoints, + const Mat& trainImage, vector& trainKeypoints, + vector >& matches, int k, + const Mat& mask=Mat(), bool compactResult=false ) const; + // Find best matches for each query descriptor which have distance less than maxDistance (in increasing order of distances). + void radiusMatch( const Mat& queryImage, vector& queryKeypoints, + const Mat& trainImage, vector& trainKeypoints, + vector >& matches, float maxDistance, + const Mat& mask=Mat(), bool compactResult=false ) const; + /* + * Group of methods to match keypoints from one image to image set. + * See description of similar methods for matching image pair above. + */ + void match( const Mat& queryImage, vector& queryKeypoints, + vector& matches, const vector& masks=vector() ); + void knnMatch( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, int k, + const vector& masks=vector(), bool compactResult=false ); + void radiusMatch( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, float maxDistance, + const vector& masks=vector(), bool compactResult=false ); + + // Reads matcher object from a file node + virtual void read( const FileNode& fn ); + // Writes matcher object to a file storage + virtual void write( FileStorage& fs ) const; + + // Return true if matching object is empty (e.g. feature detector or descriptor matcher are empty) + virtual bool empty() const; + + // Clone the matcher. If emptyTrainData is false the method create deep copy of the object, i.e. copies + // both parameters and train data. If emptyTrainData is true the method create object copy with current parameters + // but with empty train data. 
+    virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const = 0;
+
+    static Ptr<GenericDescriptorMatcher> create( const string& genericDescritptorMatcherType,
+                                                 const string &paramsFilename=string() );
+
+protected:
+    // In fact the matching is implemented only by the following two methods. These methods suppose
+    // that the class object has been trained already. Public match methods call these methods
+    // after calling train().
+    virtual void knnMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+                               vector<vector<DMatch> >& matches, int k,
+                               const vector<Mat>& masks, bool compactResult ) = 0;
+    virtual void radiusMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+                                  vector<vector<DMatch> >& matches, float maxDistance,
+                                  const vector<Mat>& masks, bool compactResult ) = 0;
+    /*
+     * A storage for sets of keypoints together with corresponding images and class IDs
+     */
+    class CV_EXPORTS KeyPointCollection
+    {
+    public:
+        KeyPointCollection();
+        KeyPointCollection( const KeyPointCollection& collection );
+        void add( const vector<Mat>& images, const vector<vector<KeyPoint> >& keypoints );
+        void clear();
+
+        // Returns the total number of keypoints in the collection
+        size_t keypointCount() const;
+        size_t imageCount() const;
+
+        const vector<vector<KeyPoint> >& getKeypoints() const;
+        const vector<KeyPoint>& getKeypoints( int imgIdx ) const;
+        const KeyPoint& getKeyPoint( int imgIdx, int localPointIdx ) const;
+        const KeyPoint& getKeyPoint( int globalPointIdx ) const;
+        void getLocalIdx( int globalPointIdx, int& imgIdx, int& localPointIdx ) const;
+
+        const vector<Mat>& getImages() const;
+        const Mat& getImage( int imgIdx ) const;
+
+    protected:
+        int pointCount;
+
+        vector<Mat> images;
+        vector<vector<KeyPoint> > keypoints;
+        // global indices of the first points in each image, startIndices.size() = keypoints.size()
+        vector<int> startIndices;
+
+    private:
+        static Mat clone_op( Mat m ) { return m.clone(); }
+    };
+
+    KeyPointCollection trainPointCollection;
+};
+
+
+/****************************************************************************************\
+*                                VectorDescriptorMatcher                                 *
+\****************************************************************************************/
+
+/*
+ *  A class used for matching descriptors that can be described as vectors in a finite-dimensional space
+ */
+class VectorDescriptorMatcher;
+typedef VectorDescriptorMatcher VectorDescriptorMatch;
+
+class CV_EXPORTS VectorDescriptorMatcher : public GenericDescriptorMatcher
+{
+public:
+    VectorDescriptorMatcher( const Ptr<DescriptorExtractor>& extractor, const Ptr<DescriptorMatcher>& matcher );
+    virtual ~VectorDescriptorMatcher();
+
+    virtual void add( const vector<Mat>& imgCollection,
+                      vector<vector<KeyPoint> >& pointCollection );
+
+    virtual void clear();
+
+    virtual void train();
+
+    virtual bool isMaskSupported();
+
+    virtual void read( const FileNode& fn );
+    virtual void write( FileStorage& fs ) const;
+    virtual bool empty() const;
+
+    virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
+
+protected:
+    virtual void knnMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+                               vector<vector<DMatch> >& matches, int k,
+                               const vector<Mat>& masks, bool compactResult );
+    virtual void radiusMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+                                  vector<vector<DMatch> >& matches, float maxDistance,
+                                  const vector<Mat>& masks, bool compactResult );
+
+    Ptr<DescriptorExtractor> extractor;
+    Ptr<DescriptorMatcher> matcher;
+};
+
+/****************************************************************************************\
+*                                   Drawing functions                                    *
+\****************************************************************************************/
+struct CV_EXPORTS DrawMatchesFlags
+{
+    enum{ DEFAULT = 0, // Output image matrix will be created (Mat::create),
+                       // i.e.
existing memory of output image may be reused. + // Two source image, matches and single keypoints will be drawn. + // For each keypoint only the center point will be drawn (without + // the circle around keypoint with keypoint size and orientation). + DRAW_OVER_OUTIMG = 1, // Output image matrix will not be created (Mat::create). + // Matches will be drawn on existing content of output image. + NOT_DRAW_SINGLE_POINTS = 2, // Single keypoints will not be drawn. + DRAW_RICH_KEYPOINTS = 4 // For each keypoint the circle around keypoint with keypoint size and + // orientation will be drawn. + }; +}; + +// Draw keypoints. +CV_EXPORTS_W void drawKeypoints( const Mat& image, const vector& keypoints, CV_OUT Mat& outImage, + const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT ); + +// Draws matches of keypints from two images on output image. +CV_EXPORTS void drawMatches( const Mat& img1, const vector& keypoints1, + const Mat& img2, const vector& keypoints2, + const vector& matches1to2, Mat& outImg, + const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), + const vector& matchesMask=vector(), int flags=DrawMatchesFlags::DEFAULT ); + +CV_EXPORTS void drawMatches( const Mat& img1, const vector& keypoints1, + const Mat& img2, const vector& keypoints2, + const vector >& matches1to2, Mat& outImg, + const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), + const vector >& matchesMask=vector >(), int flags=DrawMatchesFlags::DEFAULT ); + +/****************************************************************************************\ +* Functions to evaluate the feature detectors and [generic] descriptor extractors * +\****************************************************************************************/ + +CV_EXPORTS void evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H1to2, + vector* keypoints1, vector* keypoints2, + float& repeatability, int& correspCount, + const Ptr& fdetector=Ptr() ); + +CV_EXPORTS void computeRecallPrecisionCurve( const vector >& matches1to2, + const vector >& correctMatches1to2Mask, + vector& recallPrecisionCurve ); + +CV_EXPORTS float getRecall( const vector& recallPrecisionCurve, float l_precision ); +CV_EXPORTS int getNearestPoint( const vector& recallPrecisionCurve, float l_precision ); + +CV_EXPORTS void evaluateGenericDescriptorMatcher( const Mat& img1, const Mat& img2, const Mat& H1to2, + vector& keypoints1, vector& keypoints2, + vector >* matches1to2, vector >* correctMatches1to2Mask, + vector& recallPrecisionCurve, + const Ptr& dmatch=Ptr() ); + + +/****************************************************************************************\ +* Bag of visual words * +\****************************************************************************************/ +/* + * Abstract base class for training of a 'bag of visual words' vocabulary from a set of descriptors + */ +class CV_EXPORTS BOWTrainer +{ +public: + BOWTrainer(); + virtual ~BOWTrainer(); + + void add( const Mat& descriptors ); + const vector& getDescriptors() const; + int descripotorsCount() const; + + virtual void clear(); + + /* + * Train visual words vocabulary, that is cluster training descriptors and + * compute cluster centers. + * Returns cluster centers. + * + * descriptors Training descriptors computed on images keypoints. 
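+     *
+     * Minimal sketch of the intended flow (illustrative only; allDescriptors is a
+     * hypothetical Mat holding descriptors collected from the training images):
+     *
+     *     BOWKMeansTrainer bowTrainer( 1000 );        // vocabulary of 1000 visual words
+     *     bowTrainer.add( allDescriptors );
+     *     Mat vocabulary = bowTrainer.cluster();      // cluster centers form the vocabulary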
+ */ + virtual Mat cluster() const = 0; + virtual Mat cluster( const Mat& descriptors ) const = 0; + +protected: + vector descriptors; + int size; +}; + +/* + * This is BOWTrainer using cv::kmeans to get vocabulary. + */ +class CV_EXPORTS BOWKMeansTrainer : public BOWTrainer +{ +public: + BOWKMeansTrainer( int clusterCount, const TermCriteria& termcrit=TermCriteria(), + int attempts=3, int flags=KMEANS_PP_CENTERS ); + virtual ~BOWKMeansTrainer(); + + // Returns trained vocabulary (i.e. cluster centers). + virtual Mat cluster() const; + virtual Mat cluster( const Mat& descriptors ) const; + +protected: + + int clusterCount; + TermCriteria termcrit; + int attempts; + int flags; +}; + +/* + * Class to compute image descriptor using bag of visual words. + */ +class CV_EXPORTS BOWImgDescriptorExtractor +{ +public: + BOWImgDescriptorExtractor( const Ptr& dextractor, + const Ptr& dmatcher ); + virtual ~BOWImgDescriptorExtractor(); + + void setVocabulary( const Mat& vocabulary ); + const Mat& getVocabulary() const; + void compute( const Mat& image, vector& keypoints, Mat& imgDescriptor, + vector >* pointIdxsOfClusters=0, Mat* descriptors=0 ); + // compute() is not constant because DescriptorMatcher::match is not constant + + int descriptorSize() const; + int descriptorType() const; + +protected: + Mat vocabulary; + Ptr dextractor; + Ptr dmatcher; +}; + +} /* namespace cv */ + +#endif /* __cplusplus */ + +#endif + +/* End of file. */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/all_indices.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/all_indices.h new file mode 100644 index 0000000..ff53fd8 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/all_indices.h @@ -0,0 +1,155 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + + +#ifndef OPENCV_FLANN_ALL_INDICES_H_ +#define OPENCV_FLANN_ALL_INDICES_H_ + +#include "general.h" + +#include "nn_index.h" +#include "kdtree_index.h" +#include "kdtree_single_index.h" +#include "kmeans_index.h" +#include "composite_index.h" +#include "linear_index.h" +#include "hierarchical_clustering_index.h" +#include "lsh_index.h" +#include "autotuned_index.h" + + +namespace cvflann +{ + +template +struct index_creator +{ + static NNIndex* create(const Matrix& dataset, const IndexParams& params, const Distance& distance) + { + flann_algorithm_t index_type = get_param(params, "algorithm"); + + NNIndex* nnIndex; + switch (index_type) { + case FLANN_INDEX_LINEAR: + nnIndex = new LinearIndex(dataset, params, distance); + break; + case FLANN_INDEX_KDTREE_SINGLE: + nnIndex = new KDTreeSingleIndex(dataset, params, distance); + break; + case FLANN_INDEX_KDTREE: + nnIndex = new KDTreeIndex(dataset, params, distance); + break; + case FLANN_INDEX_KMEANS: + nnIndex = new KMeansIndex(dataset, params, distance); + break; + case FLANN_INDEX_COMPOSITE: + nnIndex = new CompositeIndex(dataset, params, distance); + break; + case FLANN_INDEX_AUTOTUNED: + nnIndex = new AutotunedIndex(dataset, params, distance); + break; + case FLANN_INDEX_HIERARCHICAL: + nnIndex = new HierarchicalClusteringIndex(dataset, params, distance); + break; + case FLANN_INDEX_LSH: + nnIndex = new LshIndex(dataset, params, distance); + break; + default: + throw FLANNException("Unknown index type"); + } + + return nnIndex; + } +}; + +template +struct index_creator +{ + static NNIndex* create(const Matrix& dataset, const IndexParams& params, const Distance& distance) + { + flann_algorithm_t index_type = get_param(params, "algorithm"); + + NNIndex* nnIndex; + switch (index_type) { + case FLANN_INDEX_LINEAR: + nnIndex = new LinearIndex(dataset, params, distance); + break; + case FLANN_INDEX_KMEANS: + nnIndex = new KMeansIndex(dataset, params, distance); + break; + case FLANN_INDEX_HIERARCHICAL: + nnIndex = new HierarchicalClusteringIndex(dataset, params, distance); + break; + case FLANN_INDEX_LSH: + nnIndex = new LshIndex(dataset, params, distance); + break; + default: + throw FLANNException("Unknown index type"); + } + + return nnIndex; + } +}; + +template +struct index_creator +{ + static NNIndex* create(const Matrix& dataset, const IndexParams& params, const Distance& distance) + { + flann_algorithm_t index_type = get_param(params, "algorithm"); + + NNIndex* nnIndex; + switch (index_type) { + case FLANN_INDEX_LINEAR: + nnIndex = new LinearIndex(dataset, params, distance); + break; + case FLANN_INDEX_HIERARCHICAL: + nnIndex = new HierarchicalClusteringIndex(dataset, params, distance); + break; + case FLANN_INDEX_LSH: + nnIndex = new LshIndex(dataset, params, distance); + break; + default: + throw FLANNException("Unknown index type"); + } + + return nnIndex; + } +}; + +template +NNIndex* create_index_by_type(const Matrix& dataset, const IndexParams& params, const Distance& distance) +{ + return index_creator::create(dataset, params,distance); +} + +} + +#endif /* OPENCV_FLANN_ALL_INDICES_H_ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/allocator.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/allocator.h new file mode 100644 index 0000000..26091d0 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/allocator.h @@ -0,0 +1,188 @@ 
+/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_ALLOCATOR_H_ +#define OPENCV_FLANN_ALLOCATOR_H_ + +#include +#include + + +namespace cvflann +{ + +/** + * Allocates (using C's malloc) a generic type T. + * + * Params: + * count = number of instances to allocate. + * Returns: pointer (of type T*) to memory buffer + */ +template +T* allocate(size_t count = 1) +{ + T* mem = (T*) ::malloc(sizeof(T)*count); + return mem; +} + + +/** + * Pooled storage allocator + * + * The following routines allow for the efficient allocation of storage in + * small chunks from a specified pool. Rather than allowing each structure + * to be freed individually, an entire pool of storage is freed at once. + * This method has two advantages over just using malloc() and free(). First, + * it is far more efficient for allocating small objects, as there is + * no overhead for remembering all the information needed to free each + * object or consolidating fragmented memory. Second, the decision about + * how long to keep an object is made at the time of allocation, and there + * is no need to track down all the objects to free them. + * + */ + +const size_t WORDSIZE=16; +const size_t BLOCKSIZE=8192; + +class PooledAllocator +{ + /* We maintain memory alignment to word boundaries by requiring that all + allocations be in multiples of the machine wordsize. */ + /* Size of machine word in bytes. Must be power of 2. */ + /* Minimum number of bytes requested at a time from the system. Must be multiple of WORDSIZE. */ + + + int remaining; /* Number of bytes left in current block of storage. */ + void* base; /* Pointer to base of current block of storage. */ + void* loc; /* Current location in block to next allocate memory. */ + int blocksize; + + +public: + int usedMemory; + int wastedMemory; + + /** + Default constructor. Initializes a new pool. 
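+
+        Usage sketch (illustrative only): many small objects are carved out of one pool
+        and all of them are released together when the pool is destroyed, e.g.
+
+            PooledAllocator pool;
+            int* indices = pool.allocate<int>(32);   // 32 ints taken from the current block
+            // no per-object free; ~PooledAllocator() walks and frees every block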
+ */ + PooledAllocator(int blockSize = BLOCKSIZE) + { + blocksize = blockSize; + remaining = 0; + base = NULL; + + usedMemory = 0; + wastedMemory = 0; + } + + /** + * Destructor. Frees all the memory allocated in this pool. + */ + ~PooledAllocator() + { + void* prev; + + while (base != NULL) { + prev = *((void**) base); /* Get pointer to prev block. */ + ::free(base); + base = prev; + } + } + + /** + * Returns a pointer to a piece of new memory of the given size in bytes + * allocated from the pool. + */ + void* allocateMemory(int size) + { + int blockSize; + + /* Round size up to a multiple of wordsize. The following expression + only works for WORDSIZE that is a power of 2, by masking last bits of + incremented size to zero. + */ + size = (size + (WORDSIZE - 1)) & ~(WORDSIZE - 1); + + /* Check whether a new block must be allocated. Note that the first word + of a block is reserved for a pointer to the previous block. + */ + if (size > remaining) { + + wastedMemory += remaining; + + /* Allocate new storage. */ + blockSize = (size + sizeof(void*) + (WORDSIZE-1) > BLOCKSIZE) ? + size + sizeof(void*) + (WORDSIZE-1) : BLOCKSIZE; + + // use the standard C malloc to allocate memory + void* m = ::malloc(blockSize); + if (!m) { + fprintf(stderr,"Failed to allocate memory.\n"); + return NULL; + } + + /* Fill first word of new block with pointer to previous block. */ + ((void**) m)[0] = base; + base = m; + + int shift = 0; + //int shift = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) & (WORDSIZE-1))) & (WORDSIZE-1); + + remaining = blockSize - sizeof(void*) - shift; + loc = ((char*)m + sizeof(void*) + shift); + } + void* rloc = loc; + loc = (char*)loc + size; + remaining -= size; + + usedMemory += size; + + return rloc; + } + + /** + * Allocates (using this pool) a generic type T. + * + * Params: + * count = number of instances to allocate. + * Returns: pointer (of type T*) to memory buffer + */ + template + T* allocate(size_t count = 1) + { + T* mem = (T*) this->allocateMemory((int)(sizeof(T)*count)); + return mem; + } + +}; + +} + +#endif //OPENCV_FLANN_ALLOCATOR_H_ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/any.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/any.h new file mode 100644 index 0000000..7140b2a --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/any.h @@ -0,0 +1,304 @@ +#ifndef OPENCV_FLANN_ANY_H_ +#define OPENCV_FLANN_ANY_H_ +/* + * (C) Copyright Christopher Diggins 2005-2011 + * (C) Copyright Pablo Aguilar 2005 + * (C) Copyright Kevlin Henney 2001 + * + * Distributed under the Boost Software License, Version 1.0. 
(See + * accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt + * + * Adapted for FLANN by Marius Muja + */ + +#include "defines.h" +#include +#include +#include + +namespace cvflann +{ + +namespace anyimpl +{ + +struct bad_any_cast +{ +}; + +struct empty_any +{ +}; + +inline std::ostream& operator <<(std::ostream& out, const empty_any&) +{ + out << "[empty_any]"; + return out; +} + +struct base_any_policy +{ + virtual void static_delete(void** x) = 0; + virtual void copy_from_value(void const* src, void** dest) = 0; + virtual void clone(void* const* src, void** dest) = 0; + virtual void move(void* const* src, void** dest) = 0; + virtual void* get_value(void** src) = 0; + virtual ::size_t get_size() = 0; + virtual const std::type_info& type() = 0; + virtual void print(std::ostream& out, void* const* src) = 0; + +#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY + virtual ~base_any_policy() {} +#endif +}; + +template +struct typed_base_any_policy : base_any_policy +{ + virtual ::size_t get_size() { return sizeof(T); } + virtual const std::type_info& type() { return typeid(T); } + +}; + +template +struct small_any_policy : typed_base_any_policy +{ + virtual void static_delete(void**) { } + virtual void copy_from_value(void const* src, void** dest) + { + new (dest) T(* reinterpret_cast(src)); + } + virtual void clone(void* const* src, void** dest) { *dest = *src; } + virtual void move(void* const* src, void** dest) { *dest = *src; } + virtual void* get_value(void** src) { return reinterpret_cast(src); } + virtual void print(std::ostream& out, void* const* src) { out << *reinterpret_cast(src); } +}; + +template +struct big_any_policy : typed_base_any_policy +{ + virtual void static_delete(void** x) + { + if (* x) delete (* reinterpret_cast(x)); *x = NULL; + } + virtual void copy_from_value(void const* src, void** dest) + { + *dest = new T(*reinterpret_cast(src)); + } + virtual void clone(void* const* src, void** dest) + { + *dest = new T(**reinterpret_cast(src)); + } + virtual void move(void* const* src, void** dest) + { + (*reinterpret_cast(dest))->~T(); + **reinterpret_cast(dest) = **reinterpret_cast(src); + } + virtual void* get_value(void** src) { return *src; } + virtual void print(std::ostream& out, void* const* src) { out << *reinterpret_cast(*src); } +}; + +template<> inline void big_any_policy::print(std::ostream& out, void* const* src) +{ + out << int(*reinterpret_cast(*src)); +} + +template<> inline void big_any_policy::print(std::ostream& out, void* const* src) +{ + out << int(*reinterpret_cast(*src)); +} + +template +struct choose_policy +{ + typedef big_any_policy type; +}; + +template +struct choose_policy +{ + typedef small_any_policy type; +}; + +struct any; + +/// Choosing the policy for an any type is illegal, but should never happen. +/// This is designed to throw a compiler error. +template<> +struct choose_policy +{ + typedef void type; +}; + +/// Specializations for small types. +#define SMALL_POLICY(TYPE) \ + template<> \ + struct choose_policy { typedef small_any_policy type; \ + } + +SMALL_POLICY(signed char); +SMALL_POLICY(unsigned char); +SMALL_POLICY(signed short); +SMALL_POLICY(unsigned short); +SMALL_POLICY(signed int); +SMALL_POLICY(unsigned int); +SMALL_POLICY(signed long); +SMALL_POLICY(unsigned long); +SMALL_POLICY(float); +SMALL_POLICY(bool); + +#undef SMALL_POLICY + +/// This function will return a different policy for each type. 
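+/// For illustration only: the any class defined below routes its storage through
+/// these policies, so a typical round trip looks like
+///     cvflann::any a = 42;        // an int picks small_any_policy<signed int>
+///     int v = a.cast<int>();      // cast() first checks the stored type_info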
+template +base_any_policy* get_policy() +{ + static typename choose_policy::type policy; + return &policy; +} +} // namespace anyimpl + +struct any +{ +private: + // fields + anyimpl::base_any_policy* policy; + void* object; + +public: + /// Initializing constructor. + template + any(const T& x) + : policy(anyimpl::get_policy()), object(NULL) + { + assign(x); + } + + /// Empty constructor. + any() + : policy(anyimpl::get_policy()), object(NULL) + { } + + /// Special initializing constructor for string literals. + any(const char* x) + : policy(anyimpl::get_policy()), object(NULL) + { + assign(x); + } + + /// Copy constructor. + any(const any& x) + : policy(anyimpl::get_policy()), object(NULL) + { + assign(x); + } + + /// Destructor. + ~any() + { + policy->static_delete(&object); + } + + /// Assignment function from another any. + any& assign(const any& x) + { + reset(); + policy = x.policy; + policy->clone(&x.object, &object); + return *this; + } + + /// Assignment function. + template + any& assign(const T& x) + { + reset(); + policy = anyimpl::get_policy(); + policy->copy_from_value(&x, &object); + return *this; + } + + /// Assignment operator. + template + any& operator=(const T& x) + { + return assign(x); + } + + /// Assignment operator, specialed for literal strings. + /// They have types like const char [6] which don't work as expected. + any& operator=(const char* x) + { + return assign(x); + } + + /// Utility functions + any& swap(any& x) + { + std::swap(policy, x.policy); + std::swap(object, x.object); + return *this; + } + + /// Cast operator. You can only cast to the original type. + template + T& cast() + { + if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast(); + T* r = reinterpret_cast(policy->get_value(&object)); + return *r; + } + + /// Cast operator. You can only cast to the original type. + template + const T& cast() const + { + if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast(); + T* r = reinterpret_cast(policy->get_value(const_cast(&object))); + return *r; + } + + /// Returns true if the any contains no value. + bool empty() const + { + return policy->type() == typeid(anyimpl::empty_any); + } + + /// Frees any allocated memory, and sets the value to NULL. + void reset() + { + policy->static_delete(&object); + policy = anyimpl::get_policy(); + } + + /// Returns true if the two types are the same. + bool compatible(const any& x) const + { + return policy->type() == x.policy->type(); + } + + /// Returns if the type is compatible with the policy + template + bool has_type() + { + return policy->type() == typeid(T); + } + + const std::type_info& type() const + { + return policy->type(); + } + + friend std::ostream& operator <<(std::ostream& out, const any& any_val); +}; + +inline std::ostream& operator <<(std::ostream& out, const any& any_val) +{ + any_val.policy->print(out,&any_val.object); + return out; +} + +} + +#endif // OPENCV_FLANN_ANY_H_ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/autotuned_index.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/autotuned_index.h new file mode 100644 index 0000000..8d53175 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/autotuned_index.h @@ -0,0 +1,583 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. 
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ +#ifndef OPENCV_FLANN_AUTOTUNED_INDEX_H_ +#define OPENCV_FLANN_AUTOTUNED_INDEX_H_ + +#include "general.h" +#include "nn_index.h" +#include "ground_truth.h" +#include "index_testing.h" +#include "sampling.h" +#include "kdtree_index.h" +#include "kdtree_single_index.h" +#include "kmeans_index.h" +#include "composite_index.h" +#include "linear_index.h" +#include "logger.h" + +namespace cvflann +{ + +template +NNIndex* create_index_by_type(const Matrix& dataset, const IndexParams& params, const Distance& distance); + + +struct AutotunedIndexParams : public IndexParams +{ + AutotunedIndexParams(float target_precision = 0.8, float build_weight = 0.01, float memory_weight = 0, float sample_fraction = 0.1) + { + (*this)["algorithm"] = FLANN_INDEX_AUTOTUNED; + // precision desired (used for autotuning, -1 otherwise) + (*this)["target_precision"] = target_precision; + // build tree time weighting factor + (*this)["build_weight"] = build_weight; + // index memory weighting factor + (*this)["memory_weight"] = memory_weight; + // what fraction of the dataset to use for autotuning + (*this)["sample_fraction"] = sample_fraction; + } +}; + + +template +class AutotunedIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + AutotunedIndex(const Matrix& inputData, const IndexParams& params = AutotunedIndexParams(), Distance d = Distance()) : + dataset_(inputData), distance_(d) + { + target_precision_ = get_param(params, "target_precision",0.8f); + build_weight_ = get_param(params,"build_weight", 0.01f); + memory_weight_ = get_param(params, "memory_weight", 0.0f); + sample_fraction_ = get_param(params,"sample_fraction", 0.1f); + bestIndex_ = NULL; + } + + AutotunedIndex(const AutotunedIndex&); + AutotunedIndex& operator=(const AutotunedIndex&); + + virtual ~AutotunedIndex() + { + if (bestIndex_ != NULL) { + delete bestIndex_; + bestIndex_ = NULL; + } + } + + /** + * Method responsible with building the index. 
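+     * Illustrative note (this paragraph is an added gloss, not upstream documentation):
+     * buildIndex() benchmarks the candidate index types on a sampled subset and keeps the
+     * cheapest configuration that reaches the requested precision; e.g. constructing the
+     * index with AutotunedIndexParams(0.9f, 0.01f) asks for 90% precision with a small
+     * build-time weight.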
+ */ + virtual void buildIndex() + { + bestParams_ = estimateBuildParams(); + Logger::info("----------------------------------------------------\n"); + Logger::info("Autotuned parameters:\n"); + print_params(bestParams_); + Logger::info("----------------------------------------------------\n"); + + bestIndex_ = create_index_by_type(dataset_, bestParams_, distance_); + bestIndex_->buildIndex(); + speedup_ = estimateSearchParams(bestSearchParams_); + Logger::info("----------------------------------------------------\n"); + Logger::info("Search parameters:\n"); + print_params(bestSearchParams_); + Logger::info("----------------------------------------------------\n"); + } + + /** + * Saves the index to a stream + */ + virtual void saveIndex(FILE* stream) + { + save_value(stream, (int)bestIndex_->getType()); + bestIndex_->saveIndex(stream); + save_value(stream, get_param(bestSearchParams_, "checks")); + } + + /** + * Loads the index from a stream + */ + virtual void loadIndex(FILE* stream) + { + int index_type; + + load_value(stream, index_type); + IndexParams params; + params["algorithm"] = (flann_algorithm_t)index_type; + bestIndex_ = create_index_by_type(dataset_, params, distance_); + bestIndex_->loadIndex(stream); + int checks; + load_value(stream, checks); + bestSearchParams_["checks"] = checks; + } + + /** + * Method that searches for nearest-neighbors + */ + virtual void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) + { + int checks = get_param(searchParams,"checks",FLANN_CHECKS_AUTOTUNED); + if (checks == FLANN_CHECKS_AUTOTUNED) { + bestIndex_->findNeighbors(result, vec, bestSearchParams_); + } + else { + bestIndex_->findNeighbors(result, vec, searchParams); + } + } + + + IndexParams getParameters() const + { + return bestIndex_->getParameters(); + } + + SearchParams getSearchParameters() const + { + return bestSearchParams_; + } + + float getSpeedup() const + { + return speedup_; + } + + + /** + * Number of features in this index. + */ + virtual size_t size() const + { + return bestIndex_->size(); + } + + /** + * The length of each vector in this index. + */ + virtual size_t veclen() const + { + return bestIndex_->veclen(); + } + + /** + * The amount of memory (in bytes) this index uses. 
+ */ + virtual int usedMemory() const + { + return bestIndex_->usedMemory(); + } + + /** + * Algorithm name + */ + virtual flann_algorithm_t getType() const + { + return FLANN_INDEX_AUTOTUNED; + } + +private: + + struct CostData + { + float searchTimeCost; + float buildTimeCost; + float memoryCost; + float totalCost; + IndexParams params; + }; + + void evaluate_kmeans(CostData& cost) + { + StartStopTimer t; + int checks; + const int nn = 1; + + Logger::info("KMeansTree using params: max_iterations=%d, branching=%d\n", + get_param(cost.params,"iterations"), + get_param(cost.params,"branching")); + KMeansIndex kmeans(sampledDataset_, cost.params, distance_); + // measure index build time + t.start(); + kmeans.buildIndex(); + t.stop(); + float buildTime = (float)t.value; + + // measure search time + float searchTime = test_index_precision(kmeans, sampledDataset_, testDataset_, gt_matches_, target_precision_, checks, distance_, nn); + + float datasetMemory = float(sampledDataset_.rows * sampledDataset_.cols * sizeof(float)); + cost.memoryCost = (kmeans.usedMemory() + datasetMemory) / datasetMemory; + cost.searchTimeCost = searchTime; + cost.buildTimeCost = buildTime; + Logger::info("KMeansTree buildTime=%g, searchTime=%g, build_weight=%g\n", buildTime, searchTime, build_weight_); + } + + + void evaluate_kdtree(CostData& cost) + { + StartStopTimer t; + int checks; + const int nn = 1; + + Logger::info("KDTree using params: trees=%d\n", get_param(cost.params,"trees")); + KDTreeIndex kdtree(sampledDataset_, cost.params, distance_); + + t.start(); + kdtree.buildIndex(); + t.stop(); + float buildTime = (float)t.value; + + //measure search time + float searchTime = test_index_precision(kdtree, sampledDataset_, testDataset_, gt_matches_, target_precision_, checks, distance_, nn); + + float datasetMemory = float(sampledDataset_.rows * sampledDataset_.cols * sizeof(float)); + cost.memoryCost = (kdtree.usedMemory() + datasetMemory) / datasetMemory; + cost.searchTimeCost = searchTime; + cost.buildTimeCost = buildTime; + Logger::info("KDTree buildTime=%g, searchTime=%g\n", buildTime, searchTime); + } + + + // struct KMeansSimpleDownhillFunctor { + // + // Autotune& autotuner; + // KMeansSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {}; + // + // float operator()(int* params) { + // + // float maxFloat = numeric_limits::max(); + // + // if (params[0]<2) return maxFloat; + // if (params[1]<0) return maxFloat; + // + // CostData c; + // c.params["algorithm"] = KMEANS; + // c.params["centers-init"] = CENTERS_RANDOM; + // c.params["branching"] = params[0]; + // c.params["max-iterations"] = params[1]; + // + // autotuner.evaluate_kmeans(c); + // + // return c.timeCost; + // + // } + // }; + // + // struct KDTreeSimpleDownhillFunctor { + // + // Autotune& autotuner; + // KDTreeSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {}; + // + // float operator()(int* params) { + // float maxFloat = numeric_limits::max(); + // + // if (params[0]<1) return maxFloat; + // + // CostData c; + // c.params["algorithm"] = KDTREE; + // c.params["trees"] = params[0]; + // + // autotuner.evaluate_kdtree(c); + // + // return c.timeCost; + // + // } + // }; + + + + void optimizeKMeans(std::vector& costs) + { + Logger::info("KMEANS, Step 1: Exploring parameter space\n"); + + // explore kmeans parameters space using combinations of the parameters below + int maxIterations[] = { 1, 5, 10, 15 }; + int branchingFactors[] = { 16, 32, 64, 128, 256 }; + + int kmeansParamSpaceSize = 
FLANN_ARRAY_LEN(maxIterations) * FLANN_ARRAY_LEN(branchingFactors); + costs.reserve(costs.size() + kmeansParamSpaceSize); + + // evaluate kmeans for all parameter combinations + for (size_t i = 0; i < FLANN_ARRAY_LEN(maxIterations); ++i) { + for (size_t j = 0; j < FLANN_ARRAY_LEN(branchingFactors); ++j) { + CostData cost; + cost.params["algorithm"] = FLANN_INDEX_KMEANS; + cost.params["centers_init"] = FLANN_CENTERS_RANDOM; + cost.params["iterations"] = maxIterations[i]; + cost.params["branching"] = branchingFactors[j]; + + evaluate_kmeans(cost); + costs.push_back(cost); + } + } + + // Logger::info("KMEANS, Step 2: simplex-downhill optimization\n"); + // + // const int n = 2; + // // choose initial simplex points as the best parameters so far + // int kmeansNMPoints[n*(n+1)]; + // float kmeansVals[n+1]; + // for (int i=0;i& costs) + { + Logger::info("KD-TREE, Step 1: Exploring parameter space\n"); + + // explore kd-tree parameters space using the parameters below + int testTrees[] = { 1, 4, 8, 16, 32 }; + + // evaluate kdtree for all parameter combinations + for (size_t i = 0; i < FLANN_ARRAY_LEN(testTrees); ++i) { + CostData cost; + cost.params["trees"] = testTrees[i]; + + evaluate_kdtree(cost); + costs.push_back(cost); + } + + // Logger::info("KD-TREE, Step 2: simplex-downhill optimization\n"); + // + // const int n = 1; + // // choose initial simplex points as the best parameters so far + // int kdtreeNMPoints[n*(n+1)]; + // float kdtreeVals[n+1]; + // for (int i=0;i costs; + + int sampleSize = int(sample_fraction_ * dataset_.rows); + int testSampleSize = std::min(sampleSize / 10, 1000); + + Logger::info("Entering autotuning, dataset size: %d, sampleSize: %d, testSampleSize: %d, target precision: %g\n", dataset_.rows, sampleSize, testSampleSize, target_precision_); + + // For a very small dataset, it makes no sense to build any fancy index, just + // use linear search + if (testSampleSize < 10) { + Logger::info("Choosing linear, dataset too small\n"); + return LinearIndexParams(); + } + + // We use a fraction of the original dataset to speedup the autotune algorithm + sampledDataset_ = random_sample(dataset_, sampleSize); + // We use a cross-validation approach, first we sample a testset from the dataset + testDataset_ = random_sample(sampledDataset_, testSampleSize, true); + + // We compute the ground truth using linear search + Logger::info("Computing ground truth... 
\n"); + gt_matches_ = Matrix(new int[testDataset_.rows], testDataset_.rows, 1); + StartStopTimer t; + t.start(); + compute_ground_truth(sampledDataset_, testDataset_, gt_matches_, 0, distance_); + t.stop(); + + CostData linear_cost; + linear_cost.searchTimeCost = (float)t.value; + linear_cost.buildTimeCost = 0; + linear_cost.memoryCost = 0; + linear_cost.params["algorithm"] = FLANN_INDEX_LINEAR; + + costs.push_back(linear_cost); + + // Start parameter autotune process + Logger::info("Autotuning parameters...\n"); + + optimizeKMeans(costs); + optimizeKDTree(costs); + + float bestTimeCost = costs[0].searchTimeCost; + for (size_t i = 0; i < costs.size(); ++i) { + float timeCost = costs[i].buildTimeCost * build_weight_ + costs[i].searchTimeCost; + if (timeCost < bestTimeCost) { + bestTimeCost = timeCost; + } + } + + float bestCost = costs[0].searchTimeCost / bestTimeCost; + IndexParams bestParams = costs[0].params; + if (bestTimeCost > 0) { + for (size_t i = 0; i < costs.size(); ++i) { + float crtCost = (costs[i].buildTimeCost * build_weight_ + costs[i].searchTimeCost) / bestTimeCost + + memory_weight_ * costs[i].memoryCost; + if (crtCost < bestCost) { + bestCost = crtCost; + bestParams = costs[i].params; + } + } + } + + delete[] gt_matches_.data; + delete[] testDataset_.data; + delete[] sampledDataset_.data; + + return bestParams; + } + + + + /** + * Estimates the search time parameters needed to get the desired precision. + * Precondition: the index is built + * Postcondition: the searchParams will have the optimum params set, also the speedup obtained over linear search. + */ + float estimateSearchParams(SearchParams& searchParams) + { + const int nn = 1; + const size_t SAMPLE_COUNT = 1000; + + assert(bestIndex_ != NULL); // must have a valid index + + float speedup = 0; + + int samples = (int)std::min(dataset_.rows / 10, SAMPLE_COUNT); + if (samples > 0) { + Matrix testDataset = random_sample(dataset_, samples); + + Logger::info("Computing ground truth\n"); + + // we need to compute the ground truth first + Matrix gt_matches(new int[testDataset.rows], testDataset.rows, 1); + StartStopTimer t; + t.start(); + compute_ground_truth(dataset_, testDataset, gt_matches, 1, distance_); + t.stop(); + float linear = (float)t.value; + + int checks; + Logger::info("Estimating number of checks\n"); + + float searchTime; + float cb_index; + if (bestIndex_->getType() == FLANN_INDEX_KMEANS) { + Logger::info("KMeans algorithm, estimating cluster border factor\n"); + KMeansIndex* kmeans = (KMeansIndex*)bestIndex_; + float bestSearchTime = -1; + float best_cb_index = -1; + int best_checks = -1; + for (cb_index = 0; cb_index < 1.1f; cb_index += 0.2f) { + kmeans->set_cb_index(cb_index); + searchTime = test_index_precision(*kmeans, dataset_, testDataset, gt_matches, target_precision_, checks, distance_, nn, 1); + if ((searchTime < bestSearchTime) || (bestSearchTime == -1)) { + bestSearchTime = searchTime; + best_cb_index = cb_index; + best_checks = checks; + } + } + searchTime = bestSearchTime; + cb_index = best_cb_index; + checks = best_checks; + + kmeans->set_cb_index(best_cb_index); + Logger::info("Optimum cb_index: %g\n", cb_index); + bestParams_["cb_index"] = cb_index; + } + else { + searchTime = test_index_precision(*bestIndex_, dataset_, testDataset, gt_matches, target_precision_, checks, distance_, nn, 1); + } + + Logger::info("Required number of checks: %d \n", checks); + searchParams["checks"] = checks; + + speedup = linear / searchTime; + + delete[] gt_matches.data; + delete[] testDataset.data; + } + 
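+        // speedup is the measured linear-scan time divided by the tuned index's search
+        // time at the target precision; it stays 0 when the dataset was too small to
+        // draw a test sample above.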
+ return speedup; + } + +private: + NNIndex* bestIndex_; + + IndexParams bestParams_; + SearchParams bestSearchParams_; + + Matrix sampledDataset_; + Matrix testDataset_; + Matrix gt_matches_; + + float speedup_; + + /** + * The dataset used by this index + */ + const Matrix dataset_; + + /** + * Index parameters + */ + float target_precision_; + float build_weight_; + float memory_weight_; + float sample_fraction_; + + Distance distance_; + + +}; +} + +#endif /* OPENCV_FLANN_AUTOTUNED_INDEX_H_ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/composite_index.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/composite_index.h new file mode 100644 index 0000000..527ca1a --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/composite_index.h @@ -0,0 +1,194 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_COMPOSITE_INDEX_H_ +#define OPENCV_FLANN_COMPOSITE_INDEX_H_ + +#include "general.h" +#include "nn_index.h" +#include "kdtree_index.h" +#include "kmeans_index.h" + +namespace cvflann +{ + +/** + * Index parameters for the CompositeIndex. + */ +struct CompositeIndexParams : public IndexParams +{ + CompositeIndexParams(int trees = 4, int branching = 32, int iterations = 11, + flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, float cb_index = 0.2 ) + { + (*this)["algorithm"] = FLANN_INDEX_KMEANS; + // number of randomized trees to use (for kdtree) + (*this)["trees"] = trees; + // branching factor + (*this)["branching"] = branching; + // max iterations to perform in one kmeans clustering (kmeans tree) + (*this)["iterations"] = iterations; + // algorithm used for picking the initial cluster centers for kmeans tree + (*this)["centers_init"] = centers_init; + // cluster boundary index. 
Used when searching the kmeans tree + (*this)["cb_index"] = cb_index; + } +}; + + +/** + * This index builds a kd-tree index and a k-means index and performs nearest + * neighbour search both indexes. This gives a slight boost in search performance + * as some of the neighbours that are missed by one index are found by the other. + */ +template +class CompositeIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + /** + * Index constructor + * @param inputData dataset containing the points to index + * @param params Index parameters + * @param d Distance functor + * @return + */ + CompositeIndex(const Matrix& inputData, const IndexParams& params = CompositeIndexParams(), + Distance d = Distance()) : index_params_(params) + { + kdtree_index_ = new KDTreeIndex(inputData, params, d); + kmeans_index_ = new KMeansIndex(inputData, params, d); + + } + + CompositeIndex(const CompositeIndex&); + CompositeIndex& operator=(const CompositeIndex&); + + virtual ~CompositeIndex() + { + delete kdtree_index_; + delete kmeans_index_; + } + + /** + * @return The index type + */ + flann_algorithm_t getType() const + { + return FLANN_INDEX_COMPOSITE; + } + + /** + * @return Size of the index + */ + size_t size() const + { + return kdtree_index_->size(); + } + + /** + * \returns The dimensionality of the features in this index. + */ + size_t veclen() const + { + return kdtree_index_->veclen(); + } + + /** + * \returns The amount of memory (in bytes) used by the index. + */ + int usedMemory() const + { + return kmeans_index_->usedMemory() + kdtree_index_->usedMemory(); + } + + /** + * \brief Builds the index + */ + void buildIndex() + { + Logger::info("Building kmeans tree...\n"); + kmeans_index_->buildIndex(); + Logger::info("Building kdtree tree...\n"); + kdtree_index_->buildIndex(); + } + + /** + * \brief Saves the index to a stream + * \param stream The stream to save the index to + */ + void saveIndex(FILE* stream) + { + kmeans_index_->saveIndex(stream); + kdtree_index_->saveIndex(stream); + } + + /** + * \brief Loads the index from a stream + * \param stream The stream from which the index is loaded + */ + void loadIndex(FILE* stream) + { + kmeans_index_->loadIndex(stream); + kdtree_index_->loadIndex(stream); + } + + /** + * \returns The index parameters + */ + IndexParams getParameters() const + { + return index_params_; + } + + /** + * \brief Method that searches for nearest-neighbours + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) + { + kmeans_index_->findNeighbors(result, vec, searchParams); + kdtree_index_->findNeighbors(result, vec, searchParams); + } + +private: + /** The k-means index */ + KMeansIndex* kmeans_index_; + + /** The kd-tree index */ + KDTreeIndex* kdtree_index_; + + /** The index parameters */ + const IndexParams index_params_; +}; + +} + +#endif //OPENCV_FLANN_COMPOSITE_INDEX_H_ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/config.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/config.h new file mode 100644 index 0000000..56832fd --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/config.h @@ -0,0 +1,38 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. 
+ * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + + +#ifndef OPENCV_FLANN_CONFIG_H_ +#define OPENCV_FLANN_CONFIG_H_ + +#ifdef FLANN_VERSION_ +#undef FLANN_VERSION_ +#endif +#define FLANN_VERSION_ "1.6.10" + +#endif /* OPENCV_FLANN_CONFIG_H_ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/defines.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/defines.h new file mode 100644 index 0000000..13833b3 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/defines.h @@ -0,0 +1,176 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + + +#ifndef OPENCV_FLANN_DEFINES_H_ +#define OPENCV_FLANN_DEFINES_H_ + +#include "config.h" + +#ifdef FLANN_EXPORT +#undef FLANN_EXPORT +#endif +#ifdef WIN32 +/* win32 dll export/import directives */ + #ifdef FLANN_EXPORTS + #define FLANN_EXPORT __declspec(dllexport) + #elif defined(FLANN_STATIC) + #define FLANN_EXPORT + #else + #define FLANN_EXPORT __declspec(dllimport) + #endif +#else +/* unix needs nothing */ + #define FLANN_EXPORT +#endif + + +#ifdef FLANN_DEPRECATED +#undef FLANN_DEPRECATED +#endif +#ifdef __GNUC__ +#define FLANN_DEPRECATED __attribute__ ((deprecated)) +#elif defined(_MSC_VER) +#define FLANN_DEPRECATED __declspec(deprecated) +#else +#pragma message("WARNING: You need to implement FLANN_DEPRECATED for this compiler") +#define FLANN_DEPRECATED +#endif + + +#undef FLANN_PLATFORM_32_BIT +#undef FLANN_PLATFORM_64_BIT +#if defined __amd64__ || defined __x86_64__ || defined _WIN64 || defined _M_X64 +#define FLANN_PLATFORM_64_BIT +#else +#define FLANN_PLATFORM_32_BIT +#endif + + +#undef FLANN_ARRAY_LEN +#define FLANN_ARRAY_LEN(a) (sizeof(a)/sizeof(a[0])) + +namespace cvflann { + +/* Nearest neighbour index algorithms */ +enum flann_algorithm_t +{ + FLANN_INDEX_LINEAR = 0, + FLANN_INDEX_KDTREE = 1, + FLANN_INDEX_KMEANS = 2, + FLANN_INDEX_COMPOSITE = 3, + FLANN_INDEX_KDTREE_SINGLE = 4, + FLANN_INDEX_HIERARCHICAL = 5, + FLANN_INDEX_LSH = 6, + FLANN_INDEX_SAVED = 254, + FLANN_INDEX_AUTOTUNED = 255, + + // deprecated constants, should use the FLANN_INDEX_* ones instead + LINEAR = 0, + KDTREE = 1, + KMEANS = 2, + COMPOSITE = 3, + KDTREE_SINGLE = 4, + SAVED = 254, + AUTOTUNED = 255 +}; + + + +enum flann_centers_init_t +{ + FLANN_CENTERS_RANDOM = 0, + FLANN_CENTERS_GONZALES = 1, + FLANN_CENTERS_KMEANSPP = 2, + + // deprecated constants, should use the FLANN_CENTERS_* ones instead + CENTERS_RANDOM = 0, + CENTERS_GONZALES = 1, + CENTERS_KMEANSPP = 2 +}; + +enum flann_log_level_t +{ + FLANN_LOG_NONE = 0, + FLANN_LOG_FATAL = 1, + FLANN_LOG_ERROR = 2, + FLANN_LOG_WARN = 3, + FLANN_LOG_INFO = 4 +}; + +enum flann_distance_t +{ + FLANN_DIST_EUCLIDEAN = 1, + FLANN_DIST_L2 = 1, + FLANN_DIST_MANHATTAN = 2, + FLANN_DIST_L1 = 2, + FLANN_DIST_MINKOWSKI = 3, + FLANN_DIST_MAX = 4, + FLANN_DIST_HIST_INTERSECT = 5, + FLANN_DIST_HELLINGER = 6, + FLANN_DIST_CHI_SQUARE = 7, + FLANN_DIST_CS = 7, + FLANN_DIST_KULLBACK_LEIBLER = 8, + FLANN_DIST_KL = 8, + FLANN_DIST_HAMMING = 9, + + // deprecated constants, should use the FLANN_DIST_* ones instead + EUCLIDEAN = 1, + MANHATTAN = 2, + MINKOWSKI = 3, + MAX_DIST = 4, + HIST_INTERSECT = 5, + HELLINGER = 6, + CS = 7, + KL = 8, + KULLBACK_LEIBLER = 8 +}; + +enum flann_datatype_t +{ + FLANN_INT8 = 0, + FLANN_INT16 = 1, + FLANN_INT32 = 2, + FLANN_INT64 = 3, + FLANN_UINT8 = 4, + FLANN_UINT16 = 5, + FLANN_UINT32 = 6, + FLANN_UINT64 = 7, + FLANN_FLOAT32 = 8, + FLANN_FLOAT64 = 9 +}; + +enum +{ + FLANN_CHECKS_UNLIMITED = -1, + FLANN_CHECKS_AUTOTUNED = -2 +}; + +} + +#endif /* OPENCV_FLANN_DEFINES_H_ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/dist.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/dist.h new file mode 100644 index 0000000..80ae2dc --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/dist.h @@ -0,0 +1,817 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 
Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_DIST_H_ +#define OPENCV_FLANN_DIST_H_ + +#include +#include +#include +#ifdef _MSC_VER +typedef unsigned __int32 uint32_t; +typedef unsigned __int64 uint64_t; +#else +#include +#endif + +#include "defines.h" + +#if (defined WIN32 || defined _WIN32) && defined(_M_ARM) +# include +#endif + +#ifdef __ARM_NEON__ +# include "arm_neon.h" +#endif + +namespace cvflann +{ + +template +inline T abs(T x) { return (x<0) ? -x : x; } + +template<> +inline int abs(int x) { return ::abs(x); } + +template<> +inline float abs(float x) { return fabsf(x); } + +template<> +inline double abs(double x) { return fabs(x); } + +template +struct Accumulator { typedef T Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; + +#undef True +#undef False + +class True +{ +}; + +class False +{ +}; + + +/** + * Squared Euclidean distance functor. + * + * This is the simpler, unrolled version. 
This is preferable for + * very low dimensionality data (eg 3D points) + */ +template +struct L2_Simple +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { + ResultType result = ResultType(); + ResultType diff; + for(size_t i = 0; i < size; ++i ) { + diff = *a++ - *b++; + result += diff*diff; + } + return result; + } + + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return (a-b)*(a-b); + } +}; + + + +/** + * Squared Euclidean distance functor, optimized version + */ +template +struct L2 +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the squared Euclidean distance between two vectors. + * + * This is highly optimised, with loop unrolling, as it is one + * of the most expensive inner loops. + * + * The computation of squared root at the end is omitted for + * efficiency. + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = (ResultType)(a[0] - b[0]); + diff1 = (ResultType)(a[1] - b[1]); + diff2 = (ResultType)(a[2] - b[2]); + diff3 = (ResultType)(a[3] - b[3]); + result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3; + a += 4; + b += 4; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. */ + while (a < last) { + diff0 = (ResultType)(*a++ - *b++); + result += diff0 * diff0; + } + return result; + } + + /** + * Partial euclidean distance, using just one dimension. This is used by the + * kd-tree when computing partial distances while traversing the tree. + * + * Squared root is omitted for efficiency. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return (a-b)*(a-b); + } +}; + + +/* + * Manhattan distance functor, optimized version + */ +template +struct L1 +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the Manhattan (L_1) distance between two vectors. + * + * This is highly optimised, with loop unrolling, as it is one + * of the most expensive inner loops. + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = (ResultType)abs(a[0] - b[0]); + diff1 = (ResultType)abs(a[1] - b[1]); + diff2 = (ResultType)abs(a[2] - b[2]); + diff3 = (ResultType)abs(a[3] - b[3]); + result += diff0 + diff1 + diff2 + diff3; + a += 4; + b += 4; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. 
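+         * (Editor's note, illustrative: the unrolled loop above accumulates
+         * |a[i] - b[i]| four elements at a time and returns early once `result`
+         * exceeds `worst_dist`; e.g. for a = {1,2,3} and b = {4,0,3} the L1
+         * distance is |1-4| + |2-0| + |3-3| = 5.)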
*/ + while (a < last) { + diff0 = (ResultType)abs(*a++ - *b++); + result += diff0; + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return abs(a-b); + } +}; + + + +template +struct MinkowskiDistance +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + int order; + + MinkowskiDistance(int order_) : order(order_) {} + + /** + * Compute the Minkowsky (L_p) distance between two vectors. + * + * This is highly optimised, with loop unrolling, as it is one + * of the most expensive inner loops. + * + * The computation of squared root at the end is omitted for + * efficiency. + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = (ResultType)abs(a[0] - b[0]); + diff1 = (ResultType)abs(a[1] - b[1]); + diff2 = (ResultType)abs(a[2] - b[2]); + diff3 = (ResultType)abs(a[3] - b[3]); + result += pow(diff0,order) + pow(diff1,order) + pow(diff2,order) + pow(diff3,order); + a += 4; + b += 4; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. */ + while (a < last) { + diff0 = (ResultType)abs(*a++ - *b++); + result += pow(diff0,order); + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return pow(static_cast(abs(a-b)),order); + } +}; + + + +template +struct MaxDistance +{ + typedef False is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the max distance (L_infinity) between two vectors. + * + * This distance is not a valid kdtree distance, it's not dimensionwise additive. + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = abs(a[0] - b[0]); + diff1 = abs(a[1] - b[1]); + diff2 = abs(a[2] - b[2]); + diff3 = abs(a[3] - b[3]); + if (diff0>result) {result = diff0; } + if (diff1>result) {result = diff1; } + if (diff2>result) {result = diff2; } + if (diff3>result) {result = diff3; } + a += 4; + b += 4; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. */ + while (a < last) { + diff0 = abs(*a++ - *b++); + result = (diff0>result) ? 
diff0 : result; + } + return result; + } + + /* This distance functor is not dimension-wise additive, which + * makes it an invalid kd-tree distance, not implementing the accum_dist method */ + +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** + * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor + * bit count of A exclusive XOR'ed with B + */ +struct HammingLUT +{ + typedef False is_kdtree_distance; + typedef False is_vector_space_distance; + + typedef unsigned char ElementType; + typedef int ResultType; + + /** this will count the bits in a ^ b + */ + ResultType operator()(const unsigned char* a, const unsigned char* b, int size) const + { + static const uchar popCountTable[] = + { + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 + }; + ResultType result = 0; + for (int i = 0; i < size; i++) { + result += popCountTable[a[i] ^ b[i]]; + } + return result; + } +}; + +/** + * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor + * bit count of A exclusive XOR'ed with B + */ +struct HammingLUT2 +{ + typedef False is_kdtree_distance; + typedef False is_vector_space_distance; + + typedef unsigned char ElementType; + typedef int ResultType; + + /** this will count the bits in a ^ b + */ + ResultType operator()(const unsigned char* a, const unsigned char* b, size_t size) const + { + static const uchar popCountTable[] = + { + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 + }; + ResultType result = 0; + for (size_t i = 0; i < size; i++) { + result += popCountTable[a[i] ^ b[i]]; + } + return result; + } +}; + +/** + * Hamming distance functor (pop count between two binary vectors, i.e. 
xor them and count the number of bits set) + * That code was taken from brief.cpp in OpenCV + */ +template +struct Hamming +{ + typedef False is_kdtree_distance; + typedef False is_vector_space_distance; + + + typedef T ElementType; + typedef int ResultType; + + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { + ResultType result = 0; +#ifdef __ARM_NEON__ + { + uint32x4_t bits = vmovq_n_u32(0); + for (size_t i = 0; i < size; i += 16) { + uint8x16_t A_vec = vld1q_u8 (a + i); + uint8x16_t B_vec = vld1q_u8 (b + i); + uint8x16_t AxorB = veorq_u8 (A_vec, B_vec); + uint8x16_t bitsSet = vcntq_u8 (AxorB); + uint16x8_t bitSet8 = vpaddlq_u8 (bitsSet); + uint32x4_t bitSet4 = vpaddlq_u16 (bitSet8); + bits = vaddq_u32(bits, bitSet4); + } + uint64x2_t bitSet2 = vpaddlq_u32 (bits); + result = vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),0); + result += vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),2); + } +#elif __GNUC__ + { + //for portability just use unsigned long -- and use the __builtin_popcountll (see docs for __builtin_popcountll) + typedef unsigned long long pop_t; + const size_t modulo = size % sizeof(pop_t); + const pop_t* a2 = reinterpret_cast (a); + const pop_t* b2 = reinterpret_cast (b); + const pop_t* a2_end = a2 + (size / sizeof(pop_t)); + + for (; a2 != a2_end; ++a2, ++b2) result += __builtin_popcountll((*a2) ^ (*b2)); + + if (modulo) { + //in the case where size is not dividable by sizeof(size_t) + //need to mask off the bits at the end + pop_t a_final = 0, b_final = 0; + memcpy(&a_final, a2, modulo); + memcpy(&b_final, b2, modulo); + result += __builtin_popcountll(a_final ^ b_final); + } + } +#else // NO NEON and NOT GNUC + typedef unsigned long long pop_t; + HammingLUT lut; + result = lut(reinterpret_cast (a), + reinterpret_cast (b), size * sizeof(pop_t)); +#endif + return result; + } +}; + +template +struct Hamming2 +{ + typedef False is_kdtree_distance; + typedef False is_vector_space_distance; + + typedef T ElementType; + typedef int ResultType; + + /** This is popcount_3() from: + * http://en.wikipedia.org/wiki/Hamming_weight */ + unsigned int popcnt32(uint32_t n) const + { + n -= ((n >> 1) & 0x55555555); + n = (n & 0x33333333) + ((n >> 2) & 0x33333333); + return (((n + (n >> 4))& 0xF0F0F0F)* 0x1010101) >> 24; + } + +#ifdef FLANN_PLATFORM_64_BIT + unsigned int popcnt64(uint64_t n) const + { + n -= ((n >> 1) & 0x5555555555555555); + n = (n & 0x3333333333333333) + ((n >> 2) & 0x3333333333333333); + return (((n + (n >> 4))& 0x0f0f0f0f0f0f0f0f)* 0x0101010101010101) >> 56; + } +#endif + + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { +#ifdef FLANN_PLATFORM_64_BIT + const uint64_t* pa = reinterpret_cast(a); + const uint64_t* pb = reinterpret_cast(b); + ResultType result = 0; + size /= (sizeof(uint64_t)/sizeof(unsigned char)); + for(size_t i = 0; i < size; ++i ) { + result += popcnt64(*pa ^ *pb); + ++pa; + ++pb; + } +#else + const uint32_t* pa = reinterpret_cast(a); + const uint32_t* pb = reinterpret_cast(b); + ResultType result = 0; + size /= (sizeof(uint32_t)/sizeof(unsigned char)); + for(size_t i = 0; i < size; ++i ) { + result += popcnt32(*pa ^ *pb); + ++pa; + ++pb; + } +#endif + return result; + } +}; + + + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct HistIntersectionDistance +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + 
+ typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the histogram intersection distance + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType min0, min1, min2, min3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + min0 = (ResultType)(a[0] < b[0] ? a[0] : b[0]); + min1 = (ResultType)(a[1] < b[1] ? a[1] : b[1]); + min2 = (ResultType)(a[2] < b[2] ? a[2] : b[2]); + min3 = (ResultType)(a[3] < b[3] ? a[3] : b[3]); + result += min0 + min1 + min2 + min3; + a += 4; + b += 4; + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. */ + while (a < last) { + min0 = (ResultType)(*a < *b ? *a : *b); + result += min0; + ++a; + ++b; + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return a +struct HellingerDistance +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the histogram intersection distance + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = sqrt(static_cast(a[0])) - sqrt(static_cast(b[0])); + diff1 = sqrt(static_cast(a[1])) - sqrt(static_cast(b[1])); + diff2 = sqrt(static_cast(a[2])) - sqrt(static_cast(b[2])); + diff3 = sqrt(static_cast(a[3])) - sqrt(static_cast(b[3])); + result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3; + a += 4; + b += 4; + } + while (a < last) { + diff0 = sqrt(static_cast(*a++)) - sqrt(static_cast(*b++)); + result += diff0 * diff0; + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return sqrt(static_cast(a)) - sqrt(static_cast(b)); + } +}; + + +template +struct ChiSquareDistance +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the chi-square distance + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType sum, diff; + Iterator1 last = a + size; + + while (a < last) { + sum = (ResultType)(*a + *b); + if (sum>0) { + diff = (ResultType)(*a - *b); + result += diff*diff/sum; + } + ++a; + ++b; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + return result; + } + + /** + * Partial distance, used by the kd-tree. 
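+     * (Editor's note, illustrative: each dimension contributes (a-b)^2 / (a+b)
+     * whenever a+b > 0, so a = 2 and b = 6 contribute 16/8 = 2; dimensions with
+     * a+b <= 0 are skipped, exactly as in operator() above.)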
+ */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + ResultType result = ResultType(); + ResultType sum, diff; + + sum = (ResultType)(a+b); + if (sum>0) { + diff = (ResultType)(a-b); + result = diff*diff/sum; + } + return result; + } +}; + + +template +struct KL_Divergence +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + + /** + * Compute the Kullback–Leibler divergence + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + Iterator1 last = a + size; + + while (a < last) { + if (* a != 0) { + ResultType ratio = (ResultType)(*a / *b); + if (ratio>0) { + result += *a * log(ratio); + } + } + ++a; + ++b; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + ResultType result = ResultType(); + ResultType ratio = (ResultType)(a / b); + if (ratio>0) { + result = a * log(ratio); + } + return result; + } +}; + + + +/* + * This is a "zero iterator". It basically behaves like a zero filled + * array to all algorithms that use arrays as iterators (STL style). + * It's useful when there's a need to compute the distance between feature + * and origin it and allows for better compiler optimisation than using a + * zero-filled array. + */ +template +struct ZeroIterator +{ + + T operator*() + { + return 0; + } + + T operator[](int) + { + return 0; + } + + const ZeroIterator& operator ++() + { + return *this; + } + + ZeroIterator operator ++(int) + { + return *this; + } + + ZeroIterator& operator+=(int) + { + return *this; + } + +}; + +} + +#endif //OPENCV_FLANN_DIST_H_ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/dummy.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/dummy.h new file mode 100644 index 0000000..26bd3fa --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/dummy.h @@ -0,0 +1,16 @@ + +#ifndef OPENCV_FLANN_DUMMY_H_ +#define OPENCV_FLANN_DUMMY_H_ + +namespace cvflann +{ + +#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS +__declspec(dllexport) +#endif +void dummyfunc(); + +} + + +#endif /* OPENCV_FLANN_DUMMY_H_ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/dynamic_bitset.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/dynamic_bitset.h new file mode 100644 index 0000000..bfd39ce --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/dynamic_bitset.h @@ -0,0 +1,159 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +/*********************************************************************** + * Author: Vincent Rabaud + *************************************************************************/ + +#ifndef OPENCV_FLANN_DYNAMIC_BITSET_H_ +#define OPENCV_FLANN_DYNAMIC_BITSET_H_ + +#ifndef FLANN_USE_BOOST +# define FLANN_USE_BOOST 0 +#endif +//#define FLANN_USE_BOOST 1 +#if FLANN_USE_BOOST +#include +typedef boost::dynamic_bitset<> DynamicBitset; +#else + +#include + +#include "dist.h" + +namespace cvflann { + +/** Class re-implementing the boost version of it + * This helps not depending on boost, it also does not do the bound checks + * and has a way to reset a block for speed + */ +class DynamicBitset +{ +public: + /** @param default constructor + */ + DynamicBitset() + { + } + + /** @param only constructor we use in our code + * @param the size of the bitset (in bits) + */ + DynamicBitset(size_t sz) + { + resize(sz); + reset(); + } + + /** Sets all the bits to 0 + */ + void clear() + { + std::fill(bitset_.begin(), bitset_.end(), 0); + } + + /** @brief checks if the bitset is empty + * @return true if the bitset is empty + */ + bool empty() const + { + return bitset_.empty(); + } + + /** @param set all the bits to 0 + */ + void reset() + { + std::fill(bitset_.begin(), bitset_.end(), 0); + } + + /** @brief set one bit to 0 + * @param + */ + void reset(size_t index) + { + bitset_[index / cell_bit_size_] &= ~(size_t(1) << (index % cell_bit_size_)); + } + + /** @brief sets a specific bit to 0, and more bits too + * This function is useful when resetting a given set of bits so that the + * whole bitset ends up being 0: if that's the case, we don't care about setting + * other bits to 0 + * @param + */ + void reset_block(size_t index) + { + bitset_[index / cell_bit_size_] = 0; + } + + /** @param resize the bitset so that it contains at least size bits + * @param size + */ + void resize(size_t sz) + { + size_ = sz; + bitset_.resize(sz / cell_bit_size_ + 1); + } + + /** @param set a bit to true + * @param index the index of the bit to set to 1 + */ + void set(size_t index) + { + bitset_[index / cell_bit_size_] |= size_t(1) << (index % cell_bit_size_); + } + + /** @param gives the number of contained bits + */ + size_t size() const + { + return size_; + } + + /** @param check if a bit is set + * @param index the index of the bit to check + * @return true if the bit is set + */ + bool test(size_t index) const + { + return (bitset_[index / cell_bit_size_] & (size_t(1) << (index % cell_bit_size_))) 
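+        // Editor's note (illustrative, not upstream code): with a 64-bit size_t,
+        // cell_bit_size_ is 64, so test(70) inspects bit 70 % 64 = 6 of word
+        // 70 / 64 = 1 -- the same index/offset split used by set() and reset() above.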
!= 0; + } + +private: + std::vector bitset_; + size_t size_; + static const unsigned int cell_bit_size_ = CHAR_BIT * sizeof(size_t); +}; + +} // namespace cvflann + +#endif + +#endif // OPENCV_FLANN_DYNAMIC_BITSET_H_ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/flann.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/flann.hpp new file mode 100644 index 0000000..d053488 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/flann.hpp @@ -0,0 +1,427 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef _OPENCV_FLANN_HPP_ +#define _OPENCV_FLANN_HPP_ + +#ifdef __cplusplus + +#include "opencv2/core/types_c.h" +#include "opencv2/core/core.hpp" +#include "opencv2/flann/flann_base.hpp" +#include "opencv2/flann/miniflann.hpp" + +namespace cvflann +{ + CV_EXPORTS flann_distance_t flann_distance_type(); + FLANN_DEPRECATED CV_EXPORTS void set_distance_type(flann_distance_t distance_type, int order); +} + + +namespace cv +{ +namespace flann +{ + +template struct CvType {}; +template <> struct CvType { static int type() { return CV_8U; } }; +template <> struct CvType { static int type() { return CV_8S; } }; +template <> struct CvType { static int type() { return CV_16U; } }; +template <> struct CvType { static int type() { return CV_16S; } }; +template <> struct CvType { static int type() { return CV_32S; } }; +template <> struct CvType { static int type() { return CV_32F; } }; +template <> struct CvType { static int type() { return CV_64F; } }; + + +// bring the flann parameters into this namespace +using ::cvflann::get_param; +using ::cvflann::print_params; + +// bring the flann distances into this namespace +using ::cvflann::L2_Simple; +using ::cvflann::L2; +using ::cvflann::L1; +using ::cvflann::MinkowskiDistance; +using ::cvflann::MaxDistance; +using ::cvflann::HammingLUT; +using ::cvflann::Hamming; +using ::cvflann::Hamming2; +using ::cvflann::HistIntersectionDistance; +using ::cvflann::HellingerDistance; +using ::cvflann::ChiSquareDistance; +using ::cvflann::KL_Divergence; + + + +template +class GenericIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + GenericIndex(const Mat& features, const ::cvflann::IndexParams& params, Distance distance = Distance()); + + ~GenericIndex(); + + void knnSearch(const vector& query, vector& indices, + vector& dists, int knn, const ::cvflann::SearchParams& params); + void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& params); + + int radiusSearch(const vector& query, vector& indices, + vector& dists, DistanceType radius, const ::cvflann::SearchParams& params); + int radiusSearch(const Mat& query, Mat& indices, Mat& dists, + DistanceType radius, const ::cvflann::SearchParams& params); + + void save(std::string filename) { nnIndex->save(filename); } + + int veclen() const { return nnIndex->veclen(); } + + int size() const { return nnIndex->size(); } + + ::cvflann::IndexParams getParameters() { return nnIndex->getParameters(); } + + FLANN_DEPRECATED const ::cvflann::IndexParams* getIndexParameters() { return nnIndex->getIndexParameters(); } + +private: + ::cvflann::Index* nnIndex; +}; + + +#define FLANN_DISTANCE_CHECK \ + if ( ::cvflann::flann_distance_type() != cvflann::FLANN_DIST_L2) { \ + printf("[WARNING] You are using cv::flann::Index (or cv::flann::GenericIndex) and have also changed "\ + "the distance using cvflann::set_distance_type. This is no longer working as expected "\ + "(cv::flann::Index always uses L2). 
You should create the index templated on the distance, "\ + "for example for L1 distance use: GenericIndex< L1 > \n"); \ + } + + +template +GenericIndex::GenericIndex(const Mat& dataset, const ::cvflann::IndexParams& params, Distance distance) +{ + CV_Assert(dataset.type() == CvType::type()); + CV_Assert(dataset.isContinuous()); + ::cvflann::Matrix m_dataset((ElementType*)dataset.ptr(0), dataset.rows, dataset.cols); + + nnIndex = new ::cvflann::Index(m_dataset, params, distance); + + FLANN_DISTANCE_CHECK + + nnIndex->buildIndex(); +} + +template +GenericIndex::~GenericIndex() +{ + delete nnIndex; +} + +template +void GenericIndex::knnSearch(const vector& query, vector& indices, vector& dists, int knn, const ::cvflann::SearchParams& searchParams) +{ + ::cvflann::Matrix m_query((ElementType*)&query[0], 1, query.size()); + ::cvflann::Matrix m_indices(&indices[0], 1, indices.size()); + ::cvflann::Matrix m_dists(&dists[0], 1, dists.size()); + + FLANN_DISTANCE_CHECK + + nnIndex->knnSearch(m_query,m_indices,m_dists,knn,searchParams); +} + + +template +void GenericIndex::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& searchParams) +{ + CV_Assert(queries.type() == CvType::type()); + CV_Assert(queries.isContinuous()); + ::cvflann::Matrix m_queries((ElementType*)queries.ptr(0), queries.rows, queries.cols); + + CV_Assert(indices.type() == CV_32S); + CV_Assert(indices.isContinuous()); + ::cvflann::Matrix m_indices((int*)indices.ptr(0), indices.rows, indices.cols); + + CV_Assert(dists.type() == CvType::type()); + CV_Assert(dists.isContinuous()); + ::cvflann::Matrix m_dists((DistanceType*)dists.ptr(0), dists.rows, dists.cols); + + FLANN_DISTANCE_CHECK + + nnIndex->knnSearch(m_queries,m_indices,m_dists,knn, searchParams); +} + +template +int GenericIndex::radiusSearch(const vector& query, vector& indices, vector& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams) +{ + ::cvflann::Matrix m_query((ElementType*)&query[0], 1, query.size()); + ::cvflann::Matrix m_indices(&indices[0], 1, indices.size()); + ::cvflann::Matrix m_dists(&dists[0], 1, dists.size()); + + FLANN_DISTANCE_CHECK + + return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); +} + +template +int GenericIndex::radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams) +{ + CV_Assert(query.type() == CvType::type()); + CV_Assert(query.isContinuous()); + ::cvflann::Matrix m_query((ElementType*)query.ptr(0), query.rows, query.cols); + + CV_Assert(indices.type() == CV_32S); + CV_Assert(indices.isContinuous()); + ::cvflann::Matrix m_indices((int*)indices.ptr(0), indices.rows, indices.cols); + + CV_Assert(dists.type() == CvType::type()); + CV_Assert(dists.isContinuous()); + ::cvflann::Matrix m_dists((DistanceType*)dists.ptr(0), dists.rows, dists.cols); + + FLANN_DISTANCE_CHECK + + return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); +} + +/** + * @deprecated Use GenericIndex class instead + */ +template +class +#ifndef _MSC_VER + FLANN_DEPRECATED +#endif + Index_ { +public: + typedef typename L2::ElementType ElementType; + typedef typename L2::ResultType DistanceType; + + Index_(const Mat& features, const ::cvflann::IndexParams& params); + + ~Index_(); + + void knnSearch(const vector& query, vector& indices, vector& dists, int knn, const ::cvflann::SearchParams& params); + void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& 
params); + + int radiusSearch(const vector& query, vector& indices, vector& dists, DistanceType radius, const ::cvflann::SearchParams& params); + int radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& params); + + void save(std::string filename) + { + if (nnIndex_L1) nnIndex_L1->save(filename); + if (nnIndex_L2) nnIndex_L2->save(filename); + } + + int veclen() const + { + if (nnIndex_L1) return nnIndex_L1->veclen(); + if (nnIndex_L2) return nnIndex_L2->veclen(); + } + + int size() const + { + if (nnIndex_L1) return nnIndex_L1->size(); + if (nnIndex_L2) return nnIndex_L2->size(); + } + + ::cvflann::IndexParams getParameters() + { + if (nnIndex_L1) return nnIndex_L1->getParameters(); + if (nnIndex_L2) return nnIndex_L2->getParameters(); + + } + + FLANN_DEPRECATED const ::cvflann::IndexParams* getIndexParameters() + { + if (nnIndex_L1) return nnIndex_L1->getIndexParameters(); + if (nnIndex_L2) return nnIndex_L2->getIndexParameters(); + } + +private: + // providing backwards compatibility for L2 and L1 distances (most common) + ::cvflann::Index< L2 >* nnIndex_L2; + ::cvflann::Index< L1 >* nnIndex_L1; +}; + +#ifdef _MSC_VER +template +class FLANN_DEPRECATED Index_; +#endif + +template +Index_::Index_(const Mat& dataset, const ::cvflann::IndexParams& params) +{ + printf("[WARNING] The cv::flann::Index_ class is deperecated, use cv::flann::GenericIndex instead\n"); + + CV_Assert(dataset.type() == CvType::type()); + CV_Assert(dataset.isContinuous()); + ::cvflann::Matrix m_dataset((ElementType*)dataset.ptr(0), dataset.rows, dataset.cols); + + if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L2 ) { + nnIndex_L1 = NULL; + nnIndex_L2 = new ::cvflann::Index< L2 >(m_dataset, params); + } + else if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L1 ) { + nnIndex_L1 = new ::cvflann::Index< L1 >(m_dataset, params); + nnIndex_L2 = NULL; + } + else { + printf("[ERROR] cv::flann::Index_ only provides backwards compatibility for the L1 and L2 distances. 
" + "For other distance types you must use cv::flann::GenericIndex\n"); + CV_Assert(0); + } + if (nnIndex_L1) nnIndex_L1->buildIndex(); + if (nnIndex_L2) nnIndex_L2->buildIndex(); +} + +template +Index_::~Index_() +{ + if (nnIndex_L1) delete nnIndex_L1; + if (nnIndex_L2) delete nnIndex_L2; +} + +template +void Index_::knnSearch(const vector& query, vector& indices, vector& dists, int knn, const ::cvflann::SearchParams& searchParams) +{ + ::cvflann::Matrix m_query((ElementType*)&query[0], 1, query.size()); + ::cvflann::Matrix m_indices(&indices[0], 1, indices.size()); + ::cvflann::Matrix m_dists(&dists[0], 1, dists.size()); + + if (nnIndex_L1) nnIndex_L1->knnSearch(m_query,m_indices,m_dists,knn,searchParams); + if (nnIndex_L2) nnIndex_L2->knnSearch(m_query,m_indices,m_dists,knn,searchParams); +} + + +template +void Index_::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& searchParams) +{ + CV_Assert(queries.type() == CvType::type()); + CV_Assert(queries.isContinuous()); + ::cvflann::Matrix m_queries((ElementType*)queries.ptr(0), queries.rows, queries.cols); + + CV_Assert(indices.type() == CV_32S); + CV_Assert(indices.isContinuous()); + ::cvflann::Matrix m_indices((int*)indices.ptr(0), indices.rows, indices.cols); + + CV_Assert(dists.type() == CvType::type()); + CV_Assert(dists.isContinuous()); + ::cvflann::Matrix m_dists((DistanceType*)dists.ptr(0), dists.rows, dists.cols); + + if (nnIndex_L1) nnIndex_L1->knnSearch(m_queries,m_indices,m_dists,knn, searchParams); + if (nnIndex_L2) nnIndex_L2->knnSearch(m_queries,m_indices,m_dists,knn, searchParams); +} + +template +int Index_::radiusSearch(const vector& query, vector& indices, vector& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams) +{ + ::cvflann::Matrix m_query((ElementType*)&query[0], 1, query.size()); + ::cvflann::Matrix m_indices(&indices[0], 1, indices.size()); + ::cvflann::Matrix m_dists(&dists[0], 1, dists.size()); + + if (nnIndex_L1) return nnIndex_L1->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); + if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); +} + +template +int Index_::radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams) +{ + CV_Assert(query.type() == CvType::type()); + CV_Assert(query.isContinuous()); + ::cvflann::Matrix m_query((ElementType*)query.ptr(0), query.rows, query.cols); + + CV_Assert(indices.type() == CV_32S); + CV_Assert(indices.isContinuous()); + ::cvflann::Matrix m_indices((int*)indices.ptr(0), indices.rows, indices.cols); + + CV_Assert(dists.type() == CvType::type()); + CV_Assert(dists.isContinuous()); + ::cvflann::Matrix m_dists((DistanceType*)dists.ptr(0), dists.rows, dists.cols); + + if (nnIndex_L1) return nnIndex_L1->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); + if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); +} + + +template +int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params, + Distance d = Distance()) +{ + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + CV_Assert(features.type() == CvType::type()); + CV_Assert(features.isContinuous()); + ::cvflann::Matrix m_features((ElementType*)features.ptr(0), features.rows, features.cols); + + CV_Assert(centers.type() == CvType::type()); + CV_Assert(centers.isContinuous()); + 
::cvflann::Matrix m_centers((DistanceType*)centers.ptr(0), centers.rows, centers.cols); + + return ::cvflann::hierarchicalClustering(m_features, m_centers, params, d); +} + + +template +FLANN_DEPRECATED int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params) +{ + printf("[WARNING] cv::flann::hierarchicalClustering is deprecated, use " + "cv::flann::hierarchicalClustering instead\n"); + + if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L2 ) { + return hierarchicalClustering< L2 >(features, centers, params); + } + else if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L1 ) { + return hierarchicalClustering< L1 >(features, centers, params); + } + else { + printf("[ERROR] cv::flann::hierarchicalClustering only provides backwards " + "compatibility for the L1 and L2 distances. " + "For other distance types you must use cv::flann::hierarchicalClustering\n"); + CV_Assert(0); + } +} + +} } // namespace cv::flann + +#endif // __cplusplus + +#endif diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/flann_base.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/flann_base.hpp new file mode 100644 index 0000000..b5ba7d7 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/flann_base.hpp @@ -0,0 +1,291 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + +#ifndef OPENCV_FLANN_BASE_HPP_ +#define OPENCV_FLANN_BASE_HPP_ + +#include +#include +#include +#include + +#include "general.h" +#include "matrix.h" +#include "params.h" +#include "saving.h" + +#include "all_indices.h" + +namespace cvflann +{ + +/** + * Sets the log level used for all flann functions + * @param level Verbosity level + */ +inline void log_verbosity(int level) +{ + if (level >= 0) { + Logger::setLevel(level); + } +} + +/** + * (Deprecated) Index parameters for creating a saved index. + */ +struct SavedIndexParams : public IndexParams +{ + SavedIndexParams(std::string filename) + { + (* this)["algorithm"] = FLANN_INDEX_SAVED; + (*this)["filename"] = filename; + } +}; + + +template +NNIndex* load_saved_index(const Matrix& dataset, const std::string& filename, Distance distance) +{ + typedef typename Distance::ElementType ElementType; + + FILE* fin = fopen(filename.c_str(), "rb"); + if (fin == NULL) { + return NULL; + } + IndexHeader header = load_header(fin); + if (header.data_type != Datatype::type()) { + throw FLANNException("Datatype of saved index is different than of the one to be created."); + } + if ((size_t(header.rows) != dataset.rows)||(size_t(header.cols) != dataset.cols)) { + throw FLANNException("The index saved belongs to a different dataset"); + } + + IndexParams params; + params["algorithm"] = header.index_type; + NNIndex* nnIndex = create_index_by_type(dataset, params, distance); + nnIndex->loadIndex(fin); + fclose(fin); + + return nnIndex; +} + + +template +class Index : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + Index(const Matrix& features, const IndexParams& params, Distance distance = Distance() ) + : index_params_(params) + { + flann_algorithm_t index_type = get_param(params,"algorithm"); + loaded_ = false; + + if (index_type == FLANN_INDEX_SAVED) { + nnIndex_ = load_saved_index(features, get_param(params,"filename"), distance); + loaded_ = true; + } + else { + nnIndex_ = create_index_by_type(features, params, distance); + } + } + + ~Index() + { + delete nnIndex_; + } + + /** + * Builds the index. + */ + void buildIndex() + { + if (!loaded_) { + nnIndex_->buildIndex(); + } + } + + void save(std::string filename) + { + FILE* fout = fopen(filename.c_str(), "wb"); + if (fout == NULL) { + throw FLANNException("Cannot open file"); + } + save_header(fout, *nnIndex_); + saveIndex(fout); + fclose(fout); + } + + /** + * \brief Saves the index to a stream + * \param stream The stream to save the index to + */ + virtual void saveIndex(FILE* stream) + { + nnIndex_->saveIndex(stream); + } + + /** + * \brief Loads the index from a stream + * \param stream The stream from which the index is loaded + */ + virtual void loadIndex(FILE* stream) + { + nnIndex_->loadIndex(stream); + } + + /** + * \returns number of features in this index. + */ + size_t veclen() const + { + return nnIndex_->veclen(); + } + + /** + * \returns The dimensionality of the features in this index. + */ + size_t size() const + { + return nnIndex_->size(); + } + + /** + * \returns The index type (kdtree, kmeans,...) + */ + flann_algorithm_t getType() const + { + return nnIndex_->getType(); + } + + /** + * \returns The amount of memory (in bytes) used by the index. 
+ */ + virtual int usedMemory() const + { + return nnIndex_->usedMemory(); + } + + + /** + * \returns The index parameters + */ + IndexParams getParameters() const + { + return nnIndex_->getParameters(); + } + + /** + * \brief Perform k-nearest neighbor search + * \param[in] queries The query points for which to find the nearest neighbors + * \param[out] indices The indices of the nearest neighbors found + * \param[out] dists Distances to the nearest neighbors found + * \param[in] knn Number of nearest neighbors to return + * \param[in] params Search parameters + */ + void knnSearch(const Matrix& queries, Matrix& indices, Matrix& dists, int knn, const SearchParams& params) + { + nnIndex_->knnSearch(queries, indices, dists, knn, params); + } + + /** + * \brief Perform radius search + * \param[in] query The query point + * \param[out] indices The indinces of the neighbors found within the given radius + * \param[out] dists The distances to the nearest neighbors found + * \param[in] radius The radius used for search + * \param[in] params Search parameters + * \returns Number of neighbors found + */ + int radiusSearch(const Matrix& query, Matrix& indices, Matrix& dists, float radius, const SearchParams& params) + { + return nnIndex_->radiusSearch(query, indices, dists, radius, params); + } + + /** + * \brief Method that searches for nearest-neighbours + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) + { + nnIndex_->findNeighbors(result, vec, searchParams); + } + + /** + * \brief Returns actual index + */ + FLANN_DEPRECATED NNIndex* getIndex() + { + return nnIndex_; + } + + /** + * \brief Returns index parameters. + * \deprecated use getParameters() instead. + */ + FLANN_DEPRECATED const IndexParams* getIndexParameters() + { + return &index_params_; + } + +private: + /** Pointer to actual index class */ + NNIndex* nnIndex_; + /** Indices if the index was loaded from a file */ + bool loaded_; + /** Parameters passed to the index */ + IndexParams index_params_; +}; + +/** + * Performs a hierarchical clustering of the points passed as argument and then takes a cut in the + * the clustering tree to return a flat clustering. + * @param[in] points Points to be clustered + * @param centers The computed cluster centres. Matrix should be preallocated and centers.rows is the + * number of clusters requested. + * @param params Clustering parameters (The same as for cvflann::KMeansIndex) + * @param d Distance to be used for clustering (eg: cvflann::L2) + * @return number of clusters computed (can be different than clusters.rows and is the highest number + * of the form (branching-1)*K+1 smaller than clusters.rows). 
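+ *
+ * Illustrative sketch (not from the upstream source; `data_ptr`, `n_rows` and
+ * `n_cols` are placeholders for a caller-owned float feature array):
+ *
+ *     cvflann::Matrix<float> points(data_ptr, n_rows, n_cols);            // non-owning view
+ *     cvflann::Matrix<float> centers(new float[32 * n_cols], 32, n_cols); // room for 32 centers
+ *     int found = cvflann::hierarchicalClustering<cvflann::L2<float> >(
+ *                     points, centers, cvflann::KMeansIndexParams());
+ *     // only the first `found` rows of `centers` are meaningful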
+ */ +template +int hierarchicalClustering(const Matrix& points, Matrix& centers, + const KMeansIndexParams& params, Distance d = Distance()) +{ + KMeansIndex kmeans(points, params, d); + kmeans.buildIndex(); + + int clusterNum = kmeans.getClusterCenters(centers); + return clusterNum; +} + +} +#endif /* OPENCV_FLANN_BASE_HPP_ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/general.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/general.h new file mode 100644 index 0000000..87e7e2f --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/general.h @@ -0,0 +1,52 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_GENERAL_H_ +#define OPENCV_FLANN_GENERAL_H_ + +#include "defines.h" +#include +#include + +namespace cvflann +{ + +class FLANNException : public std::runtime_error +{ +public: + FLANNException(const char* message) : std::runtime_error(message) { } + + FLANNException(const std::string& message) : std::runtime_error(message) { } +}; + +} + + +#endif /* OPENCV_FLANN_GENERAL_H_ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/ground_truth.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/ground_truth.h new file mode 100644 index 0000000..fd8f3ae --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/ground_truth.h @@ -0,0 +1,94 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. 
+ * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_GROUND_TRUTH_H_ +#define OPENCV_FLANN_GROUND_TRUTH_H_ + +#include "dist.h" +#include "matrix.h" + + +namespace cvflann +{ + +template +void find_nearest(const Matrix& dataset, typename Distance::ElementType* query, int* matches, int nn, + int skip = 0, Distance distance = Distance()) +{ + typedef typename Distance::ResultType DistanceType; + int n = nn + skip; + + std::vector match(n); + std::vector dists(n); + + dists[0] = distance(dataset[0], query, dataset.cols); + match[0] = 0; + int dcnt = 1; + + for (size_t i=1; i=1 && dists[j] +void compute_ground_truth(const Matrix& dataset, const Matrix& testset, Matrix& matches, + int skip=0, Distance d = Distance()) +{ + for (size_t i=0; i(dataset, testset[i], matches[i], (int)matches.cols, skip, d); + } +} + + +} + +#endif //OPENCV_FLANN_GROUND_TRUTH_H_ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/hdf5.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/hdf5.h new file mode 100644 index 0000000..ef3e999 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/hdf5.h @@ -0,0 +1,231 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + + +#ifndef OPENCV_FLANN_HDF5_H_ +#define OPENCV_FLANN_HDF5_H_ + +#include + +#include "matrix.h" + + +namespace cvflann +{ + +namespace +{ + +template +hid_t get_hdf5_type() +{ + throw FLANNException("Unsupported type for IO operations"); +} + +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_CHAR; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_UCHAR; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_SHORT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_USHORT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_INT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_UINT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_LONG; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_ULONG; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_FLOAT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_DOUBLE; } +} + + +#define CHECK_ERROR(x,y) if ((x)<0) throw FLANNException((y)); + +template +void save_to_file(const cvflann::Matrix& dataset, const std::string& filename, const std::string& name) +{ + +#if H5Eset_auto_vers == 2 + H5Eset_auto( H5E_DEFAULT, NULL, NULL ); +#else + H5Eset_auto( NULL, NULL ); +#endif + + herr_t status; + hid_t file_id; + file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT); + if (file_id < 0) { + file_id = H5Fcreate(filename.c_str(), H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); + } + CHECK_ERROR(file_id,"Error creating hdf5 file."); + + hsize_t dimsf[2]; // dataset dimensions + dimsf[0] = dataset.rows; + dimsf[1] = dataset.cols; + + hid_t space_id = H5Screate_simple(2, dimsf, NULL); + hid_t memspace_id = H5Screate_simple(2, dimsf, NULL); + + hid_t dataset_id; +#if H5Dcreate_vers == 2 + dataset_id = H5Dcreate2(file_id, name.c_str(), get_hdf5_type(), space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); +#else + dataset_id = H5Dcreate(file_id, name.c_str(), get_hdf5_type(), space_id, H5P_DEFAULT); +#endif + + if (dataset_id<0) { +#if H5Dopen_vers == 2 + dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT); +#else + dataset_id = H5Dopen(file_id, name.c_str()); +#endif + } + CHECK_ERROR(dataset_id,"Error creating or opening dataset in file."); + + status = H5Dwrite(dataset_id, get_hdf5_type(), memspace_id, space_id, H5P_DEFAULT, dataset.data ); + CHECK_ERROR(status, "Error writing to dataset"); + + H5Sclose(memspace_id); + H5Sclose(space_id); + H5Dclose(dataset_id); + H5Fclose(file_id); + +} + + +template +void load_from_file(cvflann::Matrix& dataset, const std::string& filename, const std::string& name) +{ + herr_t status; + hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT); + CHECK_ERROR(file_id,"Error opening hdf5 file."); + + hid_t dataset_id; +#if H5Dopen_vers == 2 + 
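+ // HDF5 1.8 renamed several calls (H5Dopen -> H5Dopen2, H5Dcreate -> H5Dcreate2);
+ // the *_vers macros report which API generation the library was built for, so
+ // these preprocessor branches select the matching signature at compile time.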
dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT); +#else + dataset_id = H5Dopen(file_id, name.c_str()); +#endif + CHECK_ERROR(dataset_id,"Error opening dataset in file."); + + hid_t space_id = H5Dget_space(dataset_id); + + hsize_t dims_out[2]; + H5Sget_simple_extent_dims(space_id, dims_out, NULL); + + dataset = cvflann::Matrix(new T[dims_out[0]*dims_out[1]], dims_out[0], dims_out[1]); + + status = H5Dread(dataset_id, get_hdf5_type(), H5S_ALL, H5S_ALL, H5P_DEFAULT, dataset[0]); + CHECK_ERROR(status, "Error reading dataset"); + + H5Sclose(space_id); + H5Dclose(dataset_id); + H5Fclose(file_id); +} + + +#ifdef HAVE_MPI + +namespace mpi +{ +/** + * Loads a the hyperslice corresponding to this processor from a hdf5 file. + * @param flann_dataset Dataset where the data is loaded + * @param filename HDF5 file name + * @param name Name of dataset inside file + */ +template +void load_from_file(cvflann::Matrix& dataset, const std::string& filename, const std::string& name) +{ + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + int mpi_size, mpi_rank; + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); + + herr_t status; + + hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); + H5Pset_fapl_mpio(plist_id, comm, info); + hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, plist_id); + CHECK_ERROR(file_id,"Error opening hdf5 file."); + H5Pclose(plist_id); + hid_t dataset_id; +#if H5Dopen_vers == 2 + dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT); +#else + dataset_id = H5Dopen(file_id, name.c_str()); +#endif + CHECK_ERROR(dataset_id,"Error opening dataset in file."); + + hid_t space_id = H5Dget_space(dataset_id); + hsize_t dims[2]; + H5Sget_simple_extent_dims(space_id, dims, NULL); + + hsize_t count[2]; + hsize_t offset[2]; + + hsize_t item_cnt = dims[0]/mpi_size+(dims[0]%mpi_size==0 ? 0 : 1); + hsize_t cnt = (mpi_rank(), memspace_id, space_id, plist_id, dataset.data); + CHECK_ERROR(status, "Error reading dataset"); + + H5Pclose(plist_id); + H5Sclose(space_id); + H5Sclose(memspace_id); + H5Dclose(dataset_id); + H5Fclose(file_id); +} +} +#endif // HAVE_MPI +} // namespace cvflann::mpi + +#endif /* OPENCV_FLANN_HDF5_H_ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/heap.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/heap.h new file mode 100644 index 0000000..92a6ea6 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/heap.h @@ -0,0 +1,165 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_HEAP_H_ +#define OPENCV_FLANN_HEAP_H_ + +#include +#include + +namespace cvflann +{ + +/** + * Priority Queue Implementation + * + * The priority queue is implemented with a heap. A heap is a complete + * (full) binary tree in which each parent is less than both of its + * children, but the order of the children is unspecified. + */ +template +class Heap +{ + + /** + * Storage array for the heap. + * Type T must be comparable. + */ + std::vector heap; + int length; + + /** + * Number of element in the heap + */ + int count; + + + +public: + /** + * Constructor. + * + * Params: + * sz = heap size + */ + + Heap(int sz) + { + length = sz; + heap.reserve(length); + count = 0; + } + + /** + * + * Returns: heap size + */ + int size() + { + return count; + } + + /** + * Tests if the heap is empty + * + * Returns: true is heap empty, false otherwise + */ + bool empty() + { + return size()==0; + } + + /** + * Clears the heap. + */ + void clear() + { + heap.clear(); + count = 0; + } + + struct CompareT + { + bool operator()(const T& t_1, const T& t_2) const + { + return t_2 < t_1; + } + }; + + /** + * Insert a new element in the heap. + * + * We select the next empty leaf node, and then keep moving any larger + * parents down until the right location is found to store this element. + * + * Params: + * value = the new element to be inserted in the heap + */ + void insert(T value) + { + /* If heap is full, then return without adding this element. */ + if (count == length) { + return; + } + + heap.push_back(value); + static CompareT compareT; + std::push_heap(heap.begin(), heap.end(), compareT); + ++count; + } + + + + /** + * Returns the node of minimum value from the heap (top of the heap). + * + * Params: + * value = out parameter used to return the min element + * Returns: false if heap empty + */ + bool popMin(T& value) + { + if (count == 0) { + return false; + } + + value = heap[0]; + static CompareT compareT; + std::pop_heap(heap.begin(), heap.end(), compareT); + heap.pop_back(); + --count; + + return true; /* Return old last node. 
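+ The heap top is the minimum because CompareT deliberately reverses operator<,
+ turning std::push_heap/std::pop_heap (which maintain a max-heap) into a min-heap.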
*/ + } +}; + +} + +#endif //OPENCV_FLANN_HEAP_H_ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/hierarchical_clustering_index.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/hierarchical_clustering_index.h new file mode 100644 index 0000000..b511ee9 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/hierarchical_clustering_index.h @@ -0,0 +1,759 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_ +#define OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_ + +#include +#include +#include +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "dist.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" + + +namespace cvflann +{ + +struct HierarchicalClusteringIndexParams : public IndexParams +{ + HierarchicalClusteringIndexParams(int branching = 32, + flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, + int trees = 4, int leaf_size = 100) + { + (*this)["algorithm"] = FLANN_INDEX_HIERARCHICAL; + // The branching factor used in the hierarchical clustering + (*this)["branching"] = branching; + // Algorithm used for picking the initial cluster centers + (*this)["centers_init"] = centers_init; + // number of parallel trees to build + (*this)["trees"] = trees; + // maximum leaf size + (*this)["leaf_size"] = leaf_size; + } +}; + + +/** + * Hierarchical index + * + * Contains a tree constructed through a hierarchical clustering + * and other information for indexing a set of points for nearest-neighbour matching. 
+ */ +template +class HierarchicalClusteringIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + +private: + + + typedef void (HierarchicalClusteringIndex::* centersAlgFunction)(int, int*, int, int*, int&); + + /** + * The function used for choosing the cluster centers. + */ + centersAlgFunction chooseCenters; + + + + /** + * Chooses the initial centers in the k-means clustering in a random manner. + * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * indices_length = length of indices vector + * + */ + void chooseCentersRandom(int k, int* dsindices, int indices_length, int* centers, int& centers_length) + { + UniqueRandom r(indices_length); + + int index; + for (index=0; index=0 && rnd < n); + + centers[0] = dsindices[rnd]; + + int index; + for (index=1; indexbest_val) { + best_val = dist; + best_index = j; + } + } + if (best_index!=-1) { + centers[index] = dsindices[best_index]; + } + else { + break; + } + } + centers_length = index; + } + + + /** + * Chooses the initial centers in the k-means using the algorithm + * proposed in the KMeans++ paper: + * Arthur, David; Vassilvitskii, Sergei - k-means++: The Advantages of Careful Seeding + * + * Implementation of this function was converted from the one provided in Arthur's code. + * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * Returns: + */ + void chooseCentersKMeanspp(int k, int* dsindices, int indices_length, int* centers, int& centers_length) + { + int n = indices_length; + + double currentPot = 0; + DistanceType* closestDistSq = new DistanceType[n]; + + // Choose one random center and set the closestDistSq values + int index = rand_int(n); + assert(index >=0 && index < n); + centers[0] = dsindices[index]; + + for (int i = 0; i < n; i++) { + closestDistSq[i] = distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols); + currentPot += closestDistSq[i]; + } + + + const int numLocalTries = 1; + + // Choose each center + int centerCount; + for (centerCount = 1; centerCount < k; centerCount++) { + + // Repeat several trials + double bestNewPot = -1; + int bestNewIndex = 0; + for (int localTrial = 0; localTrial < numLocalTries; localTrial++) { + + // Choose our center - have to be slightly careful to return a valid answer even accounting + // for possible rounding errors + double randVal = rand_double(currentPot); + for (index = 0; index < n-1; index++) { + if (randVal <= closestDistSq[index]) break; + else randVal -= closestDistSq[index]; + } + + // Compute the new potential + double newPot = 0; + for (int i = 0; i < n; i++) newPot += std::min( distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols), closestDistSq[i] ); + + // Store the best result + if ((bestNewPot < 0)||(newPot < bestNewPot)) { + bestNewPot = newPot; + bestNewIndex = index; + } + } + + // Add the appropriate center + centers[centerCount] = dsindices[bestNewIndex]; + currentPot = bestNewPot; + for (int i = 0; i < n; i++) closestDistSq[i] = std::min( distance(dataset[dsindices[i]], dataset[dsindices[bestNewIndex]], dataset.cols), closestDistSq[i] ); + } + + centers_length = centerCount; + + delete[] closestDistSq; + } + + +public: + + + /** + * Index constructor + * + * Params: + * inputData = dataset with the input features + * params = parameters passed to the hierarchical k-means algorithm + */ + 
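+ // Illustrative construction sketch (not from the upstream source; `feature_ptr`,
+ // `n_rows` and `n_cols` are placeholders for a caller-owned float dataset):
+ //
+ //     cvflann::Matrix<float> features(feature_ptr, n_rows, n_cols);
+ //     cvflann::HierarchicalClusteringIndex<cvflann::L2<float> > index(
+ //         features, cvflann::HierarchicalClusteringIndexParams(
+ //             /*branching=*/32, cvflann::FLANN_CENTERS_KMEANSPP, /*trees=*/4, /*leaf_size=*/100));
+ //     index.buildIndex();   // clusters the data into `trees` parallel hierarchies
+ //     // queries then go through the NNIndex interface (knnSearch / findNeighbors)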
HierarchicalClusteringIndex(const Matrix& inputData, const IndexParams& index_params = HierarchicalClusteringIndexParams(), + Distance d = Distance()) + : dataset(inputData), params(index_params), root(NULL), indices(NULL), distance(d) + { + memoryCounter = 0; + + size_ = dataset.rows; + veclen_ = dataset.cols; + + branching_ = get_param(params,"branching",32); + centers_init_ = get_param(params,"centers_init", FLANN_CENTERS_RANDOM); + trees_ = get_param(params,"trees",4); + leaf_size_ = get_param(params,"leaf_size",100); + + if (centers_init_==FLANN_CENTERS_RANDOM) { + chooseCenters = &HierarchicalClusteringIndex::chooseCentersRandom; + } + else if (centers_init_==FLANN_CENTERS_GONZALES) { + chooseCenters = &HierarchicalClusteringIndex::chooseCentersGonzales; + } + else if (centers_init_==FLANN_CENTERS_KMEANSPP) { + chooseCenters = &HierarchicalClusteringIndex::chooseCentersKMeanspp; + } + else { + throw FLANNException("Unknown algorithm for choosing initial centers."); + } + + trees_ = get_param(params,"trees",4); + root = new NodePtr[trees_]; + indices = new int*[trees_]; + + for (int i=0; i(); + computeClustering(root[i], indices[i], (int)size_, branching_,0); + } + } + + + flann_algorithm_t getType() const + { + return FLANN_INDEX_HIERARCHICAL; + } + + + void saveIndex(FILE* stream) + { + save_value(stream, branching_); + save_value(stream, trees_); + save_value(stream, centers_init_); + save_value(stream, leaf_size_); + save_value(stream, memoryCounter); + for (int i=0; i& result, const ElementType* vec, const SearchParams& searchParams) + { + + int maxChecks = get_param(searchParams,"checks",32); + + // Priority queue storing intermediate branches in the best-bin-first search + Heap* heap = new Heap((int)size_); + + std::vector checked(size_,false); + int checks = 0; + for (int i=0; ipopMin(branch) && (checks BranchSt; + + + + void save_tree(FILE* stream, NodePtr node, int num) + { + save_value(stream, *node); + if (node->childs==NULL) { + int indices_offset = (int)(node->indices - indices[num]); + save_value(stream, indices_offset); + } + else { + for(int i=0; ichilds[i], num); + } + } + } + + + void load_tree(FILE* stream, NodePtr& node, int num) + { + node = pool.allocate(); + load_value(stream, *node); + if (node->childs==NULL) { + int indices_offset; + load_value(stream, indices_offset); + node->indices = indices[num] + indices_offset; + } + else { + node->childs = pool.allocate(branching_); + for(int i=0; ichilds[i], num); + } + } + } + + + + + void computeLabels(int* dsindices, int indices_length, int* centers, int centers_length, int* labels, DistanceType& cost) + { + cost = 0; + for (int i=0; inew_dist) { + labels[i] = j; + dist = new_dist; + } + } + cost += dist; + } + } + + /** + * The method responsible with actually doing the recursive hierarchical + * clustering + * + * Params: + * node = the node to cluster + * indices = indices of the points belonging to the current node + * branching = the branching factor to use in the clustering + * + * TODO: for 1-sized clusters don't store a cluster center (it's the same as the single cluster point) + */ + void computeClustering(NodePtr node, int* dsindices, int indices_length, int branching, int level) + { + node->size = indices_length; + node->level = level; + + if (indices_length < leaf_size_) { // leaf node + node->indices = dsindices; + std::sort(node->indices,node->indices+indices_length); + node->childs = NULL; + return; + } + + std::vector centers(branching); + std::vector labels(indices_length); + + int centers_length; 
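+ // `chooseCenters` is a pointer-to-member selected in the constructor (random,
+ // Gonzales or k-means++ seeding); `(this->*chooseCenters)(...)` invokes it on this index.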
+ (this->*chooseCenters)(branching, dsindices, indices_length, ¢ers[0], centers_length); + + if (centers_lengthindices = dsindices; + std::sort(node->indices,node->indices+indices_length); + node->childs = NULL; + return; + } + + + // assign points to clusters + DistanceType cost; + computeLabels(dsindices, indices_length, ¢ers[0], centers_length, &labels[0], cost); + + node->childs = pool.allocate(branching); + int start = 0; + int end = start; + for (int i=0; ichilds[i] = pool.allocate(); + node->childs[i]->pivot = centers[i]; + node->childs[i]->indices = NULL; + computeClustering(node->childs[i],dsindices+start, end-start, branching, level+1); + start=end; + } + } + + + + /** + * Performs one descent in the hierarchical k-means tree. The branches not + * visited are stored in a priority queue. + * + * Params: + * node = node to explore + * result = container for the k-nearest neighbors found + * vec = query points + * checks = how many points in the dataset have been checked so far + * maxChecks = maximum dataset points to checks + */ + + + void findNN(NodePtr node, ResultSet& result, const ElementType* vec, int& checks, int maxChecks, + Heap* heap, std::vector& checked) + { + if (node->childs==NULL) { + if (checks>=maxChecks) { + if (result.full()) return; + } + for (int i=0; isize; ++i) { + int index = node->indices[i]; + if (!checked[index]) { + DistanceType dist = distance(dataset[index], vec, veclen_); + result.addPoint(dist, index); + checked[index] = true; + ++checks; + } + } + } + else { + DistanceType* domain_distances = new DistanceType[branching_]; + int best_index = 0; + domain_distances[best_index] = distance(vec, dataset[node->childs[best_index]->pivot], veclen_); + for (int i=1; ichilds[i]->pivot], veclen_); + if (domain_distances[i]insert(BranchSt(node->childs[i],domain_distances[i])); + } + } + delete[] domain_distances; + findNN(node->childs[best_index],result,vec, checks, maxChecks, heap, checked); + } + } + +private: + + + /** + * The dataset used by this index + */ + const Matrix dataset; + + /** + * Parameters used by this index + */ + IndexParams params; + + + /** + * Number of features in the dataset. + */ + size_t size_; + + /** + * Length of each feature. + */ + size_t veclen_; + + /** + * The root node in the tree. + */ + NodePtr* root; + + /** + * Array of indices to vectors in the dataset. + */ + int** indices; + + + /** + * The distance + */ + Distance distance; + + /** + * Pooled memory allocator. + * + * Using a pooled memory allocator is more efficient + * than allocating memory directly when there is a large + * number small of memory allocations. + */ + PooledAllocator pool; + + /** + * Memory occupied by the index. + */ + int memoryCounter; + + /** index parameters */ + int branching_; + int trees_; + flann_centers_init_t centers_init_; + int leaf_size_; + + +}; + +} + +#endif /* OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/index_testing.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/index_testing.h new file mode 100644 index 0000000..d764004 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/index_testing.h @@ -0,0 +1,318 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. 
Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_INDEX_TESTING_H_ +#define OPENCV_FLANN_INDEX_TESTING_H_ + +#include +#include +#include + +#include "matrix.h" +#include "nn_index.h" +#include "result_set.h" +#include "logger.h" +#include "timer.h" + + +namespace cvflann +{ + +inline int countCorrectMatches(int* neighbors, int* groundTruth, int n) +{ + int count = 0; + for (int i=0; i +typename Distance::ResultType computeDistanceRaport(const Matrix& inputData, typename Distance::ElementType* target, + int* neighbors, int* groundTruth, int veclen, int n, const Distance& distance) +{ + typedef typename Distance::ResultType DistanceType; + + DistanceType ret = 0; + for (int i=0; i +float search_with_ground_truth(NNIndex& index, const Matrix& inputData, + const Matrix& testData, const Matrix& matches, int nn, int checks, + float& time, typename Distance::ResultType& dist, const Distance& distance, int skipMatches) +{ + typedef typename Distance::ResultType DistanceType; + + if (matches.cols resultSet(nn+skipMatches); + SearchParams searchParams(checks); + + std::vector indices(nn+skipMatches); + std::vector dists(nn+skipMatches); + int* neighbors = &indices[skipMatches]; + + int correct = 0; + DistanceType distR = 0; + StartStopTimer t; + int repeats = 0; + while (t.value<0.2) { + repeats++; + t.start(); + correct = 0; + distR = 0; + for (size_t i = 0; i < testData.rows; i++) { + resultSet.init(&indices[0], &dists[0]); + index.findNeighbors(resultSet, testData[i], searchParams); + + correct += countCorrectMatches(neighbors,matches[i], nn); + distR += computeDistanceRaport(inputData, testData[i], neighbors, matches[i], (int)testData.cols, nn, distance); + } + t.stop(); + } + time = float(t.value/repeats); + + float precicion = (float)correct/(nn*testData.rows); + + dist = distR/(testData.rows*nn); + + Logger::info("%8d %10.4g %10.5g %10.5g %10.5g\n", + checks, precicion, time, 1000.0 * time / testData.rows, dist); + + return precicion; +} + + +template +float test_index_checks(NNIndex& index, const Matrix& inputData, + const Matrix& testData, const Matrix& matches, + int checks, float& precision, const Distance& distance, int nn = 1, 
int skipMatches = 0) +{ + typedef typename Distance::ResultType DistanceType; + + Logger::info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n"); + Logger::info("---------------------------------------------------------\n"); + + float time = 0; + DistanceType dist = 0; + precision = search_with_ground_truth(index, inputData, testData, matches, nn, checks, time, dist, distance, skipMatches); + + return time; +} + +template +float test_index_precision(NNIndex& index, const Matrix& inputData, + const Matrix& testData, const Matrix& matches, + float precision, int& checks, const Distance& distance, int nn = 1, int skipMatches = 0) +{ + typedef typename Distance::ResultType DistanceType; + const float SEARCH_EPS = 0.001f; + + Logger::info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n"); + Logger::info("---------------------------------------------------------\n"); + + int c2 = 1; + float p2; + int c1 = 1; + //float p1; + float time; + DistanceType dist; + + p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches); + + if (p2>precision) { + Logger::info("Got as close as I can\n"); + checks = c2; + return time; + } + + while (p2SEARCH_EPS) { + Logger::info("Start linear estimation\n"); + // after we got to values in the vecinity of the desired precision + // use linear approximation get a better estimation + + cx = (c1+c2)/2; + realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches); + while (fabs(realPrecision-precision)>SEARCH_EPS) { + + if (realPrecision +void test_index_precisions(NNIndex& index, const Matrix& inputData, + const Matrix& testData, const Matrix& matches, + float* precisions, int precisions_length, const Distance& distance, int nn = 1, int skipMatches = 0, float maxTime = 0) +{ + typedef typename Distance::ResultType DistanceType; + + const float SEARCH_EPS = 0.001; + + // make sure precisions array is sorted + std::sort(precisions, precisions+precisions_length); + + int pindex = 0; + float precision = precisions[pindex]; + + Logger::info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n"); + Logger::info("---------------------------------------------------------\n"); + + int c2 = 1; + float p2; + + int c1 = 1; + float p1; + + float time; + DistanceType dist; + + p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches); + + // if precision for 1 run down the tree is already + // better then some of the requested precisions, then + // skip those + while (precisions[pindex] 0)&&(time > maxTime)&&(p2SEARCH_EPS) { + Logger::info("Start linear estimation\n"); + // after we got to values in the vecinity of the desired precision + // use linear approximation get a better estimation + + cx = (c1+c2)/2; + realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches); + while (fabs(realPrecision-precision)>SEARCH_EPS) { + + if (realPrecision +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "dynamic_bitset.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" + + +namespace cvflann +{ + +struct KDTreeIndexParams : public IndexParams +{ + KDTreeIndexParams(int trees = 4) + { + (*this)["algorithm"] = FLANN_INDEX_KDTREE; + (*this)["trees"] = trees; + } +}; + + +/** + * Randomized kd-tree index + * + * Contains the k-d trees and other 
information for indexing a set of points + * for nearest-neighbor matching. + */ +template +class KDTreeIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + + /** + * KDTree constructor + * + * Params: + * inputData = dataset with the input features + * params = parameters passed to the kdtree algorithm + */ + KDTreeIndex(const Matrix& inputData, const IndexParams& params = KDTreeIndexParams(), + Distance d = Distance() ) : + dataset_(inputData), index_params_(params), distance_(d) + { + size_ = dataset_.rows; + veclen_ = dataset_.cols; + + trees_ = get_param(index_params_,"trees",4); + tree_roots_ = new NodePtr[trees_]; + + // Create a permutable array of indices to the input vectors. + vind_.resize(size_); + for (size_t i = 0; i < size_; ++i) { + vind_[i] = int(i); + } + + mean_ = new DistanceType[veclen_]; + var_ = new DistanceType[veclen_]; + } + + + KDTreeIndex(const KDTreeIndex&); + KDTreeIndex& operator=(const KDTreeIndex&); + + /** + * Standard destructor + */ + ~KDTreeIndex() + { + if (tree_roots_!=NULL) { + delete[] tree_roots_; + } + delete[] mean_; + delete[] var_; + } + + /** + * Builds the index + */ + void buildIndex() + { + /* Construct the randomized trees. */ + for (int i = 0; i < trees_; i++) { + /* Randomize the order of vectors to allow for unbiased sampling. */ + std::random_shuffle(vind_.begin(), vind_.end()); + tree_roots_[i] = divideTree(&vind_[0], int(size_) ); + } + } + + + flann_algorithm_t getType() const + { + return FLANN_INDEX_KDTREE; + } + + + void saveIndex(FILE* stream) + { + save_value(stream, trees_); + for (int i=0; i& result, const ElementType* vec, const SearchParams& searchParams) + { + int maxChecks = get_param(searchParams,"checks", 32); + float epsError = 1+get_param(searchParams,"eps",0.0f); + + if (maxChecks==FLANN_CHECKS_UNLIMITED) { + getExactNeighbors(result, vec, epsError); + } + else { + getNeighbors(result, vec, maxChecks, epsError); + } + } + + IndexParams getParameters() const + { + return index_params_; + } + +private: + + + /*--------------------- Internal Data Structures --------------------------*/ + struct Node + { + /** + * Dimension used for subdivision. + */ + int divfeat; + /** + * The values used for subdivision. + */ + DistanceType divval; + /** + * The child nodes. + */ + Node* child1, * child2; + }; + typedef Node* NodePtr; + typedef BranchStruct BranchSt; + typedef BranchSt* Branch; + + + + void save_tree(FILE* stream, NodePtr tree) + { + save_value(stream, *tree); + if (tree->child1!=NULL) { + save_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + save_tree(stream, tree->child2); + } + } + + + void load_tree(FILE* stream, NodePtr& tree) + { + tree = pool_.allocate(); + load_value(stream, *tree); + if (tree->child1!=NULL) { + load_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + load_tree(stream, tree->child2); + } + } + + + /** + * Create a tree node that subdivides the list of vecs from vind[first] + * to vind[last]. The routine is called recursively on each sublist. + * Place a pointer to this new tree node in the location pTree. + * + * Params: pTree = the new node to create + * first = index of the first vector + * last = index of the last vector + */ + NodePtr divideTree(int* ind, int count) + { + NodePtr node = pool_.allocate(); // allocate memory + + /* If too few exemplars remain, then make this a leaf node. */ + if ( count == 1) { + node->child1 = node->child2 = NULL; /* Mark as leaf node. 
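+ In this randomized-tree variant every leaf holds exactly one point (division
+ continues until count == 1), so divfeat is reused below to store that point's
+ dataset index rather than a split dimension.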
*/ + node->divfeat = *ind; /* Store index of this vec. */ + } + else { + int idx; + int cutfeat; + DistanceType cutval; + meanSplit(ind, count, idx, cutfeat, cutval); + + node->divfeat = cutfeat; + node->divval = cutval; + node->child1 = divideTree(ind, idx); + node->child2 = divideTree(ind+idx, count-idx); + } + + return node; + } + + + /** + * Choose which feature to use in order to subdivide this set of vectors. + * Make a random choice among those with the highest variance, and use + * its variance as the threshold value. + */ + void meanSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval) + { + memset(mean_,0,veclen_*sizeof(DistanceType)); + memset(var_,0,veclen_*sizeof(DistanceType)); + + /* Compute mean values. Only the first SAMPLE_MEAN values need to be + sampled to get a good estimate. + */ + int cnt = std::min((int)SAMPLE_MEAN+1, count); + for (int j = 0; j < cnt; ++j) { + ElementType* v = dataset_[ind[j]]; + for (size_t k=0; kcount/2) index = lim1; + else if (lim2 v[topind[num-1]])) { + /* Put this element at end of topind. */ + if (num < RAND_DIM) { + topind[num++] = i; /* Add to list. */ + } + else { + topind[num-1] = i; /* Replace last element. */ + } + /* Bubble end value down to right location by repeated swapping. */ + int j = num - 1; + while (j > 0 && v[topind[j]] > v[topind[j-1]]) { + std::swap(topind[j], topind[j-1]); + --j; + } + } + } + /* Select a random integer in range [0,num-1], and return that index. */ + int rnd = rand_int(num); + return (int)topind[rnd]; + } + + + /** + * Subdivide the list of points by a plane perpendicular on axe corresponding + * to the 'cutfeat' dimension at 'cutval' position. + * + * On return: + * dataset[ind[0..lim1-1]][cutfeat]cutval + */ + void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2) + { + /* Move vector indices for left subtree to front of list. */ + int left = 0; + int right = count-1; + for (;; ) { + while (left<=right && dataset_[ind[left]][cutfeat]=cutval) --right; + if (left>right) break; + std::swap(ind[left], ind[right]); ++left; --right; + } + lim1 = left; + right = count-1; + for (;; ) { + while (left<=right && dataset_[ind[left]][cutfeat]<=cutval) ++left; + while (left<=right && dataset_[ind[right]][cutfeat]>cutval) --right; + if (left>right) break; + std::swap(ind[left], ind[right]); ++left; --right; + } + lim2 = left; + } + + /** + * Performs an exact nearest neighbor search. The exact search performs a full + * traversal of the tree. + */ + void getExactNeighbors(ResultSet& result, const ElementType* vec, float epsError) + { + // checkID -= 1; /* Set a different unique ID for each search. */ + + if (trees_ > 1) { + fprintf(stderr,"It doesn't make any sense to use more than one tree for exact search"); + } + if (trees_>0) { + searchLevelExact(result, vec, tree_roots_[0], 0.0, epsError); + } + assert(result.full()); + } + + /** + * Performs the approximate nearest-neighbor search. The search is approximate + * because the tree traversal is abandoned after a given number of descends in + * the tree. + */ + void getNeighbors(ResultSet& result, const ElementType* vec, int maxCheck, float epsError) + { + int i; + BranchSt branch; + + int checkCount = 0; + Heap* heap = new Heap((int)size_); + DynamicBitset checked(size_); + + /* Search once through each tree down to root. 
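+ Each descent records the branches it does not take, together with a lower bound
+ on their distance, in the shared priority heap; the loop below then drains that
+ heap in best-bin-first order, stopping once maxCheck points have been examined
+ and the result set is full (or no branches remain).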
*/ + for (i = 0; i < trees_; ++i) { + searchLevel(result, vec, tree_roots_[i], 0, checkCount, maxCheck, epsError, heap, checked); + } + + /* Keep searching other branches from heap until finished. */ + while ( heap->popMin(branch) && (checkCount < maxCheck || !result.full() )) { + searchLevel(result, vec, branch.node, branch.mindist, checkCount, maxCheck, epsError, heap, checked); + } + + delete heap; + + assert(result.full()); + } + + + /** + * Search starting from a given node of the tree. Based on any mismatches at + * higher levels, all exemplars below this level must have a distance of + * at least "mindistsq". + */ + void searchLevel(ResultSet& result_set, const ElementType* vec, NodePtr node, DistanceType mindist, int& checkCount, int maxCheck, + float epsError, Heap* heap, DynamicBitset& checked) + { + if (result_set.worstDist()child1 == NULL)&&(node->child2 == NULL)) { + /* Do not check same node more than once when searching multiple trees. + Once a vector is checked, we set its location in vind to the + current checkID. + */ + int index = node->divfeat; + if ( checked.test(index) || ((checkCount>=maxCheck)&& result_set.full()) ) return; + checked.set(index); + checkCount++; + + DistanceType dist = distance_(dataset_[index], vec, veclen_); + result_set.addPoint(dist,index); + + return; + } + + /* Which child branch should be taken first? */ + ElementType val = vec[node->divfeat]; + DistanceType diff = val - node->divval; + NodePtr bestChild = (diff < 0) ? node->child1 : node->child2; + NodePtr otherChild = (diff < 0) ? node->child2 : node->child1; + + /* Create a branch record for the branch not taken. Add distance + of this feature boundary (we don't attempt to correct for any + use of this feature in a parent node, which is unlikely to + happen and would have only a small effect). Don't bother + adding more branches to heap after halfway point, as cost of + adding exceeds their value. + */ + + DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat); + // if (2 * checkCount < maxCheck || !result.full()) { + if ((new_distsq*epsError < result_set.worstDist())|| !result_set.full()) { + heap->insert( BranchSt(otherChild, new_distsq) ); + } + + /* Call recursively to search next level down. */ + searchLevel(result_set, vec, bestChild, mindist, checkCount, maxCheck, epsError, heap, checked); + } + + /** + * Performs an exact search in the tree starting from a node. + */ + void searchLevelExact(ResultSet& result_set, const ElementType* vec, const NodePtr node, DistanceType mindist, const float epsError) + { + /* If this is a leaf node, then do check and return. */ + if ((node->child1 == NULL)&&(node->child2 == NULL)) { + int index = node->divfeat; + DistanceType dist = distance_(dataset_[index], vec, veclen_); + result_set.addPoint(dist,index); + return; + } + + /* Which child branch should be taken first? */ + ElementType val = vec[node->divfeat]; + DistanceType diff = val - node->divval; + NodePtr bestChild = (diff < 0) ? node->child1 : node->child2; + NodePtr otherChild = (diff < 0) ? node->child2 : node->child1; + + /* Create a branch record for the branch not taken. Add distance + of this feature boundary (we don't attempt to correct for any + use of this feature in a parent node, which is unlikely to + happen and would have only a small effect). Don't bother + adding more branches to heap after halfway point, as cost of + adding exceeds their value. 
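+ Unlike the approximate search above, no heap is used here: the untaken child is
+ simply revisited recursively below, and only if new_distsq * epsError is still
+ within the current worst distance in the result set, which is what makes a
+ non-zero eps an approximate (pruned) search.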
+ */ + + DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat); + + /* Call recursively to search next level down. */ + searchLevelExact(result_set, vec, bestChild, mindist, epsError); + + if (new_distsq*epsError<=result_set.worstDist()) { + searchLevelExact(result_set, vec, otherChild, new_distsq, epsError); + } + } + + +private: + + enum + { + /** + * To improve efficiency, only SAMPLE_MEAN random values are used to + * compute the mean and variance at each level when building a tree. + * A value of 100 seems to perform as well as using all values. + */ + SAMPLE_MEAN = 100, + /** + * Top random dimensions to consider + * + * When creating random trees, the dimension on which to subdivide is + * selected at random from among the top RAND_DIM dimensions with the + * highest variance. A value of 5 works well. + */ + RAND_DIM=5 + }; + + + /** + * Number of randomized trees that are used + */ + int trees_; + + /** + * Array of indices to vectors in the dataset. + */ + std::vector vind_; + + /** + * The dataset used by this index + */ + const Matrix dataset_; + + IndexParams index_params_; + + size_t size_; + size_t veclen_; + + + DistanceType* mean_; + DistanceType* var_; + + + /** + * Array of k-d trees used to find neighbours. + */ + NodePtr* tree_roots_; + + /** + * Pooled memory allocator. + * + * Using a pooled memory allocator is more efficient + * than allocating memory directly when there is a large + * number small of memory allocations. + */ + PooledAllocator pool_; + + Distance distance_; + + +}; // class KDTreeForest + +} + +#endif //OPENCV_FLANN_KDTREE_INDEX_H_ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/kdtree_single_index.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/kdtree_single_index.h new file mode 100644 index 0000000..30488ad --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/kdtree_single_index.h @@ -0,0 +1,634 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_ +#define OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_ + +#include +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" + +namespace cvflann +{ + +struct KDTreeSingleIndexParams : public IndexParams +{ + KDTreeSingleIndexParams(int leaf_max_size = 10, bool reorder = true, int dim = -1) + { + (*this)["algorithm"] = FLANN_INDEX_KDTREE_SINGLE; + (*this)["leaf_max_size"] = leaf_max_size; + (*this)["reorder"] = reorder; + (*this)["dim"] = dim; + } +}; + + +/** + * Randomized kd-tree index + * + * Contains the k-d trees and other information for indexing a set of points + * for nearest-neighbor matching. + */ +template +class KDTreeSingleIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + + /** + * KDTree constructor + * + * Params: + * inputData = dataset with the input features + * params = parameters passed to the kdtree algorithm + */ + KDTreeSingleIndex(const Matrix& inputData, const IndexParams& params = KDTreeSingleIndexParams(), + Distance d = Distance() ) : + dataset_(inputData), index_params_(params), distance_(d) + { + size_ = dataset_.rows; + dim_ = dataset_.cols; + int dim_param = get_param(params,"dim",-1); + if (dim_param>0) dim_ = dim_param; + leaf_max_size_ = get_param(params,"leaf_max_size",10); + reorder_ = get_param(params,"reorder",true); + + // Create a permutable array of indices to the input vectors. + vind_.resize(size_); + for (size_t i = 0; i < size_; i++) { + vind_[i] = (int)i; + } + } + + KDTreeSingleIndex(const KDTreeSingleIndex&); + KDTreeSingleIndex& operator=(const KDTreeSingleIndex&); + + /** + * Standard destructor + */ + ~KDTreeSingleIndex() + { + if (reorder_) delete[] data_.data; + } + + /** + * Builds the index + */ + void buildIndex() + { + computeBoundingBox(root_bbox_); + root_node_ = divideTree(0, (int)size_, root_bbox_ ); // construct the tree + + if (reorder_) { + delete[] data_.data; + data_ = cvflann::Matrix(new ElementType[size_*dim_], size_, dim_); + for (size_t i=0; i& queries, Matrix& indices, Matrix& dists, int knn, const SearchParams& params) + { + assert(queries.cols == veclen()); + assert(indices.rows >= queries.rows); + assert(dists.rows >= queries.rows); + assert(int(indices.cols) >= knn); + assert(int(dists.cols) >= knn); + + KNNSimpleResultSet resultSet(knn); + for (size_t i = 0; i < queries.rows; i++) { + resultSet.init(indices[i], dists[i]); + findNeighbors(resultSet, queries[i], params); + } + } + + IndexParams getParameters() const + { + return index_params_; + } + + /** + * Find set of nearest neighbors to vec. Their indices are stored inside + * the result object. 
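+     * The search seeds per-dimension distances against the root bounding box and then descends the single tree, pruning a subtree only when its eps-scaled lower bound exceeds the current worst distance.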
+ * + * Params: + * result = the result object in which the indices of the nearest-neighbors are stored + * vec = the vector for which to search the nearest neighbors + * maxCheck = the maximum number of restarts (in a best-bin-first manner) + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) + { + float epsError = 1+get_param(searchParams,"eps",0.0f); + + std::vector dists(dim_,0); + DistanceType distsq = computeInitialDistances(vec, dists); + searchLevel(result, vec, root_node_, distsq, dists, epsError); + } + +private: + + + /*--------------------- Internal Data Structures --------------------------*/ + struct Node + { + /** + * Indices of points in leaf node + */ + int left, right; + /** + * Dimension used for subdivision. + */ + int divfeat; + /** + * The values used for subdivision. + */ + DistanceType divlow, divhigh; + /** + * The child nodes. + */ + Node* child1, * child2; + }; + typedef Node* NodePtr; + + + struct Interval + { + DistanceType low, high; + }; + + typedef std::vector BoundingBox; + + typedef BranchStruct BranchSt; + typedef BranchSt* Branch; + + + + + void save_tree(FILE* stream, NodePtr tree) + { + save_value(stream, *tree); + if (tree->child1!=NULL) { + save_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + save_tree(stream, tree->child2); + } + } + + + void load_tree(FILE* stream, NodePtr& tree) + { + tree = pool_.allocate(); + load_value(stream, *tree); + if (tree->child1!=NULL) { + load_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + load_tree(stream, tree->child2); + } + } + + + void computeBoundingBox(BoundingBox& bbox) + { + bbox.resize(dim_); + for (size_t i=0; ibbox[i].high) bbox[i].high = (DistanceType)dataset_[k][i]; + } + } + } + + + /** + * Create a tree node that subdivides the list of vecs from vind[first] + * to vind[last]. The routine is called recursively on each sublist. + * Place a pointer to this new tree node in the location pTree. + * + * Params: pTree = the new node to create + * first = index of the first vector + * last = index of the last vector + */ + NodePtr divideTree(int left, int right, BoundingBox& bbox) + { + NodePtr node = pool_.allocate(); // allocate memory + + /* If too few exemplars remain, then make this a leaf node. */ + if ( (right-left) <= leaf_max_size_) { + node->child1 = node->child2 = NULL; /* Mark as leaf node. 
*/ + node->left = left; + node->right = right; + + // compute bounding-box of leaf points + for (size_t i=0; idataset_[vind_[k]][i]) bbox[i].low=(DistanceType)dataset_[vind_[k]][i]; + if (bbox[i].highdivfeat = cutfeat; + + BoundingBox left_bbox(bbox); + left_bbox[cutfeat].high = cutval; + node->child1 = divideTree(left, left+idx, left_bbox); + + BoundingBox right_bbox(bbox); + right_bbox[cutfeat].low = cutval; + node->child2 = divideTree(left+idx, right, right_bbox); + + node->divlow = left_bbox[cutfeat].high; + node->divhigh = right_bbox[cutfeat].low; + + for (size_t i=0; imax_elem) max_elem = val; + } + } + + void middleSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval, const BoundingBox& bbox) + { + // find the largest span from the approximate bounding box + ElementType max_span = bbox[0].high-bbox[0].low; + cutfeat = 0; + cutval = (bbox[0].high+bbox[0].low)/2; + for (size_t i=1; imax_span) { + max_span = span; + cutfeat = i; + cutval = (bbox[i].high+bbox[i].low)/2; + } + } + + // compute exact span on the found dimension + ElementType min_elem, max_elem; + computeMinMax(ind, count, cutfeat, min_elem, max_elem); + cutval = (min_elem+max_elem)/2; + max_span = max_elem - min_elem; + + // check if a dimension of a largest span exists + size_t k = cutfeat; + for (size_t i=0; imax_span) { + computeMinMax(ind, count, i, min_elem, max_elem); + span = max_elem - min_elem; + if (span>max_span) { + max_span = span; + cutfeat = i; + cutval = (min_elem+max_elem)/2; + } + } + } + int lim1, lim2; + planeSplit(ind, count, cutfeat, cutval, lim1, lim2); + + if (lim1>count/2) index = lim1; + else if (lim2max_span) { + max_span = span; + } + } + DistanceType max_spread = -1; + cutfeat = 0; + for (size_t i=0; i(DistanceType)((1-EPS)*max_span)) { + ElementType min_elem, max_elem; + computeMinMax(ind, count, cutfeat, min_elem, max_elem); + DistanceType spread = (DistanceType)(max_elem-min_elem); + if (spread>max_spread) { + cutfeat = (int)i; + max_spread = spread; + } + } + } + // split in the middle + DistanceType split_val = (bbox[cutfeat].low+bbox[cutfeat].high)/2; + ElementType min_elem, max_elem; + computeMinMax(ind, count, cutfeat, min_elem, max_elem); + + if (split_valmax_elem) cutval = (DistanceType)max_elem; + else cutval = split_val; + + int lim1, lim2; + planeSplit(ind, count, cutfeat, cutval, lim1, lim2); + + if (lim1>count/2) index = lim1; + else if (lim2cutval + */ + void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2) + { + /* Move vector indices for left subtree to front of list. */ + int left = 0; + int right = count-1; + for (;; ) { + while (left<=right && dataset_[ind[left]][cutfeat]=cutval) --right; + if (left>right) break; + std::swap(ind[left], ind[right]); ++left; --right; + } + /* If either list is empty, it means that all remaining features + * are identical. Split in the middle to maintain a balanced tree. 
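+     * A second pass with <= instead of < yields lim2; entries in [lim1, lim2) equal cutval, and the caller then picks a split index as close to count/2 as these limits allow.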
+ */ + lim1 = left; + right = count-1; + for (;; ) { + while (left<=right && dataset_[ind[left]][cutfeat]<=cutval) ++left; + while (left<=right && dataset_[ind[right]][cutfeat]>cutval) --right; + if (left>right) break; + std::swap(ind[left], ind[right]); ++left; --right; + } + lim2 = left; + } + + DistanceType computeInitialDistances(const ElementType* vec, std::vector& dists) + { + DistanceType distsq = 0.0; + + for (size_t i = 0; i < dim_; ++i) { + if (vec[i] < root_bbox_[i].low) { + dists[i] = distance_.accum_dist(vec[i], root_bbox_[i].low, (int)i); + distsq += dists[i]; + } + if (vec[i] > root_bbox_[i].high) { + dists[i] = distance_.accum_dist(vec[i], root_bbox_[i].high, (int)i); + distsq += dists[i]; + } + } + + return distsq; + } + + /** + * Performs an exact search in the tree starting from a node. + */ + void searchLevel(ResultSet& result_set, const ElementType* vec, const NodePtr node, DistanceType mindistsq, + std::vector& dists, const float epsError) + { + /* If this is a leaf node, then do check and return. */ + if ((node->child1 == NULL)&&(node->child2 == NULL)) { + DistanceType worst_dist = result_set.worstDist(); + for (int i=node->left; iright; ++i) { + int index = reorder_ ? i : vind_[i]; + DistanceType dist = distance_(vec, data_[index], dim_, worst_dist); + if (distdivfeat; + ElementType val = vec[idx]; + DistanceType diff1 = val - node->divlow; + DistanceType diff2 = val - node->divhigh; + + NodePtr bestChild; + NodePtr otherChild; + DistanceType cut_dist; + if ((diff1+diff2)<0) { + bestChild = node->child1; + otherChild = node->child2; + cut_dist = distance_.accum_dist(val, node->divhigh, idx); + } + else { + bestChild = node->child2; + otherChild = node->child1; + cut_dist = distance_.accum_dist( val, node->divlow, idx); + } + + /* Call recursively to search next level down. */ + searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError); + + DistanceType dst = dists[idx]; + mindistsq = mindistsq + cut_dist - dst; + dists[idx] = cut_dist; + if (mindistsq*epsError<=result_set.worstDist()) { + searchLevel(result_set, vec, otherChild, mindistsq, dists, epsError); + } + dists[idx] = dst; + } + +private: + + /** + * The dataset used by this index + */ + const Matrix dataset_; + + IndexParams index_params_; + + int leaf_max_size_; + bool reorder_; + + + /** + * Array of indices to vectors in the dataset. + */ + std::vector vind_; + + Matrix data_; + + size_t size_; + size_t dim_; + + /** + * Array of k-d trees used to find neighbours. + */ + NodePtr root_node_; + + BoundingBox root_bbox_; + + /** + * Pooled memory allocator. + * + * Using a pooled memory allocator is more efficient + * than allocating memory directly when there is a large + * number small of memory allocations. + */ + PooledAllocator pool_; + + Distance distance_; +}; // class KDTree + +} + +#endif //OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/kmeans_index.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/kmeans_index.h new file mode 100644 index 0000000..489ed80 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/kmeans_index.h @@ -0,0 +1,1117 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. 
+ * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_KMEANS_INDEX_H_ +#define OPENCV_FLANN_KMEANS_INDEX_H_ + +#include +#include +#include +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "dist.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" +#include "logger.h" + + +namespace cvflann +{ + +struct KMeansIndexParams : public IndexParams +{ + KMeansIndexParams(int branching = 32, int iterations = 11, + flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, float cb_index = 0.2 ) + { + (*this)["algorithm"] = FLANN_INDEX_KMEANS; + // branching factor + (*this)["branching"] = branching; + // max iterations to perform in one kmeans clustering (kmeans tree) + (*this)["iterations"] = iterations; + // algorithm used for picking the initial cluster centers for kmeans tree + (*this)["centers_init"] = centers_init; + // cluster boundary index. Used when searching the kmeans tree + (*this)["cb_index"] = cb_index; + } +}; + + +/** + * Hierarchical kmeans index + * + * Contains a tree constructed through a hierarchical kmeans clustering + * and other information for indexing a set of points for nearest-neighbour matching. + */ +template +class KMeansIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + + + typedef void (KMeansIndex::* centersAlgFunction)(int, int*, int, int*, int&); + + /** + * The function used for choosing the cluster centers. + */ + centersAlgFunction chooseCenters; + + + + /** + * Chooses the initial centers in the k-means clustering in a random manner. 
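+     * A UniqueRandom permutation is used below, so the requested centers are drawn without repetition from the given indices.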
+ * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * indices_length = length of indices vector + * + */ + void chooseCentersRandom(int k, int* indices, int indices_length, int* centers, int& centers_length) + { + UniqueRandom r(indices_length); + + int index; + for (index=0; index=0 && rnd < n); + + centers[0] = indices[rnd]; + + int index; + for (index=1; indexbest_val) { + best_val = dist; + best_index = j; + } + } + if (best_index!=-1) { + centers[index] = indices[best_index]; + } + else { + break; + } + } + centers_length = index; + } + + + /** + * Chooses the initial centers in the k-means using the algorithm + * proposed in the KMeans++ paper: + * Arthur, David; Vassilvitskii, Sergei - k-means++: The Advantages of Careful Seeding + * + * Implementation of this function was converted from the one provided in Arthur's code. + * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * Returns: + */ + void chooseCentersKMeanspp(int k, int* indices, int indices_length, int* centers, int& centers_length) + { + int n = indices_length; + + double currentPot = 0; + DistanceType* closestDistSq = new DistanceType[n]; + + // Choose one random center and set the closestDistSq values + int index = rand_int(n); + assert(index >=0 && index < n); + centers[0] = indices[index]; + + for (int i = 0; i < n; i++) { + closestDistSq[i] = distance_(dataset_[indices[i]], dataset_[indices[index]], dataset_.cols); + currentPot += closestDistSq[i]; + } + + + const int numLocalTries = 1; + + // Choose each center + int centerCount; + for (centerCount = 1; centerCount < k; centerCount++) { + + // Repeat several trials + double bestNewPot = -1; + int bestNewIndex = -1; + for (int localTrial = 0; localTrial < numLocalTries; localTrial++) { + + // Choose our center - have to be slightly careful to return a valid answer even accounting + // for possible rounding errors + double randVal = rand_double(currentPot); + for (index = 0; index < n-1; index++) { + if (randVal <= closestDistSq[index]) break; + else randVal -= closestDistSq[index]; + } + + // Compute the new potential + double newPot = 0; + for (int i = 0; i < n; i++) newPot += std::min( distance_(dataset_[indices[i]], dataset_[indices[index]], dataset_.cols), closestDistSq[i] ); + + // Store the best result + if ((bestNewPot < 0)||(newPot < bestNewPot)) { + bestNewPot = newPot; + bestNewIndex = index; + } + } + + // Add the appropriate center + centers[centerCount] = indices[bestNewIndex]; + currentPot = bestNewPot; + for (int i = 0; i < n; i++) closestDistSq[i] = std::min( distance_(dataset_[indices[i]], dataset_[indices[bestNewIndex]], dataset_.cols), closestDistSq[i] ); + } + + centers_length = centerCount; + + delete[] closestDistSq; + } + + + +public: + + flann_algorithm_t getType() const + { + return FLANN_INDEX_KMEANS; + } + + /** + * Index constructor + * + * Params: + * inputData = dataset with the input features + * params = parameters passed to the hierarchical k-means algorithm + */ + KMeansIndex(const Matrix& inputData, const IndexParams& params = KMeansIndexParams(), + Distance d = Distance()) + : dataset_(inputData), index_params_(params), root_(NULL), indices_(NULL), distance_(d) + { + memoryCounter_ = 0; + + size_ = dataset_.rows; + veclen_ = dataset_.cols; + + branching_ = get_param(params,"branching",32); + iterations_ = get_param(params,"iterations",11); + if (iterations_<0) { + iterations_ = (std::numeric_limits::max)(); + } 
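+        // The seeding routine selected below is stored in the chooseCenters member-function
+        // pointer and invoked as (this->*chooseCenters)(...) while the tree is built.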
+ centers_init_ = get_param(params,"centers_init",FLANN_CENTERS_RANDOM); + + if (centers_init_==FLANN_CENTERS_RANDOM) { + chooseCenters = &KMeansIndex::chooseCentersRandom; + } + else if (centers_init_==FLANN_CENTERS_GONZALES) { + chooseCenters = &KMeansIndex::chooseCentersGonzales; + } + else if (centers_init_==FLANN_CENTERS_KMEANSPP) { + chooseCenters = &KMeansIndex::chooseCentersKMeanspp; + } + else { + throw FLANNException("Unknown algorithm for choosing initial centers."); + } + cb_index_ = 0.4f; + + } + + + KMeansIndex(const KMeansIndex&); + KMeansIndex& operator=(const KMeansIndex&); + + + /** + * Index destructor. + * + * Release the memory used by the index. + */ + virtual ~KMeansIndex() + { + if (root_ != NULL) { + free_centers(root_); + } + if (indices_!=NULL) { + delete[] indices_; + } + } + + /** + * Returns size of index. + */ + size_t size() const + { + return size_; + } + + /** + * Returns the length of an index feature. + */ + size_t veclen() const + { + return veclen_; + } + + + void set_cb_index( float index) + { + cb_index_ = index; + } + + /** + * Computes the inde memory usage + * Returns: memory used by the index + */ + int usedMemory() const + { + return pool_.usedMemory+pool_.wastedMemory+memoryCounter_; + } + + /** + * Builds the index + */ + void buildIndex() + { + if (branching_<2) { + throw FLANNException("Branching factor must be at least 2"); + } + + indices_ = new int[size_]; + for (size_t i=0; i(); + computeNodeStatistics(root_, indices_, (int)size_); + computeClustering(root_, indices_, (int)size_, branching_,0); + } + + + void saveIndex(FILE* stream) + { + save_value(stream, branching_); + save_value(stream, iterations_); + save_value(stream, memoryCounter_); + save_value(stream, cb_index_); + save_value(stream, *indices_, (int)size_); + + save_tree(stream, root_); + } + + + void loadIndex(FILE* stream) + { + load_value(stream, branching_); + load_value(stream, iterations_); + load_value(stream, memoryCounter_); + load_value(stream, cb_index_); + if (indices_!=NULL) { + delete[] indices_; + } + indices_ = new int[size_]; + load_value(stream, *indices_, size_); + + if (root_!=NULL) { + free_centers(root_); + } + load_tree(stream, root_); + + index_params_["algorithm"] = getType(); + index_params_["branching"] = branching_; + index_params_["iterations"] = iterations_; + index_params_["centers_init"] = centers_init_; + index_params_["cb_index"] = cb_index_; + + } + + + /** + * Find set of nearest neighbors to vec. Their indices are stored inside + * the result object. 
+ * + * Params: + * result = the result object in which the indices of the nearest-neighbors are stored + * vec = the vector for which to search the nearest neighbors + * searchParams = parameters that influence the search algorithm (checks, cb_index) + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) + { + + int maxChecks = get_param(searchParams,"checks",32); + + if (maxChecks==FLANN_CHECKS_UNLIMITED) { + findExactNN(root_, result, vec); + } + else { + // Priority queue storing intermediate branches in the best-bin-first search + Heap* heap = new Heap((int)size_); + + int checks = 0; + findNN(root_, result, vec, checks, maxChecks, heap); + + BranchSt branch; + while (heap->popMin(branch) && (checks& centers) + { + int numClusters = centers.rows; + if (numClusters<1) { + throw FLANNException("Number of clusters must be at least 1"); + } + + DistanceType variance; + KMeansNodePtr* clusters = new KMeansNodePtr[numClusters]; + + int clusterCount = getMinVarianceClusters(root_, clusters, numClusters, variance); + + Logger::info("Clusters requested: %d, returning %d\n",numClusters, clusterCount); + + for (int i=0; ipivot; + for (size_t j=0; j BranchSt; + + + + + void save_tree(FILE* stream, KMeansNodePtr node) + { + save_value(stream, *node); + save_value(stream, *(node->pivot), (int)veclen_); + if (node->childs==NULL) { + int indices_offset = (int)(node->indices - indices_); + save_value(stream, indices_offset); + } + else { + for(int i=0; ichilds[i]); + } + } + } + + + void load_tree(FILE* stream, KMeansNodePtr& node) + { + node = pool_.allocate(); + load_value(stream, *node); + node->pivot = new DistanceType[veclen_]; + load_value(stream, *(node->pivot), (int)veclen_); + if (node->childs==NULL) { + int indices_offset; + load_value(stream, indices_offset); + node->indices = indices_ + indices_offset; + } + else { + node->childs = pool_.allocate(branching_); + for(int i=0; ichilds[i]); + } + } + } + + + /** + * Helper function + */ + void free_centers(KMeansNodePtr node) + { + delete[] node->pivot; + if (node->childs!=NULL) { + for (int k=0; kchilds[k]); + } + } + } + + /** + * Computes the statistics of a node (mean, radius, variance). 
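+     * The mean vector becomes the node pivot, the radius is the largest pivot-to-point distance, and the variance is derived from distances to the all-zero vector (ZeroIterator).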
+ * + * Params: + * node = the node to use + * indices = the indices of the points belonging to the node + */ + void computeNodeStatistics(KMeansNodePtr node, int* indices, int indices_length) + { + + DistanceType radius = 0; + DistanceType variance = 0; + DistanceType* mean = new DistanceType[veclen_]; + memoryCounter_ += int(veclen_*sizeof(DistanceType)); + + memset(mean,0,veclen_*sizeof(DistanceType)); + + for (size_t i=0; i(), veclen_); + } + for (size_t j=0; j(), veclen_); + + DistanceType tmp = 0; + for (int i=0; iradius) { + radius = tmp; + } + } + + node->variance = variance; + node->radius = radius; + node->pivot = mean; + } + + + /** + * The method responsible with actually doing the recursive hierarchical + * clustering + * + * Params: + * node = the node to cluster + * indices = indices of the points belonging to the current node + * branching = the branching factor to use in the clustering + * + * TODO: for 1-sized clusters don't store a cluster center (it's the same as the single cluster point) + */ + void computeClustering(KMeansNodePtr node, int* indices, int indices_length, int branching, int level) + { + node->size = indices_length; + node->level = level; + + if (indices_length < branching) { + node->indices = indices; + std::sort(node->indices,node->indices+indices_length); + node->childs = NULL; + return; + } + + int* centers_idx = new int[branching]; + int centers_length; + (this->*chooseCenters)(branching, indices, indices_length, centers_idx, centers_length); + + if (centers_lengthindices = indices; + std::sort(node->indices,node->indices+indices_length); + node->childs = NULL; + delete [] centers_idx; + return; + } + + + Matrix dcenters(new double[branching*veclen_],branching,veclen_); + for (int i=0; i radiuses(branching); + int* count = new int[branching]; + for (int i=0; inew_sq_dist) { + belongs_to[i] = j; + sq_dist = new_sq_dist; + } + } + if (sq_dist>radiuses[belongs_to[i]]) { + radiuses[belongs_to[i]] = sq_dist; + } + count[belongs_to[i]]++; + } + + bool converged = false; + int iteration = 0; + while (!converged && iterationnew_sq_dist) { + new_centroid = j; + sq_dist = new_sq_dist; + } + } + if (sq_dist>radiuses[new_centroid]) { + radiuses[new_centroid] = sq_dist; + } + if (new_centroid != belongs_to[i]) { + count[belongs_to[i]]--; + count[new_centroid]++; + belongs_to[i] = new_centroid; + + converged = false; + } + } + + for (int i=0; ichilds = pool_.allocate(branching); + int start = 0; + int end = start; + for (int c=0; c(), veclen_); + variance += d; + mean_radius += sqrt(d); + std::swap(indices[i],indices[end]); + std::swap(belongs_to[i],belongs_to[end]); + end++; + } + } + variance /= s; + mean_radius /= s; + variance -= distance_(centers[c], ZeroIterator(), veclen_); + + node->childs[c] = pool_.allocate(); + node->childs[c]->radius = radiuses[c]; + node->childs[c]->pivot = centers[c]; + node->childs[c]->variance = variance; + node->childs[c]->mean_radius = mean_radius; + node->childs[c]->indices = NULL; + computeClustering(node->childs[c],indices+start, end-start, branching, level+1); + start=end; + } + + delete[] dcenters.data; + delete[] centers; + delete[] count; + delete[] belongs_to; + } + + + + /** + * Performs one descent in the hierarchical k-means tree. The branches not + * visited are stored in a priority queue. 
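+     * A cluster is skipped when the query-to-pivot distance, the cluster radius and the current worst distance together show that the cluster ball cannot contain a closer point.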
+ * + * Params: + * node = node to explore + * result = container for the k-nearest neighbors found + * vec = query points + * checks = how many points in the dataset have been checked so far + * maxChecks = maximum dataset points to checks + */ + + + void findNN(KMeansNodePtr node, ResultSet& result, const ElementType* vec, int& checks, int maxChecks, + Heap* heap) + { + // Ignore those clusters that are too far away + { + DistanceType bsq = distance_(vec, node->pivot, veclen_); + DistanceType rsq = node->radius; + DistanceType wsq = result.worstDist(); + + DistanceType val = bsq-rsq-wsq; + DistanceType val2 = val*val-4*rsq*wsq; + + //if (val>0) { + if ((val>0)&&(val2>0)) { + return; + } + } + + if (node->childs==NULL) { + if (checks>=maxChecks) { + if (result.full()) return; + } + checks += node->size; + for (int i=0; isize; ++i) { + int index = node->indices[i]; + DistanceType dist = distance_(dataset_[index], vec, veclen_); + result.addPoint(dist, index); + } + } + else { + DistanceType* domain_distances = new DistanceType[branching_]; + int closest_center = exploreNodeBranches(node, vec, domain_distances, heap); + delete[] domain_distances; + findNN(node->childs[closest_center],result,vec, checks, maxChecks, heap); + } + } + + /** + * Helper function that computes the nearest childs of a node to a given query point. + * Params: + * node = the node + * q = the query point + * distances = array with the distances to each child node. + * Returns: + */ + int exploreNodeBranches(KMeansNodePtr node, const ElementType* q, DistanceType* domain_distances, Heap* heap) + { + + int best_index = 0; + domain_distances[best_index] = distance_(q, node->childs[best_index]->pivot, veclen_); + for (int i=1; ichilds[i]->pivot, veclen_); + if (domain_distances[i]childs[best_index]->pivot; + for (int i=0; ichilds[i]->variance; + + // float dist_to_border = getDistanceToBorder(node.childs[i].pivot,best_center,q); + // if (domain_distances[i]insert(BranchSt(node->childs[i],domain_distances[i])); + } + } + + return best_index; + } + + + /** + * Function the performs exact nearest neighbor search by traversing the entire tree. + */ + void findExactNN(KMeansNodePtr node, ResultSet& result, const ElementType* vec) + { + // Ignore those clusters that are too far away + { + DistanceType bsq = distance_(vec, node->pivot, veclen_); + DistanceType rsq = node->radius; + DistanceType wsq = result.worstDist(); + + DistanceType val = bsq-rsq-wsq; + DistanceType val2 = val*val-4*rsq*wsq; + + // if (val>0) { + if ((val>0)&&(val2>0)) { + return; + } + } + + + if (node->childs==NULL) { + for (int i=0; isize; ++i) { + int index = node->indices[i]; + DistanceType dist = distance_(dataset_[index], vec, veclen_); + result.addPoint(dist, index); + } + } + else { + int* sort_indices = new int[branching_]; + + getCenterOrdering(node, vec, sort_indices); + + for (int i=0; ichilds[sort_indices[i]],result,vec); + } + + delete[] sort_indices; + } + } + + + /** + * Helper function. + * + * I computes the order in which to traverse the child nodes of a particular node. 
+ */ + void getCenterOrdering(KMeansNodePtr node, const ElementType* q, int* sort_indices) + { + DistanceType* domain_distances = new DistanceType[branching_]; + for (int i=0; ichilds[i]->pivot, veclen_); + + int j=0; + while (domain_distances[j]j; --k) { + domain_distances[k] = domain_distances[k-1]; + sort_indices[k] = sort_indices[k-1]; + } + domain_distances[j] = dist; + sort_indices[j] = i; + } + delete[] domain_distances; + } + + /** + * Method that computes the squared distance from the query point q + * from inside region with center c to the border between this + * region and the region with center p + */ + DistanceType getDistanceToBorder(DistanceType* p, DistanceType* c, DistanceType* q) + { + DistanceType sum = 0; + DistanceType sum2 = 0; + + for (int i=0; ivariance*root->size; + + while (clusterCount::max)(); + int splitIndex = -1; + + for (int i=0; ichilds != NULL) { + + DistanceType variance = meanVariance - clusters[i]->variance*clusters[i]->size; + + for (int j=0; jchilds[j]->variance*clusters[i]->childs[j]->size; + } + if (variance clusters_length) break; + + meanVariance = minVariance; + + // split node + KMeansNodePtr toSplit = clusters[splitIndex]; + clusters[splitIndex] = toSplit->childs[0]; + for (int i=1; ichilds[i]; + } + } + + varianceValue = meanVariance/root->size; + return clusterCount; + } + +private: + /** The branching factor used in the hierarchical k-means clustering */ + int branching_; + + /** Maximum number of iterations to use when performing k-means clustering */ + int iterations_; + + /** Algorithm for choosing the cluster centers */ + flann_centers_init_t centers_init_; + + /** + * Cluster border index. This is used in the tree search phase when determining + * the closest cluster to explore next. A zero value takes into account only + * the cluster centres, a value greater then zero also take into account the size + * of the cluster. + */ + float cb_index_; + + /** + * The dataset used by this index + */ + const Matrix dataset_; + + /** Index parameters */ + IndexParams index_params_; + + /** + * Number of features in the dataset. + */ + size_t size_; + + /** + * Length of each feature. + */ + size_t veclen_; + + /** + * The root node in the tree. + */ + KMeansNodePtr root_; + + /** + * Array of indices to vectors in the dataset. + */ + int* indices_; + + /** + * The distance + */ + Distance distance_; + + /** + * Pooled memory allocator. + */ + PooledAllocator pool_; + + /** + * Memory occupied by the index. + */ + int memoryCounter_; +}; + +} + +#endif //OPENCV_FLANN_KMEANS_INDEX_H_ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/linear_index.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/linear_index.h new file mode 100644 index 0000000..5aa7a5c --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/linear_index.h @@ -0,0 +1,132 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_LINEAR_INDEX_H_ +#define OPENCV_FLANN_LINEAR_INDEX_H_ + +#include "general.h" +#include "nn_index.h" + +namespace cvflann +{ + +struct LinearIndexParams : public IndexParams +{ + LinearIndexParams() + { + (* this)["algorithm"] = FLANN_INDEX_LINEAR; + } +}; + +template +class LinearIndex : public NNIndex +{ +public: + + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + + LinearIndex(const Matrix& inputData, const IndexParams& params = LinearIndexParams(), + Distance d = Distance()) : + dataset_(inputData), index_params_(params), distance_(d) + { + } + + LinearIndex(const LinearIndex&); + LinearIndex& operator=(const LinearIndex&); + + flann_algorithm_t getType() const + { + return FLANN_INDEX_LINEAR; + } + + + size_t size() const + { + return dataset_.rows; + } + + size_t veclen() const + { + return dataset_.cols; + } + + + int usedMemory() const + { + return 0; + } + + void buildIndex() + { + /* nothing to do here for linear search */ + } + + void saveIndex(FILE*) + { + /* nothing to do here for linear search */ + } + + + void loadIndex(FILE*) + { + /* nothing to do here for linear search */ + + index_params_["algorithm"] = getType(); + } + + void findNeighbors(ResultSet& resultSet, const ElementType* vec, const SearchParams& /*searchParams*/) + { + ElementType* data = dataset_.data; + for (size_t i = 0; i < dataset_.rows; ++i, data += dataset_.cols) { + DistanceType dist = distance_(data, vec, dataset_.cols); + resultSet.addPoint(dist, (int)i); + } + } + + IndexParams getParameters() const + { + return index_params_; + } + +private: + /** The dataset */ + const Matrix dataset_; + /** Index parameters */ + IndexParams index_params_; + /** Index distance */ + Distance distance_; + +}; + +} + +#endif // OPENCV_FLANN_LINEAR_INDEX_H_ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/logger.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/logger.h new file mode 100644 index 0000000..24f3fb6 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/logger.h @@ -0,0 +1,130 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. 
+ * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_LOGGER_H +#define OPENCV_FLANN_LOGGER_H + +#include +#include + +#include "defines.h" + + +namespace cvflann +{ + +class Logger +{ + Logger() : stream(stdout), logLevel(FLANN_LOG_WARN) {} + + ~Logger() + { + if ((stream!=NULL)&&(stream!=stdout)) { + fclose(stream); + } + } + + static Logger& instance() + { + static Logger logger; + return logger; + } + + void _setDestination(const char* name) + { + if (name==NULL) { + stream = stdout; + } + else { + stream = fopen(name,"w"); + if (stream == NULL) { + stream = stdout; + } + } + } + + int _log(int level, const char* fmt, va_list arglist) + { + if (level > logLevel ) return -1; + int ret = vfprintf(stream, fmt, arglist); + return ret; + } + +public: + /** + * Sets the logging level. All messages with lower priority will be ignored. + * @param level Logging level + */ + static void setLevel(int level) { instance().logLevel = level; } + + /** + * Sets the logging destination + * @param name Filename or NULL for console + */ + static void setDestination(const char* name) { instance()._setDestination(name); } + + /** + * Print log message + * @param level Log level + * @param fmt Message format + * @return + */ + static int log(int level, const char* fmt, ...) + { + va_list arglist; + va_start(arglist, fmt); + int ret = instance()._log(level,fmt,arglist); + va_end(arglist); + return ret; + } + +#define LOG_METHOD(NAME,LEVEL) \ + static int NAME(const char* fmt, ...) 
\ + { \ + va_list ap; \ + va_start(ap, fmt); \ + int ret = instance()._log(LEVEL, fmt, ap); \ + va_end(ap); \ + return ret; \ + } + + LOG_METHOD(fatal, FLANN_LOG_FATAL) + LOG_METHOD(error, FLANN_LOG_ERROR) + LOG_METHOD(warn, FLANN_LOG_WARN) + LOG_METHOD(info, FLANN_LOG_INFO) + +private: + FILE* stream; + int logLevel; +}; + +} + +#endif //OPENCV_FLANN_LOGGER_H diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/lsh_index.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/lsh_index.h new file mode 100644 index 0000000..4d4670e --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/lsh_index.h @@ -0,0 +1,392 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +/*********************************************************************** + * Author: Vincent Rabaud + *************************************************************************/ + +#ifndef OPENCV_FLANN_LSH_INDEX_H_ +#define OPENCV_FLANN_LSH_INDEX_H_ + +#include +#include +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "lsh_table.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" + +namespace cvflann +{ + +struct LshIndexParams : public IndexParams +{ + LshIndexParams(unsigned int table_number = 12, unsigned int key_size = 20, unsigned int multi_probe_level = 2) + { + (* this)["algorithm"] = FLANN_INDEX_LSH; + // The number of hash tables to use + (*this)["table_number"] = table_number; + // The length of the key in the hash tables + (*this)["key_size"] = key_size; + // Number of levels to use in multi-probe (0 for standard LSH) + (*this)["multi_probe_level"] = multi_probe_level; + } +}; + +/** + * Randomized kd-tree index + * + * Contains the k-d trees and other information for indexing a set of points + * for nearest-neighbor matching. 
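+ * (The class below is the locality-sensitive hashing index: features are hashed into table_number_ tables and neighbouring buckets are probed via XOR masks, rather than k-d trees.)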
+ */ +template +class LshIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + /** Constructor + * @param input_data dataset with the input features + * @param params parameters passed to the LSH algorithm + * @param d the distance used + */ + LshIndex(const Matrix& input_data, const IndexParams& params = LshIndexParams(), + Distance d = Distance()) : + dataset_(input_data), index_params_(params), distance_(d) + { + // cv::flann::IndexParams sets integer params as 'int', so it is used with get_param + // in place of 'unsigned int' + table_number_ = (unsigned int)get_param(index_params_,"table_number",12); + key_size_ = (unsigned int)get_param(index_params_,"key_size",20); + multi_probe_level_ = (unsigned int)get_param(index_params_,"multi_probe_level",2); + + feature_size_ = (unsigned)dataset_.cols; + fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_); + } + + + LshIndex(const LshIndex&); + LshIndex& operator=(const LshIndex&); + + /** + * Builds the index + */ + void buildIndex() + { + tables_.resize(table_number_); + for (unsigned int i = 0; i < table_number_; ++i) { + lsh::LshTable& table = tables_[i]; + table = lsh::LshTable(feature_size_, key_size_); + + // Add the features to the table + table.add(dataset_); + } + } + + flann_algorithm_t getType() const + { + return FLANN_INDEX_LSH; + } + + + void saveIndex(FILE* stream) + { + save_value(stream,table_number_); + save_value(stream,key_size_); + save_value(stream,multi_probe_level_); + save_value(stream, dataset_); + } + + void loadIndex(FILE* stream) + { + load_value(stream, table_number_); + load_value(stream, key_size_); + load_value(stream, multi_probe_level_); + load_value(stream, dataset_); + // Building the index is so fast we can afford not storing it + buildIndex(); + + index_params_["algorithm"] = getType(); + index_params_["table_number"] = table_number_; + index_params_["key_size"] = key_size_; + index_params_["multi_probe_level"] = multi_probe_level_; + } + + /** + * Returns size of index. + */ + size_t size() const + { + return dataset_.rows; + } + + /** + * Returns the length of an index feature. 
+ */ + size_t veclen() const + { + return feature_size_; + } + + /** + * Computes the index memory usage + * Returns: memory used by the index + */ + int usedMemory() const + { + return (int)(dataset_.rows * sizeof(int)); + } + + + IndexParams getParameters() const + { + return index_params_; + } + + /** + * \brief Perform k-nearest neighbor search + * \param[in] queries The query points for which to find the nearest neighbors + * \param[out] indices The indices of the nearest neighbors found + * \param[out] dists Distances to the nearest neighbors found + * \param[in] knn Number of nearest neighbors to return + * \param[in] params Search parameters + */ + virtual void knnSearch(const Matrix& queries, Matrix& indices, Matrix& dists, int knn, const SearchParams& params) + { + assert(queries.cols == veclen()); + assert(indices.rows >= queries.rows); + assert(dists.rows >= queries.rows); + assert(int(indices.cols) >= knn); + assert(int(dists.cols) >= knn); + + + KNNUniqueResultSet resultSet(knn); + for (size_t i = 0; i < queries.rows; i++) { + resultSet.clear(); + std::fill_n(indices[i], knn, -1); + std::fill_n(dists[i], knn, std::numeric_limits::max()); + findNeighbors(resultSet, queries[i], params); + if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices[i], dists[i], knn); + else resultSet.copy(indices[i], dists[i], knn); + } + } + + + /** + * Find set of nearest neighbors to vec. Their indices are stored inside + * the result object. + * + * Params: + * result = the result object in which the indices of the nearest-neighbors are stored + * vec = the vector for which to search the nearest neighbors + * maxCheck = the maximum number of restarts (in a best-bin-first manner) + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& /*searchParams*/) + { + getNeighbors(vec, result); + } + +private: + /** Defines the comparator on score and index + */ + typedef std::pair ScoreIndexPair; + struct SortScoreIndexPairOnSecond + { + bool operator()(const ScoreIndexPair& left, const ScoreIndexPair& right) const + { + return left.second < right.second; + } + }; + + /** Fills the different xor masks to use when getting the neighbors in multi-probe LSH + * @param key the key we build neighbors from + * @param lowest_index the lowest index of the bit set + * @param level the multi-probe level we are at + * @param xor_masks all the xor mask + */ + void fill_xor_mask(lsh::BucketKey key, int lowest_index, unsigned int level, + std::vector& xor_masks) + { + xor_masks.push_back(key); + if (level == 0) return; + for (int index = lowest_index - 1; index >= 0; --index) { + // Create a new key + lsh::BucketKey new_key = key | (1 << index); + fill_xor_mask(new_key, index, level - 1, xor_masks); + } + } + + /** Performs the approximate nearest-neighbor search. 
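+ * For each table, the query key and every XOR-perturbed key select one bucket whose members are compared against the query; when do_k is set, a bounded heap keeps the k_nn best candidates.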
+ * @param vec the feature to analyze + * @param do_radius flag indicating if we check the radius too + * @param radius the radius if it is a radius search + * @param do_k flag indicating if we limit the number of nn + * @param k_nn the number of nearest neighbors + * @param checked_average used for debugging + */ + void getNeighbors(const ElementType* vec, bool /*do_radius*/, float radius, bool do_k, unsigned int k_nn, + float& /*checked_average*/) + { + static std::vector score_index_heap; + + if (do_k) { + unsigned int worst_score = std::numeric_limits::max(); + typename std::vector >::const_iterator table = tables_.begin(); + typename std::vector >::const_iterator table_end = tables_.end(); + for (; table != table_end; ++table) { + size_t key = table->getKey(vec); + std::vector::const_iterator xor_mask = xor_masks_.begin(); + std::vector::const_iterator xor_mask_end = xor_masks_.end(); + for (; xor_mask != xor_mask_end; ++xor_mask) { + size_t sub_key = key ^ (*xor_mask); + const lsh::Bucket* bucket = table->getBucketFromKey(sub_key); + if (bucket == 0) continue; + + // Go over each descriptor index + std::vector::const_iterator training_index = bucket->begin(); + std::vector::const_iterator last_training_index = bucket->end(); + DistanceType hamming_distance; + + // Process the rest of the candidates + for (; training_index < last_training_index; ++training_index) { + hamming_distance = distance_(vec, dataset_[*training_index], dataset_.cols); + + if (hamming_distance < worst_score) { + // Insert the new element + score_index_heap.push_back(ScoreIndexPair(hamming_distance, training_index)); + std::push_heap(score_index_heap.begin(), score_index_heap.end()); + + if (score_index_heap.size() > (unsigned int)k_nn) { + // Remove the highest distance value as we have too many elements + std::pop_heap(score_index_heap.begin(), score_index_heap.end()); + score_index_heap.pop_back(); + // Keep track of the worst score + worst_score = score_index_heap.front().first; + } + } + } + } + } + } + else { + typename std::vector >::const_iterator table = tables_.begin(); + typename std::vector >::const_iterator table_end = tables_.end(); + for (; table != table_end; ++table) { + size_t key = table->getKey(vec); + std::vector::const_iterator xor_mask = xor_masks_.begin(); + std::vector::const_iterator xor_mask_end = xor_masks_.end(); + for (; xor_mask != xor_mask_end; ++xor_mask) { + size_t sub_key = key ^ (*xor_mask); + const lsh::Bucket* bucket = table->getBucketFromKey(sub_key); + if (bucket == 0) continue; + + // Go over each descriptor index + std::vector::const_iterator training_index = bucket->begin(); + std::vector::const_iterator last_training_index = bucket->end(); + DistanceType hamming_distance; + + // Process the rest of the candidates + for (; training_index < last_training_index; ++training_index) { + // Compute the Hamming distance + hamming_distance = distance_(vec, dataset_[*training_index], dataset_.cols); + if (hamming_distance < radius) score_index_heap.push_back(ScoreIndexPair(hamming_distance, training_index)); + } + } + } + } + } + + /** Performs the approximate nearest-neighbor search. 
+ * This is a slower version than the above as it uses the ResultSet + * @param vec the feature to analyze + */ + void getNeighbors(const ElementType* vec, ResultSet& result) + { + typename std::vector >::const_iterator table = tables_.begin(); + typename std::vector >::const_iterator table_end = tables_.end(); + for (; table != table_end; ++table) { + size_t key = table->getKey(vec); + std::vector::const_iterator xor_mask = xor_masks_.begin(); + std::vector::const_iterator xor_mask_end = xor_masks_.end(); + for (; xor_mask != xor_mask_end; ++xor_mask) { + size_t sub_key = key ^ (*xor_mask); + const lsh::Bucket* bucket = table->getBucketFromKey((lsh::BucketKey)sub_key); + if (bucket == 0) continue; + + // Go over each descriptor index + std::vector::const_iterator training_index = bucket->begin(); + std::vector::const_iterator last_training_index = bucket->end(); + DistanceType hamming_distance; + + // Process the rest of the candidates + for (; training_index < last_training_index; ++training_index) { + // Compute the Hamming distance + hamming_distance = distance_(vec, dataset_[*training_index], (int)dataset_.cols); + result.addPoint(hamming_distance, *training_index); + } + } + } + } + + /** The different hash tables */ + std::vector > tables_; + + /** The data the LSH tables where built from */ + Matrix dataset_; + + /** The size of the features (as ElementType[]) */ + unsigned int feature_size_; + + IndexParams index_params_; + + /** table number */ + unsigned int table_number_; + /** key size */ + unsigned int key_size_; + /** How far should we look for neighbors in multi-probe LSH */ + unsigned int multi_probe_level_; + + /** The XOR masks to apply to a key to get the neighboring buckets */ + std::vector xor_masks_; + + Distance distance_; +}; +} + +#endif //OPENCV_FLANN_LSH_INDEX_H_ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/lsh_table.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/lsh_table.h new file mode 100644 index 0000000..b0f3223 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/lsh_table.h @@ -0,0 +1,492 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +/*********************************************************************** + * Author: Vincent Rabaud + *************************************************************************/ + +#ifndef OPENCV_FLANN_LSH_TABLE_H_ +#define OPENCV_FLANN_LSH_TABLE_H_ + +#include +#include +#include +#include +// TODO as soon as we use C++0x, use the code in USE_UNORDERED_MAP +#ifdef __GXX_EXPERIMENTAL_CXX0X__ +# define USE_UNORDERED_MAP 1 +#else +# define USE_UNORDERED_MAP 0 +#endif +#if USE_UNORDERED_MAP +#include +#else +#include +#endif +#include +#include + +#include "dynamic_bitset.h" +#include "matrix.h" + +namespace cvflann +{ + +namespace lsh +{ + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** What is stored in an LSH bucket + */ +typedef uint32_t FeatureIndex; +/** The id from which we can get a bucket back in an LSH table + */ +typedef unsigned int BucketKey; + +/** A bucket in an LSH table + */ +typedef std::vector Bucket; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** POD for stats about an LSH table + */ +struct LshStats +{ + std::vector bucket_sizes_; + size_t n_buckets_; + size_t bucket_size_mean_; + size_t bucket_size_median_; + size_t bucket_size_min_; + size_t bucket_size_max_; + size_t bucket_size_std_dev; + /** Each contained vector contains three value: beginning/end for interval, number of elements in the bin + */ + std::vector > size_histogram_; +}; + +/** Overload the << operator for LshStats + * @param out the streams + * @param stats the stats to display + * @return the streams + */ +inline std::ostream& operator <<(std::ostream& out, const LshStats& stats) +{ + int w = 20; + out << "Lsh Table Stats:\n" << std::setw(w) << std::setiosflags(std::ios::right) << "N buckets : " + << stats.n_buckets_ << "\n" << std::setw(w) << std::setiosflags(std::ios::right) << "mean size : " + << std::setiosflags(std::ios::left) << stats.bucket_size_mean_ << "\n" << std::setw(w) + << std::setiosflags(std::ios::right) << "median size : " << stats.bucket_size_median_ << "\n" << std::setw(w) + << std::setiosflags(std::ios::right) << "min size : " << std::setiosflags(std::ios::left) + << stats.bucket_size_min_ << "\n" << std::setw(w) << std::setiosflags(std::ios::right) << "max size : " + << std::setiosflags(std::ios::left) << stats.bucket_size_max_; + + // Display the histogram + out << std::endl << std::setw(w) << std::setiosflags(std::ios::right) << "histogram : " + << std::setiosflags(std::ios::left); + for (std::vector >::const_iterator iterator = stats.size_histogram_.begin(), end = + stats.size_histogram_.end(); iterator != end; ++iterator) out << (*iterator)[0] << "-" << (*iterator)[1] << ": " << (*iterator)[2] << ", "; + + return out; +} + + 
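The LshStats fields above are plain summary statistics over bucket occupancy. The following minimal, self-contained sketch is not part of the patch; the helper name and the map type are illustrative assumptions, shaped like the BucketsSpace typedef used further below, and it only shows how the count/mean/min/max fields could be derived:

    #include <cstdint>
    #include <limits>
    #include <map>
    #include <vector>

    // Hypothetical helper: summarize bucket occupancy for one LSH table.
    struct BucketSummary { size_t n_buckets, size_mean, size_min, size_max; };

    inline BucketSummary summarizeBuckets(const std::map<unsigned int, std::vector<uint32_t> >& buckets)
    {
        BucketSummary s;
        s.n_buckets = buckets.size();
        s.size_mean = 0;
        s.size_min = std::numeric_limits<size_t>::max();
        s.size_max = 0;
        size_t total = 0;
        for (std::map<unsigned int, std::vector<uint32_t> >::const_iterator it = buckets.begin();
             it != buckets.end(); ++it) {
            const size_t n = it->second.size();   // number of features stored in this bucket
            total += n;
            if (n < s.size_min) s.size_min = n;
            if (n > s.size_max) s.size_max = n;
        }
        if (s.n_buckets == 0) s.size_min = 0;     // keep the empty-table case well defined
        else s.size_mean = total / s.n_buckets;   // integer mean, matching the size_t fields
        return s;
    }
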
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Lsh hash table. As its key is a sub-feature, and as usually + * the size of it is pretty small, we keep it as a continuous memory array. + * The value is an index in the corpus of features (we keep it as an unsigned + * int for pure memory reasons, it could be a size_t) + */ +template +class LshTable +{ +public: + /** A container of all the feature indices. Optimized for space + */ +#if USE_UNORDERED_MAP + typedef std::unordered_map BucketsSpace; +#else + typedef std::map BucketsSpace; +#endif + + /** A container of all the feature indices. Optimized for speed + */ + typedef std::vector BucketsSpeed; + + /** Default constructor + */ + LshTable() + { + } + + /** Default constructor + * Create the mask and allocate the memory + * @param feature_size is the size of the feature (considered as a ElementType[]) + * @param key_size is the number of bits that are turned on in the feature + */ + LshTable(unsigned int /*feature_size*/, unsigned int /*key_size*/) + { + std::cerr << "LSH is not implemented for that type" << std::endl; + assert(0); + } + + /** Add a feature to the table + * @param value the value to store for that feature + * @param feature the feature itself + */ + void add(unsigned int value, const ElementType* feature) + { + // Add the value to the corresponding bucket + BucketKey key = (lsh::BucketKey)getKey(feature); + + switch (speed_level_) { + case kArray: + // That means we get the buckets from an array + buckets_speed_[key].push_back(value); + break; + case kBitsetHash: + // That means we can check the bitset for the presence of a key + key_bitset_.set(key); + buckets_space_[key].push_back(value); + break; + case kHash: + { + // That means we have to check for the hash table for the presence of a key + buckets_space_[key].push_back(value); + break; + } + } + } + + /** Add a set of features to the table + * @param dataset the values to store + */ + void add(Matrix dataset) + { +#if USE_UNORDERED_MAP + buckets_space_.rehash((buckets_space_.size() + dataset.rows) * 1.2); +#endif + // Add the features to the table + for (unsigned int i = 0; i < dataset.rows; ++i) add(i, dataset[i]); + // Now that the table is full, optimize it for speed/space + optimize(); + } + + /** Get a bucket given the key + * @param key + * @return + */ + inline const Bucket* getBucketFromKey(BucketKey key) const + { + // Generate other buckets + switch (speed_level_) { + case kArray: + // That means we get the buckets from an array + return &buckets_speed_[key]; + break; + case kBitsetHash: + // That means we can check the bitset for the presence of a key + if (key_bitset_.test(key)) return &buckets_space_.find(key)->second; + else return 0; + break; + case kHash: + { + // That means we have to check for the hash table for the presence of a key + BucketsSpace::const_iterator bucket_it, bucket_end = buckets_space_.end(); + bucket_it = buckets_space_.find(key); + // Stop here if that bucket does not exist + if (bucket_it == bucket_end) return 0; + else return &bucket_it->second; + break; + } + } + return 0; + } + + /** Compute the sub-signature of a feature + */ + size_t getKey(const ElementType* /*feature*/) const + { + std::cerr << "LSH is not implemented for that type" << std::endl; + assert(0); + return 1; + } + + /** Get statistics about the table + * @return + */ + LshStats getStats() const; + +private: + /** defines the speed fo the implementation + * kArray uses a vector 
for storing data + * kBitsetHash uses a hash map but checks for the validity of a key with a bitset + * kHash uses a hash map only + */ + enum SpeedLevel + { + kArray, kBitsetHash, kHash + }; + + /** Initialize some variables + */ + void initialize(size_t key_size) + { + const size_t key_size_lower_bound = 1; + //a value (size_t(1) << key_size) must fit the size_t type so key_size has to be strictly less than size of size_t + const size_t key_size_upper_bound = std::min(sizeof(BucketKey) * CHAR_BIT + 1, sizeof(size_t) * CHAR_BIT); + if (key_size < key_size_lower_bound || key_size >= key_size_upper_bound) + { + std::stringstream errorMessage; + errorMessage << "Invalid key_size (=" << key_size << "). Valid values for your system are " << key_size_lower_bound << " <= key_size < " << key_size_upper_bound << "."; + CV_Error(CV_StsBadArg, errorMessage.str()); + } + + speed_level_ = kHash; + key_size_ = (unsigned)key_size; + } + + /** Optimize the table for speed/space + */ + void optimize() + { + // If we are already using the fast storage, no need to do anything + if (speed_level_ == kArray) return; + + // Use an array if it will be more than half full + if (buckets_space_.size() > ((size_t(1) << key_size_) / 2)) { + speed_level_ = kArray; + // Fill the array version of it + buckets_speed_.resize(size_t(1) << key_size_); + for (BucketsSpace::const_iterator key_bucket = buckets_space_.begin(); key_bucket != buckets_space_.end(); ++key_bucket) buckets_speed_[key_bucket->first] = key_bucket->second; + + // Empty the hash table + buckets_space_.clear(); + return; + } + + // If the bitset is going to use less than 10% of the RAM of the hash map (at least 1 size_t for the key and two + // for the vector) or less than 512MB (key_size_ <= 30) + if (((std::max(buckets_space_.size(), buckets_speed_.size()) * CHAR_BIT * 3 * sizeof(BucketKey)) / 10 + >= (size_t(1) << key_size_)) || (key_size_ <= 32)) { + speed_level_ = kBitsetHash; + key_bitset_.resize(size_t(1) << key_size_); + key_bitset_.reset(); + // Try with the BucketsSpace + for (BucketsSpace::const_iterator key_bucket = buckets_space_.begin(); key_bucket != buckets_space_.end(); ++key_bucket) key_bitset_.set(key_bucket->first); + } + else { + speed_level_ = kHash; + key_bitset_.clear(); + } + } + + /** The vector of all the buckets if they are held for speed + */ + BucketsSpeed buckets_speed_; + + /** The hash table of all the buckets in case we cannot use the speed version + */ + BucketsSpace buckets_space_; + + /** What is used to store the data */ + SpeedLevel speed_level_; + + /** If the subkey is small enough, it will keep track of which subkeys are set through that bitset + * That is just a speedup so that we don't look in the hash table (which can be mush slower that checking a bitset) + */ + DynamicBitset key_bitset_; + + /** The size of the sub-signature in bits + */ + unsigned int key_size_; + + // Members only used for the unsigned char specialization + /** The mask to apply to a feature to get the hash key + * Only used in the unsigned char case + */ + std::vector mask_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// Specialization for unsigned char + +template<> +inline LshTable::LshTable(unsigned int feature_size, unsigned int subsignature_size) +{ + initialize(subsignature_size); + // Allocate the mask + mask_ = std::vector((size_t)ceil((float)(feature_size * sizeof(char)) / (float)sizeof(size_t)), 0); + + // A bit brutal but fast to code + 
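+    // The block below builds the random sub-signature mask: every bit position
+    // of the feature (feature_size * CHAR_BIT of them) is listed, the list is
+    // shuffled, and the first key_size_ positions are switched on in mask_,
+    // which stores one bit per selected feature bit across an array of size_t
+    // blocks.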
+    std::vector<size_t> indices(feature_size * CHAR_BIT);
+    for (size_t i = 0; i < feature_size * CHAR_BIT; ++i) indices[i] = i;
+    std::random_shuffle(indices.begin(), indices.end());
+
+    // Generate a random set of order of subsignature_size_ bits
+    for (unsigned int i = 0; i < key_size_; ++i) {
+        size_t index = indices[i];
+
+        // Set that bit in the mask
+        size_t divisor = CHAR_BIT * sizeof(size_t);
+        size_t idx = index / divisor; //pick the right size_t index
+        mask_[idx] |= size_t(1) << (index % divisor); //use modulo to find the bit offset
+    }
+
+    // Set to 1 if you want to display the mask for debug
+#if 0
+    {
+        size_t bcount = 0;
+        BOOST_FOREACH(size_t mask_block, mask_){
+            out << std::setw(sizeof(size_t) * CHAR_BIT / 4) << std::setfill('0') << std::hex << mask_block
+                << std::endl;
+            bcount += __builtin_popcountll(mask_block);
+        }
+        out << "bit count : " << std::dec << bcount << std::endl;
+        out << "mask size : " << mask_.size() << std::endl;
+        return out;
+    }
+#endif
+}
+
+/** Return the Subsignature of a feature
+ * @param feature the feature to analyze
+ */
+template<>
+inline size_t LshTable<unsigned char>::getKey(const unsigned char* feature) const
+{
+    // no need to check if T is dividable by sizeof(size_t) like in the Hamming
+    // distance computation as we have a mask
+    const size_t* feature_block_ptr = reinterpret_cast<const size_t*> ((const void*)feature);
+
+    // Figure out the subsignature of the feature
+    // Given the feature ABCDEF, and the mask 001011, the output will be
+    // 000CEF
+    size_t subsignature = 0;
+    size_t bit_index = 1;
+
+    for (std::vector<size_t>::const_iterator pmask_block = mask_.begin(); pmask_block != mask_.end(); ++pmask_block) {
+        // get the mask and signature blocks
+        size_t feature_block = *feature_block_ptr;
+        size_t mask_block = *pmask_block;
+        while (mask_block) {
+            // Get the lowest set bit in the mask block
+            size_t lowest_bit = mask_block & (-(ptrdiff_t)mask_block);
+            // Add it to the current subsignature if necessary
+            subsignature += (feature_block & lowest_bit) ?
bit_index : 0; + // Reset the bit in the mask block + mask_block ^= lowest_bit; + // increment the bit index for the subsignature + bit_index <<= 1; + } + // Check the next feature block + ++feature_block_ptr; + } + return subsignature; +} + +template<> +inline LshStats LshTable::getStats() const +{ + LshStats stats; + stats.bucket_size_mean_ = 0; + if ((buckets_speed_.empty()) && (buckets_space_.empty())) { + stats.n_buckets_ = 0; + stats.bucket_size_median_ = 0; + stats.bucket_size_min_ = 0; + stats.bucket_size_max_ = 0; + return stats; + } + + if (!buckets_speed_.empty()) { + for (BucketsSpeed::const_iterator pbucket = buckets_speed_.begin(); pbucket != buckets_speed_.end(); ++pbucket) { + stats.bucket_sizes_.push_back((lsh::FeatureIndex)pbucket->size()); + stats.bucket_size_mean_ += pbucket->size(); + } + stats.bucket_size_mean_ /= buckets_speed_.size(); + stats.n_buckets_ = buckets_speed_.size(); + } + else { + for (BucketsSpace::const_iterator x = buckets_space_.begin(); x != buckets_space_.end(); ++x) { + stats.bucket_sizes_.push_back((lsh::FeatureIndex)x->second.size()); + stats.bucket_size_mean_ += x->second.size(); + } + stats.bucket_size_mean_ /= buckets_space_.size(); + stats.n_buckets_ = buckets_space_.size(); + } + + std::sort(stats.bucket_sizes_.begin(), stats.bucket_sizes_.end()); + + // BOOST_FOREACH(int size, stats.bucket_sizes_) + // std::cout << size << " "; + // std::cout << std::endl; + stats.bucket_size_median_ = stats.bucket_sizes_[stats.bucket_sizes_.size() / 2]; + stats.bucket_size_min_ = stats.bucket_sizes_.front(); + stats.bucket_size_max_ = stats.bucket_sizes_.back(); + + // TODO compute mean and std + /*float mean, stddev; + stats.bucket_size_mean_ = mean; + stats.bucket_size_std_dev = stddev;*/ + + // Include a histogram of the buckets + unsigned int bin_start = 0; + unsigned int bin_end = 20; + bool is_new_bin = true; + for (std::vector::iterator iterator = stats.bucket_sizes_.begin(), end = stats.bucket_sizes_.end(); iterator + != end; ) + if (*iterator < bin_end) { + if (is_new_bin) { + stats.size_histogram_.push_back(std::vector(3, 0)); + stats.size_histogram_.back()[0] = bin_start; + stats.size_histogram_.back()[1] = bin_end - 1; + is_new_bin = false; + } + ++stats.size_histogram_.back()[2]; + ++iterator; + } + else { + bin_start += 20; + bin_end += 20; + is_new_bin = true; + } + + return stats; +} + +// End the two namespaces +} +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +#endif /* OPENCV_FLANN_LSH_TABLE_H_ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/matrix.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/matrix.h new file mode 100644 index 0000000..51b6c63 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/matrix.h @@ -0,0 +1,116 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_DATASET_H_ +#define OPENCV_FLANN_DATASET_H_ + +#include + +#include "general.h" + +namespace cvflann +{ + +/** + * Class that implements a simple rectangular matrix stored in a memory buffer and + * provides convenient matrix-like access using the [] operators. + */ +template +class Matrix +{ +public: + typedef T type; + + size_t rows; + size_t cols; + size_t stride; + T* data; + + Matrix() : rows(0), cols(0), stride(0), data(NULL) + { + } + + Matrix(T* data_, size_t rows_, size_t cols_, size_t stride_ = 0) : + rows(rows_), cols(cols_), stride(stride_), data(data_) + { + if (stride==0) stride = cols; + } + + /** + * Convenience function for deallocating the storage data. + */ + FLANN_DEPRECATED void free() + { + fprintf(stderr, "The cvflann::Matrix::free() method is deprecated " + "and it does not do any memory deallocation any more. You are" + "responsible for deallocating the matrix memory (by doing" + "'delete[] matrix.data' for example)"); + } + + /** + * Operator that return a (pointer to a) row of the data. + */ + T* operator[](size_t index) const + { + return data+index*stride; + } +}; + + +class UntypedMatrix +{ +public: + size_t rows; + size_t cols; + void* data; + flann_datatype_t type; + + UntypedMatrix(void* data_, long rows_, long cols_) : + rows(rows_), cols(cols_), data(data_) + { + } + + ~UntypedMatrix() + { + } + + + template + Matrix as() + { + return Matrix((T*)data, rows, cols); + } +}; + + + +} + +#endif //OPENCV_FLANN_DATASET_H_ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/miniflann.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/miniflann.hpp new file mode 100644 index 0000000..18c9081 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/miniflann.hpp @@ -0,0 +1,162 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef _OPENCV_MINIFLANN_HPP_ +#define _OPENCV_MINIFLANN_HPP_ + +#ifdef __cplusplus + +#include "opencv2/core/core.hpp" +#include "opencv2/flann/defines.h" + +namespace cv +{ + +namespace flann +{ + +struct CV_EXPORTS IndexParams +{ + IndexParams(); + ~IndexParams(); + + std::string getString(const std::string& key, const std::string& defaultVal=std::string()) const; + int getInt(const std::string& key, int defaultVal=-1) const; + double getDouble(const std::string& key, double defaultVal=-1) const; + + void setString(const std::string& key, const std::string& value); + void setInt(const std::string& key, int value); + void setDouble(const std::string& key, double value); + void setFloat(const std::string& key, float value); + void setBool(const std::string& key, bool value); + void setAlgorithm(int value); + + void getAll(std::vector& names, + std::vector& types, + std::vector& strValues, + std::vector& numValues) const; + + void* params; +}; + +struct CV_EXPORTS KDTreeIndexParams : public IndexParams +{ + KDTreeIndexParams(int trees=4); +}; + +struct CV_EXPORTS LinearIndexParams : public IndexParams +{ + LinearIndexParams(); +}; + +struct CV_EXPORTS CompositeIndexParams : public IndexParams +{ + CompositeIndexParams(int trees = 4, int branching = 32, int iterations = 11, + cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2 ); +}; + +struct CV_EXPORTS AutotunedIndexParams : public IndexParams +{ + AutotunedIndexParams(float target_precision = 0.8, float build_weight = 0.01, + float memory_weight = 0, float sample_fraction = 0.1); +}; + +struct CV_EXPORTS HierarchicalClusteringIndexParams : public IndexParams +{ + HierarchicalClusteringIndexParams(int branching = 32, + cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, int trees = 4, int leaf_size = 100 ); +}; + +struct CV_EXPORTS KMeansIndexParams : public IndexParams +{ + KMeansIndexParams(int branching = 32, int iterations = 11, + cvflann::flann_centers_init_t centers_init = 
cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2 ); +}; + +struct CV_EXPORTS LshIndexParams : public IndexParams +{ + LshIndexParams(int table_number, int key_size, int multi_probe_level); +}; + +struct CV_EXPORTS SavedIndexParams : public IndexParams +{ + SavedIndexParams(const std::string& filename); +}; + +struct CV_EXPORTS SearchParams : public IndexParams +{ + SearchParams( int checks = 32, float eps = 0, bool sorted = true ); +}; + +class CV_EXPORTS_W Index +{ +public: + CV_WRAP Index(); + CV_WRAP Index(InputArray features, const IndexParams& params, cvflann::flann_distance_t distType=cvflann::FLANN_DIST_L2); + virtual ~Index(); + + CV_WRAP virtual void build(InputArray features, const IndexParams& params, cvflann::flann_distance_t distType=cvflann::FLANN_DIST_L2); + CV_WRAP virtual void knnSearch(InputArray query, OutputArray indices, + OutputArray dists, int knn, const SearchParams& params=SearchParams()); + + CV_WRAP virtual int radiusSearch(InputArray query, OutputArray indices, + OutputArray dists, double radius, int maxResults, + const SearchParams& params=SearchParams()); + + CV_WRAP virtual void save(const std::string& filename) const; + CV_WRAP virtual bool load(InputArray features, const std::string& filename); + CV_WRAP virtual void release(); + CV_WRAP cvflann::flann_distance_t getDistance() const; + CV_WRAP cvflann::flann_algorithm_t getAlgorithm() const; + +protected: + cvflann::flann_distance_t distType; + cvflann::flann_algorithm_t algo; + int featureType; + void* index; +}; + +} } // namespace cv::flann + +#endif // __cplusplus + +#endif diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/nn_index.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/nn_index.h new file mode 100644 index 0000000..d14e83a --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/nn_index.h @@ -0,0 +1,179 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + +#ifndef OPENCV_FLANN_NNINDEX_H +#define OPENCV_FLANN_NNINDEX_H + +#include + +#include "general.h" +#include "matrix.h" +#include "result_set.h" +#include "params.h" + +namespace cvflann +{ + +/** + * Nearest-neighbour index base class + */ +template +class NNIndex +{ + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + +public: + + virtual ~NNIndex() {} + + /** + * \brief Builds the index + */ + virtual void buildIndex() = 0; + + /** + * \brief Perform k-nearest neighbor search + * \param[in] queries The query points for which to find the nearest neighbors + * \param[out] indices The indices of the nearest neighbors found + * \param[out] dists Distances to the nearest neighbors found + * \param[in] knn Number of nearest neighbors to return + * \param[in] params Search parameters + */ + virtual void knnSearch(const Matrix& queries, Matrix& indices, Matrix& dists, int knn, const SearchParams& params) + { + assert(queries.cols == veclen()); + assert(indices.rows >= queries.rows); + assert(dists.rows >= queries.rows); + assert(int(indices.cols) >= knn); + assert(int(dists.cols) >= knn); + +#if 0 + KNNResultSet resultSet(knn); + for (size_t i = 0; i < queries.rows; i++) { + resultSet.init(indices[i], dists[i]); + findNeighbors(resultSet, queries[i], params); + } +#else + KNNUniqueResultSet resultSet(knn); + for (size_t i = 0; i < queries.rows; i++) { + resultSet.clear(); + findNeighbors(resultSet, queries[i], params); + if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices[i], dists[i], knn); + else resultSet.copy(indices[i], dists[i], knn); + } +#endif + } + + /** + * \brief Perform radius search + * \param[in] query The query point + * \param[out] indices The indinces of the neighbors found within the given radius + * \param[out] dists The distances to the nearest neighbors found + * \param[in] radius The radius used for search + * \param[in] params Search parameters + * \returns Number of neighbors found + */ + virtual int radiusSearch(const Matrix& query, Matrix& indices, Matrix& dists, float radius, const SearchParams& params) + { + if (query.rows != 1) { + fprintf(stderr, "I can only search one feature at a time for range search\n"); + return -1; + } + assert(query.cols == veclen()); + assert(indices.cols == dists.cols); + + int n = 0; + int* indices_ptr = NULL; + DistanceType* dists_ptr = NULL; + if (indices.cols > 0) { + n = (int)indices.cols; + indices_ptr = indices[0]; + dists_ptr = dists[0]; + } + + RadiusUniqueResultSet resultSet((DistanceType)radius); + resultSet.clear(); + findNeighbors(resultSet, query[0], params); + if (n>0) { + if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices_ptr, dists_ptr, n); + else resultSet.copy(indices_ptr, dists_ptr, n); + } + + return (int)resultSet.size(); + } + + /** + * \brief Saves the index to a stream + * \param stream The stream to save the index to + */ + virtual void saveIndex(FILE* stream) = 0; + + /** + * \brief Loads the index from a stream + * \param stream The stream from which the index is loaded + */ + virtual void loadIndex(FILE* stream) = 0; + + /** + * \returns number of features in this index. + */ + virtual size_t size() const = 0; + + /** + * \returns The dimensionality of the features in this index. + */ + virtual size_t veclen() const = 0; + + /** + * \returns The amount of memory (in bytes) used by the index. 
+ */ + virtual int usedMemory() const = 0; + + /** + * \returns The index type (kdtree, kmeans,...) + */ + virtual flann_algorithm_t getType() const = 0; + + /** + * \returns The index parameters + */ + virtual IndexParams getParameters() const = 0; + + + /** + * \brief Method that searches for nearest-neighbours + */ + virtual void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) = 0; +}; + +} + +#endif //OPENCV_FLANN_NNINDEX_H diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/object_factory.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/object_factory.h new file mode 100644 index 0000000..7f971c5 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/object_factory.h @@ -0,0 +1,91 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + +#ifndef OPENCV_FLANN_OBJECT_FACTORY_H_ +#define OPENCV_FLANN_OBJECT_FACTORY_H_ + +#include + +namespace cvflann +{ + +class CreatorNotFound +{ +}; + +template +class ObjectFactory +{ + typedef ObjectFactory ThisClass; + typedef std::map ObjectRegistry; + + // singleton class, private constructor + ObjectFactory() {} + +public: + + bool subscribe(UniqueIdType id, ObjectCreator creator) + { + if (object_registry.find(id) != object_registry.end()) return false; + + object_registry[id] = creator; + return true; + } + + bool unregister(UniqueIdType id) + { + return object_registry.erase(id) == 1; + } + + ObjectCreator create(UniqueIdType id) + { + typename ObjectRegistry::const_iterator iter = object_registry.find(id); + + if (iter == object_registry.end()) { + throw CreatorNotFound(); + } + + return iter->second; + } + + static ThisClass& instance() + { + static ThisClass the_factory; + return the_factory; + } +private: + ObjectRegistry object_registry; +}; + +} + +#endif /* OPENCV_FLANN_OBJECT_FACTORY_H_ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/params.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/params.h new file mode 100644 index 0000000..fc2a906 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/params.h @@ -0,0 +1,96 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + + +#ifndef OPENCV_FLANN_PARAMS_H_ +#define OPENCV_FLANN_PARAMS_H_ + +#include "any.h" +#include "general.h" +#include +#include + + +namespace cvflann +{ + +typedef std::map IndexParams; + +struct SearchParams : public IndexParams +{ + SearchParams(int checks = 32, float eps = 0, bool sorted = true ) + { + // how many leafs to visit when searching for neighbours (-1 for unlimited) + (*this)["checks"] = checks; + // search for eps-approximate neighbours (default: 0) + (*this)["eps"] = eps; + // only for radius search, require neighbours sorted by distance (default: true) + (*this)["sorted"] = sorted; + } +}; + + +template +T get_param(const IndexParams& params, std::string name, const T& default_value) +{ + IndexParams::const_iterator it = params.find(name); + if (it != params.end()) { + return it->second.cast(); + } + else { + return default_value; + } +} + +template +T get_param(const IndexParams& params, std::string name) +{ + IndexParams::const_iterator it = params.find(name); + if (it != params.end()) { + return it->second.cast(); + } + else { + throw FLANNException(std::string("Missing parameter '")+name+std::string("' in the parameters given")); + } +} + +inline void print_params(const IndexParams& params) +{ + IndexParams::const_iterator it; + + for(it=params.begin(); it!=params.end(); ++it) { + std::cout << it->first << " : " << it->second << std::endl; + } +} + + + +} + + +#endif /* OPENCV_FLANN_PARAMS_H_ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/random.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/random.h new file mode 100644 index 0000000..a3cf5ec --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/random.h @@ -0,0 +1,133 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + +#ifndef OPENCV_FLANN_RANDOM_H +#define OPENCV_FLANN_RANDOM_H + +#include +#include +#include + +#include "general.h" + +namespace cvflann +{ + +/** + * Seeds the random number generator + * @param seed Random seed + */ +inline void seed_random(unsigned int seed) +{ + srand(seed); +} + +/* + * Generates a random double value. + */ +/** + * Generates a random double value. + * @param high Upper limit + * @param low Lower limit + * @return Random double value + */ +inline double rand_double(double high = 1.0, double low = 0) +{ + return low + ((high-low) * (std::rand() / (RAND_MAX + 1.0))); +} + +/** + * Generates a random integer value. + * @param high Upper limit + * @param low Lower limit + * @return Random integer value + */ +inline int rand_int(int high = RAND_MAX, int low = 0) +{ + return low + (int) ( double(high-low) * (std::rand() / (RAND_MAX + 1.0))); +} + +/** + * Random number generator that returns a distinct number from + * the [0,n) interval each time. + */ +class UniqueRandom +{ + std::vector vals_; + int size_; + int counter_; + +public: + /** + * Constructor. + * @param n Size of the interval from which to generate + * @return + */ + UniqueRandom(int n) + { + init(n); + } + + /** + * Initializes the number generator. + * @param n the size of the interval from which to generate random numbers. + */ + void init(int n) + { + // create and initialize an array of size n + vals_.resize(n); + size_ = n; + for (int i = 0; i < size_; ++i) vals_[i] = i; + + // shuffle the elements in the array + std::random_shuffle(vals_.begin(), vals_.end()); + + counter_ = 0; + } + + /** + * Return a distinct random integer in greater or equal to 0 and less + * than 'n' on each call. It should be called maximum 'n' times. + * Returns: a random integer + */ + int next() + { + if (counter_ == size_) { + return -1; + } + else { + return vals_[counter_++]; + } + } +}; + +} + +#endif //OPENCV_FLANN_RANDOM_H diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/result_set.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/result_set.h new file mode 100644 index 0000000..3adad46 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/result_set.h @@ -0,0 +1,542 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_RESULTSET_H +#define OPENCV_FLANN_RESULTSET_H + +#include +#include +#include +#include +#include +#include + +namespace cvflann +{ + +/* This record represents a branch point when finding neighbors in + the tree. It contains a record of the minimum distance to the query + point, as well as the node at which the search resumes. + */ + +template +struct BranchStruct +{ + T node; /* Tree node at which search resumes */ + DistanceType mindist; /* Minimum distance to query for all nodes below. */ + + BranchStruct() {} + BranchStruct(const T& aNode, DistanceType dist) : node(aNode), mindist(dist) {} + + bool operator<(const BranchStruct& rhs) const + { + return mindist +class ResultSet +{ +public: + virtual ~ResultSet() {} + + virtual bool full() const = 0; + + virtual void addPoint(DistanceType dist, int index) = 0; + + virtual DistanceType worstDist() const = 0; + +}; + +/** + * KNNSimpleResultSet does not ensure that the element it holds are unique. + * Is used in those cases where the nearest neighbour algorithm used does not + * attempt to insert the same element multiple times. + */ +template +class KNNSimpleResultSet : public ResultSet +{ + int* indices; + DistanceType* dists; + int capacity; + int count; + DistanceType worst_distance_; + +public: + KNNSimpleResultSet(int capacity_) : capacity(capacity_), count(0) + { + } + + void init(int* indices_, DistanceType* dists_) + { + indices = indices_; + dists = dists_; + count = 0; + worst_distance_ = (std::numeric_limits::max)(); + dists[capacity-1] = worst_distance_; + } + + size_t size() const + { + return count; + } + + bool full() const + { + return count == capacity; + } + + + void addPoint(DistanceType dist, int index) + { + if (dist >= worst_distance_) return; + int i; + for (i=count; i>0; --i) { +#ifdef FLANN_FIRST_MATCH + if ( (dists[i-1]>dist) || ((dist==dists[i-1])&&(indices[i-1]>index)) ) +#else + if (dists[i-1]>dist) +#endif + { + if (i +class KNNResultSet : public ResultSet +{ + int* indices; + DistanceType* dists; + int capacity; + int count; + DistanceType worst_distance_; + +public: + KNNResultSet(int capacity_) : capacity(capacity_), count(0) + { + } + + void init(int* indices_, DistanceType* dists_) + { + indices = indices_; + dists = dists_; + count = 0; + worst_distance_ = (std::numeric_limits::max)(); + dists[capacity-1] = worst_distance_; + } + + size_t size() const + { + return count; + } + + bool full() const + { + return count == capacity; + } + + + void addPoint(DistanceType dist, int index) + { + if (dist >= worst_distance_) return; + int i; + for (i = count; i > 0; --i) { +#ifdef FLANN_FIRST_MATCH + if ( (dists[i-1]<=dist) && ((dist!=dists[i-1])||(indices[i-1]<=index)) ) +#else + if (dists[i-1]<=dist) +#endif + { + // Check for duplicate indices + int j = i - 1; + while ((j >= 0) && (dists[j] == dist)) { + if (indices[j] == index) { + return; + } + --j; + } + break; + } + } + + if (count < capacity) 
++count; + for (int j = count-1; j > i; --j) { + dists[j] = dists[j-1]; + indices[j] = indices[j-1]; + } + dists[i] = dist; + indices[i] = index; + worst_distance_ = dists[capacity-1]; + } + + DistanceType worstDist() const + { + return worst_distance_; + } +}; + + +/** + * A result-set class used when performing a radius based search. + */ +template +class RadiusResultSet : public ResultSet +{ + DistanceType radius; + int* indices; + DistanceType* dists; + size_t capacity; + size_t count; + +public: + RadiusResultSet(DistanceType radius_, int* indices_, DistanceType* dists_, int capacity_) : + radius(radius_), indices(indices_), dists(dists_), capacity(capacity_) + { + init(); + } + + ~RadiusResultSet() + { + } + + void init() + { + count = 0; + } + + size_t size() const + { + return count; + } + + bool full() const + { + return true; + } + + void addPoint(DistanceType dist, int index) + { + if (dist0)&&(count < capacity)) { + dists[count] = dist; + indices[count] = index; + } + count++; + } + } + + DistanceType worstDist() const + { + return radius; + } + +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Class that holds the k NN neighbors + * Faster than KNNResultSet as it uses a binary heap and does not maintain two arrays + */ +template +class UniqueResultSet : public ResultSet +{ +public: + struct DistIndex + { + DistIndex(DistanceType dist, unsigned int index) : + dist_(dist), index_(index) + { + } + bool operator<(const DistIndex dist_index) const + { + return (dist_ < dist_index.dist_) || ((dist_ == dist_index.dist_) && index_ < dist_index.index_); + } + DistanceType dist_; + unsigned int index_; + }; + + /** Default cosntructor */ + UniqueResultSet() : + worst_distance_(std::numeric_limits::max()) + { + } + + /** Check the status of the set + * @return true if we have k NN + */ + inline bool full() const + { + return is_full_; + } + + /** Remove all elements in the set + */ + virtual void clear() = 0; + + /** Copy the set to two C arrays + * @param indices pointer to a C array of indices + * @param dist pointer to a C array of distances + * @param n_neighbors the number of neighbors to copy + */ + virtual void copy(int* indices, DistanceType* dist, int n_neighbors = -1) const + { + if (n_neighbors < 0) { + for (typename std::set::const_iterator dist_index = dist_indices_.begin(), dist_index_end = + dist_indices_.end(); dist_index != dist_index_end; ++dist_index, ++indices, ++dist) { + *indices = dist_index->index_; + *dist = dist_index->dist_; + } + } + else { + int i = 0; + for (typename std::set::const_iterator dist_index = dist_indices_.begin(), dist_index_end = + dist_indices_.end(); (dist_index != dist_index_end) && (i < n_neighbors); ++dist_index, ++indices, ++dist, ++i) { + *indices = dist_index->index_; + *dist = dist_index->dist_; + } + } + } + + /** Copy the set to two C arrays but sort it according to the distance first + * @param indices pointer to a C array of indices + * @param dist pointer to a C array of distances + * @param n_neighbors the number of neighbors to copy + */ + virtual void sortAndCopy(int* indices, DistanceType* dist, int n_neighbors = -1) const + { + copy(indices, dist, n_neighbors); + } + + /** The number of neighbors in the set + * @return + */ + size_t size() const + { + return dist_indices_.size(); + } + + /** The distance of the furthest neighbor + * If we don't have enough neighbors, it returns the max possible value + * @return + */ + inline DistanceType 
worstDist() const + { + return worst_distance_; + } +protected: + /** Flag to say if the set is full */ + bool is_full_; + + /** The worst distance found so far */ + DistanceType worst_distance_; + + /** The best candidates so far */ + std::set dist_indices_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Class that holds the k NN neighbors + * Faster than KNNResultSet as it uses a binary heap and does not maintain two arrays + */ +template +class KNNUniqueResultSet : public UniqueResultSet +{ +public: + /** Constructor + * @param capacity the number of neighbors to store at max + */ + KNNUniqueResultSet(unsigned int capacity) : capacity_(capacity) + { + this->is_full_ = false; + this->clear(); + } + + /** Add a possible candidate to the best neighbors + * @param dist distance for that neighbor + * @param index index of that neighbor + */ + inline void addPoint(DistanceType dist, int index) + { + // Don't do anything if we are worse than the worst + if (dist >= worst_distance_) return; + dist_indices_.insert(DistIndex(dist, index)); + + if (is_full_) { + if (dist_indices_.size() > capacity_) { + dist_indices_.erase(*dist_indices_.rbegin()); + worst_distance_ = dist_indices_.rbegin()->dist_; + } + } + else if (dist_indices_.size() == capacity_) { + is_full_ = true; + worst_distance_ = dist_indices_.rbegin()->dist_; + } + } + + /** Remove all elements in the set + */ + void clear() + { + dist_indices_.clear(); + worst_distance_ = std::numeric_limits::max(); + is_full_ = false; + } + +protected: + typedef typename UniqueResultSet::DistIndex DistIndex; + using UniqueResultSet::is_full_; + using UniqueResultSet::worst_distance_; + using UniqueResultSet::dist_indices_; + + /** The number of neighbors to keep */ + unsigned int capacity_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Class that holds the radius nearest neighbors + * It is more accurate than RadiusResult as it is not limited in the number of neighbors + */ +template +class RadiusUniqueResultSet : public UniqueResultSet +{ +public: + /** Constructor + * @param capacity the number of neighbors to store at max + */ + RadiusUniqueResultSet(DistanceType radius) : + radius_(radius) + { + is_full_ = true; + } + + /** Add a possible candidate to the best neighbors + * @param dist distance for that neighbor + * @param index index of that neighbor + */ + void addPoint(DistanceType dist, int index) + { + if (dist <= radius_) dist_indices_.insert(DistIndex(dist, index)); + } + + /** Remove all elements in the set + */ + inline void clear() + { + dist_indices_.clear(); + } + + + /** Check the status of the set + * @return alwys false + */ + inline bool full() const + { + return true; + } + + /** The distance of the furthest neighbor + * If we don't have enough neighbors, it returns the max possible value + * @return + */ + inline DistanceType worstDist() const + { + return radius_; + } +private: + typedef typename UniqueResultSet::DistIndex DistIndex; + using UniqueResultSet::dist_indices_; + using UniqueResultSet::is_full_; + + /** The furthest distance a neighbor can be */ + DistanceType radius_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Class that holds the k NN neighbors within a radius distance + */ +template +class KNNRadiusUniqueResultSet : public KNNUniqueResultSet +{ 
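+    // Combines both parents' behaviour: results stay unique and are capped at
+    // `capacity` as in KNNUniqueResultSet, while clear() re-seeds
+    // worst_distance_ with `radius`, so candidates beyond the radius are
+    // rejected by addPoint() from the start.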
+public: + /** Constructor + * @param capacity the number of neighbors to store at max + */ + KNNRadiusUniqueResultSet(unsigned int capacity, DistanceType radius) + { + this->capacity_ = capacity; + this->radius_ = radius; + this->dist_indices_.reserve(capacity_); + this->clear(); + } + + /** Remove all elements in the set + */ + void clear() + { + dist_indices_.clear(); + worst_distance_ = radius_; + is_full_ = false; + } +private: + using KNNUniqueResultSet::dist_indices_; + using KNNUniqueResultSet::is_full_; + using KNNUniqueResultSet::worst_distance_; + + /** The maximum number of neighbors to consider */ + unsigned int capacity_; + + /** The maximum distance of a neighbor */ + DistanceType radius_; +}; +} + +#endif //OPENCV_FLANN_RESULTSET_H diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/sampling.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/sampling.h new file mode 100644 index 0000000..396f177 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/sampling.h @@ -0,0 +1,81 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *************************************************************************/ + + +#ifndef OPENCV_FLANN_SAMPLING_H_ +#define OPENCV_FLANN_SAMPLING_H_ + +#include "matrix.h" +#include "random.h" + +namespace cvflann +{ + +template +Matrix random_sample(Matrix& srcMatrix, long size, bool remove = false) +{ + Matrix newSet(new T[size * srcMatrix.cols], size,srcMatrix.cols); + + T* src,* dest; + for (long i=0; i +Matrix random_sample(const Matrix& srcMatrix, size_t size) +{ + UniqueRandom rand((int)srcMatrix.rows); + Matrix newSet(new T[size * srcMatrix.cols], size,srcMatrix.cols); + + T* src,* dest; + for (size_t i=0; i +#include + +#include "general.h" +#include "nn_index.h" + +#ifdef FLANN_SIGNATURE_ +#undef FLANN_SIGNATURE_ +#endif +#define FLANN_SIGNATURE_ "FLANN_INDEX" + +namespace cvflann +{ + +template +struct Datatype {}; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_INT8; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_INT16; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_INT32; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_UINT8; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_UINT16; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_UINT32; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_FLOAT32; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_FLOAT64; } }; + + +/** + * Structure representing the index header. + */ +struct IndexHeader +{ + char signature[16]; + char version[16]; + flann_datatype_t data_type; + flann_algorithm_t index_type; + size_t rows; + size_t cols; +}; + +/** + * Saves index header to stream + * + * @param stream - Stream to save to + * @param index - The index to save + */ +template +void save_header(FILE* stream, const NNIndex& index) +{ + IndexHeader header; + memset(header.signature, 0, sizeof(header.signature)); + strcpy(header.signature, FLANN_SIGNATURE_); + memset(header.version, 0, sizeof(header.version)); + strcpy(header.version, FLANN_VERSION_); + header.data_type = Datatype::type(); + header.index_type = index.getType(); + header.rows = index.size(); + header.cols = index.veclen(); + + std::fwrite(&header, sizeof(header),1,stream); +} + + +/** + * + * @param stream - Stream to load from + * @return Index header + */ +inline IndexHeader load_header(FILE* stream) +{ + IndexHeader header; + size_t read_size = fread(&header,sizeof(header),1,stream); + + if (read_size!=(size_t)1) { + throw FLANNException("Invalid index file, cannot read"); + } + + if (strcmp(header.signature,FLANN_SIGNATURE_)!=0) { + throw FLANNException("Invalid index file, wrong signature"); + } + + return header; + +} + + +template +void save_value(FILE* stream, const T& value, size_t count = 1) +{ + fwrite(&value, sizeof(value),count, stream); +} + +template +void save_value(FILE* stream, const cvflann::Matrix& value) +{ + fwrite(&value, sizeof(value),1, stream); + fwrite(value.data, sizeof(T),value.rows*value.cols, stream); +} + +template +void save_value(FILE* stream, const std::vector& value) +{ + size_t size = value.size(); + fwrite(&size, sizeof(size_t), 1, stream); + fwrite(&value[0], sizeof(T), size, stream); +} + +template +void load_value(FILE* stream, T& value, size_t count = 1) +{ + size_t read_cnt = fread(&value, sizeof(value), count, stream); + if (read_cnt != count) { + throw FLANNException("Cannot read 
from file"); + } +} + +template +void load_value(FILE* stream, cvflann::Matrix& value) +{ + size_t read_cnt = fread(&value, sizeof(value), 1, stream); + if (read_cnt != 1) { + throw FLANNException("Cannot read from file"); + } + value.data = new T[value.rows*value.cols]; + read_cnt = fread(value.data, sizeof(T), value.rows*value.cols, stream); + if (read_cnt != (size_t)(value.rows*value.cols)) { + throw FLANNException("Cannot read from file"); + } +} + + +template +void load_value(FILE* stream, std::vector& value) +{ + size_t size; + size_t read_cnt = fread(&size, sizeof(size_t), 1, stream); + if (read_cnt!=1) { + throw FLANNException("Cannot read from file"); + } + value.resize(size); + read_cnt = fread(&value[0], sizeof(T), size, stream); + if (read_cnt != size) { + throw FLANNException("Cannot read from file"); + } +} + +} + +#endif /* OPENCV_FLANN_SAVING_H_ */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/simplex_downhill.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/simplex_downhill.h new file mode 100644 index 0000000..145901a --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/flann/simplex_downhill.h @@ -0,0 +1,186 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_SIMPLEX_DOWNHILL_H_ +#define OPENCV_FLANN_SIMPLEX_DOWNHILL_H_ + +namespace cvflann +{ + +/** + Adds val to array vals (and point to array points) and keeping the arrays sorted by vals. 
+ */ +template +void addValue(int pos, float val, float* vals, T* point, T* points, int n) +{ + vals[pos] = val; + for (int i=0; i0 && vals[j] +float optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL ) +{ + const int MAX_ITERATIONS = 10; + + assert(n>0); + + T* p_o = new T[n]; + T* p_r = new T[n]; + T* p_e = new T[n]; + + int alpha = 1; + + int iterations = 0; + + bool ownVals = false; + if (vals == NULL) { + ownVals = true; + vals = new float[n+1]; + for (int i=0; i MAX_ITERATIONS) break; + + // compute average of simplex points (except the highest point) + for (int j=0; j=vals[0])&&(val_r=vals[n]) { + for (int i=0; i + + +namespace cvflann +{ + +/** + * A start-stop timer class. + * + * Can be used to time portions of code. + */ +class StartStopTimer +{ + clock_t startTime; + +public: + /** + * Value of the timer. + */ + double value; + + + /** + * Constructor. + */ + StartStopTimer() + { + reset(); + } + + /** + * Starts the timer. + */ + void start() + { + startTime = clock(); + } + + /** + * Stops the timer and updates timer value. + */ + void stop() + { + clock_t stopTime = clock(); + value += ( (double)stopTime - startTime) / CLOCKS_PER_SEC; + } + + /** + * Resets the timer value to 0. + */ + void reset() + { + value = 0; + } + +}; + +} + +#endif // FLANN_TIMER_H diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/highgui/cap_ios.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/highgui/cap_ios.h new file mode 100644 index 0000000..db3928f --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/highgui/cap_ios.h @@ -0,0 +1,169 @@ +/* For iOS video I/O + * by Eduard Feicho on 29/07/12 + * Copyright 2012. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *
+ */
+
+#import <UIKit/UIKit.h>
+#import <Accelerate/Accelerate.h>
+#import <AVFoundation/AVFoundation.h>
+#import <ImageIO/ImageIO.h>
+#include "opencv2/core/core.hpp"
+
+/////////////////////////////////////// CvAbstractCamera /////////////////////////////////////
+
+@class CvAbstractCamera;
+
+@interface CvAbstractCamera : NSObject
+{
+    AVCaptureSession* captureSession;
+    AVCaptureConnection* videoCaptureConnection;
+    AVCaptureVideoPreviewLayer *captureVideoPreviewLayer;
+
+    UIDeviceOrientation currentDeviceOrientation;
+
+    BOOL cameraAvailable;
+    BOOL captureSessionLoaded;
+    BOOL running;
+    BOOL useAVCaptureVideoPreviewLayer;
+
+    AVCaptureDevicePosition defaultAVCaptureDevicePosition;
+    AVCaptureVideoOrientation defaultAVCaptureVideoOrientation;
+    NSString *const defaultAVCaptureSessionPreset;
+
+    int defaultFPS;
+
+    UIView* parentView;
+
+    int imageWidth;
+    int imageHeight;
+}
+
+@property (nonatomic, retain) AVCaptureSession* captureSession;
+@property (nonatomic, retain) AVCaptureConnection* videoCaptureConnection;
+
+@property (nonatomic, readonly) BOOL running;
+@property (nonatomic, readonly) BOOL captureSessionLoaded;
+
+@property (nonatomic, assign) int defaultFPS;
+@property (nonatomic, assign) AVCaptureDevicePosition defaultAVCaptureDevicePosition;
+@property (nonatomic, assign) AVCaptureVideoOrientation defaultAVCaptureVideoOrientation;
+@property (nonatomic, assign) BOOL useAVCaptureVideoPreviewLayer;
+@property (nonatomic, strong) NSString *const defaultAVCaptureSessionPreset;
+
+@property (nonatomic, assign) int imageWidth;
+@property (nonatomic, assign) int imageHeight;
+
+@property (nonatomic, retain) UIView* parentView;
+
+- (void)start;
+- (void)stop;
+- (void)switchCameras;
+
+- (id)initWithParentView:(UIView*)parent;
+
+- (void)createCaptureOutput;
+- (void)createVideoPreviewLayer;
+- (void)updateOrientation;
+
+- (void)lockFocus;
+- (void)unlockFocus;
+- (void)lockExposure;
+- (void)unlockExposure;
+- (void)lockBalance;
+- (void)unlockBalance;
+
+@end
+
+///////////////////////////////// CvVideoCamera ///////////////////////////////////////////
+
+@class CvVideoCamera;
+
+@protocol CvVideoCameraDelegate <NSObject>
+
+#ifdef __cplusplus
+// delegate method for processing image frames
+- (void)processImage:(cv::Mat&)image;
+#endif
+
+@end
+
+@interface CvVideoCamera : CvAbstractCamera
+{
+    AVCaptureVideoDataOutput *videoDataOutput;
+
+    dispatch_queue_t videoDataOutputQueue;
+    CALayer *customPreviewLayer;
+
+    BOOL grayscaleMode;
+
+    BOOL recordVideo;
+    BOOL rotateVideo;
+    AVAssetWriterInput* recordAssetWriterInput;
+    AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor;
+    AVAssetWriter* recordAssetWriter;
+
+    CMTime lastSampleTime;
+
+}
+
+@property (nonatomic, assign) id<CvVideoCameraDelegate> delegate;
+@property (nonatomic, assign) BOOL grayscaleMode;
+
+@property (nonatomic, assign) BOOL recordVideo;
+@property (nonatomic, assign) BOOL rotateVideo;
+@property (nonatomic, retain) AVAssetWriterInput* recordAssetWriterInput;
+@property (nonatomic, retain) AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor;
+@property (nonatomic, retain) AVAssetWriter* recordAssetWriter;
+
+- (void)adjustLayoutToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation;
+- (void)layoutPreviewLayer;
+- (void)saveVideo;
+- (NSURL *)videoFileURL;
+
+
+@end
+
+///////////////////////////////// CvPhotoCamera ///////////////////////////////////////////
+
+@class CvPhotoCamera;
+
+@protocol CvPhotoCameraDelegate <NSObject>
+
+- (void)photoCamera:(CvPhotoCamera*)photoCamera capturedImage:(UIImage *)image;
+- (void)photoCameraCancel:(CvPhotoCamera*)photoCamera;
+
+@end
+
+@interface
CvPhotoCamera : CvAbstractCamera +{ + AVCaptureStillImageOutput *stillImageOutput; +} + +@property (nonatomic, assign) id delegate; + +- (void)takePicture; + +@end diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/highgui/highgui.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/highgui/highgui.hpp new file mode 100644 index 0000000..f6f2293 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/highgui/highgui.hpp @@ -0,0 +1,255 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_HIGHGUI_HPP__ +#define __OPENCV_HIGHGUI_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui_c.h" + +#ifdef __cplusplus + +struct CvCapture; +struct CvVideoWriter; + +namespace cv +{ + +enum { + // Flags for namedWindow + WINDOW_NORMAL = CV_WINDOW_NORMAL, // the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size + WINDOW_AUTOSIZE = CV_WINDOW_AUTOSIZE, // the user cannot resize the window, the size is constrainted by the image displayed + WINDOW_OPENGL = CV_WINDOW_OPENGL, // window with opengl support + + // Flags for set / getWindowProperty + WND_PROP_FULLSCREEN = CV_WND_PROP_FULLSCREEN, // fullscreen property + WND_PROP_AUTOSIZE = CV_WND_PROP_AUTOSIZE, // autosize property + WND_PROP_ASPECT_RATIO = CV_WND_PROP_ASPECTRATIO, // window's aspect ration + WND_PROP_OPENGL = CV_WND_PROP_OPENGL // opengl support +}; + +CV_EXPORTS_W void namedWindow(const string& winname, int flags = WINDOW_AUTOSIZE); +CV_EXPORTS_W void destroyWindow(const string& winname); +CV_EXPORTS_W void destroyAllWindows(); + +CV_EXPORTS_W int startWindowThread(); + +CV_EXPORTS_W int waitKey(int delay = 0); + +CV_EXPORTS_W void imshow(const string& winname, InputArray mat); + +CV_EXPORTS_W void resizeWindow(const string& winname, int width, int height); +CV_EXPORTS_W void moveWindow(const string& winname, int x, int y); + +CV_EXPORTS_W void setWindowProperty(const string& winname, int prop_id, double prop_value);//YV +CV_EXPORTS_W double getWindowProperty(const string& winname, int prop_id);//YV + +enum +{ + EVENT_MOUSEMOVE =0, + EVENT_LBUTTONDOWN =1, + EVENT_RBUTTONDOWN =2, + EVENT_MBUTTONDOWN =3, + EVENT_LBUTTONUP =4, + EVENT_RBUTTONUP =5, + EVENT_MBUTTONUP =6, + EVENT_LBUTTONDBLCLK =7, + EVENT_RBUTTONDBLCLK =8, + EVENT_MBUTTONDBLCLK =9 +}; + +enum +{ + EVENT_FLAG_LBUTTON =1, + EVENT_FLAG_RBUTTON =2, + EVENT_FLAG_MBUTTON =4, + EVENT_FLAG_CTRLKEY =8, + EVENT_FLAG_SHIFTKEY =16, + EVENT_FLAG_ALTKEY =32 +}; + +typedef void (*MouseCallback)(int event, int x, int y, int flags, void* userdata); + +//! 
assigns callback for mouse events +CV_EXPORTS void setMouseCallback(const string& winname, MouseCallback onMouse, void* userdata = 0); + + +typedef void (CV_CDECL *TrackbarCallback)(int pos, void* userdata); + +CV_EXPORTS int createTrackbar(const string& trackbarname, const string& winname, + int* value, int count, + TrackbarCallback onChange = 0, + void* userdata = 0); + +CV_EXPORTS_W int getTrackbarPos(const string& trackbarname, const string& winname); +CV_EXPORTS_W void setTrackbarPos(const string& trackbarname, const string& winname, int pos); + +// OpenGL support + +typedef void (*OpenGlDrawCallback)(void* userdata); +CV_EXPORTS void setOpenGlDrawCallback(const string& winname, OpenGlDrawCallback onOpenGlDraw, void* userdata = 0); + +CV_EXPORTS void setOpenGlContext(const string& winname); + +CV_EXPORTS void updateWindow(const string& winname); + +// < Deperecated +CV_EXPORTS void pointCloudShow(const string& winname, const GlCamera& camera, const GlArrays& arr); +CV_EXPORTS void pointCloudShow(const string& winname, const GlCamera& camera, InputArray points, InputArray colors = noArray()); +// > + +//Only for Qt + +CV_EXPORTS CvFont fontQt(const string& nameFont, int pointSize=-1, + Scalar color=Scalar::all(0), int weight=CV_FONT_NORMAL, + int style=CV_STYLE_NORMAL, int spacing=0); +CV_EXPORTS void addText( const Mat& img, const string& text, Point org, CvFont font); + +CV_EXPORTS void displayOverlay(const string& winname, const string& text, int delayms CV_DEFAULT(0)); +CV_EXPORTS void displayStatusBar(const string& winname, const string& text, int delayms CV_DEFAULT(0)); + +CV_EXPORTS void saveWindowParameters(const string& windowName); +CV_EXPORTS void loadWindowParameters(const string& windowName); +CV_EXPORTS int startLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]); +CV_EXPORTS void stopLoop(); + +typedef void (CV_CDECL *ButtonCallback)(int state, void* userdata); +CV_EXPORTS int createButton( const string& bar_name, ButtonCallback on_change, + void* userdata=NULL, int type=CV_PUSH_BUTTON, + bool initial_button_state=0); + +//------------------------- + +enum +{ + // 8bit, color or not + IMREAD_UNCHANGED =-1, + // 8bit, gray + IMREAD_GRAYSCALE =0, + // ?, color + IMREAD_COLOR =1, + // any depth, ? 
+    IMREAD_ANYDEPTH =2,
+    // ?, any color
+    IMREAD_ANYCOLOR =4
+};
+
+enum
+{
+    IMWRITE_JPEG_QUALITY =1,
+    IMWRITE_PNG_COMPRESSION =16,
+    IMWRITE_PNG_STRATEGY =17,
+    IMWRITE_PNG_BILEVEL =18,
+    IMWRITE_PNG_STRATEGY_DEFAULT =0,
+    IMWRITE_PNG_STRATEGY_FILTERED =1,
+    IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2,
+    IMWRITE_PNG_STRATEGY_RLE =3,
+    IMWRITE_PNG_STRATEGY_FIXED =4,
+    IMWRITE_PXM_BINARY =32
+};
+
+CV_EXPORTS_W Mat imread( const string& filename, int flags=1 );
+CV_EXPORTS_W bool imwrite( const string& filename, InputArray img,
+              const vector<int>& params=vector<int>());
+CV_EXPORTS_W Mat imdecode( InputArray buf, int flags );
+CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst );
+CV_EXPORTS_W bool imencode( const string& ext, InputArray img,
+                            CV_OUT vector<uchar>& buf,
+                            const vector<int>& params=vector<int>());
+
+#ifndef CV_NO_VIDEO_CAPTURE_CPP_API
+
+template<> void CV_EXPORTS Ptr<CvCapture>::delete_obj();
+template<> void CV_EXPORTS Ptr<CvVideoWriter>::delete_obj();
+
+class CV_EXPORTS_W VideoCapture
+{
+public:
+    CV_WRAP VideoCapture();
+    CV_WRAP VideoCapture(const string& filename);
+    CV_WRAP VideoCapture(int device);
+
+    virtual ~VideoCapture();
+    CV_WRAP virtual bool open(const string& filename);
+    CV_WRAP virtual bool open(int device);
+    CV_WRAP virtual bool isOpened() const;
+    CV_WRAP virtual void release();
+
+    CV_WRAP virtual bool grab();
+    CV_WRAP virtual bool retrieve(CV_OUT Mat& image, int channel=0);
+    virtual VideoCapture& operator >> (CV_OUT Mat& image);
+    CV_WRAP virtual bool read(CV_OUT Mat& image);
+
+    CV_WRAP virtual bool set(int propId, double value);
+    CV_WRAP virtual double get(int propId);
+
+protected:
+    Ptr<CvCapture> cap;
+};
+
+
+class CV_EXPORTS_W VideoWriter
+{
+public:
+    CV_WRAP VideoWriter();
+    CV_WRAP VideoWriter(const string& filename, int fourcc, double fps,
+                Size frameSize, bool isColor=true);
+
+    virtual ~VideoWriter();
+    CV_WRAP virtual bool open(const string& filename, int fourcc, double fps,
+                      Size frameSize, bool isColor=true);
+    CV_WRAP virtual bool isOpened() const;
+    CV_WRAP virtual void release();
+    virtual VideoWriter& operator << (const Mat& image);
+    CV_WRAP virtual void write(const Mat& image);
+
+protected:
+    Ptr<CvVideoWriter> writer;
+};
+
+#endif
+
+}
+
+#endif
+
+#endif
diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/highgui/highgui_c.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/highgui/highgui_c.h
new file mode 100644
index 0000000..1f86abb
--- /dev/null
+++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/highgui/highgui_c.h
@@ -0,0 +1,650 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// Intel License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_HIGHGUI_H__ +#define __OPENCV_HIGHGUI_H__ + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/****************************************************************************************\ +* Basic GUI functions * +\****************************************************************************************/ +//YV +//-----------New for Qt +/* For font */ +enum { CV_FONT_LIGHT = 25,//QFont::Light, + CV_FONT_NORMAL = 50,//QFont::Normal, + CV_FONT_DEMIBOLD = 63,//QFont::DemiBold, + CV_FONT_BOLD = 75,//QFont::Bold, + CV_FONT_BLACK = 87 //QFont::Black +}; + +enum { CV_STYLE_NORMAL = 0,//QFont::StyleNormal, + CV_STYLE_ITALIC = 1,//QFont::StyleItalic, + CV_STYLE_OBLIQUE = 2 //QFont::StyleOblique +}; +/* ---------*/ + +//for color cvScalar(blue_component, green_component, red\_component[, alpha_component]) +//and alpha= 0 <-> 0xFF (not transparent <-> transparent) +CVAPI(CvFont) cvFontQt(const char* nameFont, int pointSize CV_DEFAULT(-1), CvScalar color CV_DEFAULT(cvScalarAll(0)), int weight CV_DEFAULT(CV_FONT_NORMAL), int style CV_DEFAULT(CV_STYLE_NORMAL), int spacing CV_DEFAULT(0)); + +CVAPI(void) cvAddText(const CvArr* img, const char* text, CvPoint org, CvFont *arg2); + +CVAPI(void) cvDisplayOverlay(const char* name, const char* text, int delayms CV_DEFAULT(0)); +CVAPI(void) cvDisplayStatusBar(const char* name, const char* text, int delayms CV_DEFAULT(0)); + +CVAPI(void) cvSaveWindowParameters(const char* name); +CVAPI(void) cvLoadWindowParameters(const char* name); +CVAPI(int) cvStartLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]); +CVAPI(void) cvStopLoop( void ); + +typedef void (CV_CDECL *CvButtonCallback)(int state, void* userdata); +enum {CV_PUSH_BUTTON = 0, CV_CHECKBOX = 1, CV_RADIOBOX = 2}; +CVAPI(int) cvCreateButton( const char* button_name CV_DEFAULT(NULL),CvButtonCallback on_change CV_DEFAULT(NULL), void* userdata CV_DEFAULT(NULL) , int button_type CV_DEFAULT(CV_PUSH_BUTTON), int initial_button_state CV_DEFAULT(0)); +//---------------------- + + +/* this function is used to set some external parameters in case of X Window */ +CVAPI(int) cvInitSystem( int argc, char** argv ); + +CVAPI(int) cvStartWindowThread( void ); + +// --------- YV --------- +enum +{ + //These 3 flags are used by cvSet/GetWindowProperty + CV_WND_PROP_FULLSCREEN 
= 0, //to change/get window's fullscreen property + CV_WND_PROP_AUTOSIZE = 1, //to change/get window's autosize property + CV_WND_PROP_ASPECTRATIO= 2, //to change/get window's aspectratio property + CV_WND_PROP_OPENGL = 3, //to change/get window's opengl support + + //These 2 flags are used by cvNamedWindow and cvSet/GetWindowProperty + CV_WINDOW_NORMAL = 0x00000000, //the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size + CV_WINDOW_AUTOSIZE = 0x00000001, //the user cannot resize the window, the size is constrainted by the image displayed + CV_WINDOW_OPENGL = 0x00001000, //window with opengl support + + //Those flags are only for Qt + CV_GUI_EXPANDED = 0x00000000, //status bar and tool bar + CV_GUI_NORMAL = 0x00000010, //old fashious way + + //These 3 flags are used by cvNamedWindow and cvSet/GetWindowProperty + CV_WINDOW_FULLSCREEN = 1,//change the window to fullscreen + CV_WINDOW_FREERATIO = 0x00000100,//the image expends as much as it can (no ratio constraint) + CV_WINDOW_KEEPRATIO = 0x00000000//the ration image is respected. +}; + +/* create window */ +CVAPI(int) cvNamedWindow( const char* name, int flags CV_DEFAULT(CV_WINDOW_AUTOSIZE) ); + +/* Set and Get Property of the window */ +CVAPI(void) cvSetWindowProperty(const char* name, int prop_id, double prop_value); +CVAPI(double) cvGetWindowProperty(const char* name, int prop_id); + +/* display image within window (highgui windows remember their content) */ +CVAPI(void) cvShowImage( const char* name, const CvArr* image ); + +/* resize/move window */ +CVAPI(void) cvResizeWindow( const char* name, int width, int height ); +CVAPI(void) cvMoveWindow( const char* name, int x, int y ); + + +/* destroy window and all the trackers associated with it */ +CVAPI(void) cvDestroyWindow( const char* name ); + +CVAPI(void) cvDestroyAllWindows(void); + +/* get native window handle (HWND in case of Win32 and Widget in case of X Window) */ +CVAPI(void*) cvGetWindowHandle( const char* name ); + +/* get name of highgui window given its native handle */ +CVAPI(const char*) cvGetWindowName( void* window_handle ); + + +typedef void (CV_CDECL *CvTrackbarCallback)(int pos); + +/* create trackbar and display it on top of given window, set callback */ +CVAPI(int) cvCreateTrackbar( const char* trackbar_name, const char* window_name, + int* value, int count, CvTrackbarCallback on_change CV_DEFAULT(NULL)); + +typedef void (CV_CDECL *CvTrackbarCallback2)(int pos, void* userdata); + +CVAPI(int) cvCreateTrackbar2( const char* trackbar_name, const char* window_name, + int* value, int count, CvTrackbarCallback2 on_change, + void* userdata CV_DEFAULT(0)); + +/* retrieve or set trackbar position */ +CVAPI(int) cvGetTrackbarPos( const char* trackbar_name, const char* window_name ); +CVAPI(void) cvSetTrackbarPos( const char* trackbar_name, const char* window_name, int pos ); + +enum +{ + CV_EVENT_MOUSEMOVE =0, + CV_EVENT_LBUTTONDOWN =1, + CV_EVENT_RBUTTONDOWN =2, + CV_EVENT_MBUTTONDOWN =3, + CV_EVENT_LBUTTONUP =4, + CV_EVENT_RBUTTONUP =5, + CV_EVENT_MBUTTONUP =6, + CV_EVENT_LBUTTONDBLCLK =7, + CV_EVENT_RBUTTONDBLCLK =8, + CV_EVENT_MBUTTONDBLCLK =9 +}; + +enum +{ + CV_EVENT_FLAG_LBUTTON =1, + CV_EVENT_FLAG_RBUTTON =2, + CV_EVENT_FLAG_MBUTTON =4, + CV_EVENT_FLAG_CTRLKEY =8, + CV_EVENT_FLAG_SHIFTKEY =16, + CV_EVENT_FLAG_ALTKEY =32 +}; + +typedef void (CV_CDECL *CvMouseCallback )(int event, int x, int y, int flags, void* param); + +/* assign callback for mouse events */ +CVAPI(void) cvSetMouseCallback( const char* window_name, 
CvMouseCallback on_mouse, + void* param CV_DEFAULT(NULL)); + +enum +{ +/* 8bit, color or not */ + CV_LOAD_IMAGE_UNCHANGED =-1, +/* 8bit, gray */ + CV_LOAD_IMAGE_GRAYSCALE =0, +/* ?, color */ + CV_LOAD_IMAGE_COLOR =1, +/* any depth, ? */ + CV_LOAD_IMAGE_ANYDEPTH =2, +/* ?, any color */ + CV_LOAD_IMAGE_ANYCOLOR =4 +}; + +/* load image from file + iscolor can be a combination of above flags where CV_LOAD_IMAGE_UNCHANGED + overrides the other flags + using CV_LOAD_IMAGE_ANYCOLOR alone is equivalent to CV_LOAD_IMAGE_UNCHANGED + unless CV_LOAD_IMAGE_ANYDEPTH is specified images are converted to 8bit +*/ +CVAPI(IplImage*) cvLoadImage( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); +CVAPI(CvMat*) cvLoadImageM( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); + +enum +{ + CV_IMWRITE_JPEG_QUALITY =1, + CV_IMWRITE_PNG_COMPRESSION =16, + CV_IMWRITE_PNG_STRATEGY =17, + CV_IMWRITE_PNG_BILEVEL =18, + CV_IMWRITE_PNG_STRATEGY_DEFAULT =0, + CV_IMWRITE_PNG_STRATEGY_FILTERED =1, + CV_IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2, + CV_IMWRITE_PNG_STRATEGY_RLE =3, + CV_IMWRITE_PNG_STRATEGY_FIXED =4, + CV_IMWRITE_PXM_BINARY =32 +}; + +/* save image to file */ +CVAPI(int) cvSaveImage( const char* filename, const CvArr* image, + const int* params CV_DEFAULT(0) ); + +/* decode image stored in the buffer */ +CVAPI(IplImage*) cvDecodeImage( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); +CVAPI(CvMat*) cvDecodeImageM( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); + +/* encode image and store the result as a byte vector (single-row 8uC1 matrix) */ +CVAPI(CvMat*) cvEncodeImage( const char* ext, const CvArr* image, + const int* params CV_DEFAULT(0) ); + +enum +{ + CV_CVTIMG_FLIP =1, + CV_CVTIMG_SWAP_RB =2 +}; + +/* utility function: convert one image to another with optional vertical flip */ +CVAPI(void) cvConvertImage( const CvArr* src, CvArr* dst, int flags CV_DEFAULT(0)); + +/* wait for key event infinitely (delay<=0) or for "delay" milliseconds */ +CVAPI(int) cvWaitKey(int delay CV_DEFAULT(0)); + +// OpenGL support + +typedef void (CV_CDECL *CvOpenGlDrawCallback)(void* userdata); +CVAPI(void) cvSetOpenGlDrawCallback(const char* window_name, CvOpenGlDrawCallback callback, void* userdata CV_DEFAULT(NULL)); + +CVAPI(void) cvSetOpenGlContext(const char* window_name); +CVAPI(void) cvUpdateWindow(const char* window_name); + + +/****************************************************************************************\ +* Working with Video Files and Cameras * +\****************************************************************************************/ + +/* "black box" capture structure */ +typedef struct CvCapture CvCapture; + +/* start capturing frames from video file */ +CVAPI(CvCapture*) cvCreateFileCapture( const char* filename ); + +enum +{ + CV_CAP_ANY =0, // autodetect + + CV_CAP_MIL =100, // MIL proprietary drivers + + CV_CAP_VFW =200, // platform native + CV_CAP_V4L =200, + CV_CAP_V4L2 =200, + + CV_CAP_FIREWARE =300, // IEEE 1394 drivers + CV_CAP_FIREWIRE =300, + CV_CAP_IEEE1394 =300, + CV_CAP_DC1394 =300, + CV_CAP_CMU1394 =300, + + CV_CAP_STEREO =400, // TYZX proprietary drivers + CV_CAP_TYZX =400, + CV_TYZX_LEFT =400, + CV_TYZX_RIGHT =401, + CV_TYZX_COLOR =402, + CV_TYZX_Z =403, + + CV_CAP_QT =500, // QuickTime + + CV_CAP_UNICAP =600, // Unicap drivers + + CV_CAP_DSHOW =700, // DirectShow (via videoInput) + CV_CAP_MSMF =1400, // Microsoft Media Foundation (via videoInput) + + CV_CAP_PVAPI =800, // PvAPI, Prosilica GigE SDK + + CV_CAP_OPENNI =900, // 
OpenNI (for Kinect) + CV_CAP_OPENNI_ASUS =910, // OpenNI (for Asus Xtion) + + CV_CAP_ANDROID =1000, // Android + CV_CAP_ANDROID_BACK =CV_CAP_ANDROID+99, // Android back camera + CV_CAP_ANDROID_FRONT =CV_CAP_ANDROID+98, // Android front camera + + CV_CAP_XIAPI =1100, // XIMEA Camera API + + CV_CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API) + + CV_CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK + + CV_CAP_INTELPERC = 1500 // Intel Perceptual Computing SDK +}; + +/* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */ +CVAPI(CvCapture*) cvCreateCameraCapture( int index ); + +/* grab a frame, return 1 on success, 0 on fail. + this function is thought to be fast */ +CVAPI(int) cvGrabFrame( CvCapture* capture ); + +/* get the frame grabbed with cvGrabFrame(..) + This function may apply some frame processing like + frame decompression, flipping etc. + !!!DO NOT RELEASE or MODIFY the retrieved frame!!! */ +CVAPI(IplImage*) cvRetrieveFrame( CvCapture* capture, int streamIdx CV_DEFAULT(0) ); + +/* Just a combination of cvGrabFrame and cvRetrieveFrame + !!!DO NOT RELEASE or MODIFY the retrieved frame!!! */ +CVAPI(IplImage*) cvQueryFrame( CvCapture* capture ); + +/* stop capturing/reading and free resources */ +CVAPI(void) cvReleaseCapture( CvCapture** capture ); + +enum +{ + // modes of the controlling registers (can be: auto, manual, auto single push, absolute Latter allowed with any other mode) + // every feature can have only one mode turned on at a time + CV_CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically) + CV_CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user + CV_CAP_PROP_DC1394_MODE_AUTO = -2, + CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1, + CV_CAP_PROP_POS_MSEC =0, + CV_CAP_PROP_POS_FRAMES =1, + CV_CAP_PROP_POS_AVI_RATIO =2, + CV_CAP_PROP_FRAME_WIDTH =3, + CV_CAP_PROP_FRAME_HEIGHT =4, + CV_CAP_PROP_FPS =5, + CV_CAP_PROP_FOURCC =6, + CV_CAP_PROP_FRAME_COUNT =7, + CV_CAP_PROP_FORMAT =8, + CV_CAP_PROP_MODE =9, + CV_CAP_PROP_BRIGHTNESS =10, + CV_CAP_PROP_CONTRAST =11, + CV_CAP_PROP_SATURATION =12, + CV_CAP_PROP_HUE =13, + CV_CAP_PROP_GAIN =14, + CV_CAP_PROP_EXPOSURE =15, + CV_CAP_PROP_CONVERT_RGB =16, + CV_CAP_PROP_WHITE_BALANCE_BLUE_U =17, + CV_CAP_PROP_RECTIFICATION =18, + CV_CAP_PROP_MONOCROME =19, + CV_CAP_PROP_SHARPNESS =20, + CV_CAP_PROP_AUTO_EXPOSURE =21, // exposure control done by camera, + // user can adjust refernce level + // using this feature + CV_CAP_PROP_GAMMA =22, + CV_CAP_PROP_TEMPERATURE =23, + CV_CAP_PROP_TRIGGER =24, + CV_CAP_PROP_TRIGGER_DELAY =25, + CV_CAP_PROP_WHITE_BALANCE_RED_V =26, + CV_CAP_PROP_ZOOM =27, + CV_CAP_PROP_FOCUS =28, + CV_CAP_PROP_GUID =29, + CV_CAP_PROP_ISO_SPEED =30, + CV_CAP_PROP_MAX_DC1394 =31, + CV_CAP_PROP_BACKLIGHT =32, + CV_CAP_PROP_PAN =33, + CV_CAP_PROP_TILT =34, + CV_CAP_PROP_ROLL =35, + CV_CAP_PROP_IRIS =36, + CV_CAP_PROP_SETTINGS =37, + + CV_CAP_PROP_AUTOGRAB =1024, // property for highgui class CvCapture_Android only + CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING=1025, // readonly, tricky property, returns cpnst char* indeed + CV_CAP_PROP_PREVIEW_FORMAT=1026, // readonly, tricky property, returns cpnst char* indeed + + // OpenNI map generators + CV_CAP_OPENNI_DEPTH_GENERATOR = 1 << 31, + CV_CAP_OPENNI_IMAGE_GENERATOR = 1 << 30, + CV_CAP_OPENNI_GENERATORS_MASK = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_OPENNI_IMAGE_GENERATOR, + + // Properties of cameras available 
through OpenNI interfaces + CV_CAP_PROP_OPENNI_OUTPUT_MODE = 100, + CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm + CV_CAP_PROP_OPENNI_BASELINE = 102, // in mm + CV_CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels + CV_CAP_PROP_OPENNI_REGISTRATION = 104, // flag + CV_CAP_PROP_OPENNI_REGISTRATION_ON = CV_CAP_PROP_OPENNI_REGISTRATION, // flag that synchronizes the remapping depth map to image map + // by changing depth generator's view point (if the flag is "on") or + // sets this view point to its normal one (if the flag is "off"). + CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105, + CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106, + CV_CAP_PROP_OPENNI_CIRCLE_BUFFER = 107, + CV_CAP_PROP_OPENNI_MAX_TIME_DURATION = 108, + + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT = 109, + + CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT, + CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_OUTPUT_MODE, + CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_BASELINE, + CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH, + CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION, + CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION, + + // Properties of cameras available through GStreamer interface + CV_CAP_GSTREAMER_QUEUE_LENGTH = 200, // default is 1 + CV_CAP_PROP_PVAPI_MULTICASTIP = 300, // ip for anable multicast master mode. 0 for disable multicast + + // Properties of cameras available through XIMEA SDK interface + CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping. + CV_CAP_PROP_XI_DATA_FORMAT = 401, // Output data format. + CV_CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels). + CV_CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels). + CV_CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger. + CV_CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE. + CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input + CV_CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode + CV_CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level + CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output + CV_CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode + CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED + CV_CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality + CV_CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition) + CV_CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance + CV_CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain + CV_CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%). 
+ CV_CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure + CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure + CV_CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %) + CV_CAP_PROP_XI_TIMEOUT = 420, // Image capture timeout in milliseconds + + // Properties for Android cameras + CV_CAP_PROP_ANDROID_FLASH_MODE = 8001, + CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002, + CV_CAP_PROP_ANDROID_WHITE_BALANCE = 8003, + CV_CAP_PROP_ANDROID_ANTIBANDING = 8004, + CV_CAP_PROP_ANDROID_FOCAL_LENGTH = 8005, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008, + CV_CAP_PROP_ANDROID_EXPOSE_LOCK = 8009, + CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK = 8010, + + // Properties of cameras available through AVFOUNDATION interface + CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001, + CV_CAP_PROP_IOS_DEVICE_EXPOSURE = 9002, + CV_CAP_PROP_IOS_DEVICE_FLASH = 9003, + CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004, + CV_CAP_PROP_IOS_DEVICE_TORCH = 9005, + + // Properties of cameras available through Smartek Giganetix Ethernet Vision interface + /* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */ + CV_CAP_PROP_GIGA_FRAME_OFFSET_X = 10001, + CV_CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002, + CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003, + CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004, + CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005, + CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006, + + CV_CAP_PROP_INTELPERC_PROFILE_COUNT = 11001, + CV_CAP_PROP_INTELPERC_PROFILE_IDX = 11002, + CV_CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003, + CV_CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004, + CV_CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005, + CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006, + CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007, + + // Intel PerC streams + CV_CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29, + CV_CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28, + CV_CAP_INTELPERC_GENERATORS_MASK = CV_CAP_INTELPERC_DEPTH_GENERATOR + CV_CAP_INTELPERC_IMAGE_GENERATOR +}; + +enum +{ + // Data given from depth generator. + CV_CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1) + CV_CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3) + CV_CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1) + CV_CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1) + CV_CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1 + + // Data given from RGB image generator. 
+ CV_CAP_OPENNI_BGR_IMAGE = 5, + CV_CAP_OPENNI_GRAY_IMAGE = 6 +}; + +// Supported output modes of OpenNI image generator +enum +{ + CV_CAP_OPENNI_VGA_30HZ = 0, + CV_CAP_OPENNI_SXGA_15HZ = 1, + CV_CAP_OPENNI_SXGA_30HZ = 2, + CV_CAP_OPENNI_QVGA_30HZ = 3, + CV_CAP_OPENNI_QVGA_60HZ = 4 +}; + +//supported by Android camera output formats +enum +{ + CV_CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR + CV_CAP_ANDROID_COLOR_FRAME = CV_CAP_ANDROID_COLOR_FRAME_BGR, + CV_CAP_ANDROID_GREY_FRAME = 1, //Y + CV_CAP_ANDROID_COLOR_FRAME_RGB = 2, + CV_CAP_ANDROID_COLOR_FRAME_BGRA = 3, + CV_CAP_ANDROID_COLOR_FRAME_RGBA = 4 +}; + +// supported Android camera flash modes +enum +{ + CV_CAP_ANDROID_FLASH_MODE_AUTO = 0, + CV_CAP_ANDROID_FLASH_MODE_OFF, + CV_CAP_ANDROID_FLASH_MODE_ON, + CV_CAP_ANDROID_FLASH_MODE_RED_EYE, + CV_CAP_ANDROID_FLASH_MODE_TORCH +}; + +// supported Android camera focus modes +enum +{ + CV_CAP_ANDROID_FOCUS_MODE_AUTO = 0, + CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_PICTURE, + CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO, + CV_CAP_ANDROID_FOCUS_MODE_EDOF, + CV_CAP_ANDROID_FOCUS_MODE_FIXED, + CV_CAP_ANDROID_FOCUS_MODE_INFINITY, + CV_CAP_ANDROID_FOCUS_MODE_MACRO +}; + +// supported Android camera white balance modes +enum +{ + CV_CAP_ANDROID_WHITE_BALANCE_AUTO = 0, + CV_CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT, + CV_CAP_ANDROID_WHITE_BALANCE_DAYLIGHT, + CV_CAP_ANDROID_WHITE_BALANCE_FLUORESCENT, + CV_CAP_ANDROID_WHITE_BALANCE_INCANDESCENT, + CV_CAP_ANDROID_WHITE_BALANCE_SHADE, + CV_CAP_ANDROID_WHITE_BALANCE_TWILIGHT, + CV_CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT +}; + +// supported Android camera antibanding modes +enum +{ + CV_CAP_ANDROID_ANTIBANDING_50HZ = 0, + CV_CAP_ANDROID_ANTIBANDING_60HZ, + CV_CAP_ANDROID_ANTIBANDING_AUTO, + CV_CAP_ANDROID_ANTIBANDING_OFF +}; + +enum +{ + CV_CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth. + CV_CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates. + CV_CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam. 
+ CV_CAP_INTELPERC_IMAGE = 3 +}; + +/* retrieve or set capture properties */ +CVAPI(double) cvGetCaptureProperty( CvCapture* capture, int property_id ); +CVAPI(int) cvSetCaptureProperty( CvCapture* capture, int property_id, double value ); + +// Return the type of the capturer (eg, CV_CAP_V4W, CV_CAP_UNICAP), which is unknown if created with CV_CAP_ANY +CVAPI(int) cvGetCaptureDomain( CvCapture* capture); + +/* "black box" video file writer structure */ +typedef struct CvVideoWriter CvVideoWriter; + +#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24)) + +CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4) +{ + return CV_FOURCC_MACRO(c1, c2, c3, c4); +} + +#define CV_FOURCC_PROMPT -1 /* Open Codec Selection Dialog (Windows only) */ +#define CV_FOURCC_DEFAULT CV_FOURCC('I', 'Y', 'U', 'V') /* Use default codec for specified filename (Linux only) */ + +/* initialize video file writer */ +CVAPI(CvVideoWriter*) cvCreateVideoWriter( const char* filename, int fourcc, + double fps, CvSize frame_size, + int is_color CV_DEFAULT(1)); + +//CVAPI(CvVideoWriter*) cvCreateImageSequenceWriter( const char* filename, +// int is_color CV_DEFAULT(1)); + +/* write frame to video file */ +CVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image ); + +/* close video file writer */ +CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer ); + +/****************************************************************************************\ +* Obsolete functions/synonyms * +\****************************************************************************************/ + +#define cvCaptureFromFile cvCreateFileCapture +#define cvCaptureFromCAM cvCreateCameraCapture +#define cvCaptureFromAVI cvCaptureFromFile +#define cvCreateAVIWriter cvCreateVideoWriter +#define cvWriteToAVI cvWriteFrame +#define cvAddSearchPath(path) +#define cvvInitSystem cvInitSystem +#define cvvNamedWindow cvNamedWindow +#define cvvShowImage cvShowImage +#define cvvResizeWindow cvResizeWindow +#define cvvDestroyWindow cvDestroyWindow +#define cvvCreateTrackbar cvCreateTrackbar +#define cvvLoadImage(name) cvLoadImage((name),1) +#define cvvSaveImage cvSaveImage +#define cvvAddSearchPath cvAddSearchPath +#define cvvWaitKey(name) cvWaitKey(0) +#define cvvWaitKeyEx(name,delay) cvWaitKey(delay) +#define cvvConvertImage cvConvertImage +#define HG_AUTOSIZE CV_WINDOW_AUTOSIZE +#define set_preprocess_func cvSetPreprocessFuncWin32 +#define set_postprocess_func cvSetPostprocessFuncWin32 + +#if defined WIN32 || defined _WIN32 + +CVAPI(void) cvSetPreprocessFuncWin32_(const void* callback); +CVAPI(void) cvSetPostprocessFuncWin32_(const void* callback); +#define cvSetPreprocessFuncWin32(callback) cvSetPreprocessFuncWin32_((const void*)(callback)) +#define cvSetPostprocessFuncWin32(callback) cvSetPostprocessFuncWin32_((const void*)(callback)) + +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/highgui/ios.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/highgui/ios.h new file mode 100644 index 0000000..a7f0395 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/highgui/ios.h @@ -0,0 +1,49 @@ + +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/core/core.hpp" +#import "opencv2/highgui/cap_ios.h" + +UIImage* MatToUIImage(const cv::Mat& image); +void UIImageToMat(const UIImage* image, + cv::Mat& m, bool alphaExist = false); diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/imgproc/imgproc.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/imgproc/imgproc.hpp new file mode 100644 index 0000000..2fcccfe --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/imgproc/imgproc.hpp @@ -0,0 +1,1303 @@ +/*! \file imgproc.hpp + \brief The Image Processing + */ + +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_IMGPROC_HPP__ +#define __OPENCV_IMGPROC_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/imgproc/types_c.h" + +#ifdef __cplusplus + +/*! \namespace cv + Namespace where all the C++ OpenCV functionality resides + */ +namespace cv +{ + +//! various border interpolation methods +enum { BORDER_REPLICATE=IPL_BORDER_REPLICATE, BORDER_CONSTANT=IPL_BORDER_CONSTANT, + BORDER_REFLECT=IPL_BORDER_REFLECT, BORDER_WRAP=IPL_BORDER_WRAP, + BORDER_REFLECT_101=IPL_BORDER_REFLECT_101, BORDER_REFLECT101=BORDER_REFLECT_101, + BORDER_TRANSPARENT=IPL_BORDER_TRANSPARENT, + BORDER_DEFAULT=BORDER_REFLECT_101, BORDER_ISOLATED=16 }; + +//! 1D interpolation function: returns coordinate of the "donor" pixel for the specified location p. +CV_EXPORTS_W int borderInterpolate( int p, int len, int borderType ); + +/*! + The Base Class for 1D or Row-wise Filters + + This is the base class for linear or non-linear filters that process 1D data. + In particular, such filters are used for the "horizontal" filtering parts in separable filters. + + Several functions in OpenCV return Ptr for the specific types of filters, + and those pointers can be used directly or within cv::FilterEngine. +*/ +class CV_EXPORTS BaseRowFilter +{ +public: + //! the default constructor + BaseRowFilter(); + //! the destructor + virtual ~BaseRowFilter(); + //! the filtering operator. Must be overrided in the derived classes. The horizontal border interpolation is done outside of the class. + virtual void operator()(const uchar* src, uchar* dst, + int width, int cn) = 0; + int ksize, anchor; +}; + + +/*! + The Base Class for Column-wise Filters + + This is the base class for linear or non-linear filters that process columns of 2D arrays. + Such filters are used for the "vertical" filtering parts in separable filters. + + Several functions in OpenCV return Ptr for the specific types of filters, + and those pointers can be used directly or within cv::FilterEngine. + + Unlike cv::BaseRowFilter, cv::BaseColumnFilter may have some context information, + i.e. box filter keeps the sliding sum of elements. To reset the state BaseColumnFilter::reset() + must be called (e.g. the method is called by cv::FilterEngine) + */ +class CV_EXPORTS BaseColumnFilter +{ +public: + //! the default constructor + BaseColumnFilter(); + //! 
the destructor + virtual ~BaseColumnFilter(); + //! the filtering operator. Must be overrided in the derived classes. The vertical border interpolation is done outside of the class. + virtual void operator()(const uchar** src, uchar* dst, int dststep, + int dstcount, int width) = 0; + //! resets the internal buffers, if any + virtual void reset(); + int ksize, anchor; +}; + +/*! + The Base Class for Non-Separable 2D Filters. + + This is the base class for linear or non-linear 2D filters. + + Several functions in OpenCV return Ptr for the specific types of filters, + and those pointers can be used directly or within cv::FilterEngine. + + Similar to cv::BaseColumnFilter, the class may have some context information, + that should be reset using BaseFilter::reset() method before processing the new array. +*/ +class CV_EXPORTS BaseFilter +{ +public: + //! the default constructor + BaseFilter(); + //! the destructor + virtual ~BaseFilter(); + //! the filtering operator. The horizontal and the vertical border interpolation is done outside of the class. + virtual void operator()(const uchar** src, uchar* dst, int dststep, + int dstcount, int width, int cn) = 0; + //! resets the internal buffers, if any + virtual void reset(); + Size ksize; + Point anchor; +}; + +/*! + The Main Class for Image Filtering. + + The class can be used to apply an arbitrary filtering operation to an image. + It contains all the necessary intermediate buffers, it computes extrapolated values + of the "virtual" pixels outside of the image etc. + Pointers to the initialized cv::FilterEngine instances + are returned by various OpenCV functions, such as cv::createSeparableLinearFilter(), + cv::createLinearFilter(), cv::createGaussianFilter(), cv::createDerivFilter(), + cv::createBoxFilter() and cv::createMorphologyFilter(). + + Using the class you can process large images by parts and build complex pipelines + that include filtering as some of the stages. If all you need is to apply some pre-defined + filtering operation, you may use cv::filter2D(), cv::erode(), cv::dilate() etc. + functions that create FilterEngine internally. + + Here is the example on how to use the class to implement Laplacian operator, which is the sum of + second-order derivatives. More complex variant for different types is implemented in cv::Laplacian(). + + \code + void laplace_f(const Mat& src, Mat& dst) + { + CV_Assert( src.type() == CV_32F ); + // make sure the destination array has the proper size and type + dst.create(src.size(), src.type()); + + // get the derivative and smooth kernels for d2I/dx2. + // for d2I/dy2 we could use the same kernels, just swapped + Mat kd, ks; + getSobelKernels( kd, ks, 2, 0, ksize, false, ktype ); + + // let's process 10 source rows at once + int DELTA = std::min(10, src.rows); + Ptr Fxx = createSeparableLinearFilter(src.type(), + dst.type(), kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() ); + Ptr Fyy = createSeparableLinearFilter(src.type(), + dst.type(), ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() ); + + int y = Fxx->start(src), dsty = 0, dy = 0; + Fyy->start(src); + const uchar* sptr = src.data + y*src.step; + + // allocate the buffers for the spatial image derivatives; + // the buffers need to have more than DELTA rows, because at the + // last iteration the output may take max(kd.rows-1,ks.rows-1) + // rows more than the input. 
+ Mat Ixx( DELTA + kd.rows - 1, src.cols, dst.type() ); + Mat Iyy( DELTA + kd.rows - 1, src.cols, dst.type() ); + + // inside the loop we always pass DELTA rows to the filter + // (note that the "proceed" method takes care of possibe overflow, since + // it was given the actual image height in the "start" method) + // on output we can get: + // * < DELTA rows (the initial buffer accumulation stage) + // * = DELTA rows (settled state in the middle) + // * > DELTA rows (then the input image is over, but we generate + // "virtual" rows using the border mode and filter them) + // this variable number of output rows is dy. + // dsty is the current output row. + // sptr is the pointer to the first input row in the portion to process + for( ; dsty < dst.rows; sptr += DELTA*src.step, dsty += dy ) + { + Fxx->proceed( sptr, (int)src.step, DELTA, Ixx.data, (int)Ixx.step ); + dy = Fyy->proceed( sptr, (int)src.step, DELTA, d2y.data, (int)Iyy.step ); + if( dy > 0 ) + { + Mat dstripe = dst.rowRange(dsty, dsty + dy); + add(Ixx.rowRange(0, dy), Iyy.rowRange(0, dy), dstripe); + } + } + } + \endcode +*/ +class CV_EXPORTS FilterEngine +{ +public: + //! the default constructor + FilterEngine(); + //! the full constructor. Either _filter2D or both _rowFilter and _columnFilter must be non-empty. + FilterEngine(const Ptr& _filter2D, + const Ptr& _rowFilter, + const Ptr& _columnFilter, + int srcType, int dstType, int bufType, + int _rowBorderType=BORDER_REPLICATE, + int _columnBorderType=-1, + const Scalar& _borderValue=Scalar()); + //! the destructor + virtual ~FilterEngine(); + //! reinitializes the engine. The previously assigned filters are released. + void init(const Ptr& _filter2D, + const Ptr& _rowFilter, + const Ptr& _columnFilter, + int srcType, int dstType, int bufType, + int _rowBorderType=BORDER_REPLICATE, int _columnBorderType=-1, + const Scalar& _borderValue=Scalar()); + //! starts filtering of the specified ROI of an image of size wholeSize. + virtual int start(Size wholeSize, Rect roi, int maxBufRows=-1); + //! starts filtering of the specified ROI of the specified image. + virtual int start(const Mat& src, const Rect& srcRoi=Rect(0,0,-1,-1), + bool isolated=false, int maxBufRows=-1); + //! processes the next srcCount rows of the image. + virtual int proceed(const uchar* src, int srcStep, int srcCount, + uchar* dst, int dstStep); + //! applies filter to the specified ROI of the image. if srcRoi=(0,0,-1,-1), the whole image is filtered. + virtual void apply( const Mat& src, Mat& dst, + const Rect& srcRoi=Rect(0,0,-1,-1), + Point dstOfs=Point(0,0), + bool isolated=false); + //! returns true if the filter is separable + bool isSeparable() const { return (const BaseFilter*)filter2D == 0; } + //! returns the number + int remainingInputRows() const; + int remainingOutputRows() const; + + int srcType, dstType, bufType; + Size ksize; + Point anchor; + int maxWidth; + Size wholeSize; + Rect roi; + int dx1, dx2; + int rowBorderType, columnBorderType; + vector borderTab; + int borderElemSize; + vector ringBuf; + vector srcRow; + vector constBorderValue; + vector constBorderRow; + int bufStep, startY, startY0, endY, rowCount, dstY; + vector rows; + + Ptr filter2D; + Ptr rowFilter; + Ptr columnFilter; +}; + +//! type of the kernel +enum { KERNEL_GENERAL=0, KERNEL_SYMMETRICAL=1, KERNEL_ASYMMETRICAL=2, + KERNEL_SMOOTH=4, KERNEL_INTEGER=8 }; + +//! returns type (one of KERNEL_*) of 1D or 2D kernel specified by its coefficients. +CV_EXPORTS int getKernelType(InputArray kernel, Point anchor); + +//! 
returns the primitive row filter with the specified kernel +CV_EXPORTS Ptr getLinearRowFilter(int srcType, int bufType, + InputArray kernel, int anchor, + int symmetryType); + +//! returns the primitive column filter with the specified kernel +CV_EXPORTS Ptr getLinearColumnFilter(int bufType, int dstType, + InputArray kernel, int anchor, + int symmetryType, double delta=0, + int bits=0); + +//! returns 2D filter with the specified kernel +CV_EXPORTS Ptr getLinearFilter(int srcType, int dstType, + InputArray kernel, + Point anchor=Point(-1,-1), + double delta=0, int bits=0); + +//! returns the separable linear filter engine +CV_EXPORTS Ptr createSeparableLinearFilter(int srcType, int dstType, + InputArray rowKernel, InputArray columnKernel, + Point anchor=Point(-1,-1), double delta=0, + int rowBorderType=BORDER_DEFAULT, + int columnBorderType=-1, + const Scalar& borderValue=Scalar()); + +//! returns the non-separable linear filter engine +CV_EXPORTS Ptr createLinearFilter(int srcType, int dstType, + InputArray kernel, Point _anchor=Point(-1,-1), + double delta=0, int rowBorderType=BORDER_DEFAULT, + int columnBorderType=-1, const Scalar& borderValue=Scalar()); + +//! returns the Gaussian kernel with the specified parameters +CV_EXPORTS_W Mat getGaussianKernel( int ksize, double sigma, int ktype=CV_64F ); + +//! returns the Gaussian filter engine +CV_EXPORTS Ptr createGaussianFilter( int type, Size ksize, + double sigma1, double sigma2=0, + int borderType=BORDER_DEFAULT); +//! initializes kernels of the generalized Sobel operator +CV_EXPORTS_W void getDerivKernels( OutputArray kx, OutputArray ky, + int dx, int dy, int ksize, + bool normalize=false, int ktype=CV_32F ); +//! returns filter engine for the generalized Sobel operator +CV_EXPORTS Ptr createDerivFilter( int srcType, int dstType, + int dx, int dy, int ksize, + int borderType=BORDER_DEFAULT ); +//! returns horizontal 1D box filter +CV_EXPORTS Ptr getRowSumFilter(int srcType, int sumType, + int ksize, int anchor=-1); +//! returns vertical 1D box filter +CV_EXPORTS Ptr getColumnSumFilter( int sumType, int dstType, + int ksize, int anchor=-1, + double scale=1); +//! returns box filter engine +CV_EXPORTS Ptr createBoxFilter( int srcType, int dstType, Size ksize, + Point anchor=Point(-1,-1), + bool normalize=true, + int borderType=BORDER_DEFAULT); + +//! returns the Gabor kernel with the specified parameters +CV_EXPORTS_W Mat getGaborKernel( Size ksize, double sigma, double theta, double lambd, + double gamma, double psi=CV_PI*0.5, int ktype=CV_64F ); + +//! type of morphological operation +enum { MORPH_ERODE=CV_MOP_ERODE, MORPH_DILATE=CV_MOP_DILATE, + MORPH_OPEN=CV_MOP_OPEN, MORPH_CLOSE=CV_MOP_CLOSE, + MORPH_GRADIENT=CV_MOP_GRADIENT, MORPH_TOPHAT=CV_MOP_TOPHAT, + MORPH_BLACKHAT=CV_MOP_BLACKHAT }; + +//! returns horizontal 1D morphological filter +CV_EXPORTS Ptr getMorphologyRowFilter(int op, int type, int ksize, int anchor=-1); +//! returns vertical 1D morphological filter +CV_EXPORTS Ptr getMorphologyColumnFilter(int op, int type, int ksize, int anchor=-1); +//! returns 2D morphological filter +CV_EXPORTS Ptr getMorphologyFilter(int op, int type, InputArray kernel, + Point anchor=Point(-1,-1)); + +//! returns "magic" border value for erosion and dilation. It is automatically transformed to Scalar::all(-DBL_MAX) for dilation. +static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); } + +//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported. 
+CV_EXPORTS Ptr createMorphologyFilter(int op, int type, InputArray kernel, + Point anchor=Point(-1,-1), int rowBorderType=BORDER_CONSTANT, + int columnBorderType=-1, + const Scalar& borderValue=morphologyDefaultBorderValue()); + +//! shape of the structuring element +enum { MORPH_RECT=0, MORPH_CROSS=1, MORPH_ELLIPSE=2 }; +//! returns structuring element of the specified shape and size +CV_EXPORTS_W Mat getStructuringElement(int shape, Size ksize, Point anchor=Point(-1,-1)); + +template<> CV_EXPORTS void Ptr::delete_obj(); + +//! copies 2D array to a larger destination array with extrapolation of the outer part of src using the specified border mode +CV_EXPORTS_W void copyMakeBorder( InputArray src, OutputArray dst, + int top, int bottom, int left, int right, + int borderType, const Scalar& value=Scalar() ); + +//! smooths the image using median filter. +CV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize ); +//! smooths the image using Gaussian filter. +CV_EXPORTS_W void GaussianBlur( InputArray src, + OutputArray dst, Size ksize, + double sigmaX, double sigmaY=0, + int borderType=BORDER_DEFAULT ); +//! smooths the image using bilateral filter +CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d, + double sigmaColor, double sigmaSpace, + int borderType=BORDER_DEFAULT ); +//! smooths the image using adaptive bilateral filter +CV_EXPORTS_W void adaptiveBilateralFilter( InputArray src, OutputArray dst, Size ksize, + double sigmaSpace, double maxSigmaColor = 20.0, Point anchor=Point(-1, -1), + int borderType=BORDER_DEFAULT ); +//! smooths the image using the box filter. Each pixel is processed in O(1) time +CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth, + Size ksize, Point anchor=Point(-1,-1), + bool normalize=true, + int borderType=BORDER_DEFAULT ); +//! a synonym for normalized box filter +CV_EXPORTS_W void blur( InputArray src, OutputArray dst, + Size ksize, Point anchor=Point(-1,-1), + int borderType=BORDER_DEFAULT ); + +//! applies non-separable 2D linear filter to the image +CV_EXPORTS_W void filter2D( InputArray src, OutputArray dst, int ddepth, + InputArray kernel, Point anchor=Point(-1,-1), + double delta=0, int borderType=BORDER_DEFAULT ); + +//! applies separable 2D linear filter to the image +CV_EXPORTS_W void sepFilter2D( InputArray src, OutputArray dst, int ddepth, + InputArray kernelX, InputArray kernelY, + Point anchor=Point(-1,-1), + double delta=0, int borderType=BORDER_DEFAULT ); + +//! applies generalized Sobel operator to the image +CV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth, + int dx, int dy, int ksize=3, + double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies the vertical or horizontal Scharr operator to the image +CV_EXPORTS_W void Scharr( InputArray src, OutputArray dst, int ddepth, + int dx, int dy, double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies Laplacian operator to the image +CV_EXPORTS_W void Laplacian( InputArray src, OutputArray dst, int ddepth, + int ksize=1, double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies Canny edge detector and produces the edge map. +CV_EXPORTS_W void Canny( InputArray image, OutputArray edges, + double threshold1, double threshold2, + int apertureSize=3, bool L2gradient=false ); + +//! 
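The convenience functions above hide the engine machinery entirely. A typical smooth-then-differentiate pipeline, with illustrative kernel sizes and thresholds, looks like this:

    #include <opencv2/imgproc/imgproc.hpp>

    void edgeDemo(const cv::Mat& gray)                       // gray: CV_8UC1, contents assumed
    {
        cv::Mat smooth, median, dx, edges;
        cv::GaussianBlur(gray, smooth, cv::Size(5, 5), 1.5); // sigmaY defaults to sigmaX
        cv::medianBlur(gray, median, 5);                     // rank-based alternative
        cv::Sobel(smooth, dx, CV_16S, 1, 0, 3);              // first derivative along x
        cv::Canny(smooth, edges, 50, 150);                   // low/high hysteresis thresholds
    }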
computes minimum eigen value of 2x2 derivative covariation matrix at each pixel - the cornerness criteria +CV_EXPORTS_W void cornerMinEigenVal( InputArray src, OutputArray dst, + int blockSize, int ksize=3, + int borderType=BORDER_DEFAULT ); + +//! computes Harris cornerness criteria at each image pixel +CV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize, + int ksize, double k, + int borderType=BORDER_DEFAULT ); + +// low-level function for computing eigenvalues and eigenvectors of 2x2 matrices +CV_EXPORTS void eigen2x2( const float* a, float* e, int n ); + +//! computes both eigenvalues and the eigenvectors of 2x2 derivative covariation matrix at each pixel. The output is stored as 6-channel matrix. +CV_EXPORTS_W void cornerEigenValsAndVecs( InputArray src, OutputArray dst, + int blockSize, int ksize, + int borderType=BORDER_DEFAULT ); + +//! computes another complex cornerness criteria at each pixel +CV_EXPORTS_W void preCornerDetect( InputArray src, OutputArray dst, int ksize, + int borderType=BORDER_DEFAULT ); + +//! adjusts the corner locations with sub-pixel accuracy to maximize the certain cornerness criteria +CV_EXPORTS_W void cornerSubPix( InputArray image, InputOutputArray corners, + Size winSize, Size zeroZone, + TermCriteria criteria ); + +//! finds the strong enough corners where the cornerMinEigenVal() or cornerHarris() report the local maxima +CV_EXPORTS_W void goodFeaturesToTrack( InputArray image, OutputArray corners, + int maxCorners, double qualityLevel, double minDistance, + InputArray mask=noArray(), int blockSize=3, + bool useHarrisDetector=false, double k=0.04 ); + +//! finds lines in the black-n-white image using the standard or pyramid Hough transform +CV_EXPORTS_W void HoughLines( InputArray image, OutputArray lines, + double rho, double theta, int threshold, + double srn=0, double stn=0 ); + +//! finds line segments in the black-n-white image using probabilistic Hough transform +CV_EXPORTS_W void HoughLinesP( InputArray image, OutputArray lines, + double rho, double theta, int threshold, + double minLineLength=0, double maxLineGap=0 ); + +//! finds circles in the grayscale image using 2+1 gradient Hough transform +CV_EXPORTS_W void HoughCircles( InputArray image, OutputArray circles, + int method, double dp, double minDist, + double param1=100, double param2=100, + int minRadius=0, int maxRadius=0 ); + +enum +{ + GHT_POSITION = 0, + GHT_SCALE = 1, + GHT_ROTATION = 2 +}; + +//! finds arbitrary template in the grayscale image using Generalized Hough Transform +//! Ballard, D.H. (1981). Generalizing the Hough transform to detect arbitrary shapes. Pattern Recognition 13 (2): 111-122. +//! Guil, N., González-Linares, J.M. and Zapata, E.L. (1999). Bidimensional shape detection using an invariant approach. Pattern Recognition 32 (6): 1025-1038. +class CV_EXPORTS GeneralizedHough : public Algorithm +{ +public: + static Ptr create(int method); + + virtual ~GeneralizedHough(); + + //! set template to search + void setTemplate(InputArray templ, int cannyThreshold = 100, Point templCenter = Point(-1, -1)); + void setTemplate(InputArray edges, InputArray dx, InputArray dy, Point templCenter = Point(-1, -1)); + + //! 
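A short sketch of the corner and line detectors declared above; corner count, quality level and Hough parameters are illustrative:

    #include <vector>
    #include <opencv2/imgproc/imgproc.hpp>

    void featureDemo(const cv::Mat& gray)                    // gray: CV_8UC1
    {
        std::vector<cv::Point2f> corners;
        cv::goodFeaturesToTrack(gray, corners, 100, 0.01, 10.0);

        cv::Mat edges;
        cv::Canny(gray, edges, 50, 150);
        std::vector<cv::Vec4i> segments;
        cv::HoughLinesP(edges, segments, 1.0, CV_PI / 180, 80, 30.0, 10.0);
    }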
find template on image + void detect(InputArray image, OutputArray positions, OutputArray votes = cv::noArray(), int cannyThreshold = 100); + void detect(InputArray edges, InputArray dx, InputArray dy, OutputArray positions, OutputArray votes = cv::noArray()); + + void release(); + +protected: + virtual void setTemplateImpl(const Mat& edges, const Mat& dx, const Mat& dy, Point templCenter) = 0; + virtual void detectImpl(const Mat& edges, const Mat& dx, const Mat& dy, OutputArray positions, OutputArray votes) = 0; + virtual void releaseImpl() = 0; + +private: + Mat edges_, dx_, dy_; +}; + +//! erodes the image (applies the local minimum operator) +CV_EXPORTS_W void erode( InputArray src, OutputArray dst, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! dilates the image (applies the local maximum operator) +CV_EXPORTS_W void dilate( InputArray src, OutputArray dst, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! applies an advanced morphological operation to the image +CV_EXPORTS_W void morphologyEx( InputArray src, OutputArray dst, + int op, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! interpolation algorithm +enum +{ + INTER_NEAREST=CV_INTER_NN, //!< nearest neighbor interpolation + INTER_LINEAR=CV_INTER_LINEAR, //!< bilinear interpolation + INTER_CUBIC=CV_INTER_CUBIC, //!< bicubic interpolation + INTER_AREA=CV_INTER_AREA, //!< area-based (or super) interpolation + INTER_LANCZOS4=CV_INTER_LANCZOS4, //!< Lanczos interpolation over 8x8 neighborhood + INTER_MAX=7, + WARP_INVERSE_MAP=CV_WARP_INVERSE_MAP +}; + +//! resizes the image +CV_EXPORTS_W void resize( InputArray src, OutputArray dst, + Size dsize, double fx=0, double fy=0, + int interpolation=INTER_LINEAR ); + +//! warps the image using affine transformation +CV_EXPORTS_W void warpAffine( InputArray src, OutputArray dst, + InputArray M, Size dsize, + int flags=INTER_LINEAR, + int borderMode=BORDER_CONSTANT, + const Scalar& borderValue=Scalar()); + +//! warps the image using perspective transformation +CV_EXPORTS_W void warpPerspective( InputArray src, OutputArray dst, + InputArray M, Size dsize, + int flags=INTER_LINEAR, + int borderMode=BORDER_CONSTANT, + const Scalar& borderValue=Scalar()); + +enum +{ + INTER_BITS=5, INTER_BITS2=INTER_BITS*2, + INTER_TAB_SIZE=(1< CV_EXPORTS void Ptr::delete_obj(); + +//! computes the joint dense histogram for a set of images. +CV_EXPORTS void calcHist( const Mat* images, int nimages, + const int* channels, InputArray mask, + OutputArray hist, int dims, const int* histSize, + const float** ranges, bool uniform=true, bool accumulate=false ); + +//! computes the joint sparse histogram for a set of images. +CV_EXPORTS void calcHist( const Mat* images, int nimages, + const int* channels, InputArray mask, + SparseMat& hist, int dims, + const int* histSize, const float** ranges, + bool uniform=true, bool accumulate=false ); + +CV_EXPORTS_W void calcHist( InputArrayOfArrays images, + const vector& channels, + InputArray mask, OutputArray hist, + const vector& histSize, + const vector& ranges, + bool accumulate=false ); + +//! 
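Erosion, dilation and the compound operations take a structuring element from getStructuringElement(); a minimal sketch that opens a binary mask and then downscales it (kernel shape and scale factor are illustrative):

    #include <opencv2/imgproc/imgproc.hpp>

    void morphResizeDemo(const cv::Mat& mask)                // mask: CV_8UC1 binary image
    {
        cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
        cv::Mat opened, half;
        cv::morphologyEx(mask, opened, cv::MORPH_OPEN, kernel);  // erode then dilate
        cv::resize(opened, half, cv::Size(), 0.5, 0.5, cv::INTER_AREA);
    }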
computes back projection for the set of images +CV_EXPORTS void calcBackProject( const Mat* images, int nimages, + const int* channels, InputArray hist, + OutputArray backProject, const float** ranges, + double scale=1, bool uniform=true ); + +//! computes back projection for the set of images +CV_EXPORTS void calcBackProject( const Mat* images, int nimages, + const int* channels, const SparseMat& hist, + OutputArray backProject, const float** ranges, + double scale=1, bool uniform=true ); + +CV_EXPORTS_W void calcBackProject( InputArrayOfArrays images, const vector& channels, + InputArray hist, OutputArray dst, + const vector& ranges, + double scale ); + +/*CV_EXPORTS void calcBackProjectPatch( const Mat* images, int nimages, const int* channels, + InputArray hist, OutputArray dst, Size patchSize, + int method, double factor=1 ); + +CV_EXPORTS_W void calcBackProjectPatch( InputArrayOfArrays images, const vector& channels, + InputArray hist, OutputArray dst, Size patchSize, + int method, double factor=1 );*/ + +//! compares two histograms stored in dense arrays +CV_EXPORTS_W double compareHist( InputArray H1, InputArray H2, int method ); + +//! compares two histograms stored in sparse arrays +CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int method ); + +//! normalizes the grayscale image brightness and contrast by normalizing its histogram +CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst ); + +class CV_EXPORTS_W CLAHE : public Algorithm +{ +public: + CV_WRAP virtual void apply(InputArray src, OutputArray dst) = 0; + + CV_WRAP virtual void setClipLimit(double clipLimit) = 0; + CV_WRAP virtual double getClipLimit() const = 0; + + CV_WRAP virtual void setTilesGridSize(Size tileGridSize) = 0; + CV_WRAP virtual Size getTilesGridSize() const = 0; + + CV_WRAP virtual void collectGarbage() = 0; +}; +CV_EXPORTS_W Ptr createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8)); + +CV_EXPORTS float EMD( InputArray signature1, InputArray signature2, + int distType, InputArray cost=noArray(), + float* lowerBound=0, OutputArray flow=noArray() ); + +//! segments the image using watershed algorithm +CV_EXPORTS_W void watershed( InputArray image, InputOutputArray markers ); + +//! filters image using meanshift algorithm +CV_EXPORTS_W void pyrMeanShiftFiltering( InputArray src, OutputArray dst, + double sp, double sr, int maxLevel=1, + TermCriteria termcrit=TermCriteria( + TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) ); + +//! class of the pixel in GrabCut algorithm +enum +{ + GC_BGD = 0, //!< background + GC_FGD = 1, //!< foreground + GC_PR_BGD = 2, //!< most probably background + GC_PR_FGD = 3 //!< most probably foreground +}; + +//! GrabCut algorithm flags +enum +{ + GC_INIT_WITH_RECT = 0, + GC_INIT_WITH_MASK = 1, + GC_EVAL = 2 +}; + +//! segments the image using GrabCut algorithm +CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect, + InputOutputArray bgdModel, InputOutputArray fgdModel, + int iterCount, int mode = GC_EVAL ); + +enum +{ + DIST_LABEL_CCOMP = 0, + DIST_LABEL_PIXEL = 1 +}; + +//! builds the discrete Voronoi diagram +CV_EXPORTS_AS(distanceTransformWithLabels) void distanceTransform( InputArray src, OutputArray dst, + OutputArray labels, int distanceType, int maskSize, + int labelType=DIST_LABEL_CCOMP ); + +//! 
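For the histogram-based contrast tools declared here, a minimal sketch contrasting plain equalization with CLAHE (clip limit and tile size are illustrative):

    #include <opencv2/imgproc/imgproc.hpp>

    void contrastDemo(const cv::Mat& gray)                   // gray: CV_8UC1
    {
        cv::Mat global, adaptive;
        cv::equalizeHist(gray, global);                      // global histogram equalization
        cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(2.0, cv::Size(8, 8));
        clahe->apply(gray, adaptive);                        // tile-wise, clip-limited variant
    }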
computes the distance transform map +CV_EXPORTS_W void distanceTransform( InputArray src, OutputArray dst, + int distanceType, int maskSize ); + +enum { FLOODFILL_FIXED_RANGE = 1 << 16, FLOODFILL_MASK_ONLY = 1 << 17 }; + +//! fills the semi-uniform image region starting from the specified seed point +CV_EXPORTS int floodFill( InputOutputArray image, + Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0, + Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), + int flags=4 ); + +//! fills the semi-uniform image region and/or the mask starting from the specified seed point +CV_EXPORTS_W int floodFill( InputOutputArray image, InputOutputArray mask, + Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0, + Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), + int flags=4 ); + + +enum +{ + COLOR_BGR2BGRA =0, + COLOR_RGB2RGBA =COLOR_BGR2BGRA, + + COLOR_BGRA2BGR =1, + COLOR_RGBA2RGB =COLOR_BGRA2BGR, + + COLOR_BGR2RGBA =2, + COLOR_RGB2BGRA =COLOR_BGR2RGBA, + + COLOR_RGBA2BGR =3, + COLOR_BGRA2RGB =COLOR_RGBA2BGR, + + COLOR_BGR2RGB =4, + COLOR_RGB2BGR =COLOR_BGR2RGB, + + COLOR_BGRA2RGBA =5, + COLOR_RGBA2BGRA =COLOR_BGRA2RGBA, + + COLOR_BGR2GRAY =6, + COLOR_RGB2GRAY =7, + COLOR_GRAY2BGR =8, + COLOR_GRAY2RGB =COLOR_GRAY2BGR, + COLOR_GRAY2BGRA =9, + COLOR_GRAY2RGBA =COLOR_GRAY2BGRA, + COLOR_BGRA2GRAY =10, + COLOR_RGBA2GRAY =11, + + COLOR_BGR2BGR565 =12, + COLOR_RGB2BGR565 =13, + COLOR_BGR5652BGR =14, + COLOR_BGR5652RGB =15, + COLOR_BGRA2BGR565 =16, + COLOR_RGBA2BGR565 =17, + COLOR_BGR5652BGRA =18, + COLOR_BGR5652RGBA =19, + + COLOR_GRAY2BGR565 =20, + COLOR_BGR5652GRAY =21, + + COLOR_BGR2BGR555 =22, + COLOR_RGB2BGR555 =23, + COLOR_BGR5552BGR =24, + COLOR_BGR5552RGB =25, + COLOR_BGRA2BGR555 =26, + COLOR_RGBA2BGR555 =27, + COLOR_BGR5552BGRA =28, + COLOR_BGR5552RGBA =29, + + COLOR_GRAY2BGR555 =30, + COLOR_BGR5552GRAY =31, + + COLOR_BGR2XYZ =32, + COLOR_RGB2XYZ =33, + COLOR_XYZ2BGR =34, + COLOR_XYZ2RGB =35, + + COLOR_BGR2YCrCb =36, + COLOR_RGB2YCrCb =37, + COLOR_YCrCb2BGR =38, + COLOR_YCrCb2RGB =39, + + COLOR_BGR2HSV =40, + COLOR_RGB2HSV =41, + + COLOR_BGR2Lab =44, + COLOR_RGB2Lab =45, + + COLOR_BayerBG2BGR =46, + COLOR_BayerGB2BGR =47, + COLOR_BayerRG2BGR =48, + COLOR_BayerGR2BGR =49, + + COLOR_BayerBG2RGB =COLOR_BayerRG2BGR, + COLOR_BayerGB2RGB =COLOR_BayerGR2BGR, + COLOR_BayerRG2RGB =COLOR_BayerBG2BGR, + COLOR_BayerGR2RGB =COLOR_BayerGB2BGR, + + COLOR_BGR2Luv =50, + COLOR_RGB2Luv =51, + COLOR_BGR2HLS =52, + COLOR_RGB2HLS =53, + + COLOR_HSV2BGR =54, + COLOR_HSV2RGB =55, + + COLOR_Lab2BGR =56, + COLOR_Lab2RGB =57, + COLOR_Luv2BGR =58, + COLOR_Luv2RGB =59, + COLOR_HLS2BGR =60, + COLOR_HLS2RGB =61, + + COLOR_BayerBG2BGR_VNG =62, + COLOR_BayerGB2BGR_VNG =63, + COLOR_BayerRG2BGR_VNG =64, + COLOR_BayerGR2BGR_VNG =65, + + COLOR_BayerBG2RGB_VNG =COLOR_BayerRG2BGR_VNG, + COLOR_BayerGB2RGB_VNG =COLOR_BayerGR2BGR_VNG, + COLOR_BayerRG2RGB_VNG =COLOR_BayerBG2BGR_VNG, + COLOR_BayerGR2RGB_VNG =COLOR_BayerGB2BGR_VNG, + + COLOR_BGR2HSV_FULL = 66, + COLOR_RGB2HSV_FULL = 67, + COLOR_BGR2HLS_FULL = 68, + COLOR_RGB2HLS_FULL = 69, + + COLOR_HSV2BGR_FULL = 70, + COLOR_HSV2RGB_FULL = 71, + COLOR_HLS2BGR_FULL = 72, + COLOR_HLS2RGB_FULL = 73, + + COLOR_LBGR2Lab = 74, + COLOR_LRGB2Lab = 75, + COLOR_LBGR2Luv = 76, + COLOR_LRGB2Luv = 77, + + COLOR_Lab2LBGR = 78, + COLOR_Lab2LRGB = 79, + COLOR_Luv2LBGR = 80, + COLOR_Luv2LRGB = 81, + + COLOR_BGR2YUV = 82, + COLOR_RGB2YUV = 83, + COLOR_YUV2BGR = 84, + COLOR_YUV2RGB = 85, + + COLOR_BayerBG2GRAY = 86, + COLOR_BayerGB2GRAY = 87, + COLOR_BayerRG2GRAY = 88, + COLOR_BayerGR2GRAY = 89, + + //YUV 4:2:0 
formats family + COLOR_YUV2RGB_NV12 = 90, + COLOR_YUV2BGR_NV12 = 91, + COLOR_YUV2RGB_NV21 = 92, + COLOR_YUV2BGR_NV21 = 93, + COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21, + COLOR_YUV420sp2BGR = COLOR_YUV2BGR_NV21, + + COLOR_YUV2RGBA_NV12 = 94, + COLOR_YUV2BGRA_NV12 = 95, + COLOR_YUV2RGBA_NV21 = 96, + COLOR_YUV2BGRA_NV21 = 97, + COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21, + COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21, + + COLOR_YUV2RGB_YV12 = 98, + COLOR_YUV2BGR_YV12 = 99, + COLOR_YUV2RGB_IYUV = 100, + COLOR_YUV2BGR_IYUV = 101, + COLOR_YUV2RGB_I420 = COLOR_YUV2RGB_IYUV, + COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV, + COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12, + COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12, + + COLOR_YUV2RGBA_YV12 = 102, + COLOR_YUV2BGRA_YV12 = 103, + COLOR_YUV2RGBA_IYUV = 104, + COLOR_YUV2BGRA_IYUV = 105, + COLOR_YUV2RGBA_I420 = COLOR_YUV2RGBA_IYUV, + COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV, + COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12, + COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12, + + COLOR_YUV2GRAY_420 = 106, + COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_YV12 = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_IYUV = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420, + COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420, + COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420, + + //YUV 4:2:2 formats family + COLOR_YUV2RGB_UYVY = 107, + COLOR_YUV2BGR_UYVY = 108, + //COLOR_YUV2RGB_VYUY = 109, + //COLOR_YUV2BGR_VYUY = 110, + COLOR_YUV2RGB_Y422 = COLOR_YUV2RGB_UYVY, + COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY, + COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY, + COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY, + + COLOR_YUV2RGBA_UYVY = 111, + COLOR_YUV2BGRA_UYVY = 112, + //COLOR_YUV2RGBA_VYUY = 113, + //COLOR_YUV2BGRA_VYUY = 114, + COLOR_YUV2RGBA_Y422 = COLOR_YUV2RGBA_UYVY, + COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY, + COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY, + COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY, + + COLOR_YUV2RGB_YUY2 = 115, + COLOR_YUV2BGR_YUY2 = 116, + COLOR_YUV2RGB_YVYU = 117, + COLOR_YUV2BGR_YVYU = 118, + COLOR_YUV2RGB_YUYV = COLOR_YUV2RGB_YUY2, + COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2, + COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2, + COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2, + + COLOR_YUV2RGBA_YUY2 = 119, + COLOR_YUV2BGRA_YUY2 = 120, + COLOR_YUV2RGBA_YVYU = 121, + COLOR_YUV2BGRA_YVYU = 122, + COLOR_YUV2RGBA_YUYV = COLOR_YUV2RGBA_YUY2, + COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2, + COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2, + COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2, + + COLOR_YUV2GRAY_UYVY = 123, + COLOR_YUV2GRAY_YUY2 = 124, + //COLOR_YUV2GRAY_VYUY = COLOR_YUV2GRAY_UYVY, + COLOR_YUV2GRAY_Y422 = COLOR_YUV2GRAY_UYVY, + COLOR_YUV2GRAY_UYNV = COLOR_YUV2GRAY_UYVY, + COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2, + COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2, + COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2, + + // alpha premultiplication + COLOR_RGBA2mRGBA = 125, + COLOR_mRGBA2RGBA = 126, + + COLOR_RGB2YUV_I420 = 127, + COLOR_BGR2YUV_I420 = 128, + COLOR_RGB2YUV_IYUV = COLOR_RGB2YUV_I420, + COLOR_BGR2YUV_IYUV = COLOR_BGR2YUV_I420, + + COLOR_RGBA2YUV_I420 = 129, + COLOR_BGRA2YUV_I420 = 130, + COLOR_RGBA2YUV_IYUV = COLOR_RGBA2YUV_I420, + COLOR_BGRA2YUV_IYUV = COLOR_BGRA2YUV_I420, + COLOR_RGB2YUV_YV12 = 131, + COLOR_BGR2YUV_YV12 = 132, + COLOR_RGBA2YUV_YV12 = 133, + COLOR_BGRA2YUV_YV12 = 134, + + COLOR_COLORCVT_MAX = 135 +}; + + +//! converts image from one color space to another +CV_EXPORTS_W void cvtColor( InputArray src, OutputArray dst, int code, int dstCn=0 ); + +//! 
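The conversion codes above are consumed by cvtColor(), declared at the end of this block. A minimal sketch assuming an 8-bit BGR input (OpenCV's default channel order):

    #include <opencv2/imgproc/imgproc.hpp>

    void colorDemo(const cv::Mat& bgr)                       // bgr: CV_8UC3
    {
        cv::Mat gray, hsv;
        cv::cvtColor(bgr, gray, cv::COLOR_BGR2GRAY);
        cv::cvtColor(bgr, hsv,  cv::COLOR_BGR2HSV);          // hue range is [0,180) for 8-bit images
    }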
raster image moments +class CV_EXPORTS_W_MAP Moments +{ +public: + //! the default constructor + Moments(); + //! the full constructor + Moments(double m00, double m10, double m01, double m20, double m11, + double m02, double m30, double m21, double m12, double m03 ); + //! the conversion from CvMoments + Moments( const CvMoments& moments ); + //! the conversion to CvMoments + operator CvMoments() const; + + //! spatial moments + CV_PROP_RW double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; + //! central moments + CV_PROP_RW double mu20, mu11, mu02, mu30, mu21, mu12, mu03; + //! central normalized moments + CV_PROP_RW double nu20, nu11, nu02, nu30, nu21, nu12, nu03; +}; + +//! computes moments of the rasterized shape or a vector of points +CV_EXPORTS_W Moments moments( InputArray array, bool binaryImage=false ); + +//! computes 7 Hu invariants from the moments +CV_EXPORTS void HuMoments( const Moments& moments, double hu[7] ); +CV_EXPORTS_W void HuMoments( const Moments& m, CV_OUT OutputArray hu ); + +//! type of the template matching operation +enum { TM_SQDIFF=0, TM_SQDIFF_NORMED=1, TM_CCORR=2, TM_CCORR_NORMED=3, TM_CCOEFF=4, TM_CCOEFF_NORMED=5 }; + +//! computes the proximity map for the raster template and the image where the template is searched for +CV_EXPORTS_W void matchTemplate( InputArray image, InputArray templ, + OutputArray result, int method ); + +//! mode of the contour retrieval algorithm +enum +{ + RETR_EXTERNAL=CV_RETR_EXTERNAL, //!< retrieve only the most external (top-level) contours + RETR_LIST=CV_RETR_LIST, //!< retrieve all the contours without any hierarchical information + RETR_CCOMP=CV_RETR_CCOMP, //!< retrieve the connected components (that can possibly be nested) + RETR_TREE=CV_RETR_TREE, //!< retrieve all the contours and the whole hierarchy + RETR_FLOODFILL=CV_RETR_FLOODFILL +}; + +//! the contour approximation algorithm +enum +{ + CHAIN_APPROX_NONE=CV_CHAIN_APPROX_NONE, + CHAIN_APPROX_SIMPLE=CV_CHAIN_APPROX_SIMPLE, + CHAIN_APPROX_TC89_L1=CV_CHAIN_APPROX_TC89_L1, + CHAIN_APPROX_TC89_KCOS=CV_CHAIN_APPROX_TC89_KCOS +}; + +//! retrieves contours and the hierarchical information from black-n-white image. +CV_EXPORTS_W void findContours( InputOutputArray image, OutputArrayOfArrays contours, + OutputArray hierarchy, int mode, + int method, Point offset=Point()); + +//! retrieves contours from black-n-white image. +CV_EXPORTS void findContours( InputOutputArray image, OutputArrayOfArrays contours, + int mode, int method, Point offset=Point()); + +//! draws contours in the image +CV_EXPORTS_W void drawContours( InputOutputArray image, InputArrayOfArrays contours, + int contourIdx, const Scalar& color, + int thickness=1, int lineType=8, + InputArray hierarchy=noArray(), + int maxLevel=INT_MAX, Point offset=Point() ); + +//! approximates contour or a curve using Douglas-Peucker algorithm +CV_EXPORTS_W void approxPolyDP( InputArray curve, + OutputArray approxCurve, + double epsilon, bool closed ); + +//! computes the contour perimeter (closed=true) or a curve length +CV_EXPORTS_W double arcLength( InputArray curve, bool closed ); +//! computes the bounding rectangle for a contour +CV_EXPORTS_W Rect boundingRect( InputArray points ); +//! computes the contour area +CV_EXPORTS_W double contourArea( InputArray contour, bool oriented=false ); +//! computes the minimal rotated rectangle for a set of points +CV_EXPORTS_W RotatedRect minAreaRect( InputArray points ); +//! 
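A sketch of the contour pipeline declared above: extract the external contours of a binary image and query a few per-contour measures. The void casts only silence unused-variable warnings:

    #include <vector>
    #include <opencv2/imgproc/imgproc.hpp>

    void contourDemo(cv::Mat binary)                         // CV_8UC1; findContours modifies its input
    {
        std::vector<std::vector<cv::Point> > contours;
        std::vector<cv::Vec4i> hierarchy;
        cv::findContours(binary, contours, hierarchy,
                         cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
        for (size_t i = 0; i < contours.size(); ++i)
        {
            double area   = cv::contourArea(contours[i]);
            cv::Rect box  = cv::boundingRect(contours[i]);
            cv::Moments m = cv::moments(contours[i]);
            (void)area; (void)box; (void)m;
        }
    }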
computes the minimal enclosing circle for a set of points +CV_EXPORTS_W void minEnclosingCircle( InputArray points, + CV_OUT Point2f& center, CV_OUT float& radius ); +//! matches two contours using one of the available algorithms +CV_EXPORTS_W double matchShapes( InputArray contour1, InputArray contour2, + int method, double parameter ); +//! computes convex hull for a set of 2D points. +CV_EXPORTS_W void convexHull( InputArray points, OutputArray hull, + bool clockwise=false, bool returnPoints=true ); +//! computes the contour convexity defects +CV_EXPORTS_W void convexityDefects( InputArray contour, InputArray convexhull, OutputArray convexityDefects ); + +//! returns true if the contour is convex. Does not support contours with self-intersection +CV_EXPORTS_W bool isContourConvex( InputArray contour ); + +//! finds intersection of two convex polygons +CV_EXPORTS_W float intersectConvexConvex( InputArray _p1, InputArray _p2, + OutputArray _p12, bool handleNested=true ); + +//! fits ellipse to the set of 2D points +CV_EXPORTS_W RotatedRect fitEllipse( InputArray points ); + +//! fits line to the set of 2D points using M-estimator algorithm +CV_EXPORTS_W void fitLine( InputArray points, OutputArray line, int distType, + double param, double reps, double aeps ); +//! checks if the point is inside the contour. Optionally computes the signed distance from the point to the contour boundary +CV_EXPORTS_W double pointPolygonTest( InputArray contour, Point2f pt, bool measureDist ); + + +class CV_EXPORTS_W Subdiv2D +{ +public: + enum + { + PTLOC_ERROR = -2, + PTLOC_OUTSIDE_RECT = -1, + PTLOC_INSIDE = 0, + PTLOC_VERTEX = 1, + PTLOC_ON_EDGE = 2 + }; + + enum + { + NEXT_AROUND_ORG = 0x00, + NEXT_AROUND_DST = 0x22, + PREV_AROUND_ORG = 0x11, + PREV_AROUND_DST = 0x33, + NEXT_AROUND_LEFT = 0x13, + NEXT_AROUND_RIGHT = 0x31, + PREV_AROUND_LEFT = 0x20, + PREV_AROUND_RIGHT = 0x02 + }; + + CV_WRAP Subdiv2D(); + CV_WRAP Subdiv2D(Rect rect); + CV_WRAP void initDelaunay(Rect rect); + + CV_WRAP int insert(Point2f pt); + CV_WRAP void insert(const vector& ptvec); + CV_WRAP int locate(Point2f pt, CV_OUT int& edge, CV_OUT int& vertex); + + CV_WRAP int findNearest(Point2f pt, CV_OUT Point2f* nearestPt=0); + CV_WRAP void getEdgeList(CV_OUT vector& edgeList) const; + CV_WRAP void getTriangleList(CV_OUT vector& triangleList) const; + CV_WRAP void getVoronoiFacetList(const vector& idx, CV_OUT vector >& facetList, + CV_OUT vector& facetCenters); + + CV_WRAP Point2f getVertex(int vertex, CV_OUT int* firstEdge=0) const; + + CV_WRAP int getEdge( int edge, int nextEdgeType ) const; + CV_WRAP int nextEdge(int edge) const; + CV_WRAP int rotateEdge(int edge, int rotate) const; + CV_WRAP int symEdge(int edge) const; + CV_WRAP int edgeOrg(int edge, CV_OUT Point2f* orgpt=0) const; + CV_WRAP int edgeDst(int edge, CV_OUT Point2f* dstpt=0) const; + +protected: + int newEdge(); + void deleteEdge(int edge); + int newPoint(Point2f pt, bool isvirtual, int firstEdge=0); + void deletePoint(int vtx); + void setEdgePoints( int edge, int orgPt, int dstPt ); + void splice( int edgeA, int edgeB ); + int connectEdges( int edgeA, int edgeB ); + void swapEdges( int edge ); + int isRightOf(Point2f pt, int edge) const; + void calcVoronoi(); + void clearVoronoi(); + void checkSubdiv() const; + + struct CV_EXPORTS Vertex + { + Vertex(); + Vertex(Point2f pt, bool _isvirtual, int _firstEdge=0); + bool isvirtual() const; + bool isfree() const; + int firstEdge; + int type; + Point2f pt; + }; + struct CV_EXPORTS QuadEdge + { + QuadEdge(); + QuadEdge(int 
edgeidx); + bool isfree() const; + int next[4]; + int pt[4]; + }; + + vector vtx; + vector qedges; + int freeQEdge; + int freePoint; + bool validGeometry; + + int recentEdge; + Point2f topLeft; + Point2f bottomRight; +}; + +} + +#endif /* __cplusplus */ + +#endif + +/* End of file. */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/imgproc/imgproc_c.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/imgproc/imgproc_c.h new file mode 100644 index 0000000..46d9f01 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/imgproc/imgproc_c.h @@ -0,0 +1,623 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_IMGPROC_IMGPROC_C_H__ +#define __OPENCV_IMGPROC_IMGPROC_C_H__ + +#include "opencv2/core/core_c.h" +#include "opencv2/imgproc/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*********************** Background statistics accumulation *****************************/ + +/* Adds image to accumulator */ +CVAPI(void) cvAcc( const CvArr* image, CvArr* sum, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds squared image to accumulator */ +CVAPI(void) cvSquareAcc( const CvArr* image, CvArr* sqsum, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds a product of two images to accumulator */ +CVAPI(void) cvMultiplyAcc( const CvArr* image1, const CvArr* image2, CvArr* acc, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds image to accumulator with weights: acc = acc*(1-alpha) + image*alpha */ +CVAPI(void) cvRunningAvg( const CvArr* image, CvArr* acc, double alpha, + const CvArr* mask CV_DEFAULT(NULL) ); + +/****************************************************************************************\ +* Image Processing * +\****************************************************************************************/ + +/* Copies source 2D array inside of the larger destination array and + makes a border of the specified type (IPL_BORDER_*) around the copied area. */ +CVAPI(void) cvCopyMakeBorder( const CvArr* src, CvArr* dst, CvPoint offset, + int bordertype, CvScalar value CV_DEFAULT(cvScalarAll(0))); + +/* Smoothes array (removes noise) */ +CVAPI(void) cvSmooth( const CvArr* src, CvArr* dst, + int smoothtype CV_DEFAULT(CV_GAUSSIAN), + int size1 CV_DEFAULT(3), + int size2 CV_DEFAULT(0), + double sigma1 CV_DEFAULT(0), + double sigma2 CV_DEFAULT(0)); + +/* Convolves the image with the kernel */ +CVAPI(void) cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel, + CvPoint anchor CV_DEFAULT(cvPoint(-1,-1))); + +/* Finds integral image: SUM(X,Y) = sum(x. + After that sum of histogram bins is equal to */ +CVAPI(void) cvNormalizeHist( CvHistogram* hist, double factor ); + + +/* Clear all histogram bins that are below the threshold */ +CVAPI(void) cvThreshHist( CvHistogram* hist, double threshold ); + + +/* Compares two histogram */ +CVAPI(double) cvCompareHist( const CvHistogram* hist1, + const CvHistogram* hist2, + int method); + +/* Copies one histogram to another. 
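A minimal sketch of the accumulation API above, updating a running-average background model; alpha is illustrative and the accumulator must be a floating-point image:

    #include <opencv2/core/core_c.h>
    #include <opencv2/imgproc/imgproc_c.h>

    void runningAverageDemo(IplImage* frame, IplImage* acc)  // acc: IPL_DEPTH_32F, same size as frame
    {
        /* acc = acc*(1-alpha) + frame*alpha */
        cvRunningAvg(frame, acc, 0.05);
    }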
Destination histogram is created if + the destination pointer is NULL */ +CVAPI(void) cvCopyHist( const CvHistogram* src, CvHistogram** dst ); + + +/* Calculates bayesian probabilistic histograms + (each or src and dst is an array of histograms */ +CVAPI(void) cvCalcBayesianProb( CvHistogram** src, int number, + CvHistogram** dst); + +/* Calculates array histogram */ +CVAPI(void) cvCalcArrHist( CvArr** arr, CvHistogram* hist, + int accumulate CV_DEFAULT(0), + const CvArr* mask CV_DEFAULT(NULL) ); + +CV_INLINE void cvCalcHist( IplImage** image, CvHistogram* hist, + int accumulate CV_DEFAULT(0), + const CvArr* mask CV_DEFAULT(NULL) ) +{ + cvCalcArrHist( (CvArr**)image, hist, accumulate, mask ); +} + +/* Calculates back project */ +CVAPI(void) cvCalcArrBackProject( CvArr** image, CvArr* dst, + const CvHistogram* hist ); +#define cvCalcBackProject(image, dst, hist) cvCalcArrBackProject((CvArr**)image, dst, hist) + + +/* Does some sort of template matching but compares histograms of + template and each window location */ +CVAPI(void) cvCalcArrBackProjectPatch( CvArr** image, CvArr* dst, CvSize range, + CvHistogram* hist, int method, + double factor ); +#define cvCalcBackProjectPatch( image, dst, range, hist, method, factor ) \ + cvCalcArrBackProjectPatch( (CvArr**)image, dst, range, hist, method, factor ) + + +/* calculates probabilistic density (divides one histogram by another) */ +CVAPI(void) cvCalcProbDensity( const CvHistogram* hist1, const CvHistogram* hist2, + CvHistogram* dst_hist, double scale CV_DEFAULT(255) ); + +/* equalizes histogram of 8-bit single-channel image */ +CVAPI(void) cvEqualizeHist( const CvArr* src, CvArr* dst ); + + +/* Applies distance transform to binary image */ +CVAPI(void) cvDistTransform( const CvArr* src, CvArr* dst, + int distance_type CV_DEFAULT(CV_DIST_L2), + int mask_size CV_DEFAULT(3), + const float* mask CV_DEFAULT(NULL), + CvArr* labels CV_DEFAULT(NULL), + int labelType CV_DEFAULT(CV_DIST_LABEL_CCOMP)); + + +/* Applies fixed-level threshold to grayscale image. + This is a basic operation applied before retrieving contours */ +CVAPI(double) cvThreshold( const CvArr* src, CvArr* dst, + double threshold, double max_value, + int threshold_type ); + +/* Applies adaptive threshold to grayscale image. + The two parameters for methods CV_ADAPTIVE_THRESH_MEAN_C and + CV_ADAPTIVE_THRESH_GAUSSIAN_C are: + neighborhood size (3, 5, 7 etc.), + and a constant subtracted from mean (...,-3,-2,-1,0,1,2,3,...) 
*/ +CVAPI(void) cvAdaptiveThreshold( const CvArr* src, CvArr* dst, double max_value, + int adaptive_method CV_DEFAULT(CV_ADAPTIVE_THRESH_MEAN_C), + int threshold_type CV_DEFAULT(CV_THRESH_BINARY), + int block_size CV_DEFAULT(3), + double param1 CV_DEFAULT(5)); + +/* Fills the connected component until the color difference gets large enough */ +CVAPI(void) cvFloodFill( CvArr* image, CvPoint seed_point, + CvScalar new_val, CvScalar lo_diff CV_DEFAULT(cvScalarAll(0)), + CvScalar up_diff CV_DEFAULT(cvScalarAll(0)), + CvConnectedComp* comp CV_DEFAULT(NULL), + int flags CV_DEFAULT(4), + CvArr* mask CV_DEFAULT(NULL)); + +/****************************************************************************************\ +* Feature detection * +\****************************************************************************************/ + +/* Runs canny edge detector */ +CVAPI(void) cvCanny( const CvArr* image, CvArr* edges, double threshold1, + double threshold2, int aperture_size CV_DEFAULT(3) ); + +/* Calculates constraint image for corner detection + Dx^2 * Dyy + Dxx * Dy^2 - 2 * Dx * Dy * Dxy. + Applying threshold to the result gives coordinates of corners */ +CVAPI(void) cvPreCornerDetect( const CvArr* image, CvArr* corners, + int aperture_size CV_DEFAULT(3) ); + +/* Calculates eigen values and vectors of 2x2 + gradient covariation matrix at every image pixel */ +CVAPI(void) cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv, + int block_size, int aperture_size CV_DEFAULT(3) ); + +/* Calculates minimal eigenvalue for 2x2 gradient covariation matrix at + every image pixel */ +CVAPI(void) cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval, + int block_size, int aperture_size CV_DEFAULT(3) ); + +/* Harris corner detector: + Calculates det(M) - k*(trace(M)^2), where M is 2x2 gradient covariation matrix for each pixel */ +CVAPI(void) cvCornerHarris( const CvArr* image, CvArr* harris_response, + int block_size, int aperture_size CV_DEFAULT(3), + double k CV_DEFAULT(0.04) ); + +/* Adjust corner position using some sort of gradient search */ +CVAPI(void) cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners, + int count, CvSize win, CvSize zero_zone, + CvTermCriteria criteria ); + +/* Finds a sparse set of points within the selected region + that seem to be easy to track */ +CVAPI(void) cvGoodFeaturesToTrack( const CvArr* image, CvArr* eig_image, + CvArr* temp_image, CvPoint2D32f* corners, + int* corner_count, double quality_level, + double min_distance, + const CvArr* mask CV_DEFAULT(NULL), + int block_size CV_DEFAULT(3), + int use_harris CV_DEFAULT(0), + double k CV_DEFAULT(0.04) ); + +/* Finds lines on binary image using one of several methods. + line_storage is either memory storage or 1 x CvMat, its + number of columns is changed by the function. 
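These C-style routines operate on CvArr*/IplImage* rather than cv::Mat. A minimal sketch (illustrative thresholds) of calling the legacy threshold and Canny functions declared above:

    #include <opencv2/core/core_c.h>
    #include <opencv2/imgproc/imgproc_c.h>

    void legacyEdgeDemo(IplImage* gray)                      // single-channel 8-bit input
    {
        IplImage* bw    = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);
        IplImage* edges = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);
        cvThreshold(gray, bw, 128, 255, CV_THRESH_BINARY);
        cvCanny(gray, edges, 50, 150, 3);
        cvReleaseImage(&bw);
        cvReleaseImage(&edges);
    }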
+ method is one of CV_HOUGH_*; + rho, theta and threshold are used for each of those methods; + param1 ~ line length, param2 ~ line gap - for probabilistic, + param1 ~ srn, param2 ~ stn - for multi-scale */ +CVAPI(CvSeq*) cvHoughLines2( CvArr* image, void* line_storage, int method, + double rho, double theta, int threshold, + double param1 CV_DEFAULT(0), double param2 CV_DEFAULT(0)); + +/* Finds circles in the image */ +CVAPI(CvSeq*) cvHoughCircles( CvArr* image, void* circle_storage, + int method, double dp, double min_dist, + double param1 CV_DEFAULT(100), + double param2 CV_DEFAULT(100), + int min_radius CV_DEFAULT(0), + int max_radius CV_DEFAULT(0)); + +/* Fits a line into set of 2d or 3d points in a robust way (M-estimator technique) */ +CVAPI(void) cvFitLine( const CvArr* points, int dist_type, double param, + double reps, double aeps, float* line ); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/imgproc/types_c.h b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/imgproc/types_c.h new file mode 100644 index 0000000..4aba0a8 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/imgproc/types_c.h @@ -0,0 +1,640 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_IMGPROC_TYPES_C_H__ +#define __OPENCV_IMGPROC_TYPES_C_H__ + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Connected component structure */ +typedef struct CvConnectedComp +{ + double area; /* area of the connected component */ + CvScalar value; /* average color of the connected component */ + CvRect rect; /* ROI of the component */ + CvSeq* contour; /* optional component boundary + (the contour might have child contours corresponding to the holes)*/ +} +CvConnectedComp; + +/* Image smooth methods */ +enum +{ + CV_BLUR_NO_SCALE =0, + CV_BLUR =1, + CV_GAUSSIAN =2, + CV_MEDIAN =3, + CV_BILATERAL =4 +}; + +/* Filters used in pyramid decomposition */ +enum +{ + CV_GAUSSIAN_5x5 = 7 +}; + +/* Special filters */ +enum +{ + CV_SCHARR =-1, + CV_MAX_SOBEL_KSIZE =7 +}; + +/* Constants for color conversion */ +enum +{ + CV_BGR2BGRA =0, + CV_RGB2RGBA =CV_BGR2BGRA, + + CV_BGRA2BGR =1, + CV_RGBA2RGB =CV_BGRA2BGR, + + CV_BGR2RGBA =2, + CV_RGB2BGRA =CV_BGR2RGBA, + + CV_RGBA2BGR =3, + CV_BGRA2RGB =CV_RGBA2BGR, + + CV_BGR2RGB =4, + CV_RGB2BGR =CV_BGR2RGB, + + CV_BGRA2RGBA =5, + CV_RGBA2BGRA =CV_BGRA2RGBA, + + CV_BGR2GRAY =6, + CV_RGB2GRAY =7, + CV_GRAY2BGR =8, + CV_GRAY2RGB =CV_GRAY2BGR, + CV_GRAY2BGRA =9, + CV_GRAY2RGBA =CV_GRAY2BGRA, + CV_BGRA2GRAY =10, + CV_RGBA2GRAY =11, + + CV_BGR2BGR565 =12, + CV_RGB2BGR565 =13, + CV_BGR5652BGR =14, + CV_BGR5652RGB =15, + CV_BGRA2BGR565 =16, + CV_RGBA2BGR565 =17, + CV_BGR5652BGRA =18, + CV_BGR5652RGBA =19, + + CV_GRAY2BGR565 =20, + CV_BGR5652GRAY =21, + + CV_BGR2BGR555 =22, + CV_RGB2BGR555 =23, + CV_BGR5552BGR =24, + CV_BGR5552RGB =25, + CV_BGRA2BGR555 =26, + CV_RGBA2BGR555 =27, + CV_BGR5552BGRA =28, + CV_BGR5552RGBA =29, + + CV_GRAY2BGR555 =30, + CV_BGR5552GRAY =31, + + CV_BGR2XYZ =32, + CV_RGB2XYZ =33, + CV_XYZ2BGR =34, + CV_XYZ2RGB =35, + + CV_BGR2YCrCb =36, + CV_RGB2YCrCb =37, + CV_YCrCb2BGR =38, + CV_YCrCb2RGB =39, + + CV_BGR2HSV =40, + CV_RGB2HSV =41, + + CV_BGR2Lab =44, + CV_RGB2Lab =45, + + CV_BayerBG2BGR =46, + CV_BayerGB2BGR =47, + CV_BayerRG2BGR =48, + CV_BayerGR2BGR =49, + + CV_BayerBG2RGB =CV_BayerRG2BGR, + CV_BayerGB2RGB =CV_BayerGR2BGR, + CV_BayerRG2RGB =CV_BayerBG2BGR, + CV_BayerGR2RGB =CV_BayerGB2BGR, + + CV_BGR2Luv =50, + CV_RGB2Luv =51, + CV_BGR2HLS =52, + CV_RGB2HLS =53, + + CV_HSV2BGR =54, + CV_HSV2RGB =55, + + CV_Lab2BGR =56, + CV_Lab2RGB =57, + CV_Luv2BGR =58, + CV_Luv2RGB =59, + CV_HLS2BGR =60, + CV_HLS2RGB =61, + + CV_BayerBG2BGR_VNG =62, + CV_BayerGB2BGR_VNG =63, + CV_BayerRG2BGR_VNG =64, + CV_BayerGR2BGR_VNG =65, + + CV_BayerBG2RGB_VNG =CV_BayerRG2BGR_VNG, + CV_BayerGB2RGB_VNG =CV_BayerGR2BGR_VNG, + CV_BayerRG2RGB_VNG =CV_BayerBG2BGR_VNG, + CV_BayerGR2RGB_VNG =CV_BayerGB2BGR_VNG, + + CV_BGR2HSV_FULL = 66, + CV_RGB2HSV_FULL = 67, + CV_BGR2HLS_FULL = 68, + CV_RGB2HLS_FULL = 69, + + CV_HSV2BGR_FULL = 70, + CV_HSV2RGB_FULL = 71, + CV_HLS2BGR_FULL = 72, + CV_HLS2RGB_FULL = 73, + + CV_LBGR2Lab = 74, + CV_LRGB2Lab = 75, + CV_LBGR2Luv = 76, + CV_LRGB2Luv = 77, + + CV_Lab2LBGR = 78, + CV_Lab2LRGB = 79, + CV_Luv2LBGR = 80, + CV_Luv2LRGB = 81, + + CV_BGR2YUV = 82, + CV_RGB2YUV = 83, + CV_YUV2BGR = 84, + CV_YUV2RGB = 85, + + CV_BayerBG2GRAY = 86, + CV_BayerGB2GRAY = 87, + CV_BayerRG2GRAY = 88, + CV_BayerGR2GRAY = 89, + + //YUV 4:2:0 formats family + CV_YUV2RGB_NV12 = 90, + CV_YUV2BGR_NV12 = 91, + CV_YUV2RGB_NV21 = 92, + CV_YUV2BGR_NV21 = 93, + CV_YUV420sp2RGB = CV_YUV2RGB_NV21, + CV_YUV420sp2BGR = CV_YUV2BGR_NV21, + + CV_YUV2RGBA_NV12 = 94, + CV_YUV2BGRA_NV12 = 95, + CV_YUV2RGBA_NV21 = 
96, + CV_YUV2BGRA_NV21 = 97, + CV_YUV420sp2RGBA = CV_YUV2RGBA_NV21, + CV_YUV420sp2BGRA = CV_YUV2BGRA_NV21, + + CV_YUV2RGB_YV12 = 98, + CV_YUV2BGR_YV12 = 99, + CV_YUV2RGB_IYUV = 100, + CV_YUV2BGR_IYUV = 101, + CV_YUV2RGB_I420 = CV_YUV2RGB_IYUV, + CV_YUV2BGR_I420 = CV_YUV2BGR_IYUV, + CV_YUV420p2RGB = CV_YUV2RGB_YV12, + CV_YUV420p2BGR = CV_YUV2BGR_YV12, + + CV_YUV2RGBA_YV12 = 102, + CV_YUV2BGRA_YV12 = 103, + CV_YUV2RGBA_IYUV = 104, + CV_YUV2BGRA_IYUV = 105, + CV_YUV2RGBA_I420 = CV_YUV2RGBA_IYUV, + CV_YUV2BGRA_I420 = CV_YUV2BGRA_IYUV, + CV_YUV420p2RGBA = CV_YUV2RGBA_YV12, + CV_YUV420p2BGRA = CV_YUV2BGRA_YV12, + + CV_YUV2GRAY_420 = 106, + CV_YUV2GRAY_NV21 = CV_YUV2GRAY_420, + CV_YUV2GRAY_NV12 = CV_YUV2GRAY_420, + CV_YUV2GRAY_YV12 = CV_YUV2GRAY_420, + CV_YUV2GRAY_IYUV = CV_YUV2GRAY_420, + CV_YUV2GRAY_I420 = CV_YUV2GRAY_420, + CV_YUV420sp2GRAY = CV_YUV2GRAY_420, + CV_YUV420p2GRAY = CV_YUV2GRAY_420, + + //YUV 4:2:2 formats family + CV_YUV2RGB_UYVY = 107, + CV_YUV2BGR_UYVY = 108, + //CV_YUV2RGB_VYUY = 109, + //CV_YUV2BGR_VYUY = 110, + CV_YUV2RGB_Y422 = CV_YUV2RGB_UYVY, + CV_YUV2BGR_Y422 = CV_YUV2BGR_UYVY, + CV_YUV2RGB_UYNV = CV_YUV2RGB_UYVY, + CV_YUV2BGR_UYNV = CV_YUV2BGR_UYVY, + + CV_YUV2RGBA_UYVY = 111, + CV_YUV2BGRA_UYVY = 112, + //CV_YUV2RGBA_VYUY = 113, + //CV_YUV2BGRA_VYUY = 114, + CV_YUV2RGBA_Y422 = CV_YUV2RGBA_UYVY, + CV_YUV2BGRA_Y422 = CV_YUV2BGRA_UYVY, + CV_YUV2RGBA_UYNV = CV_YUV2RGBA_UYVY, + CV_YUV2BGRA_UYNV = CV_YUV2BGRA_UYVY, + + CV_YUV2RGB_YUY2 = 115, + CV_YUV2BGR_YUY2 = 116, + CV_YUV2RGB_YVYU = 117, + CV_YUV2BGR_YVYU = 118, + CV_YUV2RGB_YUYV = CV_YUV2RGB_YUY2, + CV_YUV2BGR_YUYV = CV_YUV2BGR_YUY2, + CV_YUV2RGB_YUNV = CV_YUV2RGB_YUY2, + CV_YUV2BGR_YUNV = CV_YUV2BGR_YUY2, + + CV_YUV2RGBA_YUY2 = 119, + CV_YUV2BGRA_YUY2 = 120, + CV_YUV2RGBA_YVYU = 121, + CV_YUV2BGRA_YVYU = 122, + CV_YUV2RGBA_YUYV = CV_YUV2RGBA_YUY2, + CV_YUV2BGRA_YUYV = CV_YUV2BGRA_YUY2, + CV_YUV2RGBA_YUNV = CV_YUV2RGBA_YUY2, + CV_YUV2BGRA_YUNV = CV_YUV2BGRA_YUY2, + + CV_YUV2GRAY_UYVY = 123, + CV_YUV2GRAY_YUY2 = 124, + //CV_YUV2GRAY_VYUY = CV_YUV2GRAY_UYVY, + CV_YUV2GRAY_Y422 = CV_YUV2GRAY_UYVY, + CV_YUV2GRAY_UYNV = CV_YUV2GRAY_UYVY, + CV_YUV2GRAY_YVYU = CV_YUV2GRAY_YUY2, + CV_YUV2GRAY_YUYV = CV_YUV2GRAY_YUY2, + CV_YUV2GRAY_YUNV = CV_YUV2GRAY_YUY2, + + // alpha premultiplication + CV_RGBA2mRGBA = 125, + CV_mRGBA2RGBA = 126, + + CV_RGB2YUV_I420 = 127, + CV_BGR2YUV_I420 = 128, + CV_RGB2YUV_IYUV = CV_RGB2YUV_I420, + CV_BGR2YUV_IYUV = CV_BGR2YUV_I420, + + CV_RGBA2YUV_I420 = 129, + CV_BGRA2YUV_I420 = 130, + CV_RGBA2YUV_IYUV = CV_RGBA2YUV_I420, + CV_BGRA2YUV_IYUV = CV_BGRA2YUV_I420, + CV_RGB2YUV_YV12 = 131, + CV_BGR2YUV_YV12 = 132, + CV_RGBA2YUV_YV12 = 133, + CV_BGRA2YUV_YV12 = 134, + + CV_COLORCVT_MAX = 135 +}; + + +/* Sub-pixel interpolation methods */ +enum +{ + CV_INTER_NN =0, + CV_INTER_LINEAR =1, + CV_INTER_CUBIC =2, + CV_INTER_AREA =3, + CV_INTER_LANCZOS4 =4 +}; + +/* ... 
and other image warping flags */ +enum +{ + CV_WARP_FILL_OUTLIERS =8, + CV_WARP_INVERSE_MAP =16 +}; + +/* Shapes of a structuring element for morphological operations */ +enum +{ + CV_SHAPE_RECT =0, + CV_SHAPE_CROSS =1, + CV_SHAPE_ELLIPSE =2, + CV_SHAPE_CUSTOM =100 +}; + +/* Morphological operations */ +enum +{ + CV_MOP_ERODE =0, + CV_MOP_DILATE =1, + CV_MOP_OPEN =2, + CV_MOP_CLOSE =3, + CV_MOP_GRADIENT =4, + CV_MOP_TOPHAT =5, + CV_MOP_BLACKHAT =6 +}; + +/* Spatial and central moments */ +typedef struct CvMoments +{ + double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; /* spatial moments */ + double mu20, mu11, mu02, mu30, mu21, mu12, mu03; /* central moments */ + double inv_sqrt_m00; /* m00 != 0 ? 1/sqrt(m00) : 0 */ +} +CvMoments; + +/* Hu invariants */ +typedef struct CvHuMoments +{ + double hu1, hu2, hu3, hu4, hu5, hu6, hu7; /* Hu invariants */ +} +CvHuMoments; + +/* Template matching methods */ +enum +{ + CV_TM_SQDIFF =0, + CV_TM_SQDIFF_NORMED =1, + CV_TM_CCORR =2, + CV_TM_CCORR_NORMED =3, + CV_TM_CCOEFF =4, + CV_TM_CCOEFF_NORMED =5 +}; + +typedef float (CV_CDECL * CvDistanceFunction)( const float* a, const float* b, void* user_param ); + +/* Contour retrieval modes */ +enum +{ + CV_RETR_EXTERNAL=0, + CV_RETR_LIST=1, + CV_RETR_CCOMP=2, + CV_RETR_TREE=3, + CV_RETR_FLOODFILL=4 +}; + +/* Contour approximation methods */ +enum +{ + CV_CHAIN_CODE=0, + CV_CHAIN_APPROX_NONE=1, + CV_CHAIN_APPROX_SIMPLE=2, + CV_CHAIN_APPROX_TC89_L1=3, + CV_CHAIN_APPROX_TC89_KCOS=4, + CV_LINK_RUNS=5 +}; + +/* +Internal structure that is used for sequental retrieving contours from the image. +It supports both hierarchical and plane variants of Suzuki algorithm. +*/ +typedef struct _CvContourScanner* CvContourScanner; + +/* Freeman chain reader state */ +typedef struct CvChainPtReader +{ + CV_SEQ_READER_FIELDS() + char code; + CvPoint pt; + schar deltas[8][2]; +} +CvChainPtReader; + +/* initializes 8-element array for fast access to 3x3 neighborhood of a pixel */ +#define CV_INIT_3X3_DELTAS( deltas, step, nch ) \ + ((deltas)[0] = (nch), (deltas)[1] = -(step) + (nch), \ + (deltas)[2] = -(step), (deltas)[3] = -(step) - (nch), \ + (deltas)[4] = -(nch), (deltas)[5] = (step) - (nch), \ + (deltas)[6] = (step), (deltas)[7] = (step) + (nch)) + + +/****************************************************************************************\ +* Planar subdivisions * +\****************************************************************************************/ + +typedef size_t CvSubdiv2DEdge; + +#define CV_QUADEDGE2D_FIELDS() \ + int flags; \ + struct CvSubdiv2DPoint* pt[4]; \ + CvSubdiv2DEdge next[4]; + +#define CV_SUBDIV2D_POINT_FIELDS()\ + int flags; \ + CvSubdiv2DEdge first; \ + CvPoint2D32f pt; \ + int id; + +#define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30) + +typedef struct CvQuadEdge2D +{ + CV_QUADEDGE2D_FIELDS() +} +CvQuadEdge2D; + +typedef struct CvSubdiv2DPoint +{ + CV_SUBDIV2D_POINT_FIELDS() +} +CvSubdiv2DPoint; + +#define CV_SUBDIV2D_FIELDS() \ + CV_GRAPH_FIELDS() \ + int quad_edges; \ + int is_geometry_valid; \ + CvSubdiv2DEdge recent_edge; \ + CvPoint2D32f topleft; \ + CvPoint2D32f bottomright; + +typedef struct CvSubdiv2D +{ + CV_SUBDIV2D_FIELDS() +} +CvSubdiv2D; + + +typedef enum CvSubdiv2DPointLocation +{ + CV_PTLOC_ERROR = -2, + CV_PTLOC_OUTSIDE_RECT = -1, + CV_PTLOC_INSIDE = 0, + CV_PTLOC_VERTEX = 1, + CV_PTLOC_ON_EDGE = 2 +} +CvSubdiv2DPointLocation; + +typedef enum CvNextEdgeType +{ + CV_NEXT_AROUND_ORG = 0x00, + CV_NEXT_AROUND_DST = 0x22, + CV_PREV_AROUND_ORG = 0x11, + CV_PREV_AROUND_DST = 0x33, + 
CV_NEXT_AROUND_LEFT = 0x13, + CV_NEXT_AROUND_RIGHT = 0x31, + CV_PREV_AROUND_LEFT = 0x20, + CV_PREV_AROUND_RIGHT = 0x02 +} +CvNextEdgeType; + +/* get the next edge with the same origin point (counterwise) */ +#define CV_SUBDIV2D_NEXT_EDGE( edge ) (((CvQuadEdge2D*)((edge) & ~3))->next[(edge)&3]) + + +/* Contour approximation algorithms */ +enum +{ + CV_POLY_APPROX_DP = 0 +}; + +/* Shape matching methods */ +enum +{ + CV_CONTOURS_MATCH_I1 =1, + CV_CONTOURS_MATCH_I2 =2, + CV_CONTOURS_MATCH_I3 =3 +}; + +/* Shape orientation */ +enum +{ + CV_CLOCKWISE =1, + CV_COUNTER_CLOCKWISE =2 +}; + + +/* Convexity defect */ +typedef struct CvConvexityDefect +{ + CvPoint* start; /* point of the contour where the defect begins */ + CvPoint* end; /* point of the contour where the defect ends */ + CvPoint* depth_point; /* the farthest from the convex hull point within the defect */ + float depth; /* distance between the farthest point and the convex hull */ +} CvConvexityDefect; + + +/* Histogram comparison methods */ +enum +{ + CV_COMP_CORREL =0, + CV_COMP_CHISQR =1, + CV_COMP_INTERSECT =2, + CV_COMP_BHATTACHARYYA =3, + CV_COMP_HELLINGER =CV_COMP_BHATTACHARYYA +}; + +/* Mask size for distance transform */ +enum +{ + CV_DIST_MASK_3 =3, + CV_DIST_MASK_5 =5, + CV_DIST_MASK_PRECISE =0 +}; + +/* Content of output label array: connected components or pixels */ +enum +{ + CV_DIST_LABEL_CCOMP = 0, + CV_DIST_LABEL_PIXEL = 1 +}; + +/* Distance types for Distance Transform and M-estimators */ +enum +{ + CV_DIST_USER =-1, /* User defined distance */ + CV_DIST_L1 =1, /* distance = |x1-x2| + |y1-y2| */ + CV_DIST_L2 =2, /* the simple euclidean distance */ + CV_DIST_C =3, /* distance = max(|x1-x2|,|y1-y2|) */ + CV_DIST_L12 =4, /* L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) */ + CV_DIST_FAIR =5, /* distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 */ + CV_DIST_WELSCH =6, /* distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 */ + CV_DIST_HUBER =7 /* distance = |x| threshold ? max_value : 0 */ + CV_THRESH_BINARY_INV =1, /* value = value > threshold ? 0 : max_value */ + CV_THRESH_TRUNC =2, /* value = value > threshold ? threshold : value */ + CV_THRESH_TOZERO =3, /* value = value > threshold ? value : 0 */ + CV_THRESH_TOZERO_INV =4, /* value = value > threshold ? 
0 : value */ + CV_THRESH_MASK =7, + CV_THRESH_OTSU =8 /* use Otsu algorithm to choose the optimal threshold value; + combine the flag with one of the above CV_THRESH_* values */ +}; + +/* Adaptive threshold methods */ +enum +{ + CV_ADAPTIVE_THRESH_MEAN_C =0, + CV_ADAPTIVE_THRESH_GAUSSIAN_C =1 +}; + +/* FloodFill flags */ +enum +{ + CV_FLOODFILL_FIXED_RANGE =(1 << 16), + CV_FLOODFILL_MASK_ONLY =(1 << 17) +}; + + +/* Canny edge detector flags */ +enum +{ + CV_CANNY_L2_GRADIENT =(1 << 31) +}; + +/* Variants of a Hough transform */ +enum +{ + CV_HOUGH_STANDARD =0, + CV_HOUGH_PROBABILISTIC =1, + CV_HOUGH_MULTI_SCALE =2, + CV_HOUGH_GRADIENT =3 +}; + + +/* Fast search data structures */ +struct CvFeatureTree; +struct CvLSH; +struct CvLSHOperations; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/legacy/blobtrack.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/legacy/blobtrack.hpp new file mode 100644 index 0000000..496b8be --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/legacy/blobtrack.hpp @@ -0,0 +1,948 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + + +#ifndef __OPENCV_VIDEOSURVEILLANCE_H__ +#define __OPENCV_VIDEOSURVEILLANCE_H__ + +/* Turn off the functionality until cvaux/src/Makefile.am gets updated: */ +//#if _MSC_VER >= 1200 + +#include "opencv2/core/core_c.h" +#include + +#if (defined _MSC_VER && _MSC_VER >= 1200) || defined __BORLANDC__ +#define cv_stricmp stricmp +#define cv_strnicmp strnicmp +#if defined WINCE +#define strdup _strdup +#define stricmp _stricmp +#endif +#elif defined __GNUC__ || defined __sun +#define cv_stricmp strcasecmp +#define cv_strnicmp strncasecmp +#else +#error Do not know how to make case-insensitive string comparison on this platform +#endif + +//struct DefParam; +struct CvDefParam +{ + struct CvDefParam* next; + char* pName; + char* pComment; + double* pDouble; + double Double; + float* pFloat; + float Float; + int* pInt; + int Int; + char** pStr; + char* Str; +}; + +class CV_EXPORTS CvVSModule +{ +private: /* Internal data: */ + CvDefParam* m_pParamList; + char* m_pModuleTypeName; + char* m_pModuleName; + char* m_pNickName; +protected: + int m_Wnd; +public: /* Constructor and destructor: */ + CvVSModule(); + virtual ~CvVSModule(); +private: /* Internal functions: */ + void FreeParam(CvDefParam** pp); + CvDefParam* NewParam(const char* name); + CvDefParam* GetParamPtr(int index); + CvDefParam* GetParamPtr(const char* name); +protected: /* INTERNAL INTERFACE */ + int IsParam(const char* name); + void AddParam(const char* name, double* pAddr); + void AddParam(const char* name, float* pAddr); + void AddParam(const char* name, int* pAddr); + void AddParam(const char* name, const char** pAddr); + void AddParam(const char* name); + void CommentParam(const char* name, const char* pComment); + void SetTypeName(const char* name); + void SetModuleName(const char* name); + void DelParam(const char* name); + +public: /* EXTERNAL INTERFACE */ + const char* GetParamName(int index); + const char* GetParamComment(const char* name); + double GetParam(const char* name); + const char* GetParamStr(const char* name); + void SetParam(const char* name, double val); + void SetParamStr(const char* name, const char* str); + void TransferParamsFromChild(CvVSModule* pM, const char* prefix = NULL); + void TransferParamsToChild(CvVSModule* pM, char* prefix = NULL); + virtual void ParamUpdate(); + const char* GetTypeName(); + int IsModuleTypeName(const char* name); + char* GetModuleName(); + int IsModuleName(const char* name); + void SetNickName(const char* pStr); + const char* GetNickName(); + virtual void SaveState(CvFileStorage*); + virtual void LoadState(CvFileStorage*, CvFileNode*); + + virtual void Release() = 0; +};/* CvVMModule */ + +CV_EXPORTS void cvWriteStruct(CvFileStorage* fs, const char* name, void* addr, const char* desc, int num=1); +CV_EXPORTS void cvReadStructByName(CvFileStorage* fs, CvFileNode* node, const char* name, void* addr, const char* desc); + +/* FOREGROUND DETECTOR INTERFACE */ +class CV_EXPORTS CvFGDetector : public CvVSModule +{ +public: + CvFGDetector(); + virtual IplImage* GetMask() = 0; + /* Process current image: */ + virtual void Process(IplImage* pImg) = 0; + /* Release foreground detector: */ + virtual void Release() = 0; +}; + +CV_EXPORTS void cvReleaseFGDetector(CvFGDetector** ppT ); +CV_EXPORTS CvFGDetector* cvCreateFGDetectorBase(int type, void *param); + + +/* BLOB STRUCTURE*/ +struct CvBlob +{ + float x,y; /* blob position */ + float w,h; /* blob sizes */ + int ID; /* blob ID */ +}; + +inline CvBlob cvBlob(float x,float y, float w, float h) +{ + CvBlob B = {x,y,w,h,0}; + 
 return B;
+}
+#define CV_BLOB_MINW 5
+#define CV_BLOB_MINH 5
+#define CV_BLOB_ID(pB) (((CvBlob*)(pB))->ID)
+#define CV_BLOB_CENTER(pB) cvPoint2D32f(((CvBlob*)(pB))->x,((CvBlob*)(pB))->y)
+#define CV_BLOB_X(pB) (((CvBlob*)(pB))->x)
+#define CV_BLOB_Y(pB) (((CvBlob*)(pB))->y)
+#define CV_BLOB_WX(pB) (((CvBlob*)(pB))->w)
+#define CV_BLOB_WY(pB) (((CvBlob*)(pB))->h)
+#define CV_BLOB_RX(pB) (0.5f*CV_BLOB_WX(pB))
+#define CV_BLOB_RY(pB) (0.5f*CV_BLOB_WY(pB))
+#define CV_BLOB_RECT(pB) cvRect(cvRound(((CvBlob*)(pB))->x-CV_BLOB_RX(pB)),cvRound(((CvBlob*)(pB))->y-CV_BLOB_RY(pB)),cvRound(CV_BLOB_WX(pB)),cvRound(CV_BLOB_WY(pB)))
+/* END BLOB STRUCTURE*/
+
+
+/* simple BLOBLIST */
+class CV_EXPORTS CvBlobSeq
+{
+public:
+    CvBlobSeq(int BlobSize = sizeof(CvBlob))
+    {
+        m_pMem = cvCreateMemStorage();
+        m_pSeq = cvCreateSeq(0,sizeof(CvSeq),BlobSize,m_pMem);
+        strcpy(m_pElemFormat,"ffffi");
+    }
+    virtual ~CvBlobSeq()
+    {
+        cvReleaseMemStorage(&m_pMem);
+    };
+    virtual CvBlob* GetBlob(int BlobIndex)
+    {
+        return (CvBlob*)cvGetSeqElem(m_pSeq,BlobIndex);
+    };
+    virtual CvBlob* GetBlobByID(int BlobID)
+    {
+        int i;
+        for(i=0; i<m_pSeq->total; ++i)
+            if(BlobID == CV_BLOB_ID(GetBlob(i)))
+                return GetBlob(i);
+        return NULL;
+    };
+    virtual void DelBlob(int BlobIndex)
+    {
+        cvSeqRemove(m_pSeq,BlobIndex);
+    };
+    virtual void DelBlobByID(int BlobID)
+    {
+        int i;
+        for(i=0; i<m_pSeq->total; ++i)
+        {
+            if(BlobID == CV_BLOB_ID(GetBlob(i)))
+            {
+                DelBlob(i);
+                return;
+            }
+        }
+    };
+    virtual void Clear()
+    {
+        cvClearSeq(m_pSeq);
+    };
+    virtual void AddBlob(CvBlob* pB)
+    {
+        cvSeqPush(m_pSeq,pB);
+    };
+    virtual int GetBlobNum()
+    {
+        return m_pSeq->total;
+    };
+    virtual void Write(CvFileStorage* fs, const char* name)
+    {
+        const char* attr[] = {"dt",m_pElemFormat,NULL};
+        if(fs)
+        {
+            cvWrite(fs,name,m_pSeq,cvAttrList(attr,NULL));
+        }
+    }
+    virtual void Load(CvFileStorage* fs, CvFileNode* node)
+    {
+        if(fs==NULL) return;
+        CvSeq* pSeq = (CvSeq*)cvRead(fs, node);
+        if(pSeq)
+        {
+            int i;
+            cvClearSeq(m_pSeq);
+            for(i=0;i<pSeq->total;++i)
+            {
+                void* pB = cvGetSeqElem( pSeq, i );
+                cvSeqPush( m_pSeq, pB );
+            }
+        }
+    }
+    void AddFormat(const char* str){strcat(m_pElemFormat,str);}
+protected:
+    CvMemStorage* m_pMem;
+    CvSeq* m_pSeq;
+    char m_pElemFormat[1024];
+};
+/* simple BLOBLIST */
+
+
+/* simple TRACKLIST */
+struct CvBlobTrack
+{
+    int TrackID;
+    int StartFrame;
+    CvBlobSeq* pBlobSeq;
+};
+
+class CV_EXPORTS CvBlobTrackSeq
+{
+public:
+    CvBlobTrackSeq(int TrackSize = sizeof(CvBlobTrack));
+    virtual ~CvBlobTrackSeq();
+    virtual CvBlobTrack* GetBlobTrack(int TrackIndex);
+    virtual CvBlobTrack* GetBlobTrackByID(int TrackID);
+    virtual void DelBlobTrack(int TrackIndex);
+    virtual void DelBlobTrackByID(int TrackID);
+    virtual void Clear();
+    virtual void AddBlobTrack(int TrackID, int StartFrame = 0);
+    virtual int GetBlobTrackNum();
+protected:
+    CvMemStorage* m_pMem;
+    CvSeq* m_pSeq;
+};
+
+/* simple TRACKLIST */
+
+
+/* BLOB DETECTOR INTERFACE */
+class CV_EXPORTS CvBlobDetector: public CvVSModule
+{
+public:
+    CvBlobDetector(){SetTypeName("BlobDetector");};
+    /* Try to detect new blob entrance based on foreground mask.
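// --- Editorial usage sketch (added for illustration; not part of this patch or of the OpenCV header) ---
// CvBlobSeq, defined just above, is the container the rest of this API trades in:
// detectors fill it with newly entered blobs and trackers read it back. A minimal
// sketch using only the declarations above; the coordinates and the ID value 7 are
// arbitrary placeholders.
void blob_list_demo()
{
    CvBlobSeq blobs;                              // owns its own CvMemStorage
    CvBlob b = cvBlob(100.f, 80.f, 24.f, 32.f);   // center (x,y) and size (w,h)
    b.ID = 7;
    blobs.AddBlob(&b);                            // the element is copied into the sequence
    for(int i = 0; i < blobs.GetBlobNum(); ++i)
    {
        CvBlob* pB = blobs.GetBlob(i);
        CvRect r = CV_BLOB_RECT(pB);              // center/size converted to a bounding box
        (void)r;
    }
    blobs.DelBlobByID(7);                         // remove by ID rather than by index
}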
*/ + /* pFGMask - image of foreground mask */ + /* pNewBlob - pointer to CvBlob structure which will be filled if new blob entrance detected */ + /* pOldBlobList - pointer to blob list which already exist on image */ + virtual int DetectNewBlob(IplImage* pImg, IplImage* pImgFG, CvBlobSeq* pNewBlobList, CvBlobSeq* pOldBlobList) = 0; + /* release blob detector */ + virtual void Release()=0; +}; + +/* Release any blob detector: */ +CV_EXPORTS void cvReleaseBlobDetector(CvBlobDetector** ppBD); + +/* Declarations of constructors of implemented modules: */ +CV_EXPORTS CvBlobDetector* cvCreateBlobDetectorSimple(); +CV_EXPORTS CvBlobDetector* cvCreateBlobDetectorCC(); + +struct CV_EXPORTS CvDetectedBlob : public CvBlob +{ + float response; +}; + +CV_INLINE CvDetectedBlob cvDetectedBlob( float x, float y, float w, float h, int ID = 0, float response = 0.0F ) +{ + CvDetectedBlob b; + b.x = x; b.y = y; b.w = w; b.h = h; b.ID = ID; b.response = response; + return b; +} + + +class CV_EXPORTS CvObjectDetector +{ +public: + CvObjectDetector( const char* /*detector_file_name*/ = 0 ); + ~CvObjectDetector(); + + /* + * Release the current detector and load new detector from file + * (if detector_file_name is not 0) + * Return true on success: + */ + bool Load( const char* /*detector_file_name*/ = 0 ); + + /* Return min detector window size: */ + CvSize GetMinWindowSize() const; + + /* Return max border: */ + int GetMaxBorderSize() const; + + /* + * Detect the object on the image and push the detected + * blobs into which must be the sequence of s + */ + void Detect( const CvArr* /*img*/, /* out */ CvBlobSeq* /*detected_blob_seq*/ = 0 ); + +protected: + class CvObjectDetectorImpl* impl; +}; + + +CV_INLINE CvRect cvRectIntersection( const CvRect r1, const CvRect r2 ) +{ + CvRect r = cvRect( MAX(r1.x, r2.x), MAX(r1.y, r2.y), 0, 0 ); + + r.width = MIN(r1.x + r1.width, r2.x + r2.width) - r.x; + r.height = MIN(r1.y + r1.height, r2.y + r2.height) - r.y; + + return r; +} + + +/* + * CvImageDrawer + * + * Draw on an image the specified ROIs from the source image and + * given blobs as ellipses or rectangles: + */ + +struct CvDrawShape +{ + enum {RECT, ELLIPSE} shape; + CvScalar color; +}; + +/*extern const CvDrawShape icv_shape[] = +{ + { CvDrawShape::ELLIPSE, CV_RGB(255,0,0) }, + { CvDrawShape::ELLIPSE, CV_RGB(0,255,0) }, + { CvDrawShape::ELLIPSE, CV_RGB(0,0,255) }, + { CvDrawShape::ELLIPSE, CV_RGB(255,255,0) }, + { CvDrawShape::ELLIPSE, CV_RGB(0,255,255) }, + { CvDrawShape::ELLIPSE, CV_RGB(255,0,255) } +};*/ + +class CV_EXPORTS CvImageDrawer +{ +public: + CvImageDrawer() : m_image(0) {} + ~CvImageDrawer() { cvReleaseImage( &m_image ); } + void SetShapes( const CvDrawShape* shapes, int num ); + /* must be the sequence of s */ + IplImage* Draw( const CvArr* src, CvBlobSeq* blob_seq = 0, const CvSeq* roi_seq = 0 ); + IplImage* GetImage() { return m_image; } +protected: + //static const int MAX_SHAPES = sizeof(icv_shape) / sizeof(icv_shape[0]);; + + IplImage* m_image; + CvDrawShape m_shape[16]; +}; + + + +/* Trajectory generation module: */ +class CV_EXPORTS CvBlobTrackGen: public CvVSModule +{ +public: + CvBlobTrackGen(){SetTypeName("BlobTrackGen");}; + virtual void SetFileName(char* pFileName) = 0; + virtual void AddBlob(CvBlob* pBlob) = 0; + virtual void Process(IplImage* pImg = NULL, IplImage* pFG = NULL) = 0; + virtual void Release() = 0; +}; + +inline void cvReleaseBlobTrackGen(CvBlobTrackGen** pBTGen) +{ + if(*pBTGen)(*pBTGen)->Release(); + *pBTGen = 0; +} + +/* Declarations of constructors of implemented 
modules: */ +CV_EXPORTS CvBlobTrackGen* cvCreateModuleBlobTrackGen1(); +CV_EXPORTS CvBlobTrackGen* cvCreateModuleBlobTrackGenYML(); + + + +/* BLOB TRACKER INTERFACE */ +class CV_EXPORTS CvBlobTracker: public CvVSModule +{ +public: + CvBlobTracker(); + + /* Add new blob to track it and assign to this blob personal ID */ + /* pBlob - pointer to structure with blob parameters (ID is ignored)*/ + /* pImg - current image */ + /* pImgFG - current foreground mask */ + /* Return pointer to new added blob: */ + virtual CvBlob* AddBlob(CvBlob* pBlob, IplImage* pImg, IplImage* pImgFG = NULL ) = 0; + + /* Return number of currently tracked blobs: */ + virtual int GetBlobNum() = 0; + + /* Return pointer to specified by index blob: */ + virtual CvBlob* GetBlob(int BlobIndex) = 0; + + /* Delete blob by its index: */ + virtual void DelBlob(int BlobIndex) = 0; + + /* Process current image and track all existed blobs: */ + virtual void Process(IplImage* pImg, IplImage* pImgFG = NULL) = 0; + + /* Release blob tracker: */ + virtual void Release() = 0; + + + /* Process one blob (for multi hypothesis tracing): */ + virtual void ProcessBlob(int BlobIndex, CvBlob* pBlob, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL); + + /* Get confidence/wieght/probability (0-1) for blob: */ + virtual double GetConfidence(int /*BlobIndex*/, CvBlob* /*pBlob*/, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL); + + virtual double GetConfidenceList(CvBlobSeq* pBlobList, IplImage* pImg, IplImage* pImgFG = NULL); + + virtual void UpdateBlob(int /*BlobIndex*/, CvBlob* /*pBlob*/, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL); + + /* Update all blob models: */ + virtual void Update(IplImage* pImg, IplImage* pImgFG = NULL); + + /* Return pointer to blob by its unique ID: */ + virtual int GetBlobIndexByID(int BlobID); + + /* Return pointer to blob by its unique ID: */ + virtual CvBlob* GetBlobByID(int BlobID); + + /* Delete blob by its ID: */ + virtual void DelBlobByID(int BlobID); + + /* Set new parameters for specified (by index) blob: */ + virtual void SetBlob(int /*BlobIndex*/, CvBlob* /*pBlob*/); + + /* Set new parameters for specified (by ID) blob: */ + virtual void SetBlobByID(int BlobID, CvBlob* pBlob); + + /* =============== MULTI HYPOTHESIS INTERFACE ================== */ + + /* Return number of position hyposetis of currently tracked blob: */ + virtual int GetBlobHypNum(int /*BlobIdx*/); + + /* Return pointer to specified blob hypothesis by index blob: */ + virtual CvBlob* GetBlobHyp(int BlobIndex, int /*hypothesis*/); + + /* Set new parameters for specified (by index) blob hyp + * (can be called several times for each hyp ): + */ + virtual void SetBlobHyp(int /*BlobIndex*/, CvBlob* /*pBlob*/); +}; + +CV_EXPORTS void cvReleaseBlobTracker(CvBlobTracker**ppT ); +/* BLOB TRACKER INTERFACE */ + +/*BLOB TRACKER ONE INTERFACE */ +class CV_EXPORTS CvBlobTrackerOne : public CvVSModule +{ +public: + virtual void Init(CvBlob* pBlobInit, IplImage* pImg, IplImage* pImgFG = NULL) = 0; + virtual CvBlob* Process(CvBlob* pBlobPrev, IplImage* pImg, IplImage* pImgFG = NULL) = 0; + virtual void Release() = 0; + + /* Non-required methods: */ + virtual void SkipProcess(CvBlob* /*pBlobPrev*/, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL){}; + virtual void Update(CvBlob* /*pBlob*/, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL){}; + virtual void SetCollision(int /*CollisionFlag*/){}; /* call in case of blob collision situation*/ + virtual double GetConfidence(CvBlob* /*pBlob*/, IplImage* /*pImg*/, + IplImage* /*pImgFG*/ = NULL, IplImage* 
/*pImgUnusedReg*/ = NULL) + { + return 1; + }; +}; +inline void cvReleaseBlobTrackerOne(CvBlobTrackerOne **ppT ) +{ + ppT[0]->Release(); + ppT[0] = 0; +} +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerList(CvBlobTrackerOne* (*create)()); +/*BLOB TRACKER ONE INTERFACE */ + +/* Declarations of constructors of implemented modules: */ + +/* Some declarations for specific MeanShift tracker: */ +#define PROFILE_EPANECHNIKOV 0 +#define PROFILE_DOG 1 +struct CvBlobTrackerParamMS +{ + int noOfSigBits; + int appearance_profile; + int meanshift_profile; + float sigma; +}; + +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMS1(CvBlobTrackerParamMS* param); +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMS2(CvBlobTrackerParamMS* param); +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMS1ByList(); + +/* Some declarations for specific Likelihood tracker: */ +struct CvBlobTrackerParamLH +{ + int HistType; /* see Prob.h */ + int ScaleAfter; +}; + +/* Without scale optimization: */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerLHR(CvBlobTrackerParamLH* /*param*/ = NULL); + +/* With scale optimization: */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerLHRS(CvBlobTrackerParamLH* /*param*/ = NULL); + +/* Simple blob tracker based on connected component tracking: */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerCC(); + +/* Connected component tracking and mean-shift particle filter collion-resolver: */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerCCMSPF(); + +/* Blob tracker that integrates meanshift and connected components: */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMSFG(); +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMSFGS(); + +/* Meanshift without connected-components */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMS(); + +/* Particle filtering via Bhattacharya coefficient, which */ +/* is roughly the dot-product of two probability densities. 
*/ +/* See: Real-Time Tracking of Non-Rigid Objects using Mean Shift */ +/* Comanicius, Ramesh, Meer, 2000, 8p */ +/* http://citeseer.ist.psu.edu/321441.html */ +CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMSPF(); + +/* =========== tracker integrators trackers =============*/ + +/* Integrator based on Particle Filtering method: */ +//CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerIPF(); + +/* Rule based integrator: */ +//CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerIRB(); + +/* Integrator based on data fusion using particle filtering: */ +//CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerIPFDF(); + + + + +/* Trajectory postprocessing module: */ +class CV_EXPORTS CvBlobTrackPostProc: public CvVSModule +{ +public: + CvBlobTrackPostProc(){SetTypeName("BlobTrackPostProc");}; + virtual void AddBlob(CvBlob* pBlob) = 0; + virtual void Process() = 0; + virtual int GetBlobNum() = 0; + virtual CvBlob* GetBlob(int index) = 0; + virtual void Release() = 0; + + /* Additional functionality: */ + virtual CvBlob* GetBlobByID(int BlobID) + { + int i; + for(i=GetBlobNum();i>0;i--) + { + CvBlob* pB=GetBlob(i-1); + if(pB->ID==BlobID) return pB; + } + return NULL; + }; +}; + +inline void cvReleaseBlobTrackPostProc(CvBlobTrackPostProc** pBTPP) +{ + if(pBTPP == NULL) return; + if(*pBTPP)(*pBTPP)->Release(); + *pBTPP = 0; +} + +/* Trajectory generation module: */ +class CV_EXPORTS CvBlobTrackPostProcOne: public CvVSModule +{ +public: + CvBlobTrackPostProcOne(){SetTypeName("BlobTrackPostOne");}; + virtual CvBlob* Process(CvBlob* pBlob) = 0; + virtual void Release() = 0; +}; + +/* Create blob tracking post processing module based on simle module: */ +CV_EXPORTS CvBlobTrackPostProc* cvCreateBlobTrackPostProcList(CvBlobTrackPostProcOne* (*create)()); + + +/* Declarations of constructors of implemented modules: */ +CV_EXPORTS CvBlobTrackPostProc* cvCreateModuleBlobTrackPostProcKalman(); +CV_EXPORTS CvBlobTrackPostProc* cvCreateModuleBlobTrackPostProcTimeAverRect(); +CV_EXPORTS CvBlobTrackPostProc* cvCreateModuleBlobTrackPostProcTimeAverExp(); + + +/* PREDICTORS */ +/* blob PREDICTOR */ +class CvBlobTrackPredictor: public CvVSModule +{ +public: + CvBlobTrackPredictor(){SetTypeName("BlobTrackPredictor");}; + virtual CvBlob* Predict() = 0; + virtual void Update(CvBlob* pBlob) = 0; + virtual void Release() = 0; +}; +CV_EXPORTS CvBlobTrackPredictor* cvCreateModuleBlobTrackPredictKalman(); + + + +/* Trajectory analyser module: */ +class CV_EXPORTS CvBlobTrackAnalysis: public CvVSModule +{ +public: + CvBlobTrackAnalysis(){SetTypeName("BlobTrackAnalysis");}; + virtual void AddBlob(CvBlob* pBlob) = 0; + virtual void Process(IplImage* pImg, IplImage* pFG) = 0; + virtual float GetState(int BlobID) = 0; + /* return 0 if trajectory is normal + return >0 if trajectory abnormal */ + virtual const char* GetStateDesc(int /*BlobID*/){return NULL;}; + virtual void SetFileName(char* /*DataBaseName*/){}; + virtual void Release() = 0; +}; + + +inline void cvReleaseBlobTrackAnalysis(CvBlobTrackAnalysis** pBTPP) +{ + if(pBTPP == NULL) return; + if(*pBTPP)(*pBTPP)->Release(); + *pBTPP = 0; +} + +/* Feature-vector generation module: */ +class CV_EXPORTS CvBlobTrackFVGen : public CvVSModule +{ +public: + CvBlobTrackFVGen(){SetTypeName("BlobTrackFVGen");}; + virtual void AddBlob(CvBlob* pBlob) = 0; + virtual void Process(IplImage* pImg, IplImage* pFG) = 0; + virtual void Release() = 0; + virtual int GetFVSize() = 0; + virtual int GetFVNum() = 0; + virtual float* GetFV(int index, int* pFVID) = 0; /* Returns pointer to FV, if return 0 then FV not 
created */ + virtual float* GetFVVar(){return NULL;}; /* Returns pointer to array of variation of values of FV, if returns 0 then FVVar does not exist. */ + virtual float* GetFVMin() = 0; /* Returns pointer to array of minimal values of FV, if returns 0 then FVrange does not exist */ + virtual float* GetFVMax() = 0; /* Returns pointer to array of maximal values of FV, if returns 0 then FVrange does not exist */ +}; + + +/* Trajectory Analyser module: */ +class CV_EXPORTS CvBlobTrackAnalysisOne +{ +public: + virtual ~CvBlobTrackAnalysisOne() {}; + virtual int Process(CvBlob* pBlob, IplImage* pImg, IplImage* pFG) = 0; + /* return 0 if trajectory is normal + return >0 if trajectory abnormal */ + virtual void Release() = 0; +}; + +/* Create blob tracking post processing module based on simle module: */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateBlobTrackAnalysisList(CvBlobTrackAnalysisOne* (*create)()); + +/* Declarations of constructors of implemented modules: */ + +/* Based on histogram analysis of 2D FV (x,y): */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisHistP(); + +/* Based on histogram analysis of 4D FV (x,y,vx,vy): */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisHistPV(); + +/* Based on histogram analysis of 5D FV (x,y,vx,vy,state): */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisHistPVS(); + +/* Based on histogram analysis of 4D FV (startpos,stoppos): */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisHistSS(); + + + +/* Based on SVM classifier analysis of 2D FV (x,y): */ +//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMP(); + +/* Based on SVM classifier analysis of 4D FV (x,y,vx,vy): */ +//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMPV(); + +/* Based on SVM classifier analysis of 5D FV (x,y,vx,vy,state): */ +//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMPVS(); + +/* Based on SVM classifier analysis of 4D FV (startpos,stoppos): */ +//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMSS(); + +/* Track analysis based on distance between tracks: */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisTrackDist(); + +/* Analyzer based on reation Road and height map: */ +//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysis3DRoadMap(); + +/* Analyzer that makes OR decision using set of analyzers: */ +CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisIOR(); + +/* Estimator of human height: */ +class CV_EXPORTS CvBlobTrackAnalysisHeight: public CvBlobTrackAnalysis +{ +public: + virtual double GetHeight(CvBlob* pB) = 0; +}; +//CV_EXPORTS CvBlobTrackAnalysisHeight* cvCreateModuleBlobTrackAnalysisHeightScale(); + + + +/* AUTO BLOB TRACKER INTERFACE -- pipeline of 3 modules: */ +class CV_EXPORTS CvBlobTrackerAuto: public CvVSModule +{ +public: + CvBlobTrackerAuto(){SetTypeName("BlobTrackerAuto");}; + virtual void Process(IplImage* pImg, IplImage* pMask = NULL) = 0; + virtual CvBlob* GetBlob(int index) = 0; + virtual CvBlob* GetBlobByID(int ID) = 0; + virtual int GetBlobNum() = 0; + virtual IplImage* GetFGMask(){return NULL;}; + virtual float GetState(int BlobID) = 0; + virtual const char* GetStateDesc(int BlobID) = 0; + /* return 0 if trajectory is normal; + * return >0 if trajectory abnormal. 
*/ + virtual void Release() = 0; +}; +inline void cvReleaseBlobTrackerAuto(CvBlobTrackerAuto** ppT) +{ + ppT[0]->Release(); + ppT[0] = 0; +} +/* END AUTO BLOB TRACKER INTERFACE */ + + +/* Constructor functions and data for specific BlobTRackerAuto modules: */ + +/* Parameters of blobtracker auto ver1: */ +struct CvBlobTrackerAutoParam1 +{ + int FGTrainFrames; /* Number of frames needed for FG (foreground) detector to train. */ + + CvFGDetector* pFG; /* FGDetector module. If this field is NULL the Process FG mask is used. */ + + CvBlobDetector* pBD; /* Selected blob detector module. */ + /* If this field is NULL default blobdetector module will be created. */ + + CvBlobTracker* pBT; /* Selected blob tracking module. */ + /* If this field is NULL default blobtracker module will be created. */ + + CvBlobTrackGen* pBTGen; /* Selected blob trajectory generator. */ + /* If this field is NULL no generator is used. */ + + CvBlobTrackPostProc* pBTPP; /* Selected blob trajectory postprocessing module. */ + /* If this field is NULL no postprocessing is done. */ + + int UsePPData; + + CvBlobTrackAnalysis* pBTA; /* Selected blob trajectory analysis module. */ + /* If this field is NULL no track analysis is done. */ +}; + +/* Create blob tracker auto ver1: */ +CV_EXPORTS CvBlobTrackerAuto* cvCreateBlobTrackerAuto1(CvBlobTrackerAutoParam1* param = NULL); + +/* Simple loader for many auto trackers by its type : */ +inline CvBlobTrackerAuto* cvCreateBlobTrackerAuto(int type, void* param) +{ + if(type == 0) return cvCreateBlobTrackerAuto1((CvBlobTrackerAutoParam1*)param); + return 0; +} + + + +struct CvTracksTimePos +{ + int len1,len2; + int beg1,beg2; + int end1,end2; + int comLen; //common length for two tracks + int shift1,shift2; +}; + +/*CV_EXPORTS int cvCompareTracks( CvBlobTrackSeq *groundTruth, + CvBlobTrackSeq *result, + FILE *file);*/ + + +/* Constructor functions: */ + +CV_EXPORTS void cvCreateTracks_One(CvBlobTrackSeq *TS); +CV_EXPORTS void cvCreateTracks_Same(CvBlobTrackSeq *TS1, CvBlobTrackSeq *TS2); +CV_EXPORTS void cvCreateTracks_AreaErr(CvBlobTrackSeq *TS1, CvBlobTrackSeq *TS2, int addW, int addH); + + +/* HIST API */ +class CV_EXPORTS CvProb +{ +public: + virtual ~CvProb() {}; + + /* Calculate probability value: */ + virtual double Value(int* /*comp*/, int /*x*/ = 0, int /*y*/ = 0){return -1;}; + + /* Update histograpp Pnew = (1-W)*Pold + W*Padd*/ + /* W weight of new added prob */ + /* comps - matrix of new fetature vectors used to update prob */ + virtual void AddFeature(float W, int* comps, int x =0, int y = 0) = 0; + virtual void Scale(float factor = 0, int x = -1, int y = -1) = 0; + virtual void Release() = 0; +}; +inline void cvReleaseProb(CvProb** ppProb){ppProb[0]->Release();ppProb[0]=NULL;} +/* HIST API */ + +/* Some Prob: */ +CV_EXPORTS CvProb* cvCreateProbS(int dim, CvSize size, int sample_num); +CV_EXPORTS CvProb* cvCreateProbMG(int dim, CvSize size, int sample_num); +CV_EXPORTS CvProb* cvCreateProbMG2(int dim, CvSize size, int sample_num); +CV_EXPORTS CvProb* cvCreateProbHist(int dim, CvSize size); + +#define CV_BT_HIST_TYPE_S 0 +#define CV_BT_HIST_TYPE_MG 1 +#define CV_BT_HIST_TYPE_MG2 2 +#define CV_BT_HIST_TYPE_H 3 +inline CvProb* cvCreateProb(int type, int dim, CvSize size = cvSize(1,1), void* /*param*/ = NULL) +{ + if(type == CV_BT_HIST_TYPE_S) return cvCreateProbS(dim, size, -1); + if(type == CV_BT_HIST_TYPE_MG) return cvCreateProbMG(dim, size, -1); + if(type == CV_BT_HIST_TYPE_MG2) return cvCreateProbMG2(dim, size, -1); + if(type == CV_BT_HIST_TYPE_H) return 
cvCreateProbHist(dim, size); + return NULL; +} + + + +/* Noise type definitions: */ +#define CV_NOISE_NONE 0 +#define CV_NOISE_GAUSSIAN 1 +#define CV_NOISE_UNIFORM 2 +#define CV_NOISE_SPECKLE 3 +#define CV_NOISE_SALT_AND_PEPPER 4 + +/* Add some noise to image: */ +/* pImg - (input) image without noise */ +/* pImg - (output) image with noise */ +/* noise_type - type of added noise */ +/* CV_NOISE_GAUSSIAN - pImg += n , n - is gaussian noise with Ampl standart deviation */ +/* CV_NOISE_UNIFORM - pImg += n , n - is uniform noise with Ampl standart deviation */ +/* CV_NOISE_SPECKLE - pImg += n*pImg , n - is gaussian noise with Ampl standart deviation */ +/* CV_NOISE_SALT_AND_PAPPER - pImg = pImg with blacked and whited pixels, + Ampl is density of brocken pixels (0-there are not broken pixels, 1 - all pixels are broken)*/ +/* Ampl - "amplitude" of noise */ +//CV_EXPORTS void cvAddNoise(IplImage* pImg, int noise_type, double Ampl, CvRNG* rnd_state = NULL); + +/*================== GENERATOR OF TEST VIDEO SEQUENCE ===================== */ +typedef void CvTestSeq; + +/* pConfigfile - Name of file (yml or xml) with description of test sequence */ +/* videos - array of names of test videos described in "pConfigfile" file */ +/* numvideos - size of "videos" array */ +CV_EXPORTS CvTestSeq* cvCreateTestSeq(char* pConfigfile, char** videos, int numvideo, float Scale = 1, int noise_type = CV_NOISE_NONE, double noise_ampl = 0); +CV_EXPORTS void cvReleaseTestSeq(CvTestSeq** ppTestSeq); + +/* Generate next frame from test video seq and return pointer to it: */ +CV_EXPORTS IplImage* cvTestSeqQueryFrame(CvTestSeq* pTestSeq); + +/* Return pointer to current foreground mask: */ +CV_EXPORTS IplImage* cvTestSeqGetFGMask(CvTestSeq* pTestSeq); + +/* Return pointer to current image: */ +CV_EXPORTS IplImage* cvTestSeqGetImage(CvTestSeq* pTestSeq); + +/* Return frame size of result test video: */ +CV_EXPORTS CvSize cvTestSeqGetImageSize(CvTestSeq* pTestSeq); + +/* Return number of frames result test video: */ +CV_EXPORTS int cvTestSeqFrameNum(CvTestSeq* pTestSeq); + +/* Return number of existing objects. + * This is general number of any objects. + * For example number of trajectories may be equal or less than returned value: + */ +CV_EXPORTS int cvTestSeqGetObjectNum(CvTestSeq* pTestSeq); + +/* Return 0 if there is not position for current defined on current frame */ +/* Return 1 if there is object position and pPos was filled */ +CV_EXPORTS int cvTestSeqGetObjectPos(CvTestSeq* pTestSeq, int ObjIndex, CvPoint2D32f* pPos); +CV_EXPORTS int cvTestSeqGetObjectSize(CvTestSeq* pTestSeq, int ObjIndex, CvPoint2D32f* pSize); + +/* Add noise to final image: */ +CV_EXPORTS void cvTestSeqAddNoise(CvTestSeq* pTestSeq, int noise_type = CV_NOISE_NONE, double noise_ampl = 0); + +/* Add Intensity variation: */ +CV_EXPORTS void cvTestSeqAddIntensityVariation(CvTestSeq* pTestSeq, float DI_per_frame, float MinI, float MaxI); +CV_EXPORTS void cvTestSeqSetFrame(CvTestSeq* pTestSeq, int n); + +#endif + +/* End of file. 
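// --- Editorial usage sketch (added for illustration; not part of this patch or of the OpenCV header) ---
// The auto-tracker pipeline documented above (CvBlobTrackerAutoParam1 fed to
// cvCreateBlobTrackerAuto1) is easiest to see end to end. Everything below uses only
// declarations from this header; the foreground-detector `type` value 0 and the frame
// source grab_frame() are placeholders supplied by the application, not taken from the patch.
IplImage* grab_frame();                                    // assumed: provided by the application

void track_blobs_demo()
{
    CvBlobTrackerAutoParam1 param = {0};
    param.FGTrainFrames = 10;                              // frames used to train the FG model
    param.pFG   = cvCreateFGDetectorBase(0, NULL);         // foreground detector (type is app-specific)
    param.pBD   = cvCreateBlobDetectorCC();                // connected-component blob entrance detector
    param.pBT   = cvCreateBlobTrackerCC();                 // connected-component blob tracker
    param.pBTPP = cvCreateModuleBlobTrackPostProcKalman(); // optional Kalman trajectory smoothing
    // pBTGen and pBTA left NULL: no trajectory logging, no trajectory analysis.

    CvBlobTrackerAuto* pTracker = cvCreateBlobTrackerAuto1(&param);

    while(IplImage* pFrame = grab_frame())
    {
        pTracker->Process(pFrame, NULL);                   // NULL mask: let pFG build it
        for(int i = 0; i < pTracker->GetBlobNum(); ++i)
        {
            CvBlob* pB = pTracker->GetBlob(i);
            CvRect  r  = CV_BLOB_RECT(pB);                 // bounding box of tracked blob i
            (void)r;
        }
    }
    cvReleaseBlobTrackerAuto(&pTracker);
    // Releasing the individual modules passed in param is omitted here for brevity.
}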
*/ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/legacy/compat.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/legacy/compat.hpp new file mode 100644 index 0000000..5b5495e --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/legacy/compat.hpp @@ -0,0 +1,740 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright( C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +//(including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort(including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* + A few macros and definitions for backward compatibility + with the previous versions of OpenCV. They are obsolete and + are likely to be removed in future. To check whether your code + uses any of these, define CV_NO_BACKWARD_COMPATIBILITY before + including cv.h. 
+*/ + +#ifndef __OPENCV_COMPAT_HPP__ +#define __OPENCV_COMPAT_HPP__ + +#include "opencv2/core/core_c.h" +#include "opencv2/imgproc/types_c.h" + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef int CvMatType; +typedef int CvDisMaskType; +typedef CvMat CvMatArray; + +typedef int CvThreshType; +typedef int CvAdaptiveThreshMethod; +typedef int CvCompareMethod; +typedef int CvFontFace; +typedef int CvPolyApproxMethod; +typedef int CvContoursMatchMethod; +typedef int CvContourTreesMatchMethod; +typedef int CvCoeffType; +typedef int CvRodriguesType; +typedef int CvElementShape; +typedef int CvMorphOp; +typedef int CvTemplMatchMethod; + +typedef CvPoint2D64f CvPoint2D64d; +typedef CvPoint3D64f CvPoint3D64d; + +enum +{ + CV_MAT32F = CV_32FC1, + CV_MAT3x1_32F = CV_32FC1, + CV_MAT4x1_32F = CV_32FC1, + CV_MAT3x3_32F = CV_32FC1, + CV_MAT4x4_32F = CV_32FC1, + + CV_MAT64D = CV_64FC1, + CV_MAT3x1_64D = CV_64FC1, + CV_MAT4x1_64D = CV_64FC1, + CV_MAT3x3_64D = CV_64FC1, + CV_MAT4x4_64D = CV_64FC1 +}; + +enum +{ + IPL_GAUSSIAN_5x5 = 7 +}; + +typedef CvBox2D CvBox2D32f; + +/* allocation/deallocation macros */ +#define cvCreateImageData cvCreateData +#define cvReleaseImageData cvReleaseData +#define cvSetImageData cvSetData +#define cvGetImageRawData cvGetRawData + +#define cvmAlloc cvCreateData +#define cvmFree cvReleaseData +#define cvmAllocArray cvCreateData +#define cvmFreeArray cvReleaseData + +#define cvIntegralImage cvIntegral +#define cvMatchContours cvMatchShapes + +CV_EXPORTS CvMat cvMatArray( int rows, int cols, int type, + int count, void* data CV_DEFAULT(0)); + +#define cvUpdateMHIByTime cvUpdateMotionHistory + +#define cvAccMask cvAcc +#define cvSquareAccMask cvSquareAcc +#define cvMultiplyAccMask cvMultiplyAcc +#define cvRunningAvgMask(imgY, imgU, mask, alpha) cvRunningAvg(imgY, imgU, alpha, mask) + +#define cvSetHistThresh cvSetHistBinRanges +#define cvCalcHistMask(img, mask, hist, doNotClear) cvCalcHist(img, hist, doNotClear, mask) + +CV_EXPORTS double cvMean( const CvArr* image, const CvArr* mask CV_DEFAULT(0)); +CV_EXPORTS double cvSumPixels( const CvArr* image ); +CV_EXPORTS void cvMean_StdDev( const CvArr* image, double* mean, double* sdv, + const CvArr* mask CV_DEFAULT(0)); + +CV_EXPORTS void cvmPerspectiveProject( const CvMat* mat, const CvArr* src, CvArr* dst ); +CV_EXPORTS void cvFillImage( CvArr* mat, double color ); + +#define cvCvtPixToPlane cvSplit +#define cvCvtPlaneToPix cvMerge + +typedef struct CvRandState +{ + CvRNG state; /* RNG state (the current seed and carry)*/ + int disttype; /* distribution type */ + CvScalar param[2]; /* parameters of RNG */ +} CvRandState; + +/* Changes RNG range while preserving RNG state */ +CV_EXPORTS void cvRandSetRange( CvRandState* state, double param1, + double param2, int index CV_DEFAULT(-1)); + +CV_EXPORTS void cvRandInit( CvRandState* state, double param1, + double param2, int seed, + int disttype CV_DEFAULT(CV_RAND_UNI)); + +/* Fills array with random numbers */ +CV_EXPORTS void cvRand( CvRandState* state, CvArr* arr ); + +#define cvRandNext( _state ) cvRandInt( &(_state)->state ) + +CV_EXPORTS void cvbRand( CvRandState* state, float* dst, int len ); + +CV_EXPORTS void cvbCartToPolar( const float* y, const float* x, + float* magnitude, float* angle, int len ); +CV_EXPORTS void cvbFastArctan( const float* y, const float* x, float* angle, int len ); +CV_EXPORTS void cvbSqrt( const float* x, float* y, int len ); +CV_EXPORTS void cvbInvSqrt( const float* x, float* y, int len ); +CV_EXPORTS void cvbReciprocal( const 
float* x, float* y, int len ); +CV_EXPORTS void cvbFastExp( const float* x, double* y, int len ); +CV_EXPORTS void cvbFastLog( const double* x, float* y, int len ); + +CV_EXPORTS CvRect cvContourBoundingRect( void* point_set, int update CV_DEFAULT(0)); + +CV_EXPORTS double cvPseudoInverse( const CvArr* src, CvArr* dst ); +#define cvPseudoInv cvPseudoInverse + +#define cvContourMoments( contour, moments ) cvMoments( contour, moments, 0 ) + +#define cvGetPtrAt cvPtr2D +#define cvGetAt cvGet2D +#define cvSetAt(arr,val,y,x) cvSet2D((arr),(y),(x),(val)) + +#define cvMeanMask cvMean +#define cvMean_StdDevMask(img,mask,mean,sdv) cvMean_StdDev(img,mean,sdv,mask) + +#define cvNormMask(imgA,imgB,mask,normType) cvNorm(imgA,imgB,normType,mask) + +#define cvMinMaxLocMask(img, mask, min_val, max_val, min_loc, max_loc) \ + cvMinMaxLoc(img, min_val, max_val, min_loc, max_loc, mask) + +#define cvRemoveMemoryManager cvSetMemoryManager + +#define cvmSetZero( mat ) cvSetZero( mat ) +#define cvmSetIdentity( mat ) cvSetIdentity( mat ) +#define cvmAdd( src1, src2, dst ) cvAdd( src1, src2, dst, 0 ) +#define cvmSub( src1, src2, dst ) cvSub( src1, src2, dst, 0 ) +#define cvmCopy( src, dst ) cvCopy( src, dst, 0 ) +#define cvmMul( src1, src2, dst ) cvMatMulAdd( src1, src2, 0, dst ) +#define cvmTranspose( src, dst ) cvT( src, dst ) +#define cvmInvert( src, dst ) cvInv( src, dst ) +#define cvmMahalanobis(vec1, vec2, mat) cvMahalanobis( vec1, vec2, mat ) +#define cvmDotProduct( vec1, vec2 ) cvDotProduct( vec1, vec2 ) +#define cvmCrossProduct(vec1, vec2,dst) cvCrossProduct( vec1, vec2, dst ) +#define cvmTrace( mat ) (cvTrace( mat )).val[0] +#define cvmMulTransposed( src, dst, order ) cvMulTransposed( src, dst, order ) +#define cvmEigenVV( mat, evec, eval, eps) cvEigenVV( mat, evec, eval, eps ) +#define cvmDet( mat ) cvDet( mat ) +#define cvmScale( src, dst, scale ) cvScale( src, dst, scale ) + +#define cvCopyImage( src, dst ) cvCopy( src, dst, 0 ) +#define cvReleaseMatHeader cvReleaseMat + +/* Calculates exact convex hull of 2d point set */ +CV_EXPORTS void cvConvexHull( CvPoint* points, int num_points, + CvRect* bound_rect, + int orientation, int* hull, int* hullsize ); + + +CV_EXPORTS void cvMinAreaRect( CvPoint* points, int n, + int left, int bottom, + int right, int top, + CvPoint2D32f* anchor, + CvPoint2D32f* vect1, + CvPoint2D32f* vect2 ); + +typedef int CvDisType; +typedef int CvChainApproxMethod; +typedef int CvContourRetrievalMode; + +CV_EXPORTS void cvFitLine3D( CvPoint3D32f* points, int count, int dist, + void *param, float reps, float aeps, float* line ); + +/* Fits a line into set of 2d points in a robust way (M-estimator technique) */ +CV_EXPORTS void cvFitLine2D( CvPoint2D32f* points, int count, int dist, + void *param, float reps, float aeps, float* line ); + +CV_EXPORTS void cvFitEllipse( const CvPoint2D32f* points, int count, CvBox2D* box ); + +/* Projects 2d points to one of standard coordinate planes + (i.e. removes one of coordinates) */ +CV_EXPORTS void cvProject3D( CvPoint3D32f* points3D, int count, + CvPoint2D32f* points2D, + int xIndx CV_DEFAULT(0), + int yIndx CV_DEFAULT(1)); + +/* Retrieves value of the particular bin + of x-dimensional (x=1,2,3,...) 
histogram */ +#define cvQueryHistValue_1D( hist, idx0 ) \ + ((float)cvGetReal1D( (hist)->bins, (idx0))) +#define cvQueryHistValue_2D( hist, idx0, idx1 ) \ + ((float)cvGetReal2D( (hist)->bins, (idx0), (idx1))) +#define cvQueryHistValue_3D( hist, idx0, idx1, idx2 ) \ + ((float)cvGetReal3D( (hist)->bins, (idx0), (idx1), (idx2))) +#define cvQueryHistValue_nD( hist, idx ) \ + ((float)cvGetRealND( (hist)->bins, (idx))) + +/* Returns pointer to the particular bin of x-dimesional histogram. + For sparse histogram the bin is created if it didn't exist before */ +#define cvGetHistValue_1D( hist, idx0 ) \ + ((float*)cvPtr1D( (hist)->bins, (idx0), 0)) +#define cvGetHistValue_2D( hist, idx0, idx1 ) \ + ((float*)cvPtr2D( (hist)->bins, (idx0), (idx1), 0)) +#define cvGetHistValue_3D( hist, idx0, idx1, idx2 ) \ + ((float*)cvPtr3D( (hist)->bins, (idx0), (idx1), (idx2), 0)) +#define cvGetHistValue_nD( hist, idx ) \ + ((float*)cvPtrND( (hist)->bins, (idx), 0)) + + +#define CV_IS_SET_ELEM_EXISTS CV_IS_SET_ELEM + + +CV_EXPORTS int cvHoughLines( CvArr* image, double rho, + double theta, int threshold, + float* lines, int linesNumber ); + +CV_EXPORTS int cvHoughLinesP( CvArr* image, double rho, + double theta, int threshold, + int lineLength, int lineGap, + int* lines, int linesNumber ); + + +CV_EXPORTS int cvHoughLinesSDiv( CvArr* image, double rho, int srn, + double theta, int stn, int threshold, + float* lines, int linesNumber ); + +CV_EXPORTS float cvCalcEMD( const float* signature1, int size1, + const float* signature2, int size2, + int dims, int dist_type CV_DEFAULT(CV_DIST_L2), + CvDistanceFunction dist_func CV_DEFAULT(0), + float* lower_bound CV_DEFAULT(0), + void* user_param CV_DEFAULT(0)); + +CV_EXPORTS void cvKMeans( int num_clusters, float** samples, + int num_samples, int vec_size, + CvTermCriteria termcrit, int* cluster_idx ); + +CV_EXPORTS void cvStartScanGraph( CvGraph* graph, CvGraphScanner* scanner, + CvGraphVtx* vtx CV_DEFAULT(NULL), + int mask CV_DEFAULT(CV_GRAPH_ALL_ITEMS)); + +CV_EXPORTS void cvEndScanGraph( CvGraphScanner* scanner ); + + +/* old drawing functions */ +CV_EXPORTS void cvLineAA( CvArr* img, CvPoint pt1, CvPoint pt2, + double color, int scale CV_DEFAULT(0)); + +CV_EXPORTS void cvCircleAA( CvArr* img, CvPoint center, int radius, + double color, int scale CV_DEFAULT(0) ); + +CV_EXPORTS void cvEllipseAA( CvArr* img, CvPoint center, CvSize axes, + double angle, double start_angle, + double end_angle, double color, + int scale CV_DEFAULT(0) ); + +CV_EXPORTS void cvPolyLineAA( CvArr* img, CvPoint** pts, int* npts, int contours, + int is_closed, double color, int scale CV_DEFAULT(0) ); + +/****************************************************************************************\ +* Pixel Access Macros * +\****************************************************************************************/ + +typedef struct _CvPixelPosition8u +{ + uchar* currline; /* pointer to the start of the current pixel line */ + uchar* topline; /* pointer to the start of the top pixel line */ + uchar* bottomline; /* pointer to the start of the first line */ + /* which is below the image */ + int x; /* current x coordinate ( in pixels ) */ + int width; /* width of the image ( in pixels ) */ + int height; /* height of the image ( in pixels ) */ + int step; /* distance between lines ( in elements of single */ + /* plane ) */ + int step_arr[3]; /* array: ( 0, -step, step ). 
It is used for */ + /* vertical moving */ +} CvPixelPosition8u; + +/* this structure differs from the above only in data type */ +typedef struct _CvPixelPosition8s +{ + schar* currline; + schar* topline; + schar* bottomline; + int x; + int width; + int height; + int step; + int step_arr[3]; +} CvPixelPosition8s; + +/* this structure differs from the CvPixelPosition8u only in data type */ +typedef struct _CvPixelPosition32f +{ + float* currline; + float* topline; + float* bottomline; + int x; + int width; + int height; + int step; + int step_arr[3]; +} CvPixelPosition32f; + + +/* Initialize one of the CvPixelPosition structures. */ +/* pos - initialized structure */ +/* origin - pointer to the left-top corner of the ROI */ +/* step - width of the whole image in bytes */ +/* roi - width & height of the ROI */ +/* x, y - initial position */ +#define CV_INIT_PIXEL_POS(pos, origin, _step, roi, _x, _y, orientation) \ + ( \ + (pos).step = (_step)/sizeof((pos).currline[0]) * (orientation ? -1 : 1), \ + (pos).width = (roi).width, \ + (pos).height = (roi).height, \ + (pos).bottomline = (origin) + (pos).step*(pos).height, \ + (pos).topline = (origin) - (pos).step, \ + (pos).step_arr[0] = 0, \ + (pos).step_arr[1] = -(pos).step, \ + (pos).step_arr[2] = (pos).step, \ + (pos).x = (_x), \ + (pos).currline = (origin) + (pos).step*(_y) ) + + +/* Move to specified point ( absolute shift ) */ +/* pos - position structure */ +/* x, y - coordinates of the new position */ +/* cs - number of the image channels */ +#define CV_MOVE_TO( pos, _x, _y, cs ) \ +((pos).currline = (_y) >= 0 && (_y) < (pos).height ? (pos).topline + ((_y)+1)*(pos).step : 0, \ + (pos).x = (_x) >= 0 && (_x) < (pos).width ? (_x) : 0, (pos).currline + (_x) * (cs) ) + +/* Get current coordinates */ +/* pos - position structure */ +/* x, y - coordinates of the new position */ +/* cs - number of the image channels */ +#define CV_GET_CURRENT( pos, cs ) ((pos).currline + (pos).x * (cs)) + +/* Move by one pixel relatively to current position */ +/* pos - position structure */ +/* cs - number of the image channels */ + +/* left */ +#define CV_MOVE_LEFT( pos, cs ) \ + ( --(pos).x >= 0 ? (pos).currline + (pos).x*(cs) : 0 ) + +/* right */ +#define CV_MOVE_RIGHT( pos, cs ) \ + ( ++(pos).x < (pos).width ? (pos).currline + (pos).x*(cs) : 0 ) + +/* up */ +#define CV_MOVE_UP( pos, cs ) \ + (((pos).currline -= (pos).step) != (pos).topline ? (pos).currline + (pos).x*(cs) : 0 ) + +/* down */ +#define CV_MOVE_DOWN( pos, cs ) \ + (((pos).currline += (pos).step) != (pos).bottomline ? (pos).currline + (pos).x*(cs) : 0 ) + +/* left up */ +#define CV_MOVE_LU( pos, cs ) ( CV_MOVE_LEFT(pos, cs), CV_MOVE_UP(pos, cs)) + +/* right up */ +#define CV_MOVE_RU( pos, cs ) ( CV_MOVE_RIGHT(pos, cs), CV_MOVE_UP(pos, cs)) + +/* left down */ +#define CV_MOVE_LD( pos, cs ) ( CV_MOVE_LEFT(pos, cs), CV_MOVE_DOWN(pos, cs)) + +/* right down */ +#define CV_MOVE_RD( pos, cs ) ( CV_MOVE_RIGHT(pos, cs), CV_MOVE_DOWN(pos, cs)) + + + +/* Move by one pixel relatively to current position with wrapping when the position */ +/* achieves image boundary */ +/* pos - position structure */ +/* cs - number of the image channels */ + +/* left */ +#define CV_MOVE_LEFT_WRAP( pos, cs ) \ + ((pos).currline + ( --(pos).x >= 0 ? (pos).x : ((pos).x = (pos).width-1))*(cs)) + +/* right */ +#define CV_MOVE_RIGHT_WRAP( pos, cs ) \ + ((pos).currline + ( ++(pos).x < (pos).width ? (pos).x : ((pos).x = 0))*(cs) ) + +/* up */ +#define CV_MOVE_UP_WRAP( pos, cs ) \ + ((((pos).currline -= (pos).step) != (pos).topline ? 
\ + (pos).currline : ((pos).currline = (pos).bottomline - (pos).step)) + (pos).x*(cs) ) + +/* down */ +#define CV_MOVE_DOWN_WRAP( pos, cs ) \ + ((((pos).currline += (pos).step) != (pos).bottomline ? \ + (pos).currline : ((pos).currline = (pos).topline + (pos).step)) + (pos).x*(cs) ) + +/* left up */ +#define CV_MOVE_LU_WRAP( pos, cs ) ( CV_MOVE_LEFT_WRAP(pos, cs), CV_MOVE_UP_WRAP(pos, cs)) +/* right up */ +#define CV_MOVE_RU_WRAP( pos, cs ) ( CV_MOVE_RIGHT_WRAP(pos, cs), CV_MOVE_UP_WRAP(pos, cs)) +/* left down */ +#define CV_MOVE_LD_WRAP( pos, cs ) ( CV_MOVE_LEFT_WRAP(pos, cs), CV_MOVE_DOWN_WRAP(pos, cs)) +/* right down */ +#define CV_MOVE_RD_WRAP( pos, cs ) ( CV_MOVE_RIGHT_WRAP(pos, cs), CV_MOVE_DOWN_WRAP(pos, cs)) + +/* Numeric constants which used for moving in arbitrary direction */ +enum +{ + CV_SHIFT_NONE = 2, + CV_SHIFT_LEFT = 1, + CV_SHIFT_RIGHT = 3, + CV_SHIFT_UP = 6, + CV_SHIFT_DOWN = 10, + CV_SHIFT_LU = 5, + CV_SHIFT_RU = 7, + CV_SHIFT_LD = 9, + CV_SHIFT_RD = 11 +}; + +/* Move by one pixel in specified direction */ +/* pos - position structure */ +/* shift - direction ( it's value must be one of the CV_SHIFT_Ö constants ) */ +/* cs - number of the image channels */ +#define CV_MOVE_PARAM( pos, shift, cs ) \ + ( (pos).currline += (pos).step_arr[(shift)>>2], (pos).x += ((shift)&3)-2, \ + ((pos).currline != (pos).topline && (pos).currline != (pos).bottomline && \ + (pos).x >= 0 && (pos).x < (pos).width) ? (pos).currline + (pos).x*(cs) : 0 ) + +/* Move by one pixel in specified direction with wrapping when the */ +/* position achieves image boundary */ +/* pos - position structure */ +/* shift - direction ( it's value must be one of the CV_SHIFT_Ö constants ) */ +/* cs - number of the image channels */ +#define CV_MOVE_PARAM_WRAP( pos, shift, cs ) \ + ( (pos).currline += (pos).step_arr[(shift)>>2], \ + (pos).currline = ((pos).currline == (pos).topline ? \ + (pos).bottomline - (pos).step : \ + (pos).currline == (pos).bottomline ? \ + (pos).topline + (pos).step : (pos).currline), \ + \ + (pos).x += ((shift)&3)-2, \ + (pos).x = ((pos).x < 0 ? (pos).width-1 : (pos).x >= (pos).width ? 
0 : (pos).x), \ + \ + (pos).currline + (pos).x*(cs) ) + + +typedef float* CvVect32f; +typedef float* CvMatr32f; +typedef double* CvVect64d; +typedef double* CvMatr64d; + +CV_EXPORTS void cvUnDistortOnce( const CvArr* src, CvArr* dst, + const float* intrinsic_matrix, + const float* distortion_coeffs, + int interpolate ); + +/* the two functions below have quite hackerish implementations, use with care + (or, which is better, switch to cvUndistortInitMap and cvRemap instead */ +CV_EXPORTS void cvUnDistortInit( const CvArr* src, + CvArr* undistortion_map, + const float* A, const float* k, + int interpolate ); + +CV_EXPORTS void cvUnDistort( const CvArr* src, CvArr* dst, + const CvArr* undistortion_map, + int interpolate ); + +/* Find fundamental matrix */ +CV_EXPORTS void cvFindFundamentalMatrix( int* points1, int* points2, + int numpoints, int method, float* matrix ); + + +CV_EXPORTS int cvFindChessBoardCornerGuesses( const void* arr, void* thresharr, + CvMemStorage* storage, + CvSize pattern_size, CvPoint2D32f * corners, + int *corner_count ); + +/* Calibrates camera using multiple views of calibration pattern */ +CV_EXPORTS void cvCalibrateCamera( int image_count, int* _point_counts, + CvSize image_size, CvPoint2D32f* _image_points, CvPoint3D32f* _object_points, + float* _distortion_coeffs, float* _camera_matrix, float* _translation_vectors, + float* _rotation_matrices, int flags ); + + +CV_EXPORTS void cvCalibrateCamera_64d( int image_count, int* _point_counts, + CvSize image_size, CvPoint2D64f* _image_points, CvPoint3D64f* _object_points, + double* _distortion_coeffs, double* _camera_matrix, double* _translation_vectors, + double* _rotation_matrices, int flags ); + + +/* Find 3d position of object given intrinsic camera parameters, + 3d model of the object and projection of the object into view plane */ +CV_EXPORTS void cvFindExtrinsicCameraParams( int point_count, + CvSize image_size, CvPoint2D32f* _image_points, + CvPoint3D32f* _object_points, float* focal_length, + CvPoint2D32f principal_point, float* _distortion_coeffs, + float* _rotation_vector, float* _translation_vector ); + +/* Variant of the previous function that takes double-precision parameters */ +CV_EXPORTS void cvFindExtrinsicCameraParams_64d( int point_count, + CvSize image_size, CvPoint2D64f* _image_points, + CvPoint3D64f* _object_points, double* focal_length, + CvPoint2D64f principal_point, double* _distortion_coeffs, + double* _rotation_vector, double* _translation_vector ); + +/* Rodrigues transform */ +enum +{ + CV_RODRIGUES_M2V = 0, + CV_RODRIGUES_V2M = 1 +}; + +/* Converts rotation_matrix matrix to rotation_matrix vector or vice versa */ +CV_EXPORTS void cvRodrigues( CvMat* rotation_matrix, CvMat* rotation_vector, + CvMat* jacobian, int conv_type ); + +/* Does reprojection of 3d object points to the view plane */ +CV_EXPORTS void cvProjectPoints( int point_count, CvPoint3D64f* _object_points, + double* _rotation_vector, double* _translation_vector, + double* focal_length, CvPoint2D64f principal_point, + double* _distortion, CvPoint2D64f* _image_points, + double* _deriv_points_rotation_matrix, + double* _deriv_points_translation_vect, + double* _deriv_points_focal, + double* _deriv_points_principal_point, + double* _deriv_points_distortion_coeffs ); + + +/* Simpler version of the previous function */ +CV_EXPORTS void cvProjectPointsSimple( int point_count, CvPoint3D64f* _object_points, + double* _rotation_matrix, double* _translation_vector, + double* _camera_matrix, double* _distortion, CvPoint2D64f* _image_points 
); + + +#define cvMake2DPoints cvConvertPointsHomogeneous +#define cvMake3DPoints cvConvertPointsHomogeneous + +#define cvWarpPerspectiveQMatrix cvGetPerspectiveTransform + +#define cvConvertPointsHomogenious cvConvertPointsHomogeneous + + +//////////////////////////////////// feature extractors: obsolete API ////////////////////////////////// + +typedef struct CvSURFPoint +{ + CvPoint2D32f pt; + + int laplacian; + int size; + float dir; + float hessian; + +} CvSURFPoint; + +CV_INLINE CvSURFPoint cvSURFPoint( CvPoint2D32f pt, int laplacian, + int size, float dir CV_DEFAULT(0), + float hessian CV_DEFAULT(0)) +{ + CvSURFPoint kp; + + kp.pt = pt; + kp.laplacian = laplacian; + kp.size = size; + kp.dir = dir; + kp.hessian = hessian; + + return kp; +} + +typedef struct CvSURFParams +{ + int extended; + int upright; + double hessianThreshold; + + int nOctaves; + int nOctaveLayers; + +} CvSURFParams; + +CVAPI(CvSURFParams) cvSURFParams( double hessianThreshold, int extended CV_DEFAULT(0) ); + +// If useProvidedKeyPts!=0, keypoints are not detected, but descriptors are computed +// at the locations provided in keypoints (a CvSeq of CvSURFPoint). +CVAPI(void) cvExtractSURF( const CvArr* img, const CvArr* mask, + CvSeq** keypoints, CvSeq** descriptors, + CvMemStorage* storage, CvSURFParams params, + int useProvidedKeyPts CV_DEFAULT(0) ); + +/*! + Maximal Stable Regions Parameters + */ +typedef struct CvMSERParams +{ + //! delta, in the code, it compares (size_{i}-size_{i-delta})/size_{i-delta} + int delta; + //! prune the area which bigger than maxArea + int maxArea; + //! prune the area which smaller than minArea + int minArea; + //! prune the area have simliar size to its children + float maxVariation; + //! trace back to cut off mser with diversity < min_diversity + float minDiversity; + + /////// the next few params for MSER of color image + + //! for color image, the evolution steps + int maxEvolution; + //! the area threshold to cause re-initialize + double areaThreshold; + //! ignore too small margin + double minMargin; + //! 
the aperture size for edge blur + int edgeBlurSize; +} CvMSERParams; + +CVAPI(CvMSERParams) cvMSERParams( int delta CV_DEFAULT(5), int min_area CV_DEFAULT(60), + int max_area CV_DEFAULT(14400), float max_variation CV_DEFAULT(.25f), + float min_diversity CV_DEFAULT(.2f), int max_evolution CV_DEFAULT(200), + double area_threshold CV_DEFAULT(1.01), + double min_margin CV_DEFAULT(.003), + int edge_blur_size CV_DEFAULT(5) ); + +// Extracts the contours of Maximally Stable Extremal Regions +CVAPI(void) cvExtractMSER( CvArr* _img, CvArr* _mask, CvSeq** contours, CvMemStorage* storage, CvMSERParams params ); + + +typedef struct CvStarKeypoint +{ + CvPoint pt; + int size; + float response; +} CvStarKeypoint; + +CV_INLINE CvStarKeypoint cvStarKeypoint(CvPoint pt, int size, float response) +{ + CvStarKeypoint kpt; + kpt.pt = pt; + kpt.size = size; + kpt.response = response; + return kpt; +} + +typedef struct CvStarDetectorParams +{ + int maxSize; + int responseThreshold; + int lineThresholdProjected; + int lineThresholdBinarized; + int suppressNonmaxSize; +} CvStarDetectorParams; + +CV_INLINE CvStarDetectorParams cvStarDetectorParams( + int maxSize CV_DEFAULT(45), + int responseThreshold CV_DEFAULT(30), + int lineThresholdProjected CV_DEFAULT(10), + int lineThresholdBinarized CV_DEFAULT(8), + int suppressNonmaxSize CV_DEFAULT(5)) +{ + CvStarDetectorParams params; + params.maxSize = maxSize; + params.responseThreshold = responseThreshold; + params.lineThresholdProjected = lineThresholdProjected; + params.lineThresholdBinarized = lineThresholdBinarized; + params.suppressNonmaxSize = suppressNonmaxSize; + + return params; +} + +CVAPI(CvSeq*) cvGetStarKeypoints( const CvArr* img, CvMemStorage* storage, + CvStarDetectorParams params CV_DEFAULT(cvStarDetectorParams())); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/legacy/legacy.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/legacy/legacy.hpp new file mode 100644 index 0000000..96da25c --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/legacy/legacy.hpp @@ -0,0 +1,3436 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_LEGACY_HPP__ +#define __OPENCV_LEGACY_HPP__ + +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/features2d/features2d.hpp" +#include "opencv2/calib3d/calib3d.hpp" +#include "opencv2/ml/ml.hpp" + +#ifdef __cplusplus +extern "C" { +#endif + +CVAPI(CvSeq*) cvSegmentImage( const CvArr* srcarr, CvArr* dstarr, + double canny_threshold, + double ffill_threshold, + CvMemStorage* storage ); + +/****************************************************************************************\ +* Eigen objects * +\****************************************************************************************/ + +typedef int (CV_CDECL * CvCallback)(int index, void* buffer, void* user_data); +typedef union +{ + CvCallback callback; + void* data; +} +CvInput; + +#define CV_EIGOBJ_NO_CALLBACK 0 +#define CV_EIGOBJ_INPUT_CALLBACK 1 +#define CV_EIGOBJ_OUTPUT_CALLBACK 2 +#define CV_EIGOBJ_BOTH_CALLBACK 3 + +/* Calculates covariation matrix of a set of arrays */ +CVAPI(void) cvCalcCovarMatrixEx( int nObjects, void* input, int ioFlags, + int ioBufSize, uchar* buffer, void* userData, + IplImage* avg, float* covarMatrix ); + +/* Calculates eigen values and vectors of covariation matrix of a set of + arrays */ +CVAPI(void) cvCalcEigenObjects( int nObjects, void* input, void* output, + int ioFlags, int ioBufSize, void* userData, + CvTermCriteria* calcLimit, IplImage* avg, + float* eigVals ); + +/* Calculates dot product (obj - avg) * eigObj (i.e. 
projects image to eigen vector) */ +CVAPI(double) cvCalcDecompCoeff( IplImage* obj, IplImage* eigObj, IplImage* avg ); + +/* Projects image to eigen space (finds all decomposion coefficients */ +CVAPI(void) cvEigenDecomposite( IplImage* obj, int nEigObjs, void* eigInput, + int ioFlags, void* userData, IplImage* avg, + float* coeffs ); + +/* Projects original objects used to calculate eigen space basis to that space */ +CVAPI(void) cvEigenProjection( void* eigInput, int nEigObjs, int ioFlags, + void* userData, float* coeffs, IplImage* avg, + IplImage* proj ); + +/****************************************************************************************\ +* 1D/2D HMM * +\****************************************************************************************/ + +typedef struct CvImgObsInfo +{ + int obs_x; + int obs_y; + int obs_size; + float* obs;//consequtive observations + + int* state;/* arr of pairs superstate/state to which observation belong */ + int* mix; /* number of mixture to which observation belong */ + +} CvImgObsInfo;/*struct for 1 image*/ + +typedef CvImgObsInfo Cv1DObsInfo; + +typedef struct CvEHMMState +{ + int num_mix; /*number of mixtures in this state*/ + float* mu; /*mean vectors corresponding to each mixture*/ + float* inv_var; /* square root of inversed variances corresp. to each mixture*/ + float* log_var_val; /* sum of 0.5 (LN2PI + ln(variance[i]) ) for i=1,n */ + float* weight; /*array of mixture weights. Summ of all weights in state is 1. */ + +} CvEHMMState; + +typedef struct CvEHMM +{ + int level; /* 0 - lowest(i.e its states are real states), ..... */ + int num_states; /* number of HMM states */ + float* transP;/*transition probab. matrices for states */ + float** obsProb; /* if level == 0 - array of brob matrices corresponding to hmm + if level == 1 - martix of matrices */ + union + { + CvEHMMState* state; /* if level == 0 points to real states array, + if not - points to embedded hmms */ + struct CvEHMM* ehmm; /* pointer to an embedded model or NULL, if it is a leaf */ + } u; + +} CvEHMM; + +/*CVAPI(int) icvCreate1DHMM( CvEHMM** this_hmm, + int state_number, int* num_mix, int obs_size ); + +CVAPI(int) icvRelease1DHMM( CvEHMM** phmm ); + +CVAPI(int) icvUniform1DSegm( Cv1DObsInfo* obs_info, CvEHMM* hmm ); + +CVAPI(int) icvInit1DMixSegm( Cv1DObsInfo** obs_info_array, int num_img, CvEHMM* hmm); + +CVAPI(int) icvEstimate1DHMMStateParams( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm); + +CVAPI(int) icvEstimate1DObsProb( CvImgObsInfo* obs_info, CvEHMM* hmm ); + +CVAPI(int) icvEstimate1DTransProb( Cv1DObsInfo** obs_info_array, + int num_seq, + CvEHMM* hmm ); + +CVAPI(float) icvViterbi( Cv1DObsInfo* obs_info, CvEHMM* hmm); + +CVAPI(int) icv1DMixSegmL2( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm );*/ + +/*********************************** Embedded HMMs *************************************/ + +/* Creates 2D HMM */ +CVAPI(CvEHMM*) cvCreate2DHMM( int* stateNumber, int* numMix, int obsSize ); + +/* Releases HMM */ +CVAPI(void) cvRelease2DHMM( CvEHMM** hmm ); + +#define CV_COUNT_OBS(roi, win, delta, numObs ) \ +{ \ + (numObs)->width =((roi)->width -(win)->width +(delta)->width)/(delta)->width; \ + (numObs)->height =((roi)->height -(win)->height +(delta)->height)/(delta)->height;\ +} + +/* Creates storage for observation vectors */ +CVAPI(CvImgObsInfo*) cvCreateObsInfo( CvSize numObs, int obsSize ); + +/* Releases storage for observation vectors */ +CVAPI(void) cvReleaseObsInfo( CvImgObsInfo** obs_info ); + + +/* The function takes an image on input 
and and returns the sequnce of observations + to be used with an embedded HMM; Each observation is top-left block of DCT + coefficient matrix */ +CVAPI(void) cvImgToObs_DCT( const CvArr* arr, float* obs, CvSize dctSize, + CvSize obsSize, CvSize delta ); + + +/* Uniformly segments all observation vectors extracted from image */ +CVAPI(void) cvUniformImgSegm( CvImgObsInfo* obs_info, CvEHMM* ehmm ); + +/* Does mixture segmentation of the states of embedded HMM */ +CVAPI(void) cvInitMixSegm( CvImgObsInfo** obs_info_array, + int num_img, CvEHMM* hmm ); + +/* Function calculates means, variances, weights of every Gaussian mixture + of every low-level state of embedded HMM */ +CVAPI(void) cvEstimateHMMStateParams( CvImgObsInfo** obs_info_array, + int num_img, CvEHMM* hmm ); + +/* Function computes transition probability matrices of embedded HMM + given observations segmentation */ +CVAPI(void) cvEstimateTransProb( CvImgObsInfo** obs_info_array, + int num_img, CvEHMM* hmm ); + +/* Function computes probabilities of appearing observations at any state + (i.e. computes P(obs|state) for every pair(obs,state)) */ +CVAPI(void) cvEstimateObsProb( CvImgObsInfo* obs_info, + CvEHMM* hmm ); + +/* Runs Viterbi algorithm for embedded HMM */ +CVAPI(float) cvEViterbi( CvImgObsInfo* obs_info, CvEHMM* hmm ); + + +/* Function clusters observation vectors from several images + given observations segmentation. + Euclidean distance used for clustering vectors. + Centers of clusters are given means of every mixture */ +CVAPI(void) cvMixSegmL2( CvImgObsInfo** obs_info_array, + int num_img, CvEHMM* hmm ); + +/****************************************************************************************\ +* A few functions from old stereo gesture recognition demosions * +\****************************************************************************************/ + +/* Creates hand mask image given several points on the hand */ +CVAPI(void) cvCreateHandMask( CvSeq* hand_points, + IplImage *img_mask, CvRect *roi); + +/* Finds hand region in range image data */ +CVAPI(void) cvFindHandRegion (CvPoint3D32f* points, int count, + CvSeq* indexs, + float* line, CvSize2D32f size, int flag, + CvPoint3D32f* center, + CvMemStorage* storage, CvSeq **numbers); + +/* Finds hand region in range image data (advanced version) */ +CVAPI(void) cvFindHandRegionA( CvPoint3D32f* points, int count, + CvSeq* indexs, + float* line, CvSize2D32f size, int jc, + CvPoint3D32f* center, + CvMemStorage* storage, CvSeq **numbers); + +/* Calculates the cooficients of the homography matrix */ +CVAPI(void) cvCalcImageHomography( float* line, CvPoint3D32f* center, + float* intrinsic, float* homography ); + +/****************************************************************************************\ +* More operations on sequences * +\****************************************************************************************/ + +/*****************************************************************************************/ + +#define CV_CURRENT_INT( reader ) (*((int *)(reader).ptr)) +#define CV_PREV_INT( reader ) (*((int *)(reader).prev_elem)) + +#define CV_GRAPH_WEIGHTED_VERTEX_FIELDS() CV_GRAPH_VERTEX_FIELDS()\ + float weight; + +#define CV_GRAPH_WEIGHTED_EDGE_FIELDS() CV_GRAPH_EDGE_FIELDS() + +typedef struct CvGraphWeightedVtx +{ + CV_GRAPH_WEIGHTED_VERTEX_FIELDS() +} CvGraphWeightedVtx; + +typedef struct CvGraphWeightedEdge +{ + CV_GRAPH_WEIGHTED_EDGE_FIELDS() +} CvGraphWeightedEdge; + +typedef enum CvGraphWeightType +{ + CV_NOT_WEIGHTED, + CV_WEIGHTED_VTX, + 
CV_WEIGHTED_EDGE, + CV_WEIGHTED_ALL +} CvGraphWeightType; + + +/* Calculates histogram of a contour */ +CVAPI(void) cvCalcPGH( const CvSeq* contour, CvHistogram* hist ); + +#define CV_DOMINANT_IPAN 1 + +/* Finds high-curvature points of the contour */ +CVAPI(CvSeq*) cvFindDominantPoints( CvSeq* contour, CvMemStorage* storage, + int method CV_DEFAULT(CV_DOMINANT_IPAN), + double parameter1 CV_DEFAULT(0), + double parameter2 CV_DEFAULT(0), + double parameter3 CV_DEFAULT(0), + double parameter4 CV_DEFAULT(0)); + +/*****************************************************************************************/ + + +/*******************************Stereo correspondence*************************************/ + +typedef struct CvCliqueFinder +{ + CvGraph* graph; + int** adj_matr; + int N; //graph size + + // stacks, counters etc/ + int k; //stack size + int* current_comp; + int** All; + + int* ne; + int* ce; + int* fixp; //node with minimal disconnections + int* nod; + int* s; //for selected candidate + int status; + int best_score; + int weighted; + int weighted_edges; + float best_weight; + float* edge_weights; + float* vertex_weights; + float* cur_weight; + float* cand_weight; + +} CvCliqueFinder; + +#define CLIQUE_TIME_OFF 2 +#define CLIQUE_FOUND 1 +#define CLIQUE_END 0 + +/*CVAPI(void) cvStartFindCliques( CvGraph* graph, CvCliqueFinder* finder, int reverse, + int weighted CV_DEFAULT(0), int weighted_edges CV_DEFAULT(0)); +CVAPI(int) cvFindNextMaximalClique( CvCliqueFinder* finder, int* clock_rest CV_DEFAULT(0) ); +CVAPI(void) cvEndFindCliques( CvCliqueFinder* finder ); + +CVAPI(void) cvBronKerbosch( CvGraph* graph );*/ + + +/*F/////////////////////////////////////////////////////////////////////////////////////// +// +// Name: cvSubgraphWeight +// Purpose: finds weight of subgraph in a graph +// Context: +// Parameters: +// graph - input graph. +// subgraph - sequence of pairwise different ints. These are indices of vertices of subgraph. +// weight_type - describes the way we measure weight. +// one of the following: +// CV_NOT_WEIGHTED - weight of a clique is simply its size +// CV_WEIGHTED_VTX - weight of a clique is the sum of weights of its vertices +// CV_WEIGHTED_EDGE - the same but edges +// CV_WEIGHTED_ALL - the same but both edges and vertices +// weight_vtx - optional vector of floats, with size = graph->total. +// If weight_type is either CV_WEIGHTED_VTX or CV_WEIGHTED_ALL +// weights of vertices must be provided. If weight_vtx not zero +// these weights considered to be here, otherwise function assumes +// that vertices of graph are inherited from CvGraphWeightedVtx. +// weight_edge - optional matrix of floats, of width and height = graph->total. +// If weight_type is either CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL +// weights of edges ought to be supplied. If weight_edge is not zero +// function finds them here, otherwise function expects +// edges of graph to be inherited from CvGraphWeightedEdge. +// If this parameter is not zero structure of the graph is determined from matrix +// rather than from CvGraphEdge's. In particular, elements corresponding to +// absent edges should be zero. +// Returns: +// weight of subgraph. 
+// Notes: +//F*/ +/*CVAPI(float) cvSubgraphWeight( CvGraph *graph, CvSeq *subgraph, + CvGraphWeightType weight_type CV_DEFAULT(CV_NOT_WEIGHTED), + CvVect32f weight_vtx CV_DEFAULT(0), + CvMatr32f weight_edge CV_DEFAULT(0) );*/ + + +/*F/////////////////////////////////////////////////////////////////////////////////////// +// +// Name: cvFindCliqueEx +// Purpose: tries to find clique with maximum possible weight in a graph +// Context: +// Parameters: +// graph - input graph. +// storage - memory storage to be used by the result. +// is_complementary - optional flag showing whether function should seek for clique +// in complementary graph. +// weight_type - describes our notion about weight. +// one of the following: +// CV_NOT_WEIGHTED - weight of a clique is simply its size +// CV_WEIGHTED_VTX - weight of a clique is the sum of weights of its vertices +// CV_WEIGHTED_EDGE - the same but edges +// CV_WEIGHTED_ALL - the same but both edges and vertices +// weight_vtx - optional vector of floats, with size = graph->total. +// If weight_type is either CV_WEIGHTED_VTX or CV_WEIGHTED_ALL +// weights of vertices must be provided. If weight_vtx not zero +// these weights considered to be here, otherwise function assumes +// that vertices of graph are inherited from CvGraphWeightedVtx. +// weight_edge - optional matrix of floats, of width and height = graph->total. +// If weight_type is either CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL +// weights of edges ought to be supplied. If weight_edge is not zero +// function finds them here, otherwise function expects +// edges of graph to be inherited from CvGraphWeightedEdge. +// Note that in case of CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL +// nonzero is_complementary implies nonzero weight_edge. +// start_clique - optional sequence of pairwise different ints. They are indices of +// vertices that shall be present in the output clique. +// subgraph_of_ban - optional sequence of (maybe equal) ints. They are indices of +// vertices that shall not be present in the output clique. +// clique_weight_ptr - optional output parameter. Weight of found clique stored here. +// num_generations - optional number of generations in evolutionary part of algorithm, +// zero forces to return first found clique. +// quality - optional parameter determining degree of required quality/speed tradeoff. +// Must be in the range from 0 to 9. +// 0 is fast and dirty, 9 is slow but hopefully yields good clique. +// Returns: +// sequence of pairwise different ints. +// These are indices of vertices that form found clique. +// Notes: +// in cases of CV_WEIGHTED_EDGE and CV_WEIGHTED_ALL weights should be nonnegative. +// start_clique has a priority over subgraph_of_ban. 
+//F*/ +/*CVAPI(CvSeq*) cvFindCliqueEx( CvGraph *graph, CvMemStorage *storage, + int is_complementary CV_DEFAULT(0), + CvGraphWeightType weight_type CV_DEFAULT(CV_NOT_WEIGHTED), + CvVect32f weight_vtx CV_DEFAULT(0), + CvMatr32f weight_edge CV_DEFAULT(0), + CvSeq *start_clique CV_DEFAULT(0), + CvSeq *subgraph_of_ban CV_DEFAULT(0), + float *clique_weight_ptr CV_DEFAULT(0), + int num_generations CV_DEFAULT(3), + int quality CV_DEFAULT(2) );*/ + + +#define CV_UNDEF_SC_PARAM 12345 //default value of parameters + +#define CV_IDP_BIRCHFIELD_PARAM1 25 +#define CV_IDP_BIRCHFIELD_PARAM2 5 +#define CV_IDP_BIRCHFIELD_PARAM3 12 +#define CV_IDP_BIRCHFIELD_PARAM4 15 +#define CV_IDP_BIRCHFIELD_PARAM5 25 + + +#define CV_DISPARITY_BIRCHFIELD 0 + + +/*F/////////////////////////////////////////////////////////////////////////// +// +// Name: cvFindStereoCorrespondence +// Purpose: find stereo correspondence on stereo-pair +// Context: +// Parameters: +// leftImage - left image of stereo-pair (format 8uC1). +// rightImage - right image of stereo-pair (format 8uC1). +// mode - mode of correspondence retrieval (now CV_DISPARITY_BIRCHFIELD only) +// dispImage - destination disparity image +// maxDisparity - maximal disparity +// param1, param2, param3, param4, param5 - parameters of algorithm +// Returns: +// Notes: +// Images must be rectified. +// All images must have format 8uC1. +//F*/ +CVAPI(void) +cvFindStereoCorrespondence( + const CvArr* leftImage, const CvArr* rightImage, + int mode, + CvArr* dispImage, + int maxDisparity, + double param1 CV_DEFAULT(CV_UNDEF_SC_PARAM), + double param2 CV_DEFAULT(CV_UNDEF_SC_PARAM), + double param3 CV_DEFAULT(CV_UNDEF_SC_PARAM), + double param4 CV_DEFAULT(CV_UNDEF_SC_PARAM), + double param5 CV_DEFAULT(CV_UNDEF_SC_PARAM) ); + +/*****************************************************************************************/ +/************ Epiline functions *******************/ + + + +typedef struct CvStereoLineCoeff +{ + double Xcoef; + double XcoefA; + double XcoefB; + double XcoefAB; + + double Ycoef; + double YcoefA; + double YcoefB; + double YcoefAB; + + double Zcoef; + double ZcoefA; + double ZcoefB; + double ZcoefAB; +}CvStereoLineCoeff; + + +typedef struct CvCamera +{ + float imgSize[2]; /* size of the camera view, used during calibration */ + float matrix[9]; /* intinsic camera parameters: [ fx 0 cx; 0 fy cy; 0 0 1 ] */ + float distortion[4]; /* distortion coefficients - two coefficients for radial distortion + and another two for tangential: [ k1 k2 p1 p2 ] */ + float rotMatr[9]; + float transVect[3]; /* rotation matrix and transition vector relatively + to some reference point in the space. 
*/ +} CvCamera; + +typedef struct CvStereoCamera +{ + CvCamera* camera[2]; /* two individual camera parameters */ + float fundMatr[9]; /* fundamental matrix */ + + /* New part for stereo */ + CvPoint3D32f epipole[2]; + CvPoint2D32f quad[2][4]; /* coordinates of destination quadrangle after + epipolar geometry rectification */ + double coeffs[2][3][3];/* coefficients for transformation */ + CvPoint2D32f border[2][4]; + CvSize warpSize; + CvStereoLineCoeff* lineCoeffs; + int needSwapCameras;/* flag set to 1 if need to swap cameras for good reconstruction */ + float rotMatrix[9]; + float transVector[3]; +} CvStereoCamera; + + +typedef struct CvContourOrientation +{ + float egvals[2]; + float egvects[4]; + + float max, min; // minimum and maximum projections + int imax, imin; +} CvContourOrientation; + +#define CV_CAMERA_TO_WARP 1 +#define CV_WARP_TO_CAMERA 2 + +CVAPI(int) icvConvertWarpCoordinates(double coeffs[3][3], + CvPoint2D32f* cameraPoint, + CvPoint2D32f* warpPoint, + int direction); + +CVAPI(int) icvGetSymPoint3D( CvPoint3D64f pointCorner, + CvPoint3D64f point1, + CvPoint3D64f point2, + CvPoint3D64f *pointSym2); + +CVAPI(void) icvGetPieceLength3D(CvPoint3D64f point1,CvPoint3D64f point2,double* dist); + +CVAPI(int) icvCompute3DPoint( double alpha,double betta, + CvStereoLineCoeff* coeffs, + CvPoint3D64f* point); + +CVAPI(int) icvCreateConvertMatrVect( double* rotMatr1, + double* transVect1, + double* rotMatr2, + double* transVect2, + double* convRotMatr, + double* convTransVect); + +CVAPI(int) icvConvertPointSystem(CvPoint3D64f M2, + CvPoint3D64f* M1, + double* rotMatr, + double* transVect + ); + +CVAPI(int) icvComputeCoeffForStereo( CvStereoCamera* stereoCamera); + +CVAPI(int) icvGetCrossPieceVector(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f v2_start,CvPoint2D32f v2_end,CvPoint2D32f *cross); +CVAPI(int) icvGetCrossLineDirect(CvPoint2D32f p1,CvPoint2D32f p2,float a,float b,float c,CvPoint2D32f* cross); +CVAPI(float) icvDefinePointPosition(CvPoint2D32f point1,CvPoint2D32f point2,CvPoint2D32f point); +CVAPI(int) icvStereoCalibration( int numImages, + int* nums, + CvSize imageSize, + CvPoint2D32f* imagePoints1, + CvPoint2D32f* imagePoints2, + CvPoint3D32f* objectPoints, + CvStereoCamera* stereoparams + ); + + +CVAPI(int) icvComputeRestStereoParams(CvStereoCamera *stereoparams); + +CVAPI(void) cvComputePerspectiveMap( const double coeffs[3][3], CvArr* rectMapX, CvArr* rectMapY ); + +CVAPI(int) icvComCoeffForLine( CvPoint2D64f point1, + CvPoint2D64f point2, + CvPoint2D64f point3, + CvPoint2D64f point4, + double* camMatr1, + double* rotMatr1, + double* transVect1, + double* camMatr2, + double* rotMatr2, + double* transVect2, + CvStereoLineCoeff* coeffs, + int* needSwapCameras); + +CVAPI(int) icvGetDirectionForPoint( CvPoint2D64f point, + double* camMatr, + CvPoint3D64f* direct); + +CVAPI(int) icvGetCrossLines(CvPoint3D64f point11,CvPoint3D64f point12, + CvPoint3D64f point21,CvPoint3D64f point22, + CvPoint3D64f* midPoint); + +CVAPI(int) icvComputeStereoLineCoeffs( CvPoint3D64f pointA, + CvPoint3D64f pointB, + CvPoint3D64f pointCam1, + double gamma, + CvStereoLineCoeff* coeffs); + +/*CVAPI(int) icvComputeFundMatrEpipoles ( double* camMatr1, + double* rotMatr1, + double* transVect1, + double* camMatr2, + double* rotMatr2, + double* transVect2, + CvPoint2D64f* epipole1, + CvPoint2D64f* epipole2, + double* fundMatr);*/ + +CVAPI(int) icvGetAngleLine( CvPoint2D64f startPoint, CvSize imageSize,CvPoint2D64f *point1,CvPoint2D64f *point2); + +CVAPI(void) icvGetCoefForPiece( CvPoint2D64f 
p_start,CvPoint2D64f p_end, + double *a,double *b,double *c, + int* result); + +/*CVAPI(void) icvGetCommonArea( CvSize imageSize, + CvPoint2D64f epipole1,CvPoint2D64f epipole2, + double* fundMatr, + double* coeff11,double* coeff12, + double* coeff21,double* coeff22, + int* result);*/ + +CVAPI(void) icvComputeeInfiniteProject1(double* rotMatr, + double* camMatr1, + double* camMatr2, + CvPoint2D32f point1, + CvPoint2D32f *point2); + +CVAPI(void) icvComputeeInfiniteProject2(double* rotMatr, + double* camMatr1, + double* camMatr2, + CvPoint2D32f* point1, + CvPoint2D32f point2); + +CVAPI(void) icvGetCrossDirectDirect( double* direct1,double* direct2, + CvPoint2D64f *cross,int* result); + +CVAPI(void) icvGetCrossPieceDirect( CvPoint2D64f p_start,CvPoint2D64f p_end, + double a,double b,double c, + CvPoint2D64f *cross,int* result); + +CVAPI(void) icvGetCrossPiecePiece( CvPoint2D64f p1_start,CvPoint2D64f p1_end, + CvPoint2D64f p2_start,CvPoint2D64f p2_end, + CvPoint2D64f* cross, + int* result); + +CVAPI(void) icvGetPieceLength(CvPoint2D64f point1,CvPoint2D64f point2,double* dist); + +CVAPI(void) icvGetCrossRectDirect( CvSize imageSize, + double a,double b,double c, + CvPoint2D64f *start,CvPoint2D64f *end, + int* result); + +CVAPI(void) icvProjectPointToImage( CvPoint3D64f point, + double* camMatr,double* rotMatr,double* transVect, + CvPoint2D64f* projPoint); + +CVAPI(void) icvGetQuadsTransform( CvSize imageSize, + double* camMatr1, + double* rotMatr1, + double* transVect1, + double* camMatr2, + double* rotMatr2, + double* transVect2, + CvSize* warpSize, + double quad1[4][2], + double quad2[4][2], + double* fundMatr, + CvPoint3D64f* epipole1, + CvPoint3D64f* epipole2 + ); + +CVAPI(void) icvGetQuadsTransformStruct( CvStereoCamera* stereoCamera); + +CVAPI(void) icvComputeStereoParamsForCameras(CvStereoCamera* stereoCamera); + +CVAPI(void) icvGetCutPiece( double* areaLineCoef1,double* areaLineCoef2, + CvPoint2D64f epipole, + CvSize imageSize, + CvPoint2D64f* point11,CvPoint2D64f* point12, + CvPoint2D64f* point21,CvPoint2D64f* point22, + int* result); + +CVAPI(void) icvGetMiddleAnglePoint( CvPoint2D64f basePoint, + CvPoint2D64f point1,CvPoint2D64f point2, + CvPoint2D64f* midPoint); + +CVAPI(void) icvGetNormalDirect(double* direct,CvPoint2D64f point,double* normDirect); + +CVAPI(double) icvGetVect(CvPoint2D64f basePoint,CvPoint2D64f point1,CvPoint2D64f point2); + +CVAPI(void) icvProjectPointToDirect( CvPoint2D64f point,double* lineCoeff, + CvPoint2D64f* projectPoint); + +CVAPI(void) icvGetDistanceFromPointToDirect( CvPoint2D64f point,double* lineCoef,double*dist); + +CVAPI(IplImage*) icvCreateIsometricImage( IplImage* src, IplImage* dst, + int desired_depth, int desired_num_channels ); + +CVAPI(void) cvDeInterlace( const CvArr* frame, CvArr* fieldEven, CvArr* fieldOdd ); + +/*CVAPI(int) icvSelectBestRt( int numImages, + int* numPoints, + CvSize imageSize, + CvPoint2D32f* imagePoints1, + CvPoint2D32f* imagePoints2, + CvPoint3D32f* objectPoints, + + CvMatr32f cameraMatrix1, + CvVect32f distortion1, + CvMatr32f rotMatrs1, + CvVect32f transVects1, + + CvMatr32f cameraMatrix2, + CvVect32f distortion2, + CvMatr32f rotMatrs2, + CvVect32f transVects2, + + CvMatr32f bestRotMatr, + CvVect32f bestTransVect + );*/ + + +/****************************************************************************************\ +* Contour Tree * +\****************************************************************************************/ + +/* Contour tree header */ +typedef struct CvContourTree +{ + CV_SEQUENCE_FIELDS() + CvPoint p1; /* 
the first point of the binary tree root segment */ + CvPoint p2; /* the last point of the binary tree root segment */ +} CvContourTree; + +/* Builds hierarhical representation of a contour */ +CVAPI(CvContourTree*) cvCreateContourTree( const CvSeq* contour, + CvMemStorage* storage, + double threshold ); + +/* Reconstruct (completelly or partially) contour a from contour tree */ +CVAPI(CvSeq*) cvContourFromContourTree( const CvContourTree* tree, + CvMemStorage* storage, + CvTermCriteria criteria ); + +/* Compares two contour trees */ +enum { CV_CONTOUR_TREES_MATCH_I1 = 1 }; + +CVAPI(double) cvMatchContourTrees( const CvContourTree* tree1, + const CvContourTree* tree2, + int method, double threshold ); + +/****************************************************************************************\ +* Contour Morphing * +\****************************************************************************************/ + +/* finds correspondence between two contours */ +CvSeq* cvCalcContoursCorrespondence( const CvSeq* contour1, + const CvSeq* contour2, + CvMemStorage* storage); + +/* morphs contours using the pre-calculated correspondence: + alpha=0 ~ contour1, alpha=1 ~ contour2 */ +CvSeq* cvMorphContours( const CvSeq* contour1, const CvSeq* contour2, + CvSeq* corr, double alpha, + CvMemStorage* storage ); + + +/****************************************************************************************\ +* Active Contours * +\****************************************************************************************/ + +#define CV_VALUE 1 +#define CV_ARRAY 2 +/* Updates active contour in order to minimize its cummulative + (internal and external) energy. */ +CVAPI(void) cvSnakeImage( const IplImage* image, CvPoint* points, + int length, float* alpha, + float* beta, float* gamma, + int coeff_usage, CvSize win, + CvTermCriteria criteria, int calc_gradient CV_DEFAULT(1)); + +/****************************************************************************************\ +* Texture Descriptors * +\****************************************************************************************/ + +#define CV_GLCM_OPTIMIZATION_NONE -2 +#define CV_GLCM_OPTIMIZATION_LUT -1 +#define CV_GLCM_OPTIMIZATION_HISTOGRAM 0 + +#define CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST 10 +#define CV_GLCMDESC_OPTIMIZATION_ALLOWTRIPLENEST 11 +#define CV_GLCMDESC_OPTIMIZATION_HISTOGRAM 4 + +#define CV_GLCMDESC_ENTROPY 0 +#define CV_GLCMDESC_ENERGY 1 +#define CV_GLCMDESC_HOMOGENITY 2 +#define CV_GLCMDESC_CONTRAST 3 +#define CV_GLCMDESC_CLUSTERTENDENCY 4 +#define CV_GLCMDESC_CLUSTERSHADE 5 +#define CV_GLCMDESC_CORRELATION 6 +#define CV_GLCMDESC_CORRELATIONINFO1 7 +#define CV_GLCMDESC_CORRELATIONINFO2 8 +#define CV_GLCMDESC_MAXIMUMPROBABILITY 9 + +#define CV_GLCM_ALL 0 +#define CV_GLCM_GLCM 1 +#define CV_GLCM_DESC 2 + +typedef struct CvGLCM CvGLCM; + +CVAPI(CvGLCM*) cvCreateGLCM( const IplImage* srcImage, + int stepMagnitude, + const int* stepDirections CV_DEFAULT(0), + int numStepDirections CV_DEFAULT(0), + int optimizationType CV_DEFAULT(CV_GLCM_OPTIMIZATION_NONE)); + +CVAPI(void) cvReleaseGLCM( CvGLCM** GLCM, int flag CV_DEFAULT(CV_GLCM_ALL)); + +CVAPI(void) cvCreateGLCMDescriptors( CvGLCM* destGLCM, + int descriptorOptimizationType + CV_DEFAULT(CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST)); + +CVAPI(double) cvGetGLCMDescriptor( CvGLCM* GLCM, int step, int descriptor ); + +CVAPI(void) cvGetGLCMDescriptorStatistics( CvGLCM* GLCM, int descriptor, + double* average, double* standardDeviation ); + +CVAPI(IplImage*) cvCreateGLCMImage( CvGLCM* GLCM, int step ); 
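+
+/* Usage sketch: one possible way to drive the GLCM texture-descriptor API
+   declared above, assuming an 8-bit single-channel input image. The file name
+   "texture.png", the step magnitude of 1, and the step-direction index 0 are
+   arbitrary placeholders, not values prescribed by this header.
+
+       IplImage* gray = cvLoadImage("texture.png", CV_LOAD_IMAGE_GRAYSCALE);
+       CvGLCM* glcm = cvCreateGLCM(gray, 1);     // co-occurrence matrices, step size 1,
+                                                 // default directions and optimization
+       cvCreateGLCMDescriptors(glcm);            // compute the Haralick-style statistics
+       double contrast =
+           cvGetGLCMDescriptor(glcm, 0, CV_GLCMDESC_CONTRAST); // query one descriptor
+                                                               // for the first direction
+       cvReleaseGLCM(&glcm);                     // free the GLCM structure
+       cvReleaseImage(&gray);
+*/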
+ +/****************************************************************************************\ +* Face eyes&mouth tracking * +\****************************************************************************************/ + + +typedef struct CvFaceTracker CvFaceTracker; + +#define CV_NUM_FACE_ELEMENTS 3 +enum CV_FACE_ELEMENTS +{ + CV_FACE_MOUTH = 0, + CV_FACE_LEFT_EYE = 1, + CV_FACE_RIGHT_EYE = 2 +}; + +CVAPI(CvFaceTracker*) cvInitFaceTracker(CvFaceTracker* pFaceTracking, const IplImage* imgGray, + CvRect* pRects, int nRects); +CVAPI(int) cvTrackFace( CvFaceTracker* pFaceTracker, IplImage* imgGray, + CvRect* pRects, int nRects, + CvPoint* ptRotate, double* dbAngleRotate); +CVAPI(void) cvReleaseFaceTracker(CvFaceTracker** ppFaceTracker); + + +typedef struct CvFace +{ + CvRect MouthRect; + CvRect LeftEyeRect; + CvRect RightEyeRect; +} CvFaceData; + +CvSeq * cvFindFace(IplImage * Image,CvMemStorage* storage); +CvSeq * cvPostBoostingFindFace(IplImage * Image,CvMemStorage* storage); + + +/****************************************************************************************\ +* 3D Tracker * +\****************************************************************************************/ + +typedef unsigned char CvBool; + +typedef struct Cv3dTracker2dTrackedObject +{ + int id; + CvPoint2D32f p; // pgruebele: So we do not loose precision, this needs to be float +} Cv3dTracker2dTrackedObject; + +CV_INLINE Cv3dTracker2dTrackedObject cv3dTracker2dTrackedObject(int id, CvPoint2D32f p) +{ + Cv3dTracker2dTrackedObject r; + r.id = id; + r.p = p; + return r; +} + +typedef struct Cv3dTrackerTrackedObject +{ + int id; + CvPoint3D32f p; // location of the tracked object +} Cv3dTrackerTrackedObject; + +CV_INLINE Cv3dTrackerTrackedObject cv3dTrackerTrackedObject(int id, CvPoint3D32f p) +{ + Cv3dTrackerTrackedObject r; + r.id = id; + r.p = p; + return r; +} + +typedef struct Cv3dTrackerCameraInfo +{ + CvBool valid; + float mat[4][4]; /* maps camera coordinates to world coordinates */ + CvPoint2D32f principal_point; /* copied from intrinsics so this structure */ + /* has all the info we need */ +} Cv3dTrackerCameraInfo; + +typedef struct Cv3dTrackerCameraIntrinsics +{ + CvPoint2D32f principal_point; + float focal_length[2]; + float distortion[4]; +} Cv3dTrackerCameraIntrinsics; + +CVAPI(CvBool) cv3dTrackerCalibrateCameras(int num_cameras, + const Cv3dTrackerCameraIntrinsics camera_intrinsics[], /* size is num_cameras */ + CvSize etalon_size, + float square_size, + IplImage *samples[], /* size is num_cameras */ + Cv3dTrackerCameraInfo camera_info[]); /* size is num_cameras */ + +CVAPI(int) cv3dTrackerLocateObjects(int num_cameras, int num_objects, + const Cv3dTrackerCameraInfo camera_info[], /* size is num_cameras */ + const Cv3dTracker2dTrackedObject tracking_info[], /* size is num_objects*num_cameras */ + Cv3dTrackerTrackedObject tracked_objects[]); /* size is num_objects */ +/**************************************************************************************** + tracking_info is a rectangular array; one row per camera, num_objects elements per row. + The id field of any unused slots must be -1. Ids need not be ordered or consecutive. On + completion, the return value is the number of objects located; i.e., the number of objects + visible by more than one camera. The id field of any unused slots in tracked objects is + set to -1. 
+****************************************************************************************/ + + +/****************************************************************************************\ +* Skeletons and Linear-Contour Models * +\****************************************************************************************/ + +typedef enum CvLeeParameters +{ + CV_LEE_INT = 0, + CV_LEE_FLOAT = 1, + CV_LEE_DOUBLE = 2, + CV_LEE_AUTO = -1, + CV_LEE_ERODE = 0, + CV_LEE_ZOOM = 1, + CV_LEE_NON = 2 +} CvLeeParameters; + +#define CV_NEXT_VORONOISITE2D( SITE ) ((SITE)->edge[0]->site[((SITE)->edge[0]->site[0] == (SITE))]) +#define CV_PREV_VORONOISITE2D( SITE ) ((SITE)->edge[1]->site[((SITE)->edge[1]->site[0] == (SITE))]) +#define CV_FIRST_VORONOIEDGE2D( SITE ) ((SITE)->edge[0]) +#define CV_LAST_VORONOIEDGE2D( SITE ) ((SITE)->edge[1]) +#define CV_NEXT_VORONOIEDGE2D( EDGE, SITE ) ((EDGE)->next[(EDGE)->site[0] != (SITE)]) +#define CV_PREV_VORONOIEDGE2D( EDGE, SITE ) ((EDGE)->next[2 + ((EDGE)->site[0] != (SITE))]) +#define CV_VORONOIEDGE2D_BEGINNODE( EDGE, SITE ) ((EDGE)->node[((EDGE)->site[0] != (SITE))]) +#define CV_VORONOIEDGE2D_ENDNODE( EDGE, SITE ) ((EDGE)->node[((EDGE)->site[0] == (SITE))]) +#define CV_TWIN_VORONOISITE2D( SITE, EDGE ) ( (EDGE)->site[((EDGE)->site[0] == (SITE))]) + +#define CV_VORONOISITE2D_FIELDS() \ + struct CvVoronoiNode2D *node[2]; \ + struct CvVoronoiEdge2D *edge[2]; + +typedef struct CvVoronoiSite2D +{ + CV_VORONOISITE2D_FIELDS() + struct CvVoronoiSite2D *next[2]; +} CvVoronoiSite2D; + +#define CV_VORONOIEDGE2D_FIELDS() \ + struct CvVoronoiNode2D *node[2]; \ + struct CvVoronoiSite2D *site[2]; \ + struct CvVoronoiEdge2D *next[4]; + +typedef struct CvVoronoiEdge2D +{ + CV_VORONOIEDGE2D_FIELDS() +} CvVoronoiEdge2D; + +#define CV_VORONOINODE2D_FIELDS() \ + CV_SET_ELEM_FIELDS(CvVoronoiNode2D) \ + CvPoint2D32f pt; \ + float radius; + +typedef struct CvVoronoiNode2D +{ + CV_VORONOINODE2D_FIELDS() +} CvVoronoiNode2D; + +#define CV_VORONOIDIAGRAM2D_FIELDS() \ + CV_GRAPH_FIELDS() \ + CvSet *sites; + +typedef struct CvVoronoiDiagram2D +{ + CV_VORONOIDIAGRAM2D_FIELDS() +} CvVoronoiDiagram2D; + +/* Computes Voronoi Diagram for given polygons with holes */ +CVAPI(int) cvVoronoiDiagramFromContour(CvSeq* ContourSeq, + CvVoronoiDiagram2D** VoronoiDiagram, + CvMemStorage* VoronoiStorage, + CvLeeParameters contour_type CV_DEFAULT(CV_LEE_INT), + int contour_orientation CV_DEFAULT(-1), + int attempt_number CV_DEFAULT(10)); + +/* Computes Voronoi Diagram for domains in given image */ +CVAPI(int) cvVoronoiDiagramFromImage(IplImage* pImage, + CvSeq** ContourSeq, + CvVoronoiDiagram2D** VoronoiDiagram, + CvMemStorage* VoronoiStorage, + CvLeeParameters regularization_method CV_DEFAULT(CV_LEE_NON), + float approx_precision CV_DEFAULT(CV_LEE_AUTO)); + +/* Deallocates the storage */ +CVAPI(void) cvReleaseVoronoiStorage(CvVoronoiDiagram2D* VoronoiDiagram, + CvMemStorage** pVoronoiStorage); + +/*********************** Linear-Contour Model ****************************/ + +struct CvLCMEdge; +struct CvLCMNode; + +typedef struct CvLCMEdge +{ + CV_GRAPH_EDGE_FIELDS() + CvSeq* chain; + float width; + int index1; + int index2; +} CvLCMEdge; + +typedef struct CvLCMNode +{ + CV_GRAPH_VERTEX_FIELDS() + CvContour* contour; +} CvLCMNode; + + +/* Computes hybrid model from Voronoi Diagram */ +CVAPI(CvGraph*) cvLinearContorModelFromVoronoiDiagram(CvVoronoiDiagram2D* VoronoiDiagram, + float maxWidth); + +/* Releases hybrid model storage */ +CVAPI(int) cvReleaseLinearContorModelStorage(CvGraph** Graph); + + +/* two stereo-related 
functions */ + +CVAPI(void) cvInitPerspectiveTransform( CvSize size, const CvPoint2D32f vertex[4], double matrix[3][3], + CvArr* rectMap ); + +/*CVAPI(void) cvInitStereoRectification( CvStereoCamera* params, + CvArr* rectMap1, CvArr* rectMap2, + int do_undistortion );*/ + +/*************************** View Morphing Functions ************************/ + +typedef struct CvMatrix3 +{ + float m[3][3]; +} CvMatrix3; + +/* The order of the function corresponds to the order they should appear in + the view morphing pipeline */ + +/* Finds ending points of scanlines on left and right images of stereo-pair */ +CVAPI(void) cvMakeScanlines( const CvMatrix3* matrix, CvSize img_size, + int* scanlines1, int* scanlines2, + int* lengths1, int* lengths2, + int* line_count ); + +/* Grab pixel values from scanlines and stores them sequentially + (some sort of perspective image transform) */ +CVAPI(void) cvPreWarpImage( int line_count, + IplImage* img, + uchar* dst, + int* dst_nums, + int* scanlines); + +/* Approximate each grabbed scanline by a sequence of runs + (lossy run-length compression) */ +CVAPI(void) cvFindRuns( int line_count, + uchar* prewarp1, + uchar* prewarp2, + int* line_lengths1, + int* line_lengths2, + int* runs1, + int* runs2, + int* num_runs1, + int* num_runs2); + +/* Compares two sets of compressed scanlines */ +CVAPI(void) cvDynamicCorrespondMulti( int line_count, + int* first, + int* first_runs, + int* second, + int* second_runs, + int* first_corr, + int* second_corr); + +/* Finds scanline ending coordinates for some intermediate "virtual" camera position */ +CVAPI(void) cvMakeAlphaScanlines( int* scanlines1, + int* scanlines2, + int* scanlinesA, + int* lengths, + int line_count, + float alpha); + +/* Blends data of the left and right image scanlines to get + pixel values of "virtual" image scanlines */ +CVAPI(void) cvMorphEpilinesMulti( int line_count, + uchar* first_pix, + int* first_num, + uchar* second_pix, + int* second_num, + uchar* dst_pix, + int* dst_num, + float alpha, + int* first, + int* first_runs, + int* second, + int* second_runs, + int* first_corr, + int* second_corr); + +/* Does reverse warping of the morphing result to make + it fill the destination image rectangle */ +CVAPI(void) cvPostWarpImage( int line_count, + uchar* src, + int* src_nums, + IplImage* img, + int* scanlines); + +/* Deletes Moire (missed pixels that appear due to discretization) */ +CVAPI(void) cvDeleteMoire( IplImage* img ); + + +typedef struct CvConDensation +{ + int MP; + int DP; + float* DynamMatr; /* Matrix of the linear Dynamics system */ + float* State; /* Vector of State */ + int SamplesNum; /* Number of the Samples */ + float** flSamples; /* arr of the Sample Vectors */ + float** flNewSamples; /* temporary array of the Sample Vectors */ + float* flConfidence; /* Confidence for each Sample */ + float* flCumulative; /* Cumulative confidence */ + float* Temp; /* Temporary vector */ + float* RandomSample; /* RandomVector to update sample set */ + struct CvRandState* RandS; /* Array of structures to generate random vectors */ +} CvConDensation; + +/* Creates ConDensation filter state */ +CVAPI(CvConDensation*) cvCreateConDensation( int dynam_params, + int measure_params, + int sample_count ); + +/* Releases ConDensation filter state */ +CVAPI(void) cvReleaseConDensation( CvConDensation** condens ); + +/* Updates ConDensation filter by time (predict future state of the system) */ +CVAPI(void) cvConDensUpdateByTime( CvConDensation* condens); + +/* Initializes ConDensation filter samples */ 
+CVAPI(void) cvConDensInitSampleSet( CvConDensation* condens, CvMat* lower_bound, CvMat* upper_bound ); + +CV_INLINE int iplWidth( const IplImage* img ) +{ + return !img ? 0 : !img->roi ? img->width : img->roi->width; +} + +CV_INLINE int iplHeight( const IplImage* img ) +{ + return !img ? 0 : !img->roi ? img->height : img->roi->height; +} + +#ifdef __cplusplus +} +#endif + +#ifdef __cplusplus + +/****************************************************************************************\ +* Calibration engine * +\****************************************************************************************/ + +typedef enum CvCalibEtalonType +{ + CV_CALIB_ETALON_USER = -1, + CV_CALIB_ETALON_CHESSBOARD = 0, + CV_CALIB_ETALON_CHECKERBOARD = CV_CALIB_ETALON_CHESSBOARD +} +CvCalibEtalonType; + +class CV_EXPORTS CvCalibFilter +{ +public: + /* Constructor & destructor */ + CvCalibFilter(); + virtual ~CvCalibFilter(); + + /* Sets etalon type - one for all cameras. + etalonParams is used in case of pre-defined etalons (such as chessboard). + Number of elements in etalonParams is determined by etalonType. + E.g., if etalon type is CV_ETALON_TYPE_CHESSBOARD then: + etalonParams[0] is number of squares per one side of etalon + etalonParams[1] is number of squares per another side of etalon + etalonParams[2] is linear size of squares in the board in arbitrary units. + pointCount & points are used in case of + CV_CALIB_ETALON_USER (user-defined) etalon. */ + virtual bool + SetEtalon( CvCalibEtalonType etalonType, double* etalonParams, + int pointCount = 0, CvPoint2D32f* points = 0 ); + + /* Retrieves etalon parameters/or and points */ + virtual CvCalibEtalonType + GetEtalon( int* paramCount = 0, const double** etalonParams = 0, + int* pointCount = 0, const CvPoint2D32f** etalonPoints = 0 ) const; + + /* Sets number of cameras calibrated simultaneously. It is equal to 1 initially */ + virtual void SetCameraCount( int cameraCount ); + + /* Retrieves number of cameras */ + int GetCameraCount() const { return cameraCount; } + + /* Starts cameras calibration */ + virtual bool SetFrames( int totalFrames ); + + /* Stops cameras calibration */ + virtual void Stop( bool calibrate = false ); + + /* Retrieves number of cameras */ + bool IsCalibrated() const { return isCalibrated; } + + /* Feeds another serie of snapshots (one per each camera) to filter. + Etalon points on these images are found automatically. + If the function can't locate points, it returns false */ + virtual bool FindEtalon( IplImage** imgs ); + + /* The same but takes matrices */ + virtual bool FindEtalon( CvMat** imgs ); + + /* Lower-level function for feeding filter with already found etalon points. + Array of point arrays for each camera is passed. */ + virtual bool Push( const CvPoint2D32f** points = 0 ); + + /* Returns total number of accepted frames and, optionally, + total number of frames to collect */ + virtual int GetFrameCount( int* framesTotal = 0 ) const; + + /* Retrieves camera parameters for specified camera. + If camera is not calibrated the function returns 0 */ + virtual const CvCamera* GetCameraParams( int idx = 0 ) const; + + virtual const CvStereoCamera* GetStereoParams() const; + + /* Sets camera parameters for all cameras */ + virtual bool SetCameraParams( CvCamera* params ); + + /* Saves all camera parameters to file */ + virtual bool SaveCameraParams( const char* filename ); + + /* Loads all camera parameters from file */ + virtual bool LoadCameraParams( const char* filename ); + + /* Undistorts images using camera parameters. 
Some of src pointers can be NULL. */ + virtual bool Undistort( IplImage** src, IplImage** dst ); + + /* Undistorts images using camera parameters. Some of src pointers can be NULL. */ + virtual bool Undistort( CvMat** src, CvMat** dst ); + + /* Returns array of etalon points detected/partally detected + on the latest frame for idx-th camera */ + virtual bool GetLatestPoints( int idx, CvPoint2D32f** pts, + int* count, bool* found ); + + /* Draw the latest detected/partially detected etalon */ + virtual void DrawPoints( IplImage** dst ); + + /* Draw the latest detected/partially detected etalon */ + virtual void DrawPoints( CvMat** dst ); + + virtual bool Rectify( IplImage** srcarr, IplImage** dstarr ); + virtual bool Rectify( CvMat** srcarr, CvMat** dstarr ); + +protected: + + enum { MAX_CAMERAS = 3 }; + + /* etalon data */ + CvCalibEtalonType etalonType; + int etalonParamCount; + double* etalonParams; + int etalonPointCount; + CvPoint2D32f* etalonPoints; + CvSize imgSize; + CvMat* grayImg; + CvMat* tempImg; + CvMemStorage* storage; + + /* camera data */ + int cameraCount; + CvCamera cameraParams[MAX_CAMERAS]; + CvStereoCamera stereo; + CvPoint2D32f* points[MAX_CAMERAS]; + CvMat* undistMap[MAX_CAMERAS][2]; + CvMat* undistImg; + int latestCounts[MAX_CAMERAS]; + CvPoint2D32f* latestPoints[MAX_CAMERAS]; + CvMat* rectMap[MAX_CAMERAS][2]; + + /* Added by Valery */ + //CvStereoCamera stereoParams; + + int maxPoints; + int framesTotal; + int framesAccepted; + bool isCalibrated; +}; + +#include +#include + +class CV_EXPORTS CvImage +{ +public: + CvImage() : image(0), refcount(0) {} + CvImage( CvSize _size, int _depth, int _channels ) + { + image = cvCreateImage( _size, _depth, _channels ); + refcount = image ? new int(1) : 0; + } + + CvImage( IplImage* img ) : image(img) + { + refcount = image ? new int(1) : 0; + } + + CvImage( const CvImage& img ) : image(img.image), refcount(img.refcount) + { + if( refcount ) ++(*refcount); + } + + CvImage( const char* filename, const char* imgname=0, int color=-1 ) : image(0), refcount(0) + { load( filename, imgname, color ); } + + CvImage( CvFileStorage* fs, const char* mapname, const char* imgname ) : image(0), refcount(0) + { read( fs, mapname, imgname ); } + + CvImage( CvFileStorage* fs, const char* seqname, int idx ) : image(0), refcount(0) + { read( fs, seqname, idx ); } + + ~CvImage() + { + if( refcount && !(--*refcount) ) + { + cvReleaseImage( &image ); + delete refcount; + } + } + + CvImage clone() { return CvImage(image ? cvCloneImage(image) : 0); } + + void create( CvSize _size, int _depth, int _channels ) + { + if( !image || !refcount || + image->width != _size.width || image->height != _size.height || + image->depth != _depth || image->nChannels != _channels ) + attach( cvCreateImage( _size, _depth, _channels )); + } + + void release() { detach(); } + void clear() { detach(); } + + void attach( IplImage* img, bool use_refcount=true ) + { + if( refcount && --*refcount == 0 ) + { + cvReleaseImage( &image ); + delete refcount; + } + image = img; + refcount = use_refcount && image ? 
new int(1) : 0; + } + + void detach() + { + if( refcount && --*refcount == 0 ) + { + cvReleaseImage( &image ); + delete refcount; + } + image = 0; + refcount = 0; + } + + bool load( const char* filename, const char* imgname=0, int color=-1 ); + bool read( CvFileStorage* fs, const char* mapname, const char* imgname ); + bool read( CvFileStorage* fs, const char* seqname, int idx ); + void save( const char* filename, const char* imgname, const int* params=0 ); + void write( CvFileStorage* fs, const char* imgname ); + + void show( const char* window_name ); + bool is_valid() { return image != 0; } + + int width() const { return image ? image->width : 0; } + int height() const { return image ? image->height : 0; } + + CvSize size() const { return image ? cvSize(image->width, image->height) : cvSize(0,0); } + + CvSize roi_size() const + { + return !image ? cvSize(0,0) : + !image->roi ? cvSize(image->width,image->height) : + cvSize(image->roi->width, image->roi->height); + } + + CvRect roi() const + { + return !image ? cvRect(0,0,0,0) : + !image->roi ? cvRect(0,0,image->width,image->height) : + cvRect(image->roi->xOffset,image->roi->yOffset, + image->roi->width,image->roi->height); + } + + int coi() const { return !image || !image->roi ? 0 : image->roi->coi; } + + void set_roi(CvRect _roi) { cvSetImageROI(image,_roi); } + void reset_roi() { cvResetImageROI(image); } + void set_coi(int _coi) { cvSetImageCOI(image,_coi); } + int depth() const { return image ? image->depth : 0; } + int channels() const { return image ? image->nChannels : 0; } + int pix_size() const { return image ? ((image->depth & 255)>>3)*image->nChannels : 0; } + + uchar* data() { return image ? (uchar*)image->imageData : 0; } + const uchar* data() const { return image ? (const uchar*)image->imageData : 0; } + int step() const { return image ? image->widthStep : 0; } + int origin() const { return image ? image->origin : 0; } + + uchar* roi_row(int y) + { + assert(0<=y); + assert(!image ? + 1 : image->roi ? + yroi->height : yheight); + + return !image ? 0 : + !image->roi ? + (uchar*)(image->imageData + y*image->widthStep) : + (uchar*)(image->imageData + (y+image->roi->yOffset)*image->widthStep + + image->roi->xOffset*((image->depth & 255)>>3)*image->nChannels); + } + + const uchar* roi_row(int y) const + { + assert(0<=y); + assert(!image ? + 1 : image->roi ? + yroi->height : yheight); + + return !image ? 0 : + !image->roi ? 
+ (const uchar*)(image->imageData + y*image->widthStep) : + (const uchar*)(image->imageData + (y+image->roi->yOffset)*image->widthStep + + image->roi->xOffset*((image->depth & 255)>>3)*image->nChannels); + } + + operator const IplImage* () const { return image; } + operator IplImage* () { return image; } + + CvImage& operator = (const CvImage& img) + { + if( img.refcount ) + ++*img.refcount; + if( refcount && !(--*refcount) ) + cvReleaseImage( &image ); + image=img.image; + refcount=img.refcount; + return *this; + } + +protected: + IplImage* image; + int* refcount; +}; + + +class CV_EXPORTS CvMatrix +{ +public: + CvMatrix() : matrix(0) {} + CvMatrix( int _rows, int _cols, int _type ) + { matrix = cvCreateMat( _rows, _cols, _type ); } + + CvMatrix( int _rows, int _cols, int _type, CvMat* hdr, + void* _data=0, int _step=CV_AUTOSTEP ) + { matrix = cvInitMatHeader( hdr, _rows, _cols, _type, _data, _step ); } + + CvMatrix( int rows, int cols, int type, CvMemStorage* storage, bool alloc_data=true ); + + CvMatrix( int _rows, int _cols, int _type, void* _data, int _step=CV_AUTOSTEP ) + { matrix = cvCreateMatHeader( _rows, _cols, _type ); + cvSetData( matrix, _data, _step ); } + + CvMatrix( CvMat* m ) + { matrix = m; } + + CvMatrix( const CvMatrix& m ) + { + matrix = m.matrix; + addref(); + } + + CvMatrix( const char* filename, const char* matname=0, int color=-1 ) : matrix(0) + { load( filename, matname, color ); } + + CvMatrix( CvFileStorage* fs, const char* mapname, const char* matname ) : matrix(0) + { read( fs, mapname, matname ); } + + CvMatrix( CvFileStorage* fs, const char* seqname, int idx ) : matrix(0) + { read( fs, seqname, idx ); } + + ~CvMatrix() + { + release(); + } + + CvMatrix clone() { return CvMatrix(matrix ? cvCloneMat(matrix) : 0); } + + void set( CvMat* m, bool add_ref ) + { + release(); + matrix = m; + if( add_ref ) + addref(); + } + + void create( int _rows, int _cols, int _type ) + { + if( !matrix || !matrix->refcount || + matrix->rows != _rows || matrix->cols != _cols || + CV_MAT_TYPE(matrix->type) != _type ) + set( cvCreateMat( _rows, _cols, _type ), false ); + } + + void addref() const + { + if( matrix ) + { + if( matrix->hdr_refcount ) + ++matrix->hdr_refcount; + else if( matrix->refcount ) + ++*matrix->refcount; + } + } + + void release() + { + if( matrix ) + { + if( matrix->hdr_refcount ) + { + if( --matrix->hdr_refcount == 0 ) + cvReleaseMat( &matrix ); + } + else if( matrix->refcount ) + { + if( --*matrix->refcount == 0 ) + cvFree( &matrix->refcount ); + } + matrix = 0; + } + } + + void clear() + { + release(); + } + + bool load( const char* filename, const char* matname=0, int color=-1 ); + bool read( CvFileStorage* fs, const char* mapname, const char* matname ); + bool read( CvFileStorage* fs, const char* seqname, int idx ); + void save( const char* filename, const char* matname, const int* params=0 ); + void write( CvFileStorage* fs, const char* matname ); + + void show( const char* window_name ); + + bool is_valid() { return matrix != 0; } + + int rows() const { return matrix ? matrix->rows : 0; } + int cols() const { return matrix ? matrix->cols : 0; } + + CvSize size() const + { + return !matrix ? cvSize(0,0) : cvSize(matrix->rows,matrix->cols); + } + + int type() const { return matrix ? CV_MAT_TYPE(matrix->type) : 0; } + int depth() const { return matrix ? CV_MAT_DEPTH(matrix->type) : 0; } + int channels() const { return matrix ? CV_MAT_CN(matrix->type) : 0; } + int pix_size() const { return matrix ? 
CV_ELEM_SIZE(matrix->type) : 0; } + + uchar* data() { return matrix ? matrix->data.ptr : 0; } + const uchar* data() const { return matrix ? matrix->data.ptr : 0; } + int step() const { return matrix ? matrix->step : 0; } + + void set_data( void* _data, int _step=CV_AUTOSTEP ) + { cvSetData( matrix, _data, _step ); } + + uchar* row(int i) { return !matrix ? 0 : matrix->data.ptr + i*matrix->step; } + const uchar* row(int i) const + { return !matrix ? 0 : matrix->data.ptr + i*matrix->step; } + + operator const CvMat* () const { return matrix; } + operator CvMat* () { return matrix; } + + CvMatrix& operator = (const CvMatrix& _m) + { + _m.addref(); + release(); + matrix = _m.matrix; + return *this; + } + +protected: + CvMat* matrix; +}; + +/****************************************************************************************\ + * CamShiftTracker * + \****************************************************************************************/ + +class CV_EXPORTS CvCamShiftTracker +{ +public: + + CvCamShiftTracker(); + virtual ~CvCamShiftTracker(); + + /**** Characteristics of the object that are calculated by track_object method *****/ + float get_orientation() const // orientation of the object in degrees + { return m_box.angle; } + float get_length() const // the larger linear size of the object + { return m_box.size.height; } + float get_width() const // the smaller linear size of the object + { return m_box.size.width; } + CvPoint2D32f get_center() const // center of the object + { return m_box.center; } + CvRect get_window() const // bounding rectangle for the object + { return m_comp.rect; } + + /*********************** Tracking parameters ************************/ + int get_threshold() const // thresholding value that applied to back project + { return m_threshold; } + + int get_hist_dims( int* dims = 0 ) const // returns number of histogram dimensions and sets + { return m_hist ? cvGetDims( m_hist->bins, dims ) : 0; } + + int get_min_ch_val( int channel ) const // get the minimum allowed value of the specified channel + { return m_min_ch_val[channel]; } + + int get_max_ch_val( int channel ) const // get the maximum allowed value of the specified channel + { return m_max_ch_val[channel]; } + + // set initial object rectangle (must be called before initial calculation of the histogram) + bool set_window( CvRect window) + { m_comp.rect = window; return true; } + + bool set_threshold( int threshold ) // threshold applied to the histogram bins + { m_threshold = threshold; return true; } + + bool set_hist_bin_range( int dim, int min_val, int max_val ); + + bool set_hist_dims( int c_dims, int* dims );// set the histogram parameters + + bool set_min_ch_val( int channel, int val ) // set the minimum allowed value of the specified channel + { m_min_ch_val[channel] = val; return true; } + bool set_max_ch_val( int channel, int val ) // set the maximum allowed value of the specified channel + { m_max_ch_val[channel] = val; return true; } + + /************************ The processing methods *********************************/ + // update object position + virtual bool track_object( const IplImage* cur_frame ); + + // update object histogram + virtual bool update_histogram( const IplImage* cur_frame ); + + // reset histogram + virtual void reset_histogram(); + + /************************ Retrieving internal data *******************************/ + // get back project image + virtual IplImage* get_back_project() + { return m_back_project; } + + float query( int* bin ) const + { return m_hist ? 
(float)cvGetRealND(m_hist->bins, bin) : 0.f; } + +protected: + + // internal method for color conversion: fills m_color_planes group + virtual void color_transform( const IplImage* img ); + + CvHistogram* m_hist; + + CvBox2D m_box; + CvConnectedComp m_comp; + + float m_hist_ranges_data[CV_MAX_DIM][2]; + float* m_hist_ranges[CV_MAX_DIM]; + + int m_min_ch_val[CV_MAX_DIM]; + int m_max_ch_val[CV_MAX_DIM]; + int m_threshold; + + IplImage* m_color_planes[CV_MAX_DIM]; + IplImage* m_back_project; + IplImage* m_temp; + IplImage* m_mask; +}; + +/****************************************************************************************\ +* Expectation - Maximization * +\****************************************************************************************/ +struct CV_EXPORTS_W_MAP CvEMParams +{ + CvEMParams(); + CvEMParams( int nclusters, int cov_mat_type=cv::EM::COV_MAT_DIAGONAL, + int start_step=cv::EM::START_AUTO_STEP, + CvTermCriteria term_crit=cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON), + const CvMat* probs=0, const CvMat* weights=0, const CvMat* means=0, const CvMat** covs=0 ); + + CV_PROP_RW int nclusters; + CV_PROP_RW int cov_mat_type; + CV_PROP_RW int start_step; + const CvMat* probs; + const CvMat* weights; + const CvMat* means; + const CvMat** covs; + CV_PROP_RW CvTermCriteria term_crit; +}; + + +class CV_EXPORTS_W CvEM : public CvStatModel +{ +public: + // Type of covariation matrices + enum { COV_MAT_SPHERICAL=cv::EM::COV_MAT_SPHERICAL, + COV_MAT_DIAGONAL =cv::EM::COV_MAT_DIAGONAL, + COV_MAT_GENERIC =cv::EM::COV_MAT_GENERIC }; + + // The initial step + enum { START_E_STEP=cv::EM::START_E_STEP, + START_M_STEP=cv::EM::START_M_STEP, + START_AUTO_STEP=cv::EM::START_AUTO_STEP }; + + CV_WRAP CvEM(); + CvEM( const CvMat* samples, const CvMat* sampleIdx=0, + CvEMParams params=CvEMParams(), CvMat* labels=0 ); + + virtual ~CvEM(); + + virtual bool train( const CvMat* samples, const CvMat* sampleIdx=0, + CvEMParams params=CvEMParams(), CvMat* labels=0 ); + + virtual float predict( const CvMat* sample, CV_OUT CvMat* probs ) const; + + CV_WRAP CvEM( const cv::Mat& samples, const cv::Mat& sampleIdx=cv::Mat(), + CvEMParams params=CvEMParams() ); + + CV_WRAP virtual bool train( const cv::Mat& samples, + const cv::Mat& sampleIdx=cv::Mat(), + CvEMParams params=CvEMParams(), + CV_OUT cv::Mat* labels=0 ); + + CV_WRAP virtual float predict( const cv::Mat& sample, CV_OUT cv::Mat* probs=0 ) const; + CV_WRAP virtual double calcLikelihood( const cv::Mat &sample ) const; + + CV_WRAP int getNClusters() const; + CV_WRAP cv::Mat getMeans() const; + CV_WRAP void getCovs(CV_OUT std::vector& covs) const; + CV_WRAP cv::Mat getWeights() const; + CV_WRAP cv::Mat getProbs() const; + + CV_WRAP inline double getLikelihood() const { return emObj.isTrained() ? logLikelihood : DBL_MAX; } + + CV_WRAP virtual void clear(); + + int get_nclusters() const; + const CvMat* get_means() const; + const CvMat** get_covs() const; + const CvMat* get_weights() const; + const CvMat* get_probs() const; + + inline double get_log_likelihood() const { return getLikelihood(); } + + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void write( CvFileStorage* fs, const char* name ) const; + +protected: + void set_mat_hdrs(); + + cv::EM emObj; + cv::Mat probs; + double logLikelihood; + + CvMat meansHdr; + std::vector covsHdrs; + std::vector covsPtrs; + CvMat weightsHdr; + CvMat probsHdr; +}; + +namespace cv +{ + +typedef CvEMParams EMParams; +typedef CvEM ExpectationMaximization; + +/*! 
+ The Patch Generator class + */ +class CV_EXPORTS PatchGenerator +{ +public: + PatchGenerator(); + PatchGenerator(double _backgroundMin, double _backgroundMax, + double _noiseRange, bool _randomBlur=true, + double _lambdaMin=0.6, double _lambdaMax=1.5, + double _thetaMin=-CV_PI, double _thetaMax=CV_PI, + double _phiMin=-CV_PI, double _phiMax=CV_PI ); + void operator()(const Mat& image, Point2f pt, Mat& patch, Size patchSize, RNG& rng) const; + void operator()(const Mat& image, const Mat& transform, Mat& patch, + Size patchSize, RNG& rng) const; + void warpWholeImage(const Mat& image, Mat& matT, Mat& buf, + CV_OUT Mat& warped, int border, RNG& rng) const; + void generateRandomTransform(Point2f srcCenter, Point2f dstCenter, + CV_OUT Mat& transform, RNG& rng, + bool inverse=false) const; + void setAffineParam(double lambda, double theta, double phi); + + double backgroundMin, backgroundMax; + double noiseRange; + bool randomBlur; + double lambdaMin, lambdaMax; + double thetaMin, thetaMax; + double phiMin, phiMax; +}; + + +class CV_EXPORTS LDetector +{ +public: + LDetector(); + LDetector(int _radius, int _threshold, int _nOctaves, + int _nViews, double _baseFeatureSize, double _clusteringDistance); + void operator()(const Mat& image, + CV_OUT vector& keypoints, + int maxCount=0, bool scaleCoords=true) const; + void operator()(const vector& pyr, + CV_OUT vector& keypoints, + int maxCount=0, bool scaleCoords=true) const; + void getMostStable2D(const Mat& image, CV_OUT vector& keypoints, + int maxCount, const PatchGenerator& patchGenerator) const; + void setVerbose(bool verbose); + + void read(const FileNode& node); + void write(FileStorage& fs, const String& name=String()) const; + + int radius; + int threshold; + int nOctaves; + int nViews; + bool verbose; + + double baseFeatureSize; + double clusteringDistance; +}; + +typedef LDetector YAPE; + +class CV_EXPORTS FernClassifier +{ +public: + FernClassifier(); + FernClassifier(const FileNode& node); + FernClassifier(const vector >& points, + const vector& refimgs, + const vector >& labels=vector >(), + int _nclasses=0, int _patchSize=PATCH_SIZE, + int _signatureSize=DEFAULT_SIGNATURE_SIZE, + int _nstructs=DEFAULT_STRUCTS, + int _structSize=DEFAULT_STRUCT_SIZE, + int _nviews=DEFAULT_VIEWS, + int _compressionMethod=COMPRESSION_NONE, + const PatchGenerator& patchGenerator=PatchGenerator()); + virtual ~FernClassifier(); + virtual void read(const FileNode& n); + virtual void write(FileStorage& fs, const String& name=String()) const; + virtual void trainFromSingleView(const Mat& image, + const vector& keypoints, + int _patchSize=PATCH_SIZE, + int _signatureSize=DEFAULT_SIGNATURE_SIZE, + int _nstructs=DEFAULT_STRUCTS, + int _structSize=DEFAULT_STRUCT_SIZE, + int _nviews=DEFAULT_VIEWS, + int _compressionMethod=COMPRESSION_NONE, + const PatchGenerator& patchGenerator=PatchGenerator()); + virtual void train(const vector >& points, + const vector& refimgs, + const vector >& labels=vector >(), + int _nclasses=0, int _patchSize=PATCH_SIZE, + int _signatureSize=DEFAULT_SIGNATURE_SIZE, + int _nstructs=DEFAULT_STRUCTS, + int _structSize=DEFAULT_STRUCT_SIZE, + int _nviews=DEFAULT_VIEWS, + int _compressionMethod=COMPRESSION_NONE, + const PatchGenerator& patchGenerator=PatchGenerator()); + virtual int operator()(const Mat& img, Point2f kpt, vector& signature) const; + virtual int operator()(const Mat& patch, vector& signature) const; + virtual void clear(); + virtual bool empty() const; + void setVerbose(bool verbose); + + int getClassCount() const; + int 
getStructCount() const; + int getStructSize() const; + int getSignatureSize() const; + int getCompressionMethod() const; + Size getPatchSize() const; + + struct Feature + { + uchar x1, y1, x2, y2; + Feature() : x1(0), y1(0), x2(0), y2(0) {} + Feature(int _x1, int _y1, int _x2, int _y2) + : x1((uchar)_x1), y1((uchar)_y1), x2((uchar)_x2), y2((uchar)_y2) + {} + template bool operator ()(const Mat_<_Tp>& patch) const + { return patch(y1,x1) > patch(y2, x2); } + }; + + enum + { + PATCH_SIZE = 31, + DEFAULT_STRUCTS = 50, + DEFAULT_STRUCT_SIZE = 9, + DEFAULT_VIEWS = 5000, + DEFAULT_SIGNATURE_SIZE = 176, + COMPRESSION_NONE = 0, + COMPRESSION_RANDOM_PROJ = 1, + COMPRESSION_PCA = 2, + DEFAULT_COMPRESSION_METHOD = COMPRESSION_NONE + }; + +protected: + virtual void prepare(int _nclasses, int _patchSize, int _signatureSize, + int _nstructs, int _structSize, + int _nviews, int _compressionMethod); + virtual void finalize(RNG& rng); + virtual int getLeaf(int fidx, const Mat& patch) const; + + bool verbose; + int nstructs; + int structSize; + int nclasses; + int signatureSize; + int compressionMethod; + int leavesPerStruct; + Size patchSize; + vector features; + vector classCounters; + vector posteriors; +}; + + +/****************************************************************************************\ + * Calonder Classifier * + \****************************************************************************************/ + +struct RTreeNode; + +struct CV_EXPORTS BaseKeypoint +{ + int x; + int y; + IplImage* image; + + BaseKeypoint() + : x(0), y(0), image(NULL) + {} + + BaseKeypoint(int _x, int _y, IplImage* _image) + : x(_x), y(_y), image(_image) + {} +}; + +class CV_EXPORTS RandomizedTree +{ +public: + friend class RTreeClassifier; + + static const uchar PATCH_SIZE = 32; + static const int DEFAULT_DEPTH = 9; + static const int DEFAULT_VIEWS = 5000; + static const size_t DEFAULT_REDUCED_NUM_DIM = 176; + static float GET_LOWER_QUANT_PERC() { return .03f; } + static float GET_UPPER_QUANT_PERC() { return .92f; } + + RandomizedTree(); + ~RandomizedTree(); + + void train(vector const& base_set, RNG &rng, + int depth, int views, size_t reduced_num_dim, int num_quant_bits); + void train(vector const& base_set, RNG &rng, + PatchGenerator &make_patch, int depth, int views, size_t reduced_num_dim, + int num_quant_bits); + + // following two funcs are EXPERIMENTAL (do not use unless you know exactly what you do) + static void quantizeVector(float *vec, int dim, int N, float bnds[2], int clamp_mode=0); + static void quantizeVector(float *src, int dim, int N, float bnds[2], uchar *dst); + + // patch_data must be a 32x32 array (no row padding) + float* getPosterior(uchar* patch_data); + const float* getPosterior(uchar* patch_data) const; + uchar* getPosterior2(uchar* patch_data); + const uchar* getPosterior2(uchar* patch_data) const; + + void read(const char* file_name, int num_quant_bits); + void read(std::istream &is, int num_quant_bits); + void write(const char* file_name) const; + void write(std::ostream &os) const; + + int classes() { return classes_; } + int depth() { return depth_; } + + //void setKeepFloatPosteriors(bool b) { keep_float_posteriors_ = b; } + void discardFloatPosteriors() { freePosteriors(1); } + + inline void applyQuantization(int num_quant_bits) { makePosteriors2(num_quant_bits); } + + // debug + void savePosteriors(std::string url, bool append=false); + void savePosteriors2(std::string url, bool append=false); + +private: + int classes_; + int depth_; + int num_leaves_; + vector nodes_; + float 
**posteriors_; // 16-bytes aligned posteriors + uchar **posteriors2_; // 16-bytes aligned posteriors + vector leaf_counts_; + + void createNodes(int num_nodes, RNG &rng); + void allocPosteriorsAligned(int num_leaves, int num_classes); + void freePosteriors(int which); // which: 1=posteriors_, 2=posteriors2_, 3=both + void init(int classes, int depth, RNG &rng); + void addExample(int class_id, uchar* patch_data); + void finalize(size_t reduced_num_dim, int num_quant_bits); + int getIndex(uchar* patch_data) const; + inline float* getPosteriorByIndex(int index); + inline const float* getPosteriorByIndex(int index) const; + inline uchar* getPosteriorByIndex2(int index); + inline const uchar* getPosteriorByIndex2(int index) const; + //void makeRandomMeasMatrix(float *cs_phi, PHI_DISTR_TYPE dt, size_t reduced_num_dim); + void convertPosteriorsToChar(); + void makePosteriors2(int num_quant_bits); + void compressLeaves(size_t reduced_num_dim); + void estimateQuantPercForPosteriors(float perc[2]); +}; + + +inline uchar* getData(IplImage* image) +{ + return reinterpret_cast(image->imageData); +} + +inline float* RandomizedTree::getPosteriorByIndex(int index) +{ + return const_cast(const_cast(this)->getPosteriorByIndex(index)); +} + +inline const float* RandomizedTree::getPosteriorByIndex(int index) const +{ + return posteriors_[index]; +} + +inline uchar* RandomizedTree::getPosteriorByIndex2(int index) +{ + return const_cast(const_cast(this)->getPosteriorByIndex2(index)); +} + +inline const uchar* RandomizedTree::getPosteriorByIndex2(int index) const +{ + return posteriors2_[index]; +} + +struct CV_EXPORTS RTreeNode +{ + short offset1, offset2; + + RTreeNode() {} + RTreeNode(uchar x1, uchar y1, uchar x2, uchar y2) + : offset1(y1*RandomizedTree::PATCH_SIZE + x1), + offset2(y2*RandomizedTree::PATCH_SIZE + x2) + {} + + //! 
Left child on 0, right child on 1 + inline bool operator() (uchar* patch_data) const + { + return patch_data[offset1] > patch_data[offset2]; + } +}; + +class CV_EXPORTS RTreeClassifier +{ +public: + static const int DEFAULT_TREES = 48; + static const size_t DEFAULT_NUM_QUANT_BITS = 4; + + RTreeClassifier(); + void train(vector const& base_set, + RNG &rng, + int num_trees = RTreeClassifier::DEFAULT_TREES, + int depth = RandomizedTree::DEFAULT_DEPTH, + int views = RandomizedTree::DEFAULT_VIEWS, + size_t reduced_num_dim = RandomizedTree::DEFAULT_REDUCED_NUM_DIM, + int num_quant_bits = DEFAULT_NUM_QUANT_BITS); + void train(vector const& base_set, + RNG &rng, + PatchGenerator &make_patch, + int num_trees = RTreeClassifier::DEFAULT_TREES, + int depth = RandomizedTree::DEFAULT_DEPTH, + int views = RandomizedTree::DEFAULT_VIEWS, + size_t reduced_num_dim = RandomizedTree::DEFAULT_REDUCED_NUM_DIM, + int num_quant_bits = DEFAULT_NUM_QUANT_BITS); + + // sig must point to a memory block of at least classes()*sizeof(float|uchar) bytes + void getSignature(IplImage *patch, uchar *sig) const; + void getSignature(IplImage *patch, float *sig) const; + void getSparseSignature(IplImage *patch, float *sig, float thresh) const; + // TODO: deprecated in favor of getSignature overload, remove + void getFloatSignature(IplImage *patch, float *sig) const { getSignature(patch, sig); } + + static int countNonZeroElements(float *vec, int n, double tol=1e-10); + static inline void safeSignatureAlloc(uchar **sig, int num_sig=1, int sig_len=176); + static inline uchar* safeSignatureAlloc(int num_sig=1, int sig_len=176); + + inline int classes() const { return classes_; } + inline int original_num_classes() const { return original_num_classes_; } + + void setQuantization(int num_quant_bits); + void discardFloatPosteriors(); + + void read(const char* file_name); + void read(std::istream &is); + void write(const char* file_name) const; + void write(std::ostream &os) const; + + // experimental and debug + void saveAllFloatPosteriors(std::string file_url); + void saveAllBytePosteriors(std::string file_url); + void setFloatPosteriorsFromTextfile_176(std::string url); + float countZeroElements(); + + vector trees_; + +private: + int classes_; + int num_quant_bits_; + mutable uchar **posteriors_; + mutable unsigned short *ptemp_; + int original_num_classes_; + bool keep_floats_; +}; + +/****************************************************************************************\ +* One-Way Descriptor * +\****************************************************************************************/ + +// CvAffinePose: defines a parameterized affine transformation of an image patch. +// An image patch is rotated on angle phi (in degrees), then scaled lambda1 times +// along horizontal and lambda2 times along vertical direction, and then rotated again +// on angle (theta - phi). +class CV_EXPORTS CvAffinePose +{ +public: + float phi; + float theta; + float lambda1; + float lambda2; +}; + +class CV_EXPORTS OneWayDescriptor +{ +public: + OneWayDescriptor(); + ~OneWayDescriptor(); + + // allocates memory for given descriptor parameters + void Allocate(int pose_count, CvSize size, int nChannels); + + // GenerateSamples: generates affine transformed patches with averaging them over small transformation variations. 
+ // If external poses and transforms were specified, uses them instead of generating random ones + // - pose_count: the number of poses to be generated + // - frontal: the input patch (can be a roi in a larger image) + // - norm: if nonzero, normalizes the output patch so that the sum of pixel intensities is 1 + void GenerateSamples(int pose_count, IplImage* frontal, int norm = 0); + + // GenerateSamplesFast: generates affine transformed patches with averaging them over small transformation variations. + // Uses precalculated transformed pca components. + // - frontal: the input patch (can be a roi in a larger image) + // - pca_hr_avg: pca average vector + // - pca_hr_eigenvectors: pca eigenvectors + // - pca_descriptors: an array of precomputed descriptors of pca components containing their affine transformations + // pca_descriptors[0] corresponds to the average, pca_descriptors[1]-pca_descriptors[pca_dim] correspond to eigenvectors + void GenerateSamplesFast(IplImage* frontal, CvMat* pca_hr_avg, + CvMat* pca_hr_eigenvectors, OneWayDescriptor* pca_descriptors); + + // sets the poses and corresponding transforms + void SetTransforms(CvAffinePose* poses, CvMat** transforms); + + // Initialize: builds a descriptor. + // - pose_count: the number of poses to build. If poses were set externally, uses them rather than generating random ones + // - frontal: input patch. Can be a roi in a larger image + // - feature_name: the feature name to be associated with the descriptor + // - norm: if 1, the affine transformed patches are normalized so that their sum is 1 + void Initialize(int pose_count, IplImage* frontal, const char* feature_name = 0, int norm = 0); + + // InitializeFast: builds a descriptor using precomputed descriptors of pca components + // - pose_count: the number of poses to build + // - frontal: input patch. Can be a roi in a larger image + // - feature_name: the feature name to be associated with the descriptor + // - pca_hr_avg: average vector for PCA + // - pca_hr_eigenvectors: PCA eigenvectors (one vector per row) + // - pca_descriptors: precomputed descriptors of PCA components, the first descriptor for the average vector + // followed by the descriptors for eigenvectors + void InitializeFast(int pose_count, IplImage* frontal, const char* feature_name, + CvMat* pca_hr_avg, CvMat* pca_hr_eigenvectors, OneWayDescriptor* pca_descriptors); + + // ProjectPCASample: unwarps an image patch into a vector and projects it into PCA space + // - patch: input image patch + // - avg: PCA average vector + // - eigenvectors: PCA eigenvectors, one per row + // - pca_coeffs: output PCA coefficients + void ProjectPCASample(IplImage* patch, CvMat* avg, CvMat* eigenvectors, CvMat* pca_coeffs) const; + + // InitializePCACoeffs: projects all warped patches into PCA space + // - avg: PCA average vector + // - eigenvectors: PCA eigenvectors, one per row + void InitializePCACoeffs(CvMat* avg, CvMat* eigenvectors); + + // EstimatePose: finds the closest match between an input patch and a set of patches with different poses + // - patch: input image patch + // - pose_idx: the output index of the closest pose + // - distance: the distance to the closest pose (L2 distance) + void EstimatePose(IplImage* patch, int& pose_idx, float& distance) const; + + // EstimatePosePCA: finds the closest match between an input patch and a set of patches with different poses. 
+ // The distance between patches is computed in PCA space + // - patch: input image patch + // - pose_idx: the output index of the closest pose + // - distance: distance to the closest pose (L2 distance in PCA space) + // - avg: PCA average vector. If 0, matching without PCA is used + // - eigenvectors: PCA eigenvectors, one per row + void EstimatePosePCA(CvArr* patch, int& pose_idx, float& distance, CvMat* avg, CvMat* eigenvalues) const; + + // GetPatchSize: returns the size of each image patch after warping (2 times smaller than the input patch) + CvSize GetPatchSize() const + { + return m_patch_size; + } + + // GetInputPatchSize: returns the required size of the patch that the descriptor is built from + // (2 time larger than the patch after warping) + CvSize GetInputPatchSize() const + { + return cvSize(m_patch_size.width*2, m_patch_size.height*2); + } + + // GetPatch: returns a patch corresponding to specified pose index + // - index: pose index + // - return value: the patch corresponding to specified pose index + IplImage* GetPatch(int index); + + // GetPose: returns a pose corresponding to specified pose index + // - index: pose index + // - return value: the pose corresponding to specified pose index + CvAffinePose GetPose(int index) const; + + // Save: saves all patches with different poses to a specified path + void Save(const char* path); + + // ReadByName: reads a descriptor from a file storage + // - fs: file storage + // - parent: parent node + // - name: node name + // - return value: 1 if succeeded, 0 otherwise + int ReadByName(CvFileStorage* fs, CvFileNode* parent, const char* name); + + // ReadByName: reads a descriptor from a file node + // - parent: parent node + // - name: node name + // - return value: 1 if succeeded, 0 otherwise + int ReadByName(const FileNode &parent, const char* name); + + // Write: writes a descriptor into a file storage + // - fs: file storage + // - name: node name + void Write(CvFileStorage* fs, const char* name); + + // GetFeatureName: returns a name corresponding to a feature + const char* GetFeatureName() const; + + // GetCenter: returns the center of the feature + CvPoint GetCenter() const; + + void SetPCADimHigh(int pca_dim_high) {m_pca_dim_high = pca_dim_high;}; + void SetPCADimLow(int pca_dim_low) {m_pca_dim_low = pca_dim_low;}; + + int GetPCADimLow() const; + int GetPCADimHigh() const; + + CvMat** GetPCACoeffs() const {return m_pca_coeffs;} + +protected: + int m_pose_count; // the number of poses + CvSize m_patch_size; // size of each image + IplImage** m_samples; // an array of length m_pose_count containing the patch in different poses + IplImage* m_input_patch; + IplImage* m_train_patch; + CvMat** m_pca_coeffs; // an array of length m_pose_count containing pca decomposition of the patch in different poses + CvAffinePose* m_affine_poses; // an array of poses + CvMat** m_transforms; // an array of affine transforms corresponding to poses + + string m_feature_name; // the name of the feature associated with the descriptor + CvPoint m_center; // the coordinates of the feature (the center of the input image ROI) + + int m_pca_dim_high; // the number of descriptor pca components to use for generating affine poses + int m_pca_dim_low; // the number of pca components to use for comparison +}; + + +// OneWayDescriptorBase: encapsulates functionality for training/loading a set of one way descriptors +// and finding the nearest closest descriptor to an input feature +class CV_EXPORTS OneWayDescriptorBase +{ +public: + + // creates an instance 
of OneWayDescriptor from a set of training files + // - patch_size: size of the input (large) patch + // - pose_count: the number of poses to generate for each descriptor + // - train_path: path to training files + // - pca_config: the name of the file that contains PCA for small patches (2 times smaller + // than patch_size each dimension + // - pca_hr_config: the name of the file that contains PCA for large patches (of patch_size size) + // - pca_desc_config: the name of the file that contains descriptors of PCA components + OneWayDescriptorBase(CvSize patch_size, int pose_count, const char* train_path = 0, const char* pca_config = 0, + const char* pca_hr_config = 0, const char* pca_desc_config = 0, int pyr_levels = 1, + int pca_dim_high = 100, int pca_dim_low = 100); + + OneWayDescriptorBase(CvSize patch_size, int pose_count, const string &pca_filename, const string &train_path = string(), const string &images_list = string(), + float _scale_min = 0.7f, float _scale_max=1.5f, float _scale_step=1.2f, int pyr_levels = 1, + int pca_dim_high = 100, int pca_dim_low = 100); + + + virtual ~OneWayDescriptorBase(); + void clear (); + + + // Allocate: allocates memory for a given number of descriptors + void Allocate(int train_feature_count); + + // AllocatePCADescriptors: allocates memory for pca descriptors + void AllocatePCADescriptors(); + + // returns patch size + CvSize GetPatchSize() const {return m_patch_size;}; + // returns the number of poses for each descriptor + int GetPoseCount() const {return m_pose_count;}; + + // returns the number of pyramid levels + int GetPyrLevels() const {return m_pyr_levels;}; + + // returns the number of descriptors + int GetDescriptorCount() const {return m_train_feature_count;}; + + // CreateDescriptorsFromImage: creates descriptors for each of the input features + // - src: input image + // - features: input features + // - pyr_levels: the number of pyramid levels + void CreateDescriptorsFromImage(IplImage* src, const vector& features); + + // CreatePCADescriptors: generates descriptors for PCA components, needed for fast generation of feature descriptors + void CreatePCADescriptors(); + + // returns a feature descriptor by feature index + const OneWayDescriptor* GetDescriptor(int desc_idx) const {return &m_descriptors[desc_idx];}; + + // FindDescriptor: finds the closest descriptor + // - patch: input image patch + // - desc_idx: output index of the closest descriptor to the input patch + // - pose_idx: output index of the closest pose of the closest descriptor to the input patch + // - distance: distance from the input patch to the closest feature pose + // - _scales: scales of the input patch for each descriptor + // - scale_ranges: input scales variation (float[2]) + void FindDescriptor(IplImage* patch, int& desc_idx, int& pose_idx, float& distance, float* _scale = 0, float* scale_ranges = 0) const; + + // - patch: input image patch + // - n: number of the closest indexes + // - desc_idxs: output indexes of the closest descriptor to the input patch (n) + // - pose_idx: output indexes of the closest pose of the closest descriptor to the input patch (n) + // - distances: distance from the input patch to the closest feature pose (n) + // - _scales: scales of the input patch + // - scale_ranges: input scales variation (float[2]) + void FindDescriptor(IplImage* patch, int n, vector& desc_idxs, vector& pose_idxs, + vector& distances, vector& _scales, float* scale_ranges = 0) const; + + // FindDescriptor: finds the closest descriptor + // - src: input image 
+ // - pt: center of the feature + // - desc_idx: output index of the closest descriptor to the input patch + // - pose_idx: output index of the closest pose of the closest descriptor to the input patch + // - distance: distance from the input patch to the closest feature pose + void FindDescriptor(IplImage* src, cv::Point2f pt, int& desc_idx, int& pose_idx, float& distance) const; + + // InitializePoses: generates random poses + void InitializePoses(); + + // InitializeTransformsFromPoses: generates 2x3 affine matrices from poses (initializes m_transforms) + void InitializeTransformsFromPoses(); + + // InitializePoseTransforms: subsequently calls InitializePoses and InitializeTransformsFromPoses + void InitializePoseTransforms(); + + // InitializeDescriptor: initializes a descriptor + // - desc_idx: descriptor index + // - train_image: image patch (ROI is supported) + // - feature_label: feature textual label + void InitializeDescriptor(int desc_idx, IplImage* train_image, const char* feature_label); + + void InitializeDescriptor(int desc_idx, IplImage* train_image, const KeyPoint& keypoint, const char* feature_label); + + // InitializeDescriptors: load features from an image and create descriptors for each of them + void InitializeDescriptors(IplImage* train_image, const vector& features, + const char* feature_label = "", int desc_start_idx = 0); + + // Write: writes this object to a file storage + // - fs: output filestorage + void Write (FileStorage &fs) const; + + // Read: reads OneWayDescriptorBase object from a file node + // - fn: input file node + void Read (const FileNode &fn); + + // LoadPCADescriptors: loads PCA descriptors from a file + // - filename: input filename + int LoadPCADescriptors(const char* filename); + + // LoadPCADescriptors: loads PCA descriptors from a file node + // - fn: input file node + int LoadPCADescriptors(const FileNode &fn); + + // SavePCADescriptors: saves PCA descriptors to a file + // - filename: output filename + void SavePCADescriptors(const char* filename); + + // SavePCADescriptors: saves PCA descriptors to a file storage + // - fs: output file storage + void SavePCADescriptors(CvFileStorage* fs) const; + + // GeneratePCA: calculate and save PCA components and descriptors + // - img_path: path to training PCA images directory + // - images_list: filename with filenames of training PCA images + void GeneratePCA(const char* img_path, const char* images_list, int pose_count=500); + + // SetPCAHigh: sets the high resolution pca matrices (copied to internal structures) + void SetPCAHigh(CvMat* avg, CvMat* eigenvectors); + + // SetPCALow: sets the low resolution pca matrices (copied to internal structures) + void SetPCALow(CvMat* avg, CvMat* eigenvectors); + + int GetLowPCA(CvMat** avg, CvMat** eigenvectors) + { + *avg = m_pca_avg; + *eigenvectors = m_pca_eigenvectors; + return m_pca_dim_low; + }; + + int GetPCADimLow() const {return m_pca_dim_low;}; + int GetPCADimHigh() const {return m_pca_dim_high;}; + + void ConvertDescriptorsArrayToTree(); // Converting pca_descriptors array to KD tree + + // GetPCAFilename: get default PCA filename + static string GetPCAFilename () { return "pca.yml"; } + + virtual bool empty() const { return m_train_feature_count <= 0 ? 
true : false; } + +protected: + CvSize m_patch_size; // patch size + int m_pose_count; // the number of poses for each descriptor + int m_train_feature_count; // the number of the training features + OneWayDescriptor* m_descriptors; // array of train feature descriptors + CvMat* m_pca_avg; // PCA average Vector for small patches + CvMat* m_pca_eigenvectors; // PCA eigenvectors for small patches + CvMat* m_pca_hr_avg; // PCA average Vector for large patches + CvMat* m_pca_hr_eigenvectors; // PCA eigenvectors for large patches + OneWayDescriptor* m_pca_descriptors; // an array of PCA descriptors + + cv::flann::Index* m_pca_descriptors_tree; + CvMat* m_pca_descriptors_matrix; + + CvAffinePose* m_poses; // array of poses + CvMat** m_transforms; // array of affine transformations corresponding to poses + + int m_pca_dim_high; + int m_pca_dim_low; + + int m_pyr_levels; + float scale_min; + float scale_max; + float scale_step; + + // SavePCAall: saves PCA components and descriptors to a file storage + // - fs: output file storage + void SavePCAall (FileStorage &fs) const; + + // LoadPCAall: loads PCA components and descriptors from a file node + // - fn: input file node + void LoadPCAall (const FileNode &fn); +}; + +class CV_EXPORTS OneWayDescriptorObject : public OneWayDescriptorBase +{ +public: + // creates an instance of OneWayDescriptorObject from a set of training files + // - patch_size: size of the input (large) patch + // - pose_count: the number of poses to generate for each descriptor + // - train_path: path to training files + // - pca_config: the name of the file that contains PCA for small patches (2 times smaller + // than patch_size each dimension + // - pca_hr_config: the name of the file that contains PCA for large patches (of patch_size size) + // - pca_desc_config: the name of the file that contains descriptors of PCA components + OneWayDescriptorObject(CvSize patch_size, int pose_count, const char* train_path, const char* pca_config, + const char* pca_hr_config = 0, const char* pca_desc_config = 0, int pyr_levels = 1); + + OneWayDescriptorObject(CvSize patch_size, int pose_count, const string &pca_filename, + const string &train_path = string (), const string &images_list = string (), + float _scale_min = 0.7f, float _scale_max=1.5f, float _scale_step=1.2f, int pyr_levels = 1); + + + virtual ~OneWayDescriptorObject(); + + // Allocate: allocates memory for a given number of features + // - train_feature_count: the total number of features + // - object_feature_count: the number of features extracted from the object + void Allocate(int train_feature_count, int object_feature_count); + + + void SetLabeledFeatures(const vector& features) {m_train_features = features;}; + vector& GetLabeledFeatures() {return m_train_features;}; + const vector& GetLabeledFeatures() const {return m_train_features;}; + vector _GetLabeledFeatures() const; + + // IsDescriptorObject: returns 1 if descriptor with specified index is positive, otherwise 0 + int IsDescriptorObject(int desc_idx) const; + + // MatchPointToPart: returns the part number of a feature if it matches one of the object parts, otherwise -1 + int MatchPointToPart(CvPoint pt) const; + + // GetDescriptorPart: returns the part number of the feature corresponding to a specified descriptor + // - desc_idx: descriptor index + int GetDescriptorPart(int desc_idx) const; + + + void InitializeObjectDescriptors(IplImage* train_image, const vector& features, + const char* feature_label, int desc_start_idx = 0, float scale = 1.0f, + int 
is_background = 0); + + // GetObjectFeatureCount: returns the number of object features + int GetObjectFeatureCount() const {return m_object_feature_count;}; + +protected: + int* m_part_id; // contains part id for each of object descriptors + vector m_train_features; // train features + int m_object_feature_count; // the number of the positive features + +}; + + +/* + * OneWayDescriptorMatcher + */ +class OneWayDescriptorMatcher; +typedef OneWayDescriptorMatcher OneWayDescriptorMatch; + +class CV_EXPORTS OneWayDescriptorMatcher : public GenericDescriptorMatcher +{ +public: + class CV_EXPORTS Params + { + public: + static const int POSE_COUNT = 500; + static const int PATCH_WIDTH = 24; + static const int PATCH_HEIGHT = 24; + static float GET_MIN_SCALE() { return 0.7f; } + static float GET_MAX_SCALE() { return 1.5f; } + static float GET_STEP_SCALE() { return 1.2f; } + + Params( int poseCount = POSE_COUNT, + Size patchSize = Size(PATCH_WIDTH, PATCH_HEIGHT), + string pcaFilename = string(), + string trainPath = string(), string trainImagesList = string(), + float minScale = GET_MIN_SCALE(), float maxScale = GET_MAX_SCALE(), + float stepScale = GET_STEP_SCALE() ); + + int poseCount; + Size patchSize; + string pcaFilename; + string trainPath; + string trainImagesList; + + float minScale, maxScale, stepScale; + }; + + OneWayDescriptorMatcher( const Params& params=Params() ); + virtual ~OneWayDescriptorMatcher(); + + void initialize( const Params& params, const Ptr& base=Ptr() ); + + // Clears keypoints storing in collection and OneWayDescriptorBase + virtual void clear(); + + virtual void train(); + + virtual bool isMaskSupported(); + + virtual void read( const FileNode &fn ); + virtual void write( FileStorage& fs ) const; + + virtual bool empty() const; + + virtual Ptr clone( bool emptyTrainData=false ) const; + +protected: + // Matches a set of keypoints from a single image of the training set. A rectangle with a center in a keypoint + // and size (patch_width/2*scale, patch_height/2*scale) is cropped from the source image for each + // keypoint. scale is iterated from DescriptorOneWayParams::min_scale to DescriptorOneWayParams::max_scale. + // The minimum distance to each training patch with all its affine poses is found over all scales. + // The class ID of a match is returned for each keypoint. The distance is calculated over PCA components + // loaded with DescriptorOneWay::Initialize, kd tree is used for finding minimum distances. 
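+ /* Illustrative usage sketch (not part of the original header; queryImage, queryKeypoints,
+    trainImage and trainKeypoints are hypothetical variables, and "pca.yml" is an assumed
+    precomputed PCA file). Matching is done through the public GenericDescriptorMatcher
+    interface rather than the protected *MatchImpl methods declared below:
+
+        OneWayDescriptorMatcher::Params params;      // POSE_COUNT poses, 24x24 patches by default
+        params.pcaFilename = "pca.yml";              // assumed path to precomputed PCA data
+        OneWayDescriptorMatcher matcher( params );
+        std::vector<cv::DMatch> matches;
+        matcher.match( queryImage, queryKeypoints, trainImage, trainKeypoints, matches );
+ */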
+ virtual void knnMatchImpl( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, int k, + const vector& masks, bool compactResult ); + virtual void radiusMatchImpl( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, float maxDistance, + const vector& masks, bool compactResult ); + + Ptr base; + Params params; + int prevTrainCount; +}; + +/* + * FernDescriptorMatcher + */ +class FernDescriptorMatcher; +typedef FernDescriptorMatcher FernDescriptorMatch; + +class CV_EXPORTS FernDescriptorMatcher : public GenericDescriptorMatcher +{ +public: + class CV_EXPORTS Params + { + public: + Params( int nclasses=0, + int patchSize=FernClassifier::PATCH_SIZE, + int signatureSize=FernClassifier::DEFAULT_SIGNATURE_SIZE, + int nstructs=FernClassifier::DEFAULT_STRUCTS, + int structSize=FernClassifier::DEFAULT_STRUCT_SIZE, + int nviews=FernClassifier::DEFAULT_VIEWS, + int compressionMethod=FernClassifier::COMPRESSION_NONE, + const PatchGenerator& patchGenerator=PatchGenerator() ); + + Params( const string& filename ); + + int nclasses; + int patchSize; + int signatureSize; + int nstructs; + int structSize; + int nviews; + int compressionMethod; + PatchGenerator patchGenerator; + + string filename; + }; + + FernDescriptorMatcher( const Params& params=Params() ); + virtual ~FernDescriptorMatcher(); + + virtual void clear(); + + virtual void train(); + + virtual bool isMaskSupported(); + + virtual void read( const FileNode &fn ); + virtual void write( FileStorage& fs ) const; + virtual bool empty() const; + + virtual Ptr clone( bool emptyTrainData=false ) const; + +protected: + virtual void knnMatchImpl( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, int k, + const vector& masks, bool compactResult ); + virtual void radiusMatchImpl( const Mat& queryImage, vector& queryKeypoints, + vector >& matches, float maxDistance, + const vector& masks, bool compactResult ); + + void trainFernClassifier(); + void calcBestProbAndMatchIdx( const Mat& image, const Point2f& pt, + float& bestProb, int& bestMatchIdx, vector& signature ); + Ptr classifier; + Params params; + int prevTrainCount; +}; + + +/* + * CalonderDescriptorExtractor + */ +template +class CV_EXPORTS CalonderDescriptorExtractor : public DescriptorExtractor +{ +public: + CalonderDescriptorExtractor( const string& classifierFile ); + + virtual void read( const FileNode &fn ); + virtual void write( FileStorage &fs ) const; + + virtual int descriptorSize() const { return classifier_.classes(); } + virtual int descriptorType() const { return DataType::type; } + + virtual bool empty() const; + +protected: + virtual void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + + RTreeClassifier classifier_; + static const int BORDER_SIZE = 16; +}; + +template +CalonderDescriptorExtractor::CalonderDescriptorExtractor(const std::string& classifier_file) +{ + classifier_.read( classifier_file.c_str() ); +} + +template +void CalonderDescriptorExtractor::computeImpl( const Mat& image, + vector& keypoints, + Mat& descriptors) const +{ + // Cannot compute descriptors for keypoints on the image border. 
+ KeyPointsFilter::runByImageBorder(keypoints, image.size(), BORDER_SIZE); + + /// @todo Check 16-byte aligned + descriptors.create((int)keypoints.size(), classifier_.classes(), cv::DataType::type); + + int patchSize = RandomizedTree::PATCH_SIZE; + int offset = patchSize / 2; + for (size_t i = 0; i < keypoints.size(); ++i) + { + cv::Point2f pt = keypoints[i].pt; + IplImage ipl = image( Rect((int)(pt.x - offset), (int)(pt.y - offset), patchSize, patchSize) ); + classifier_.getSignature( &ipl, descriptors.ptr((int)i)); + } +} + +template +void CalonderDescriptorExtractor::read( const FileNode& ) +{} + +template +void CalonderDescriptorExtractor::write( FileStorage& ) const +{} + +template +bool CalonderDescriptorExtractor::empty() const +{ + return classifier_.trees_.empty(); +} + + +////////////////////// Brute Force Matcher ////////////////////////// + +template +class CV_EXPORTS BruteForceMatcher : public BFMatcher +{ +public: + BruteForceMatcher( Distance d = Distance() ) : BFMatcher(Distance::normType, false) {(void)d;} + virtual ~BruteForceMatcher() {} +}; + + +/****************************************************************************************\ +* Planar Object Detection * +\****************************************************************************************/ + +class CV_EXPORTS PlanarObjectDetector +{ +public: + PlanarObjectDetector(); + PlanarObjectDetector(const FileNode& node); + PlanarObjectDetector(const vector& pyr, int _npoints=300, + int _patchSize=FernClassifier::PATCH_SIZE, + int _nstructs=FernClassifier::DEFAULT_STRUCTS, + int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE, + int _nviews=FernClassifier::DEFAULT_VIEWS, + const LDetector& detector=LDetector(), + const PatchGenerator& patchGenerator=PatchGenerator()); + virtual ~PlanarObjectDetector(); + virtual void train(const vector& pyr, int _npoints=300, + int _patchSize=FernClassifier::PATCH_SIZE, + int _nstructs=FernClassifier::DEFAULT_STRUCTS, + int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE, + int _nviews=FernClassifier::DEFAULT_VIEWS, + const LDetector& detector=LDetector(), + const PatchGenerator& patchGenerator=PatchGenerator()); + virtual void train(const vector& pyr, const vector& keypoints, + int _patchSize=FernClassifier::PATCH_SIZE, + int _nstructs=FernClassifier::DEFAULT_STRUCTS, + int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE, + int _nviews=FernClassifier::DEFAULT_VIEWS, + const LDetector& detector=LDetector(), + const PatchGenerator& patchGenerator=PatchGenerator()); + Rect getModelROI() const; + vector getModelPoints() const; + const LDetector& getDetector() const; + const FernClassifier& getClassifier() const; + void setVerbose(bool verbose); + + void read(const FileNode& node); + void write(FileStorage& fs, const String& name=String()) const; + bool operator()(const Mat& image, CV_OUT Mat& H, CV_OUT vector& corners) const; + bool operator()(const vector& pyr, const vector& keypoints, + CV_OUT Mat& H, CV_OUT vector& corners, + CV_OUT vector* pairs=0) const; + +protected: + bool verbose; + Rect modelROI; + vector modelPoints; + LDetector ldetector; + FernClassifier fernClassifier; +}; + +} + +// 2009-01-12, Xavier Delacour + +struct lsh_hash { + int h1, h2; +}; + +struct CvLSHOperations +{ + virtual ~CvLSHOperations() {} + + virtual int vector_add(const void* data) = 0; + virtual void vector_remove(int i) = 0; + virtual const void* vector_lookup(int i) = 0; + virtual void vector_reserve(int n) = 0; + virtual unsigned int vector_count() = 0; + + virtual void hash_insert(lsh_hash h, int 
l, int i) = 0; + virtual void hash_remove(lsh_hash h, int l, int i) = 0; + virtual int hash_lookup(lsh_hash h, int l, int* ret_i, int ret_i_max) = 0; +}; + +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* Splits color or grayscale image into multiple connected components + of nearly the same color/brightness using modification of Burt algorithm. + comp with contain a pointer to sequence (CvSeq) + of connected components (CvConnectedComp) */ +CVAPI(void) cvPyrSegmentation( IplImage* src, IplImage* dst, + CvMemStorage* storage, CvSeq** comp, + int level, double threshold1, + double threshold2 ); + +/****************************************************************************************\ +* Planar subdivisions * +\****************************************************************************************/ + +/* Initializes Delaunay triangulation */ +CVAPI(void) cvInitSubdivDelaunay2D( CvSubdiv2D* subdiv, CvRect rect ); + +/* Creates new subdivision */ +CVAPI(CvSubdiv2D*) cvCreateSubdiv2D( int subdiv_type, int header_size, + int vtx_size, int quadedge_size, + CvMemStorage* storage ); + +/************************* high-level subdivision functions ***************************/ + +/* Simplified Delaunay diagram creation */ +CV_INLINE CvSubdiv2D* cvCreateSubdivDelaunay2D( CvRect rect, CvMemStorage* storage ) +{ + CvSubdiv2D* subdiv = cvCreateSubdiv2D( CV_SEQ_KIND_SUBDIV2D, sizeof(*subdiv), + sizeof(CvSubdiv2DPoint), sizeof(CvQuadEdge2D), storage ); + + cvInitSubdivDelaunay2D( subdiv, rect ); + return subdiv; +} + + +/* Inserts new point to the Delaunay triangulation */ +CVAPI(CvSubdiv2DPoint*) cvSubdivDelaunay2DInsert( CvSubdiv2D* subdiv, CvPoint2D32f pt); + +/* Locates a point within the Delaunay triangulation (finds the edge + the point is left to or belongs to, or the triangulation point the given + point coinsides with */ +CVAPI(CvSubdiv2DPointLocation) cvSubdiv2DLocate( + CvSubdiv2D* subdiv, CvPoint2D32f pt, + CvSubdiv2DEdge* edge, + CvSubdiv2DPoint** vertex CV_DEFAULT(NULL) ); + +/* Calculates Voronoi tesselation (i.e. coordinates of Voronoi points) */ +CVAPI(void) cvCalcSubdivVoronoi2D( CvSubdiv2D* subdiv ); + + +/* Removes all Voronoi points from the tesselation */ +CVAPI(void) cvClearSubdivVoronoi2D( CvSubdiv2D* subdiv ); + + +/* Finds the nearest to the given point vertex in subdivision. 
*/ +CVAPI(CvSubdiv2DPoint*) cvFindNearestPoint2D( CvSubdiv2D* subdiv, CvPoint2D32f pt ); + + +/************ Basic quad-edge navigation and operations ************/ + +CV_INLINE CvSubdiv2DEdge cvSubdiv2DNextEdge( CvSubdiv2DEdge edge ) +{ + return CV_SUBDIV2D_NEXT_EDGE(edge); +} + + +CV_INLINE CvSubdiv2DEdge cvSubdiv2DRotateEdge( CvSubdiv2DEdge edge, int rotate ) +{ + return (edge & ~3) + ((edge + rotate) & 3); +} + +CV_INLINE CvSubdiv2DEdge cvSubdiv2DSymEdge( CvSubdiv2DEdge edge ) +{ + return edge ^ 2; +} + +CV_INLINE CvSubdiv2DEdge cvSubdiv2DGetEdge( CvSubdiv2DEdge edge, CvNextEdgeType type ) +{ + CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3); + edge = e->next[(edge + (int)type) & 3]; + return (edge & ~3) + ((edge + ((int)type >> 4)) & 3); +} + + +CV_INLINE CvSubdiv2DPoint* cvSubdiv2DEdgeOrg( CvSubdiv2DEdge edge ) +{ + CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3); + return (CvSubdiv2DPoint*)e->pt[edge & 3]; +} + + +CV_INLINE CvSubdiv2DPoint* cvSubdiv2DEdgeDst( CvSubdiv2DEdge edge ) +{ + CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3); + return (CvSubdiv2DPoint*)e->pt[(edge + 2) & 3]; +} + +/****************************************************************************************\ +* Additional operations on Subdivisions * +\****************************************************************************************/ + +// paints voronoi diagram: just demo function +CVAPI(void) icvDrawMosaic( CvSubdiv2D* subdiv, IplImage* src, IplImage* dst ); + +// checks planar subdivision for correctness. It is not an absolute check, +// but it verifies some relations between quad-edges +CVAPI(int) icvSubdiv2DCheck( CvSubdiv2D* subdiv ); + +// returns squared distance between two 2D points with floating-point coordinates. +CV_INLINE double icvSqDist2D32f( CvPoint2D32f pt1, CvPoint2D32f pt2 ) +{ + double dx = pt1.x - pt2.x; + double dy = pt1.y - pt2.y; + + return dx*dx + dy*dy; +} + + + + +CV_INLINE double cvTriangleArea( CvPoint2D32f a, CvPoint2D32f b, CvPoint2D32f c ) +{ + return ((double)b.x - a.x) * ((double)c.y - a.y) - ((double)b.y - a.y) * ((double)c.x - a.x); +} + + +/* Constructs kd-tree from set of feature descriptors */ +CVAPI(struct CvFeatureTree*) cvCreateKDTree(CvMat* desc); + +/* Constructs spill-tree from set of feature descriptors */ +CVAPI(struct CvFeatureTree*) cvCreateSpillTree( const CvMat* raw_data, + const int naive CV_DEFAULT(50), + const double rho CV_DEFAULT(.7), + const double tau CV_DEFAULT(.1) ); + +/* Release feature tree */ +CVAPI(void) cvReleaseFeatureTree(struct CvFeatureTree* tr); + +/* Searches feature tree for k nearest neighbors of given reference points, + searching (in case of kd-tree/bbf) at most emax leaves. */ +CVAPI(void) cvFindFeatures(struct CvFeatureTree* tr, const CvMat* query_points, + CvMat* indices, CvMat* dist, int k, int emax CV_DEFAULT(20)); + +/* Search feature tree for all points that are inlier to given rect region. + Only implemented for kd trees */ +CVAPI(int) cvFindFeaturesBoxed(struct CvFeatureTree* tr, + CvMat* bounds_min, CvMat* bounds_max, + CvMat* out_indices); + + +/* Construct a Locality Sensitive Hash (LSH) table, for indexing d-dimensional vectors of + given type. Vectors will be hashed L times with k-dimensional p-stable (p=2) functions. */ +CVAPI(struct CvLSH*) cvCreateLSH(struct CvLSHOperations* ops, int d, + int L CV_DEFAULT(10), int k CV_DEFAULT(10), + int type CV_DEFAULT(CV_64FC1), double r CV_DEFAULT(4), + int64 seed CV_DEFAULT(-1)); + +/* Construct in-memory LSH table, with n bins. 
*/ +CVAPI(struct CvLSH*) cvCreateMemoryLSH(int d, int n, int L CV_DEFAULT(10), int k CV_DEFAULT(10), + int type CV_DEFAULT(CV_64FC1), double r CV_DEFAULT(4), + int64 seed CV_DEFAULT(-1)); + +/* Free the given LSH structure. */ +CVAPI(void) cvReleaseLSH(struct CvLSH** lsh); + +/* Return the number of vectors in the LSH. */ +CVAPI(unsigned int) LSHSize(struct CvLSH* lsh); + +/* Add vectors to the LSH structure, optionally returning indices. */ +CVAPI(void) cvLSHAdd(struct CvLSH* lsh, const CvMat* data, CvMat* indices CV_DEFAULT(0)); + +/* Remove vectors from LSH, as addressed by given indices. */ +CVAPI(void) cvLSHRemove(struct CvLSH* lsh, const CvMat* indices); + +/* Query the LSH n times for at most k nearest points; data is n x d, + indices and dist are n x k. At most emax stored points will be accessed. */ +CVAPI(void) cvLSHQuery(struct CvLSH* lsh, const CvMat* query_points, + CvMat* indices, CvMat* dist, int k, int emax); + +/* Kolmogorov-Zabin stereo-correspondence algorithm (a.k.a. KZ1) */ +#define CV_STEREO_GC_OCCLUDED SHRT_MAX + +typedef struct CvStereoGCState +{ + int Ithreshold; + int interactionRadius; + float K, lambda, lambda1, lambda2; + int occlusionCost; + int minDisparity; + int numberOfDisparities; + int maxIters; + + CvMat* left; + CvMat* right; + CvMat* dispLeft; + CvMat* dispRight; + CvMat* ptrLeft; + CvMat* ptrRight; + CvMat* vtxBuf; + CvMat* edgeBuf; +} CvStereoGCState; + +CVAPI(CvStereoGCState*) cvCreateStereoGCState( int numberOfDisparities, int maxIters ); +CVAPI(void) cvReleaseStereoGCState( CvStereoGCState** state ); + +CVAPI(void) cvFindStereoCorrespondenceGC( const CvArr* left, const CvArr* right, + CvArr* disparityLeft, CvArr* disparityRight, + CvStereoGCState* state, + int useDisparityGuess CV_DEFAULT(0) ); + +/* Calculates optical flow for 2 images using classical Lucas & Kanade algorithm */ +CVAPI(void) cvCalcOpticalFlowLK( const CvArr* prev, const CvArr* curr, + CvSize win_size, CvArr* velx, CvArr* vely ); + +/* Calculates optical flow for 2 images using block matching algorithm */ +CVAPI(void) cvCalcOpticalFlowBM( const CvArr* prev, const CvArr* curr, + CvSize block_size, CvSize shift_size, + CvSize max_range, int use_previous, + CvArr* velx, CvArr* vely ); + +/* Calculates Optical flow for 2 images using Horn & Schunck algorithm */ +CVAPI(void) cvCalcOpticalFlowHS( const CvArr* prev, const CvArr* curr, + int use_previous, CvArr* velx, CvArr* vely, + double lambda, CvTermCriteria criteria ); + + +/****************************************************************************************\ +* Background/foreground segmentation * +\****************************************************************************************/ + +/* We discriminate between foreground and background pixels + * by building and maintaining a model of the background. + * Any pixel which does not fit this model is then deemed + * to be foreground. + * + * At present we support two core background models, + * one of which has two variations: + * + * o CV_BG_MODEL_FGD: latest and greatest algorithm, described in + * + * Foreground Object Detection from Videos Containing Complex Background. + * Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian. + * ACM MM2003 9p + * + * o CV_BG_MODEL_FGD_SIMPLE: + * A code comment describes this as a simplified version of the above, + * but the code is in fact currently identical + * + * o CV_BG_MODEL_MOG: "Mixture of Gaussians", older algorithm, described in + * + * Moving target classification and tracking from real-time video. 
+ * A Lipton, H Fujijoshi, R Patil + * Proceedings IEEE Workshop on Application of Computer Vision pp 8-14 1998 + * + * Learning patterns of activity using real-time tracking + * C Stauffer and W Grimson August 2000 + * IEEE Transactions on Pattern Analysis and Machine Intelligence 22(8):747-757 + */ + + +#define CV_BG_MODEL_FGD 0 +#define CV_BG_MODEL_MOG 1 /* "Mixture of Gaussians". */ +#define CV_BG_MODEL_FGD_SIMPLE 2 + +struct CvBGStatModel; + +typedef void (CV_CDECL * CvReleaseBGStatModel)( struct CvBGStatModel** bg_model ); +typedef int (CV_CDECL * CvUpdateBGStatModel)( IplImage* curr_frame, struct CvBGStatModel* bg_model, + double learningRate ); + +#define CV_BG_STAT_MODEL_FIELDS() \ +int type; /*type of BG model*/ \ +CvReleaseBGStatModel release; \ +CvUpdateBGStatModel update; \ +IplImage* background; /*8UC3 reference background image*/ \ +IplImage* foreground; /*8UC1 foreground image*/ \ +IplImage** layers; /*8UC3 reference background image, can be null */ \ +int layer_count; /* can be zero */ \ +CvMemStorage* storage; /*storage for foreground_regions*/ \ +CvSeq* foreground_regions /*foreground object contours*/ + +typedef struct CvBGStatModel +{ + CV_BG_STAT_MODEL_FIELDS(); +} CvBGStatModel; + +// + +// Releases memory used by BGStatModel +CVAPI(void) cvReleaseBGStatModel( CvBGStatModel** bg_model ); + +// Updates statistical model and returns number of found foreground regions +CVAPI(int) cvUpdateBGStatModel( IplImage* current_frame, CvBGStatModel* bg_model, + double learningRate CV_DEFAULT(-1)); + +// Performs FG post-processing using segmentation +// (all pixels of a region will be classified as foreground if majority of pixels of the region are FG). +// parameters: +// segments - pointer to result of segmentation (for example MeanShiftSegmentation) +// bg_model - pointer to CvBGStatModel structure +CVAPI(void) cvRefineForegroundMaskBySegm( CvSeq* segments, CvBGStatModel* bg_model ); + +/* Common use change detection function */ +CVAPI(int) cvChangeDetection( IplImage* prev_frame, + IplImage* curr_frame, + IplImage* change_mask ); + +/* + Interface of ACM MM2003 algorithm + */ + +/* Default parameters of foreground detection algorithm: */ +#define CV_BGFG_FGD_LC 128 +#define CV_BGFG_FGD_N1C 15 +#define CV_BGFG_FGD_N2C 25 + +#define CV_BGFG_FGD_LCC 64 +#define CV_BGFG_FGD_N1CC 25 +#define CV_BGFG_FGD_N2CC 40 + +/* Background reference image update parameter: */ +#define CV_BGFG_FGD_ALPHA_1 0.1f + +/* stat model update parameter + * 0.002f ~ 1K frame(~45sec), 0.005 ~ 18sec (if 25fps and absolutely static BG) + */ +#define CV_BGFG_FGD_ALPHA_2 0.005f + +/* start value for alpha parameter (to fast initiate statistic model) */ +#define CV_BGFG_FGD_ALPHA_3 0.1f + +#define CV_BGFG_FGD_DELTA 2 + +#define CV_BGFG_FGD_T 0.9f + +#define CV_BGFG_FGD_MINAREA 15.f + +#define CV_BGFG_FGD_BG_UPDATE_TRESH 0.5f + +/* See the above-referenced Li/Huang/Gu/Tian paper + * for a full description of these background-model + * tuning parameters. + * + * Nomenclature: 'c' == "color", a three-component red/green/blue vector. + * We use histograms of these to model the range of + * colors we've seen at a given background pixel. + * + * 'cc' == "color co-occurrence", a six-component vector giving + * RGB color for both this frame and preceding frame. + * We use histograms of these to model the range of + * color CHANGES we've seen at a given background pixel. + */ +typedef struct CvFGDStatModelParams +{ + int Lc; /* Quantized levels per 'color' component. Power of two, typically 32, 64 or 128. 
*/ + int N1c; /* Number of color vectors used to model normal background color variation at a given pixel. */ + int N2c; /* Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c. */ + /* Used to allow the first N1c vectors to adapt over time to changing background. */ + + int Lcc; /* Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64. */ + int N1cc; /* Number of color co-occurrence vectors used to model normal background color variation at a given pixel. */ + int N2cc; /* Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc. */ + /* Used to allow the first N1cc vectors to adapt over time to changing background. */ + + int is_obj_without_holes;/* If TRUE we ignore holes within foreground blobs. Defaults to TRUE. */ + int perform_morphing; /* Number of erode-dilate-erode foreground-blob cleanup iterations. */ + /* These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1. */ + + float alpha1; /* How quickly we forget old background pixel values seen. Typically set to 0.1 */ + float alpha2; /* "Controls speed of feature learning". Depends on T. Typical value circa 0.005. */ + float alpha3; /* Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1. */ + + float delta; /* Affects color and color co-occurrence quantization, typically set to 2. */ + float T; /* "A percentage value which determines when new features can be recognized as new background." (Typically 0.9).*/ + float minArea; /* Discard foreground blobs whose bounding box is smaller than this threshold. */ +} CvFGDStatModelParams; + +typedef struct CvBGPixelCStatTable +{ + float Pv, Pvb; + uchar v[3]; +} CvBGPixelCStatTable; + +typedef struct CvBGPixelCCStatTable +{ + float Pv, Pvb; + uchar v[6]; +} CvBGPixelCCStatTable; + +typedef struct CvBGPixelStat +{ + float Pbc; + float Pbcc; + CvBGPixelCStatTable* ctable; + CvBGPixelCCStatTable* cctable; + uchar is_trained_st_model; + uchar is_trained_dyn_model; +} CvBGPixelStat; + + +typedef struct CvFGDStatModel +{ + CV_BG_STAT_MODEL_FIELDS(); + CvBGPixelStat* pixel_stat; + IplImage* Ftd; + IplImage* Fbd; + IplImage* prev_frame; + CvFGDStatModelParams params; +} CvFGDStatModel; + +/* Creates FGD model */ +CVAPI(CvBGStatModel*) cvCreateFGDStatModel( IplImage* first_frame, + CvFGDStatModelParams* parameters CV_DEFAULT(NULL)); + +/* + Interface of Gaussian mixture algorithm + + "An improved adaptive background mixture model for real-time tracking with shadow detection" + P. KadewTraKuPong and R. Bowden, + Proc. 2nd European Workshp on Advanced Video-Based Surveillance Systems, 2001." 
+ http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf + */ + +/* Note: "MOG" == "Mixture Of Gaussians": */ + +#define CV_BGFG_MOG_MAX_NGAUSSIANS 500 + +/* default parameters of gaussian background detection algorithm */ +#define CV_BGFG_MOG_BACKGROUND_THRESHOLD 0.7 /* threshold sum of weights for background test */ +#define CV_BGFG_MOG_STD_THRESHOLD 2.5 /* lambda=2.5 is 99% */ +#define CV_BGFG_MOG_WINDOW_SIZE 200 /* Learning rate; alpha = 1/CV_GBG_WINDOW_SIZE */ +#define CV_BGFG_MOG_NGAUSSIANS 5 /* = K = number of Gaussians in mixture */ +#define CV_BGFG_MOG_WEIGHT_INIT 0.05 +#define CV_BGFG_MOG_SIGMA_INIT 30 +#define CV_BGFG_MOG_MINAREA 15.f + + +#define CV_BGFG_MOG_NCOLORS 3 + +typedef struct CvGaussBGStatModelParams +{ + int win_size; /* = 1/alpha */ + int n_gauss; + double bg_threshold, std_threshold, minArea; + double weight_init, variance_init; +}CvGaussBGStatModelParams; + +typedef struct CvGaussBGValues +{ + int match_sum; + double weight; + double variance[CV_BGFG_MOG_NCOLORS]; + double mean[CV_BGFG_MOG_NCOLORS]; +} CvGaussBGValues; + +typedef struct CvGaussBGPoint +{ + CvGaussBGValues* g_values; +} CvGaussBGPoint; + + +typedef struct CvGaussBGModel +{ + CV_BG_STAT_MODEL_FIELDS(); + CvGaussBGStatModelParams params; + CvGaussBGPoint* g_point; + int countFrames; + void* mog; +} CvGaussBGModel; + + +/* Creates Gaussian mixture background model */ +CVAPI(CvBGStatModel*) cvCreateGaussianBGModel( IplImage* first_frame, + CvGaussBGStatModelParams* parameters CV_DEFAULT(NULL)); + + +typedef struct CvBGCodeBookElem +{ + struct CvBGCodeBookElem* next; + int tLastUpdate; + int stale; + uchar boxMin[3]; + uchar boxMax[3]; + uchar learnMin[3]; + uchar learnMax[3]; +} CvBGCodeBookElem; + +typedef struct CvBGCodeBookModel +{ + CvSize size; + int t; + uchar cbBounds[3]; + uchar modMin[3]; + uchar modMax[3]; + CvBGCodeBookElem** cbmap; + CvMemStorage* storage; + CvBGCodeBookElem* freeList; +} CvBGCodeBookModel; + +CVAPI(CvBGCodeBookModel*) cvCreateBGCodeBookModel( void ); +CVAPI(void) cvReleaseBGCodeBookModel( CvBGCodeBookModel** model ); + +CVAPI(void) cvBGCodeBookUpdate( CvBGCodeBookModel* model, const CvArr* image, + CvRect roi CV_DEFAULT(cvRect(0,0,0,0)), + const CvArr* mask CV_DEFAULT(0) ); + +CVAPI(int) cvBGCodeBookDiff( const CvBGCodeBookModel* model, const CvArr* image, + CvArr* fgmask, CvRect roi CV_DEFAULT(cvRect(0,0,0,0)) ); + +CVAPI(void) cvBGCodeBookClearStale( CvBGCodeBookModel* model, int staleThresh, + CvRect roi CV_DEFAULT(cvRect(0,0,0,0)), + const CvArr* mask CV_DEFAULT(0) ); + +CVAPI(CvSeq*) cvSegmentFGMask( CvArr *fgmask, int poly1Hull0 CV_DEFAULT(1), + float perimScale CV_DEFAULT(4.f), + CvMemStorage* storage CV_DEFAULT(0), + CvPoint offset CV_DEFAULT(cvPoint(0,0))); + +#ifdef __cplusplus +} +#endif + +#endif + +/* End of file. */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/legacy/streams.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/legacy/streams.hpp new file mode 100644 index 0000000..e164bf4 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/legacy/streams.hpp @@ -0,0 +1,92 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CVSTREAMS_H__ +#define __OPENCV_CVSTREAMS_H__ + +#ifdef WIN32 +#include /* !!! IF YOU'VE GOT AN ERROR HERE, PLEASE READ BELOW !!! */ +/***************** How to get Visual Studio understand streams.h ****************\ + +You need DirectShow SDK that is now a part of Platform SDK +(Windows Server 2003 SP1 SDK or later), +and DirectX SDK (2006 April or later). + +1. Download the Platform SDK from + http://www.microsoft.com/msdownload/platformsdk/sdkupdate/ + and DirectX SDK from msdn.microsoft.com/directx/ + (They are huge, but you can download it by parts). + If it doesn't work for you, consider HighGUI that can capture video via VFW or MIL + +2. Install Platform SDK together with DirectShow SDK. + Install DirectX (with or without sample code). + +3. Build baseclasses. + See \samples\multimedia\directshow\readme.txt. + +4. Copy the built libraries (called strmbase.lib and strmbasd.lib + in Release and Debug versions, respectively) to + \lib. + +5. In Developer Studio add the following paths: + \include + \include + \samples\multimedia\directshow\baseclasses + to the includes' search path + (at Tools->Options->Directories->Include files in case of Visual Studio 6.0, + at Tools->Options->Projects and Solutions->VC++ Directories->Include files in case + of Visual Studio 2005) + Add + \lib + \lib + to the libraries' search path (in the same dialog, ...->"Library files" page) + + NOTE: PUT THE ADDED LINES ON THE VERY TOP OF THE LISTS, OTHERWISE YOU MAY STILL GET + COMPILER OR LINKER ERRORS. This is necessary, because Visual Studio + may include older versions of the same headers and libraries. + +6. Now you can build OpenCV DirectShow filters. 
+ +\***********************************************************************************/ + +#endif + +#endif diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/ml/ml.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/ml/ml.hpp new file mode 100644 index 0000000..d86ecde --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/ml/ml.hpp @@ -0,0 +1,2147 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_ML_HPP__ +#define __OPENCV_ML_HPP__ + +#include "opencv2/core/core.hpp" +#include + +#ifdef __cplusplus + +#include +#include +#include + +// Apple defines a check() macro somewhere in the debug headers +// that interferes with a method definiton in this header +#undef check + +/****************************************************************************************\ +* Main struct definitions * +\****************************************************************************************/ + +/* log(2*PI) */ +#define CV_LOG2PI (1.8378770664093454835606594728112) + +/* columns of matrix are training samples */ +#define CV_COL_SAMPLE 0 + +/* rows of matrix are training samples */ +#define CV_ROW_SAMPLE 1 + +#define CV_IS_ROW_SAMPLE(flags) ((flags) & CV_ROW_SAMPLE) + +struct CvVectors +{ + int type; + int dims, count; + CvVectors* next; + union + { + uchar** ptr; + float** fl; + double** db; + } data; +}; + +#if 0 +/* A structure, representing the lattice range of statmodel parameters. 
+ It is used for optimizing statmodel parameters by cross-validation method. + The lattice is logarithmic, so must be greater then 1. */ +typedef struct CvParamLattice +{ + double min_val; + double max_val; + double step; +} +CvParamLattice; + +CV_INLINE CvParamLattice cvParamLattice( double min_val, double max_val, + double log_step ) +{ + CvParamLattice pl; + pl.min_val = MIN( min_val, max_val ); + pl.max_val = MAX( min_val, max_val ); + pl.step = MAX( log_step, 1. ); + return pl; +} + +CV_INLINE CvParamLattice cvDefaultParamLattice( void ) +{ + CvParamLattice pl = {0,0,0}; + return pl; +} +#endif + +/* Variable type */ +#define CV_VAR_NUMERICAL 0 +#define CV_VAR_ORDERED 0 +#define CV_VAR_CATEGORICAL 1 + +#define CV_TYPE_NAME_ML_SVM "opencv-ml-svm" +#define CV_TYPE_NAME_ML_KNN "opencv-ml-knn" +#define CV_TYPE_NAME_ML_NBAYES "opencv-ml-bayesian" +#define CV_TYPE_NAME_ML_EM "opencv-ml-em" +#define CV_TYPE_NAME_ML_BOOSTING "opencv-ml-boost-tree" +#define CV_TYPE_NAME_ML_TREE "opencv-ml-tree" +#define CV_TYPE_NAME_ML_ANN_MLP "opencv-ml-ann-mlp" +#define CV_TYPE_NAME_ML_CNN "opencv-ml-cnn" +#define CV_TYPE_NAME_ML_RTREES "opencv-ml-random-trees" +#define CV_TYPE_NAME_ML_ERTREES "opencv-ml-extremely-randomized-trees" +#define CV_TYPE_NAME_ML_GBT "opencv-ml-gradient-boosting-trees" + +#define CV_TRAIN_ERROR 0 +#define CV_TEST_ERROR 1 + +class CV_EXPORTS_W CvStatModel +{ +public: + CvStatModel(); + virtual ~CvStatModel(); + + virtual void clear(); + + CV_WRAP virtual void save( const char* filename, const char* name=0 ) const; + CV_WRAP virtual void load( const char* filename, const char* name=0 ); + + virtual void write( CvFileStorage* storage, const char* name ) const; + virtual void read( CvFileStorage* storage, CvFileNode* node ); + +protected: + const char* default_model_name; +}; + +/****************************************************************************************\ +* Normal Bayes Classifier * +\****************************************************************************************/ + +/* The structure, representing the grid range of statmodel parameters. + It is used for optimizing statmodel accuracy by varying model parameters, + the accuracy estimate being computed by cross-validation. + The grid is logarithmic, so must be greater then 1. 
*/ + +class CvMLData; + +struct CV_EXPORTS_W_MAP CvParamGrid +{ + // SVM params type + enum { SVM_C=0, SVM_GAMMA=1, SVM_P=2, SVM_NU=3, SVM_COEF=4, SVM_DEGREE=5 }; + + CvParamGrid() + { + min_val = max_val = step = 0; + } + + CvParamGrid( double min_val, double max_val, double log_step ); + //CvParamGrid( int param_id ); + bool check() const; + + CV_PROP_RW double min_val; + CV_PROP_RW double max_val; + CV_PROP_RW double step; +}; + +inline CvParamGrid::CvParamGrid( double _min_val, double _max_val, double _log_step ) +{ + min_val = _min_val; + max_val = _max_val; + step = _log_step; +} + +class CV_EXPORTS_W CvNormalBayesClassifier : public CvStatModel +{ +public: + CV_WRAP CvNormalBayesClassifier(); + virtual ~CvNormalBayesClassifier(); + + CvNormalBayesClassifier( const CvMat* trainData, const CvMat* responses, + const CvMat* varIdx=0, const CvMat* sampleIdx=0 ); + + virtual bool train( const CvMat* trainData, const CvMat* responses, + const CvMat* varIdx = 0, const CvMat* sampleIdx=0, bool update=false ); + + virtual float predict( const CvMat* samples, CV_OUT CvMat* results=0 ) const; + CV_WRAP virtual void clear(); + + CV_WRAP CvNormalBayesClassifier( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& varIdx=cv::Mat(), const cv::Mat& sampleIdx=cv::Mat() ); + CV_WRAP virtual bool train( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& varIdx = cv::Mat(), const cv::Mat& sampleIdx=cv::Mat(), + bool update=false ); + CV_WRAP virtual float predict( const cv::Mat& samples, CV_OUT cv::Mat* results=0 ) const; + + virtual void write( CvFileStorage* storage, const char* name ) const; + virtual void read( CvFileStorage* storage, CvFileNode* node ); + +protected: + int var_count, var_all; + CvMat* var_idx; + CvMat* cls_labels; + CvMat** count; + CvMat** sum; + CvMat** productsum; + CvMat** avg; + CvMat** inv_eigen_values; + CvMat** cov_rotate_mats; + CvMat* c; +}; + + +/****************************************************************************************\ +* K-Nearest Neighbour Classifier * +\****************************************************************************************/ + +// k Nearest Neighbors +class CV_EXPORTS_W CvKNearest : public CvStatModel +{ +public: + + CV_WRAP CvKNearest(); + virtual ~CvKNearest(); + + CvKNearest( const CvMat* trainData, const CvMat* responses, + const CvMat* sampleIdx=0, bool isRegression=false, int max_k=32 ); + + virtual bool train( const CvMat* trainData, const CvMat* responses, + const CvMat* sampleIdx=0, bool is_regression=false, + int maxK=32, bool updateBase=false ); + + virtual float find_nearest( const CvMat* samples, int k, CV_OUT CvMat* results=0, + const float** neighbors=0, CV_OUT CvMat* neighborResponses=0, CV_OUT CvMat* dist=0 ) const; + + CV_WRAP CvKNearest( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& sampleIdx=cv::Mat(), bool isRegression=false, int max_k=32 ); + + CV_WRAP virtual bool train( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& sampleIdx=cv::Mat(), bool isRegression=false, + int maxK=32, bool updateBase=false ); + + virtual float find_nearest( const cv::Mat& samples, int k, cv::Mat* results=0, + const float** neighbors=0, cv::Mat* neighborResponses=0, + cv::Mat* dist=0 ) const; + CV_WRAP virtual float find_nearest( const cv::Mat& samples, int k, CV_OUT cv::Mat& results, + CV_OUT cv::Mat& neighborResponses, CV_OUT cv::Mat& dists) const; + + virtual void clear(); + int get_max_k() const; + int get_var_count() const; + int get_sample_count() const; 
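+    // --- Editor's note: illustrative usage sketch only, not part of the upstream
+    // --- OpenCV header. The matrix names below are placeholders.
+    //
+    //     cv::Mat trainData(100, 2, CV_32F), responses(100, 1, CV_32F);
+    //     // ... fill trainData with feature rows and responses with labels ...
+    //     CvKNearest knn;
+    //     knn.train(trainData, responses);            // defaults: classification, maxK = 32
+    //
+    //     cv::Mat queries(1, 2, CV_32F), results, neighborResponses, dists;
+    //     knn.find_nearest(queries, 3, results, neighborResponses, dists);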
+ bool is_regression() const; + + virtual float write_results( int k, int k1, int start, int end, + const float* neighbor_responses, const float* dist, CvMat* _results, + CvMat* _neighbor_responses, CvMat* _dist, Cv32suf* sort_buf ) const; + + virtual void find_neighbors_direct( const CvMat* _samples, int k, int start, int end, + float* neighbor_responses, const float** neighbors, float* dist ) const; + +protected: + + int max_k, var_count; + int total; + bool regression; + CvVectors* samples; +}; + +/****************************************************************************************\ +* Support Vector Machines * +\****************************************************************************************/ + +// SVM training parameters +struct CV_EXPORTS_W_MAP CvSVMParams +{ + CvSVMParams(); + CvSVMParams( int svm_type, int kernel_type, + double degree, double gamma, double coef0, + double Cvalue, double nu, double p, + CvMat* class_weights, CvTermCriteria term_crit ); + + CV_PROP_RW int svm_type; + CV_PROP_RW int kernel_type; + CV_PROP_RW double degree; // for poly + CV_PROP_RW double gamma; // for poly/rbf/sigmoid + CV_PROP_RW double coef0; // for poly/sigmoid + + CV_PROP_RW double C; // for CV_SVM_C_SVC, CV_SVM_EPS_SVR and CV_SVM_NU_SVR + CV_PROP_RW double nu; // for CV_SVM_NU_SVC, CV_SVM_ONE_CLASS, and CV_SVM_NU_SVR + CV_PROP_RW double p; // for CV_SVM_EPS_SVR + CvMat* class_weights; // for CV_SVM_C_SVC + CV_PROP_RW CvTermCriteria term_crit; // termination criteria +}; + + +struct CV_EXPORTS CvSVMKernel +{ + typedef void (CvSVMKernel::*Calc)( int vec_count, int vec_size, const float** vecs, + const float* another, float* results ); + CvSVMKernel(); + CvSVMKernel( const CvSVMParams* params, Calc _calc_func ); + virtual bool create( const CvSVMParams* params, Calc _calc_func ); + virtual ~CvSVMKernel(); + + virtual void clear(); + virtual void calc( int vcount, int n, const float** vecs, const float* another, float* results ); + + const CvSVMParams* params; + Calc calc_func; + + virtual void calc_non_rbf_base( int vec_count, int vec_size, const float** vecs, + const float* another, float* results, + double alpha, double beta ); + + virtual void calc_linear( int vec_count, int vec_size, const float** vecs, + const float* another, float* results ); + virtual void calc_rbf( int vec_count, int vec_size, const float** vecs, + const float* another, float* results ); + virtual void calc_poly( int vec_count, int vec_size, const float** vecs, + const float* another, float* results ); + virtual void calc_sigmoid( int vec_count, int vec_size, const float** vecs, + const float* another, float* results ); +}; + + +struct CvSVMKernelRow +{ + CvSVMKernelRow* prev; + CvSVMKernelRow* next; + float* data; +}; + + +struct CvSVMSolutionInfo +{ + double obj; + double rho; + double upper_bound_p; + double upper_bound_n; + double r; // for Solver_NU +}; + +class CV_EXPORTS CvSVMSolver +{ +public: + typedef bool (CvSVMSolver::*SelectWorkingSet)( int& i, int& j ); + typedef float* (CvSVMSolver::*GetRow)( int i, float* row, float* dst, bool existed ); + typedef void (CvSVMSolver::*CalcRho)( double& rho, double& r ); + + CvSVMSolver(); + + CvSVMSolver( int count, int var_count, const float** samples, schar* y, + int alpha_count, double* alpha, double Cp, double Cn, + CvMemStorage* storage, CvSVMKernel* kernel, GetRow get_row, + SelectWorkingSet select_working_set, CalcRho calc_rho ); + virtual bool create( int count, int var_count, const float** samples, schar* y, + int alpha_count, double* alpha, double Cp, 
double Cn, + CvMemStorage* storage, CvSVMKernel* kernel, GetRow get_row, + SelectWorkingSet select_working_set, CalcRho calc_rho ); + virtual ~CvSVMSolver(); + + virtual void clear(); + virtual bool solve_generic( CvSVMSolutionInfo& si ); + + virtual bool solve_c_svc( int count, int var_count, const float** samples, schar* y, + double Cp, double Cn, CvMemStorage* storage, + CvSVMKernel* kernel, double* alpha, CvSVMSolutionInfo& si ); + virtual bool solve_nu_svc( int count, int var_count, const float** samples, schar* y, + CvMemStorage* storage, CvSVMKernel* kernel, + double* alpha, CvSVMSolutionInfo& si ); + virtual bool solve_one_class( int count, int var_count, const float** samples, + CvMemStorage* storage, CvSVMKernel* kernel, + double* alpha, CvSVMSolutionInfo& si ); + + virtual bool solve_eps_svr( int count, int var_count, const float** samples, const float* y, + CvMemStorage* storage, CvSVMKernel* kernel, + double* alpha, CvSVMSolutionInfo& si ); + + virtual bool solve_nu_svr( int count, int var_count, const float** samples, const float* y, + CvMemStorage* storage, CvSVMKernel* kernel, + double* alpha, CvSVMSolutionInfo& si ); + + virtual float* get_row_base( int i, bool* _existed ); + virtual float* get_row( int i, float* dst ); + + int sample_count; + int var_count; + int cache_size; + int cache_line_size; + const float** samples; + const CvSVMParams* params; + CvMemStorage* storage; + CvSVMKernelRow lru_list; + CvSVMKernelRow* rows; + + int alpha_count; + + double* G; + double* alpha; + + // -1 - lower bound, 0 - free, 1 - upper bound + schar* alpha_status; + + schar* y; + double* b; + float* buf[2]; + double eps; + int max_iter; + double C[2]; // C[0] == Cn, C[1] == Cp + CvSVMKernel* kernel; + + SelectWorkingSet select_working_set_func; + CalcRho calc_rho_func; + GetRow get_row_func; + + virtual bool select_working_set( int& i, int& j ); + virtual bool select_working_set_nu_svm( int& i, int& j ); + virtual void calc_rho( double& rho, double& r ); + virtual void calc_rho_nu_svm( double& rho, double& r ); + + virtual float* get_row_svc( int i, float* row, float* dst, bool existed ); + virtual float* get_row_one_class( int i, float* row, float* dst, bool existed ); + virtual float* get_row_svr( int i, float* row, float* dst, bool existed ); +}; + + +struct CvSVMDecisionFunc +{ + double rho; + int sv_count; + double* alpha; + int* sv_index; +}; + + +// SVM model +class CV_EXPORTS_W CvSVM : public CvStatModel +{ +public: + // SVM type + enum { C_SVC=100, NU_SVC=101, ONE_CLASS=102, EPS_SVR=103, NU_SVR=104 }; + + // SVM kernel type + enum { LINEAR=0, POLY=1, RBF=2, SIGMOID=3 }; + + // SVM params type + enum { C=0, GAMMA=1, P=2, NU=3, COEF=4, DEGREE=5 }; + + CV_WRAP CvSVM(); + virtual ~CvSVM(); + + CvSVM( const CvMat* trainData, const CvMat* responses, + const CvMat* varIdx=0, const CvMat* sampleIdx=0, + CvSVMParams params=CvSVMParams() ); + + virtual bool train( const CvMat* trainData, const CvMat* responses, + const CvMat* varIdx=0, const CvMat* sampleIdx=0, + CvSVMParams params=CvSVMParams() ); + + virtual bool train_auto( const CvMat* trainData, const CvMat* responses, + const CvMat* varIdx, const CvMat* sampleIdx, CvSVMParams params, + int kfold = 10, + CvParamGrid Cgrid = get_default_grid(CvSVM::C), + CvParamGrid gammaGrid = get_default_grid(CvSVM::GAMMA), + CvParamGrid pGrid = get_default_grid(CvSVM::P), + CvParamGrid nuGrid = get_default_grid(CvSVM::NU), + CvParamGrid coeffGrid = get_default_grid(CvSVM::COEF), + CvParamGrid degreeGrid = get_default_grid(CvSVM::DEGREE), + bool 
balanced=false ); + + virtual float predict( const CvMat* sample, bool returnDFVal=false ) const; + virtual float predict( const CvMat* samples, CV_OUT CvMat* results ) const; + + CV_WRAP CvSVM( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& varIdx=cv::Mat(), const cv::Mat& sampleIdx=cv::Mat(), + CvSVMParams params=CvSVMParams() ); + + CV_WRAP virtual bool train( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& varIdx=cv::Mat(), const cv::Mat& sampleIdx=cv::Mat(), + CvSVMParams params=CvSVMParams() ); + + CV_WRAP virtual bool train_auto( const cv::Mat& trainData, const cv::Mat& responses, + const cv::Mat& varIdx, const cv::Mat& sampleIdx, CvSVMParams params, + int k_fold = 10, + CvParamGrid Cgrid = CvSVM::get_default_grid(CvSVM::C), + CvParamGrid gammaGrid = CvSVM::get_default_grid(CvSVM::GAMMA), + CvParamGrid pGrid = CvSVM::get_default_grid(CvSVM::P), + CvParamGrid nuGrid = CvSVM::get_default_grid(CvSVM::NU), + CvParamGrid coeffGrid = CvSVM::get_default_grid(CvSVM::COEF), + CvParamGrid degreeGrid = CvSVM::get_default_grid(CvSVM::DEGREE), + bool balanced=false); + CV_WRAP virtual float predict( const cv::Mat& sample, bool returnDFVal=false ) const; + CV_WRAP_AS(predict_all) void predict( cv::InputArray samples, cv::OutputArray results ) const; + + CV_WRAP virtual int get_support_vector_count() const; + virtual const float* get_support_vector(int i) const; + virtual CvSVMParams get_params() const { return params; }; + CV_WRAP virtual void clear(); + + static CvParamGrid get_default_grid( int param_id ); + + virtual void write( CvFileStorage* storage, const char* name ) const; + virtual void read( CvFileStorage* storage, CvFileNode* node ); + CV_WRAP int get_var_count() const { return var_idx ? var_idx->cols : var_all; } + +protected: + + virtual bool set_params( const CvSVMParams& params ); + virtual bool train1( int sample_count, int var_count, const float** samples, + const void* responses, double Cp, double Cn, + CvMemStorage* _storage, double* alpha, double& rho ); + virtual bool do_train( int svm_type, int sample_count, int var_count, const float** samples, + const CvMat* responses, CvMemStorage* _storage, double* alpha ); + virtual void create_kernel(); + virtual void create_solver(); + + virtual float predict( const float* row_sample, int row_len, bool returnDFVal=false ) const; + + virtual void write_params( CvFileStorage* fs ) const; + virtual void read_params( CvFileStorage* fs, CvFileNode* node ); + + void optimize_linear_svm(); + + CvSVMParams params; + CvMat* class_labels; + int var_all; + float** sv; + int sv_total; + CvMat* var_idx; + CvMat* class_weights; + CvSVMDecisionFunc* decision_func; + CvMemStorage* storage; + + CvSVMSolver* solver; + CvSVMKernel* kernel; + +private: + CvSVM(const CvSVM&); + CvSVM& operator = (const CvSVM&); +}; + +/****************************************************************************************\ +* Expectation - Maximization * +\****************************************************************************************/ +namespace cv +{ +class CV_EXPORTS_W EM : public Algorithm +{ +public: + // Type of covariation matrices + enum {COV_MAT_SPHERICAL=0, COV_MAT_DIAGONAL=1, COV_MAT_GENERIC=2, COV_MAT_DEFAULT=COV_MAT_DIAGONAL}; + + // Default parameters + enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100}; + + // The initial step + enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0}; + + CV_WRAP EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL, + const TermCriteria& 
termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, + EM::DEFAULT_MAX_ITERS, FLT_EPSILON)); + + virtual ~EM(); + CV_WRAP virtual void clear(); + + CV_WRAP virtual bool train(InputArray samples, + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()); + + CV_WRAP virtual bool trainE(InputArray samples, + InputArray means0, + InputArray covs0=noArray(), + InputArray weights0=noArray(), + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()); + + CV_WRAP virtual bool trainM(InputArray samples, + InputArray probs0, + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()); + + CV_WRAP Vec2d predict(InputArray sample, + OutputArray probs=noArray()) const; + + CV_WRAP bool isTrained() const; + + AlgorithmInfo* info() const; + virtual void read(const FileNode& fn); + +protected: + + virtual void setTrainData(int startStep, const Mat& samples, + const Mat* probs0, + const Mat* means0, + const vector* covs0, + const Mat* weights0); + + bool doTrain(int startStep, + OutputArray logLikelihoods, + OutputArray labels, + OutputArray probs); + virtual void eStep(); + virtual void mStep(); + + void clusterTrainSamples(); + void decomposeCovs(); + void computeLogWeightDivDet(); + + Vec2d computeProbabilities(const Mat& sample, Mat* probs) const; + + // all inner matrices have type CV_64FC1 + CV_PROP_RW int nclusters; + CV_PROP_RW int covMatType; + CV_PROP_RW int maxIters; + CV_PROP_RW double epsilon; + + Mat trainSamples; + Mat trainProbs; + Mat trainLogLikelihoods; + Mat trainLabels; + + CV_PROP Mat weights; + CV_PROP Mat means; + CV_PROP vector covs; + + vector covsEigenValues; + vector covsRotateMats; + vector invCovsEigenValues; + Mat logWeightDivDet; +}; +} // namespace cv + +/****************************************************************************************\ +* Decision Tree * +\****************************************************************************************/\ +struct CvPair16u32s +{ + unsigned short* u; + int* i; +}; + + +#define CV_DTREE_CAT_DIR(idx,subset) \ + (2*((subset[(idx)>>5]&(1 << ((idx) & 31)))==0)-1) + +struct CvDTreeSplit +{ + int var_idx; + int condensed_idx; + int inversed; + float quality; + CvDTreeSplit* next; + union + { + int subset[2]; + struct + { + float c; + int split_point; + } + ord; + }; +}; + +struct CvDTreeNode +{ + int class_idx; + int Tn; + double value; + + CvDTreeNode* parent; + CvDTreeNode* left; + CvDTreeNode* right; + + CvDTreeSplit* split; + + int sample_count; + int depth; + int* num_valid; + int offset; + int buf_idx; + double maxlr; + + // global pruning data + int complexity; + double alpha; + double node_risk, tree_risk, tree_error; + + // cross-validation pruning data + int* cv_Tn; + double* cv_node_risk; + double* cv_node_error; + + int get_num_valid(int vi) { return num_valid ? 
num_valid[vi] : sample_count; } + void set_num_valid(int vi, int n) { if( num_valid ) num_valid[vi] = n; } +}; + + +struct CV_EXPORTS_W_MAP CvDTreeParams +{ + CV_PROP_RW int max_categories; + CV_PROP_RW int max_depth; + CV_PROP_RW int min_sample_count; + CV_PROP_RW int cv_folds; + CV_PROP_RW bool use_surrogates; + CV_PROP_RW bool use_1se_rule; + CV_PROP_RW bool truncate_pruned_tree; + CV_PROP_RW float regression_accuracy; + const float* priors; + + CvDTreeParams(); + CvDTreeParams( int max_depth, int min_sample_count, + float regression_accuracy, bool use_surrogates, + int max_categories, int cv_folds, + bool use_1se_rule, bool truncate_pruned_tree, + const float* priors ); +}; + + +struct CV_EXPORTS CvDTreeTrainData +{ + CvDTreeTrainData(); + CvDTreeTrainData( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + const CvDTreeParams& params=CvDTreeParams(), + bool _shared=false, bool _add_labels=false ); + virtual ~CvDTreeTrainData(); + + virtual void set_data( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + const CvDTreeParams& params=CvDTreeParams(), + bool _shared=false, bool _add_labels=false, + bool _update_data=false ); + virtual void do_responses_copy(); + + virtual void get_vectors( const CvMat* _subsample_idx, + float* values, uchar* missing, float* responses, bool get_class_idx=false ); + + virtual CvDTreeNode* subsample_data( const CvMat* _subsample_idx ); + + virtual void write_params( CvFileStorage* fs ) const; + virtual void read_params( CvFileStorage* fs, CvFileNode* node ); + + // release all the data + virtual void clear(); + + int get_num_classes() const; + int get_var_type(int vi) const; + int get_work_var_count() const {return work_var_count;} + + virtual const float* get_ord_responses( CvDTreeNode* n, float* values_buf, int* sample_indices_buf ); + virtual const int* get_class_labels( CvDTreeNode* n, int* labels_buf ); + virtual const int* get_cv_labels( CvDTreeNode* n, int* labels_buf ); + virtual const int* get_sample_indices( CvDTreeNode* n, int* indices_buf ); + virtual const int* get_cat_var_data( CvDTreeNode* n, int vi, int* cat_values_buf ); + virtual void get_ord_var_data( CvDTreeNode* n, int vi, float* ord_values_buf, int* sorted_indices_buf, + const float** ord_values, const int** sorted_indices, int* sample_indices_buf ); + virtual int get_child_buf_idx( CvDTreeNode* n ); + + //////////////////////////////////// + + virtual bool set_params( const CvDTreeParams& params ); + virtual CvDTreeNode* new_node( CvDTreeNode* parent, int count, + int storage_idx, int offset ); + + virtual CvDTreeSplit* new_split_ord( int vi, float cmp_val, + int split_point, int inversed, float quality ); + virtual CvDTreeSplit* new_split_cat( int vi, float quality ); + virtual void free_node_data( CvDTreeNode* node ); + virtual void free_train_data(); + virtual void free_node( CvDTreeNode* node ); + + int sample_count, var_all, var_count, max_c_count; + int ord_var_count, cat_var_count, work_var_count; + bool have_labels, have_priors; + bool is_classifier; + int tflag; + + const CvMat* train_data; + const CvMat* responses; + CvMat* responses_copy; // used in Boosting + + int buf_count, buf_size; // buf_size is obsolete, please do not use it, use expression ((int64)buf->rows * (int64)buf->cols / buf_count) instead + bool shared; + int is_buf_16u; 
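+    // --- Editor's note: illustrative sketch only, not part of the upstream header.
+    // --- A typical construction of the CvDTreeParams struct declared above; every
+    // --- value is a placeholder choice, not a recommended default:
+    //
+    //     CvDTreeParams params( /*max_depth=*/8, /*min_sample_count=*/10,
+    //                           /*regression_accuracy=*/0.01f, /*use_surrogates=*/false,
+    //                           /*max_categories=*/10, /*cv_folds=*/0,
+    //                           /*use_1se_rule=*/false, /*truncate_pruned_tree=*/true,
+    //                           /*priors=*/0 );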
+ + CvMat* cat_count; + CvMat* cat_ofs; + CvMat* cat_map; + + CvMat* counts; + CvMat* buf; + inline size_t get_length_subbuf() const + { + size_t res = (size_t)(work_var_count + 1) * (size_t)sample_count; + return res; + } + + CvMat* direction; + CvMat* split_buf; + + CvMat* var_idx; + CvMat* var_type; // i-th element = + // k<0 - ordered + // k>=0 - categorical, see k-th element of cat_* arrays + CvMat* priors; + CvMat* priors_mult; + + CvDTreeParams params; + + CvMemStorage* tree_storage; + CvMemStorage* temp_storage; + + CvDTreeNode* data_root; + + CvSet* node_heap; + CvSet* split_heap; + CvSet* cv_heap; + CvSet* nv_heap; + + cv::RNG* rng; +}; + +class CvDTree; +class CvForestTree; + +namespace cv +{ + struct DTreeBestSplitFinder; + struct ForestTreeBestSplitFinder; +} + +class CV_EXPORTS_W CvDTree : public CvStatModel +{ +public: + CV_WRAP CvDTree(); + virtual ~CvDTree(); + + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvDTreeParams params=CvDTreeParams() ); + + virtual bool train( CvMLData* trainData, CvDTreeParams params=CvDTreeParams() ); + + // type in {CV_TRAIN_ERROR, CV_TEST_ERROR} + virtual float calc_error( CvMLData* trainData, int type, std::vector *resp = 0 ); + + virtual bool train( CvDTreeTrainData* trainData, const CvMat* subsampleIdx ); + + virtual CvDTreeNode* predict( const CvMat* sample, const CvMat* missingDataMask=0, + bool preprocessedInput=false ) const; + + CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvDTreeParams params=CvDTreeParams() ); + + CV_WRAP virtual CvDTreeNode* predict( const cv::Mat& sample, const cv::Mat& missingDataMask=cv::Mat(), + bool preprocessedInput=false ) const; + CV_WRAP virtual cv::Mat getVarImportance(); + + virtual const CvMat* get_var_importance(); + CV_WRAP virtual void clear(); + + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void write( CvFileStorage* fs, const char* name ) const; + + // special read & write methods for trees in the tree ensembles + virtual void read( CvFileStorage* fs, CvFileNode* node, + CvDTreeTrainData* data ); + virtual void write( CvFileStorage* fs ) const; + + const CvDTreeNode* get_root() const; + int get_pruned_tree_idx() const; + CvDTreeTrainData* get_data(); + +protected: + friend struct cv::DTreeBestSplitFinder; + + virtual bool do_train( const CvMat* _subsample_idx ); + + virtual void try_split_node( CvDTreeNode* n ); + virtual void split_node_data( CvDTreeNode* n ); + virtual CvDTreeSplit* find_best_split( CvDTreeNode* n ); + virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_surrogate_split_ord( CvDTreeNode* n, int vi, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_surrogate_split_cat( CvDTreeNode* 
n, int vi, uchar* ext_buf = 0 ); + virtual double calc_node_dir( CvDTreeNode* node ); + virtual void complete_node_dir( CvDTreeNode* node ); + virtual void cluster_categories( const int* vectors, int vector_count, + int var_count, int* sums, int k, int* cluster_labels ); + + virtual void calc_node_value( CvDTreeNode* node ); + + virtual void prune_cv(); + virtual double update_tree_rnc( int T, int fold ); + virtual int cut_tree( int T, int fold, double min_alpha ); + virtual void free_prune_data(bool cut_tree); + virtual void free_tree(); + + virtual void write_node( CvFileStorage* fs, CvDTreeNode* node ) const; + virtual void write_split( CvFileStorage* fs, CvDTreeSplit* split ) const; + virtual CvDTreeNode* read_node( CvFileStorage* fs, CvFileNode* node, CvDTreeNode* parent ); + virtual CvDTreeSplit* read_split( CvFileStorage* fs, CvFileNode* node ); + virtual void write_tree_nodes( CvFileStorage* fs ) const; + virtual void read_tree_nodes( CvFileStorage* fs, CvFileNode* node ); + + CvDTreeNode* root; + CvMat* var_importance; + CvDTreeTrainData* data; + +public: + int pruned_tree_idx; +}; + + +/****************************************************************************************\ +* Random Trees Classifier * +\****************************************************************************************/ + +class CvRTrees; + +class CV_EXPORTS CvForestTree: public CvDTree +{ +public: + CvForestTree(); + virtual ~CvForestTree(); + + virtual bool train( CvDTreeTrainData* trainData, const CvMat* _subsample_idx, CvRTrees* forest ); + + virtual int get_var_count() const {return data ? data->var_count : 0;} + virtual void read( CvFileStorage* fs, CvFileNode* node, CvRTrees* forest, CvDTreeTrainData* _data ); + + /* dummy methods to avoid warnings: BEGIN */ + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvDTreeParams params=CvDTreeParams() ); + + virtual bool train( CvDTreeTrainData* trainData, const CvMat* _subsample_idx ); + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void read( CvFileStorage* fs, CvFileNode* node, + CvDTreeTrainData* data ); + /* dummy methods to avoid warnings: END */ + +protected: + friend struct cv::ForestTreeBestSplitFinder; + + virtual CvDTreeSplit* find_best_split( CvDTreeNode* n ); + CvRTrees* forest; +}; + + +struct CV_EXPORTS_W_MAP CvRTParams : public CvDTreeParams +{ + //Parameters for the forest + CV_PROP_RW bool calc_var_importance; // true <=> RF processes variable importance + CV_PROP_RW int nactive_vars; + CV_PROP_RW CvTermCriteria term_crit; + + CvRTParams(); + CvRTParams( int max_depth, int min_sample_count, + float regression_accuracy, bool use_surrogates, + int max_categories, const float* priors, bool calc_var_importance, + int nactive_vars, int max_num_of_trees_in_the_forest, + float forest_accuracy, int termcrit_type ); +}; + + +class CV_EXPORTS_W CvRTrees : public CvStatModel +{ +public: + CV_WRAP CvRTrees(); + virtual ~CvRTrees(); + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvRTParams params=CvRTParams() ); + + virtual bool train( CvMLData* data, CvRTParams params=CvRTParams() ); + virtual float predict( const CvMat* sample, const CvMat* missing = 0 ) const; + virtual float predict_prob( const CvMat* sample, const CvMat* missing = 0 ) 
const; + + CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvRTParams params=CvRTParams() ); + CV_WRAP virtual float predict( const cv::Mat& sample, const cv::Mat& missing = cv::Mat() ) const; + CV_WRAP virtual float predict_prob( const cv::Mat& sample, const cv::Mat& missing = cv::Mat() ) const; + CV_WRAP virtual cv::Mat getVarImportance(); + + CV_WRAP virtual void clear(); + + virtual const CvMat* get_var_importance(); + virtual float get_proximity( const CvMat* sample1, const CvMat* sample2, + const CvMat* missing1 = 0, const CvMat* missing2 = 0 ) const; + + virtual float calc_error( CvMLData* data, int type , std::vector* resp = 0 ); // type in {CV_TRAIN_ERROR, CV_TEST_ERROR} + + virtual float get_train_error(); + + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void write( CvFileStorage* fs, const char* name ) const; + + CvMat* get_active_var_mask(); + CvRNG* get_rng(); + + int get_tree_count() const; + CvForestTree* get_tree(int i) const; + +protected: + virtual std::string getName() const; + + virtual bool grow_forest( const CvTermCriteria term_crit ); + + // array of the trees of the forest + CvForestTree** trees; + CvDTreeTrainData* data; + int ntrees; + int nclasses; + double oob_error; + CvMat* var_importance; + int nsamples; + + cv::RNG* rng; + CvMat* active_var_mask; +}; + +/****************************************************************************************\ +* Extremely randomized trees Classifier * +\****************************************************************************************/ +struct CV_EXPORTS CvERTreeTrainData : public CvDTreeTrainData +{ + virtual void set_data( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + const CvDTreeParams& params=CvDTreeParams(), + bool _shared=false, bool _add_labels=false, + bool _update_data=false ); + virtual void get_ord_var_data( CvDTreeNode* n, int vi, float* ord_values_buf, int* missing_buf, + const float** ord_values, const int** missing, int* sample_buf = 0 ); + virtual const int* get_sample_indices( CvDTreeNode* n, int* indices_buf ); + virtual const int* get_cv_labels( CvDTreeNode* n, int* labels_buf ); + virtual const int* get_cat_var_data( CvDTreeNode* n, int vi, int* cat_values_buf ); + virtual void get_vectors( const CvMat* _subsample_idx, float* values, uchar* missing, + float* responses, bool get_class_idx=false ); + virtual CvDTreeNode* subsample_data( const CvMat* _subsample_idx ); + const CvMat* missing_mask; +}; + +class CV_EXPORTS CvForestERTree : public CvForestTree +{ +protected: + virtual double calc_node_dir( CvDTreeNode* node ); + virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual void split_node_data( CvDTreeNode* n ); +}; + +class CV_EXPORTS_W 
CvERTrees : public CvRTrees +{ +public: + CV_WRAP CvERTrees(); + virtual ~CvERTrees(); + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvRTParams params=CvRTParams()); + CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvRTParams params=CvRTParams()); + virtual bool train( CvMLData* data, CvRTParams params=CvRTParams() ); +protected: + virtual std::string getName() const; + virtual bool grow_forest( const CvTermCriteria term_crit ); +}; + + +/****************************************************************************************\ +* Boosted tree classifier * +\****************************************************************************************/ + +struct CV_EXPORTS_W_MAP CvBoostParams : public CvDTreeParams +{ + CV_PROP_RW int boost_type; + CV_PROP_RW int weak_count; + CV_PROP_RW int split_criteria; + CV_PROP_RW double weight_trim_rate; + + CvBoostParams(); + CvBoostParams( int boost_type, int weak_count, double weight_trim_rate, + int max_depth, bool use_surrogates, const float* priors ); +}; + + +class CvBoost; + +class CV_EXPORTS CvBoostTree: public CvDTree +{ +public: + CvBoostTree(); + virtual ~CvBoostTree(); + + virtual bool train( CvDTreeTrainData* trainData, + const CvMat* subsample_idx, CvBoost* ensemble ); + + virtual void scale( double s ); + virtual void read( CvFileStorage* fs, CvFileNode* node, + CvBoost* ensemble, CvDTreeTrainData* _data ); + virtual void clear(); + + /* dummy methods to avoid warnings: BEGIN */ + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvDTreeParams params=CvDTreeParams() ); + virtual bool train( CvDTreeTrainData* trainData, const CvMat* _subsample_idx ); + + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void read( CvFileStorage* fs, CvFileNode* node, + CvDTreeTrainData* data ); + /* dummy methods to avoid warnings: END */ + +protected: + + virtual void try_split_node( CvDTreeNode* n ); + virtual CvDTreeSplit* find_surrogate_split_ord( CvDTreeNode* n, int vi, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_surrogate_split_cat( CvDTreeNode* n, int vi, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_class( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi, + float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 ); + virtual void calc_node_value( CvDTreeNode* n ); + virtual double calc_node_dir( CvDTreeNode* n ); + + CvBoost* ensemble; +}; + + +class CV_EXPORTS_W CvBoost : public CvStatModel +{ +public: + // Boosting type + enum { DISCRETE=0, REAL=1, LOGIT=2, GENTLE=3 }; + + // Splitting criteria + enum { DEFAULT=0, GINI=1, MISCLASS=3, SQERR=4 }; + + CV_WRAP CvBoost(); + virtual ~CvBoost(); + + CvBoost( const CvMat* trainData, 
int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvBoostParams params=CvBoostParams() ); + + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvBoostParams params=CvBoostParams(), + bool update=false ); + + virtual bool train( CvMLData* data, + CvBoostParams params=CvBoostParams(), + bool update=false ); + + virtual float predict( const CvMat* sample, const CvMat* missing=0, + CvMat* weak_responses=0, CvSlice slice=CV_WHOLE_SEQ, + bool raw_mode=false, bool return_sum=false ) const; + + CV_WRAP CvBoost( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvBoostParams params=CvBoostParams() ); + + CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvBoostParams params=CvBoostParams(), + bool update=false ); + + CV_WRAP virtual float predict( const cv::Mat& sample, const cv::Mat& missing=cv::Mat(), + const cv::Range& slice=cv::Range::all(), bool rawMode=false, + bool returnSum=false ) const; + + virtual float calc_error( CvMLData* _data, int type , std::vector *resp = 0 ); // type in {CV_TRAIN_ERROR, CV_TEST_ERROR} + + CV_WRAP virtual void prune( CvSlice slice ); + + CV_WRAP virtual void clear(); + + virtual void write( CvFileStorage* storage, const char* name ) const; + virtual void read( CvFileStorage* storage, CvFileNode* node ); + virtual const CvMat* get_active_vars(bool absolute_idx=true); + + CvSeq* get_weak_predictors(); + + CvMat* get_weights(); + CvMat* get_subtree_weights(); + CvMat* get_weak_response(); + const CvBoostParams& get_params() const; + const CvDTreeTrainData* get_data() const; + +protected: + + void update_weights_impl( CvBoostTree* tree, double initial_weights[2] ); + + virtual bool set_params( const CvBoostParams& params ); + virtual void update_weights( CvBoostTree* tree ); + virtual void trim_weights(); + virtual void write_params( CvFileStorage* fs ) const; + virtual void read_params( CvFileStorage* fs, CvFileNode* node ); + + CvDTreeTrainData* data; + CvBoostParams params; + CvSeq* weak; + + CvMat* active_vars; + CvMat* active_vars_abs; + bool have_active_cat_vars; + + CvMat* orig_response; + CvMat* sum_response; + CvMat* weak_eval; + CvMat* subsample_mask; + CvMat* weights; + CvMat* subtree_weights; + bool have_subsample; +}; + + +/****************************************************************************************\ +* Gradient Boosted Trees * +\****************************************************************************************/ + +// DataType: STRUCT CvGBTreesParams +// Parameters of GBT (Gradient Boosted trees model), including single +// tree settings and ensemble parameters. +// +// weak_count - count of trees in the ensemble +// loss_function_type - loss function used for ensemble training +// subsample_portion - portion of whole training set used for +// every single tree training. +// subsample_portion value is in (0.0, 1.0]. +// subsample_portion == 1.0 when whole dataset is +// used on each step. 
Count of sample used on each +// step is computed as +// int(total_samples_count * subsample_portion). +// shrinkage - regularization parameter. +// Each tree prediction is multiplied on shrinkage value. + + +struct CV_EXPORTS_W_MAP CvGBTreesParams : public CvDTreeParams +{ + CV_PROP_RW int weak_count; + CV_PROP_RW int loss_function_type; + CV_PROP_RW float subsample_portion; + CV_PROP_RW float shrinkage; + + CvGBTreesParams(); + CvGBTreesParams( int loss_function_type, int weak_count, float shrinkage, + float subsample_portion, int max_depth, bool use_surrogates ); +}; + +// DataType: CLASS CvGBTrees +// Gradient Boosting Trees (GBT) algorithm implementation. +// +// data - training dataset +// params - parameters of the CvGBTrees +// weak - array[0..(class_count-1)] of CvSeq +// for storing tree ensembles +// orig_response - original responses of the training set samples +// sum_response - predicitons of the current model on the training dataset. +// this matrix is updated on every iteration. +// sum_response_tmp - predicitons of the model on the training set on the next +// step. On every iteration values of sum_responses_tmp are +// computed via sum_responses values. When the current +// step is complete sum_response values become equal to +// sum_responses_tmp. +// sampleIdx - indices of samples used for training the ensemble. +// CvGBTrees training procedure takes a set of samples +// (train_data) and a set of responses (responses). +// Only pairs (train_data[i], responses[i]), where i is +// in sample_idx are used for training the ensemble. +// subsample_train - indices of samples used for training a single decision +// tree on the current step. This indices are countered +// relatively to the sample_idx, so that pairs +// (train_data[sample_idx[i]], responses[sample_idx[i]]) +// are used for training a decision tree. +// Training set is randomly splited +// in two parts (subsample_train and subsample_test) +// on every iteration accordingly to the portion parameter. +// subsample_test - relative indices of samples from the training set, +// which are not used for training a tree on the current +// step. +// missing - mask of the missing values in the training set. This +// matrix has the same size as train_data. 1 - missing +// value, 0 - not a missing value. +// class_labels - output class labels map. +// rng - random number generator. Used for spliting the +// training set. +// class_count - count of output classes. +// class_count == 1 in the case of regression, +// and > 1 in the case of classification. +// delta - Huber loss function parameter. +// base_value - start point of the gradient descent procedure. +// model prediction is +// f(x) = f_0 + sum_{i=1..weak_count-1}(f_i(x)), where +// f_0 is the base value. + + + +class CV_EXPORTS_W CvGBTrees : public CvStatModel +{ +public: + + /* + // DataType: ENUM + // Loss functions implemented in CvGBTrees. + // + // SQUARED_LOSS + // problem: regression + // loss = (x - x')^2 + // + // ABSOLUTE_LOSS + // problem: regression + // loss = abs(x - x') + // + // HUBER_LOSS + // problem: regression + // loss = delta*( abs(x - x') - delta/2), if abs(x - x') > delta + // 1/2*(x - x')^2, if abs(x - x') <= delta, + // where delta is the alpha-quantile of pseudo responses from + // the training set. + // + // DEVIANCE_LOSS + // problem: classification + // + */ + enum {SQUARED_LOSS=0, ABSOLUTE_LOSS, HUBER_LOSS=3, DEVIANCE_LOSS}; + + + /* + // Default constructor. Creates a model only (without training). 
+ // Should be followed by one form of the train(...) function. + // + // API + // CvGBTrees(); + + // INPUT + // OUTPUT + // RESULT + */ + CV_WRAP CvGBTrees(); + + + /* + // Full form constructor. Creates a gradient boosting model and does the + // train. + // + // API + // CvGBTrees( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvGBTreesParams params=CvGBTreesParams() ); + + // INPUT + // trainData - a set of input feature vectors. + // size of matrix is + // x + // or x + // depending on the tflag parameter. + // matrix values are float. + // tflag - a flag showing how do samples stored in the + // trainData matrix row by row (tflag=CV_ROW_SAMPLE) + // or column by column (tflag=CV_COL_SAMPLE). + // responses - a vector of responses corresponding to the samples + // in trainData. + // varIdx - indices of used variables. zero value means that all + // variables are active. + // sampleIdx - indices of used samples. zero value means that all + // samples from trainData are in the training set. + // varType - vector of length. gives every + // variable type CV_VAR_CATEGORICAL or CV_VAR_ORDERED. + // varType = 0 means all variables are numerical. + // missingDataMask - a mask of misiing values in trainData. + // missingDataMask = 0 means that there are no missing + // values. + // params - parameters of GTB algorithm. + // OUTPUT + // RESULT + */ + CvGBTrees( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvGBTreesParams params=CvGBTreesParams() ); + + + /* + // Destructor. + */ + virtual ~CvGBTrees(); + + + /* + // Gradient tree boosting model training + // + // API + // virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvGBTreesParams params=CvGBTreesParams(), + bool update=false ); + + // INPUT + // trainData - a set of input feature vectors. + // size of matrix is + // x + // or x + // depending on the tflag parameter. + // matrix values are float. + // tflag - a flag showing how do samples stored in the + // trainData matrix row by row (tflag=CV_ROW_SAMPLE) + // or column by column (tflag=CV_COL_SAMPLE). + // responses - a vector of responses corresponding to the samples + // in trainData. + // varIdx - indices of used variables. zero value means that all + // variables are active. + // sampleIdx - indices of used samples. zero value means that all + // samples from trainData are in the training set. + // varType - vector of length. gives every + // variable type CV_VAR_CATEGORICAL or CV_VAR_ORDERED. + // varType = 0 means all variables are numerical. + // missingDataMask - a mask of misiing values in trainData. + // missingDataMask = 0 means that there are no missing + // values. + // params - parameters of GTB algorithm. + // update - is not supported now. (!) + // OUTPUT + // RESULT + // Error state. 
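+    //
+    // EXAMPLE (editor's illustrative sketch, not part of the upstream header;
+    //          "data" and "responses" stand for CvMat* variables prepared by the caller):
+    //
+    //   CvGBTrees gbt;
+    //   bool ok = gbt.train( data, CV_ROW_SAMPLE, responses );
+    //   // the remaining arguments keep their defaults (all variables/samples used,
+    //   // default CvGBTreesParams, update=false)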
+ */ + virtual bool train( const CvMat* trainData, int tflag, + const CvMat* responses, const CvMat* varIdx=0, + const CvMat* sampleIdx=0, const CvMat* varType=0, + const CvMat* missingDataMask=0, + CvGBTreesParams params=CvGBTreesParams(), + bool update=false ); + + + /* + // Gradient tree boosting model training + // + // API + // virtual bool train( CvMLData* data, + CvGBTreesParams params=CvGBTreesParams(), + bool update=false ) {return false;}; + + // INPUT + // data - training set. + // params - parameters of GTB algorithm. + // update - is not supported now. (!) + // OUTPUT + // RESULT + // Error state. + */ + virtual bool train( CvMLData* data, + CvGBTreesParams params=CvGBTreesParams(), + bool update=false ); + + + /* + // Response value prediction + // + // API + // virtual float predict_serial( const CvMat* sample, const CvMat* missing=0, + CvMat* weak_responses=0, CvSlice slice = CV_WHOLE_SEQ, + int k=-1 ) const; + + // INPUT + // sample - input sample of the same type as in the training set. + // missing - missing values mask. missing=0 if there are no + // missing values in sample vector. + // weak_responses - predictions of all of the trees. + // not implemented (!) + // slice - part of the ensemble used for prediction. + // slice = CV_WHOLE_SEQ when all trees are used. + // k - number of ensemble used. + // k is in {-1,0,1,..,}. + // in the case of classification problem + // ensembles are built. + // If k = -1 ordinary prediction is the result, + // otherwise function gives the prediction of the + // k-th ensemble only. + // OUTPUT + // RESULT + // Predicted value. + */ + virtual float predict_serial( const CvMat* sample, const CvMat* missing=0, + CvMat* weakResponses=0, CvSlice slice = CV_WHOLE_SEQ, + int k=-1 ) const; + + /* + // Response value prediction. + // Parallel version (in the case of TBB existence) + // + // API + // virtual float predict( const CvMat* sample, const CvMat* missing=0, + CvMat* weak_responses=0, CvSlice slice = CV_WHOLE_SEQ, + int k=-1 ) const; + + // INPUT + // sample - input sample of the same type as in the training set. + // missing - missing values mask. missing=0 if there are no + // missing values in sample vector. + // weak_responses - predictions of all of the trees. + // not implemented (!) + // slice - part of the ensemble used for prediction. + // slice = CV_WHOLE_SEQ when all trees are used. + // k - number of ensemble used. + // k is in {-1,0,1,..,}. + // in the case of classification problem + // ensembles are built. + // If k = -1 ordinary prediction is the result, + // otherwise function gives the prediction of the + // k-th ensemble only. + // OUTPUT + // RESULT + // Predicted value. + */ + virtual float predict( const CvMat* sample, const CvMat* missing=0, + CvMat* weakResponses=0, CvSlice slice = CV_WHOLE_SEQ, + int k=-1 ) const; + + /* + // Deletes all the data. + // + // API + // virtual void clear(); + + // INPUT + // OUTPUT + // delete data, weak, orig_response, sum_response, + // weak_eval, subsample_train, subsample_test, + // sample_idx, missing, lass_labels + // delta = 0.0 + // RESULT + */ + CV_WRAP virtual void clear(); + + /* + // Compute error on the train/test set. + // + // API + // virtual float calc_error( CvMLData* _data, int type, + // std::vector *resp = 0 ); + // + // INPUT + // data - dataset + // type - defines which error is to compute: train (CV_TRAIN_ERROR) or + // test (CV_TEST_ERROR). + // OUTPUT + // resp - vector of predicitons + // RESULT + // Error value. 
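+    //
+    // EXAMPLE (editor's illustrative sketch, not part of the upstream header;
+    //          "gbt" is a trained CvGBTrees model and "mlData" a CvMLData* whose
+    //          train/test split has already been set):
+    //
+    //   float test_error = gbt.calc_error( mlData, CV_TEST_ERROR );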
+ */ + virtual float calc_error( CvMLData* _data, int type, + std::vector *resp = 0 ); + + /* + // + // Write parameters of the gtb model and data. Write learned model. + // + // API + // virtual void write( CvFileStorage* fs, const char* name ) const; + // + // INPUT + // fs - file storage to read parameters from. + // name - model name. + // OUTPUT + // RESULT + */ + virtual void write( CvFileStorage* fs, const char* name ) const; + + + /* + // + // Read parameters of the gtb model and data. Read learned model. + // + // API + // virtual void read( CvFileStorage* fs, CvFileNode* node ); + // + // INPUT + // fs - file storage to read parameters from. + // node - file node. + // OUTPUT + // RESULT + */ + virtual void read( CvFileStorage* fs, CvFileNode* node ); + + + // new-style C++ interface + CV_WRAP CvGBTrees( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvGBTreesParams params=CvGBTreesParams() ); + + CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag, + const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(), + const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), + const cv::Mat& missingDataMask=cv::Mat(), + CvGBTreesParams params=CvGBTreesParams(), + bool update=false ); + + CV_WRAP virtual float predict( const cv::Mat& sample, const cv::Mat& missing=cv::Mat(), + const cv::Range& slice = cv::Range::all(), + int k=-1 ) const; + +protected: + + /* + // Compute the gradient vector components. + // + // API + // virtual void find_gradient( const int k = 0); + + // INPUT + // k - used for classification problem, determining current + // tree ensemble. + // OUTPUT + // changes components of data->responses + // which correspond to samples used for training + // on the current step. + // RESULT + */ + virtual void find_gradient( const int k = 0); + + + /* + // + // Change values in tree leaves according to the used loss function. + // + // API + // virtual void change_values(CvDTree* tree, const int k = 0); + // + // INPUT + // tree - decision tree to change. + // k - used for classification problem, determining current + // tree ensemble. + // OUTPUT + // changes 'value' fields of the trees' leaves. + // changes sum_response_tmp. + // RESULT + */ + virtual void change_values(CvDTree* tree, const int k = 0); + + + /* + // + // Find optimal constant prediction value according to the used loss + // function. + // The goal is to find a constant which gives the minimal summary loss + // on the _Idx samples. + // + // API + // virtual float find_optimal_value( const CvMat* _Idx ); + // + // INPUT + // _Idx - indices of the samples from the training set. + // OUTPUT + // RESULT + // optimal constant value. + */ + virtual float find_optimal_value( const CvMat* _Idx ); + + + /* + // + // Randomly split the whole training set in two parts according + // to params.portion. + // + // API + // virtual void do_subsample(); + // + // INPUT + // OUTPUT + // subsample_train - indices of samples used for training + // subsample_test - indices of samples used for test + // RESULT + */ + virtual void do_subsample(); + + + /* + // + // Internal recursive function giving an array of subtree tree leaves. + // + // API + // void leaves_get( CvDTreeNode** leaves, int& count, CvDTreeNode* node ); + // + // INPUT + // node - current leaf. + // OUTPUT + // count - count of leaves in the subtree. 
+ // leaves - array of pointers to leaves. + // RESULT + */ + void leaves_get( CvDTreeNode** leaves, int& count, CvDTreeNode* node ); + + + /* + // + // Get leaves of the tree. + // + // API + // CvDTreeNode** GetLeaves( const CvDTree* dtree, int& len ); + // + // INPUT + // dtree - decision tree. + // OUTPUT + // len - count of the leaves. + // RESULT + // CvDTreeNode** - array of pointers to leaves. + */ + CvDTreeNode** GetLeaves( const CvDTree* dtree, int& len ); + + + /* + // + // Is it a regression or a classification. + // + // API + // bool problem_type(); + // + // INPUT + // OUTPUT + // RESULT + // false if it is a classification problem, + // true - if regression. + */ + virtual bool problem_type() const; + + + /* + // + // Write parameters of the gtb model. + // + // API + // virtual void write_params( CvFileStorage* fs ) const; + // + // INPUT + // fs - file storage to write parameters to. + // OUTPUT + // RESULT + */ + virtual void write_params( CvFileStorage* fs ) const; + + + /* + // + // Read parameters of the gtb model and data. + // + // API + // virtual void read_params( CvFileStorage* fs ); + // + // INPUT + // fs - file storage to read parameters from. + // OUTPUT + // params - parameters of the gtb model. + // data - contains information about the structure + // of the data set (count of variables, + // their types, etc.). + // class_labels - output class labels map. + // RESULT + */ + virtual void read_params( CvFileStorage* fs, CvFileNode* fnode ); + int get_len(const CvMat* mat) const; + + + CvDTreeTrainData* data; + CvGBTreesParams params; + + CvSeq** weak; + CvMat* orig_response; + CvMat* sum_response; + CvMat* sum_response_tmp; + CvMat* sample_idx; + CvMat* subsample_train; + CvMat* subsample_test; + CvMat* missing; + CvMat* class_labels; + + cv::RNG* rng; + + int class_count; + float delta; + float base_value; + +}; + + + +/****************************************************************************************\ +* Artificial Neural Networks (ANN) * +\****************************************************************************************/ + +/////////////////////////////////// Multi-Layer Perceptrons ////////////////////////////// + +struct CV_EXPORTS_W_MAP CvANN_MLP_TrainParams +{ + CvANN_MLP_TrainParams(); + CvANN_MLP_TrainParams( CvTermCriteria term_crit, int train_method, + double param1, double param2=0 ); + ~CvANN_MLP_TrainParams(); + + enum { BACKPROP=0, RPROP=1 }; + + CV_PROP_RW CvTermCriteria term_crit; + CV_PROP_RW int train_method; + + // backpropagation parameters + CV_PROP_RW double bp_dw_scale, bp_moment_scale; + + // rprop parameters + CV_PROP_RW double rp_dw0, rp_dw_plus, rp_dw_minus, rp_dw_min, rp_dw_max; +}; + + +class CV_EXPORTS_W CvANN_MLP : public CvStatModel +{ +public: + CV_WRAP CvANN_MLP(); + CvANN_MLP( const CvMat* layerSizes, + int activateFunc=CvANN_MLP::SIGMOID_SYM, + double fparam1=0, double fparam2=0 ); + + virtual ~CvANN_MLP(); + + virtual void create( const CvMat* layerSizes, + int activateFunc=CvANN_MLP::SIGMOID_SYM, + double fparam1=0, double fparam2=0 ); + + virtual int train( const CvMat* inputs, const CvMat* outputs, + const CvMat* sampleWeights, const CvMat* sampleIdx=0, + CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(), + int flags=0 ); + virtual float predict( const CvMat* inputs, CV_OUT CvMat* outputs ) const; + + CV_WRAP CvANN_MLP( const cv::Mat& layerSizes, + int activateFunc=CvANN_MLP::SIGMOID_SYM, + double fparam1=0, double fparam2=0 ); + + CV_WRAP virtual void create( const cv::Mat& layerSizes, + int 
activateFunc=CvANN_MLP::SIGMOID_SYM, + double fparam1=0, double fparam2=0 ); + + CV_WRAP virtual int train( const cv::Mat& inputs, const cv::Mat& outputs, + const cv::Mat& sampleWeights, const cv::Mat& sampleIdx=cv::Mat(), + CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(), + int flags=0 ); + + CV_WRAP virtual float predict( const cv::Mat& inputs, CV_OUT cv::Mat& outputs ) const; + + CV_WRAP virtual void clear(); + + // possible activation functions + enum { IDENTITY = 0, SIGMOID_SYM = 1, GAUSSIAN = 2 }; + + // available training flags + enum { UPDATE_WEIGHTS = 1, NO_INPUT_SCALE = 2, NO_OUTPUT_SCALE = 4 }; + + virtual void read( CvFileStorage* fs, CvFileNode* node ); + virtual void write( CvFileStorage* storage, const char* name ) const; + + int get_layer_count() { return layer_sizes ? layer_sizes->cols : 0; } + const CvMat* get_layer_sizes() { return layer_sizes; } + double* get_weights(int layer) + { + return layer_sizes && weights && + (unsigned)layer <= (unsigned)layer_sizes->cols ? weights[layer] : 0; + } + + virtual void calc_activ_func_deriv( CvMat* xf, CvMat* deriv, const double* bias ) const; + +protected: + + virtual bool prepare_to_train( const CvMat* _inputs, const CvMat* _outputs, + const CvMat* _sample_weights, const CvMat* sampleIdx, + CvVectors* _ivecs, CvVectors* _ovecs, double** _sw, int _flags ); + + // sequential random backpropagation + virtual int train_backprop( CvVectors _ivecs, CvVectors _ovecs, const double* _sw ); + + // RPROP algorithm + virtual int train_rprop( CvVectors _ivecs, CvVectors _ovecs, const double* _sw ); + + virtual void calc_activ_func( CvMat* xf, const double* bias ) const; + virtual void set_activ_func( int _activ_func=SIGMOID_SYM, + double _f_param1=0, double _f_param2=0 ); + virtual void init_weights(); + virtual void scale_input( const CvMat* _src, CvMat* _dst ) const; + virtual void scale_output( const CvMat* _src, CvMat* _dst ) const; + virtual void calc_input_scale( const CvVectors* vecs, int flags ); + virtual void calc_output_scale( const CvVectors* vecs, int flags ); + + virtual void write_params( CvFileStorage* fs ) const; + virtual void read_params( CvFileStorage* fs, CvFileNode* node ); + + CvMat* layer_sizes; + CvMat* wbuf; + CvMat* sample_weights; + double** weights; + double f_param1, f_param2; + double min_val, max_val, min_val1, max_val1; + int activ_func; + int max_count, max_buf_sz; + CvANN_MLP_TrainParams params; + cv::RNG* rng; +}; + +/****************************************************************************************\ +* Auxilary functions declarations * +\****************************************************************************************/ + +/* Generates from multivariate normal distribution, where - is an + average row vector, - symmetric covariation matrix */ +CVAPI(void) cvRandMVNormal( CvMat* mean, CvMat* cov, CvMat* sample, + CvRNG* rng CV_DEFAULT(0) ); + +/* Generates sample from gaussian mixture distribution */ +CVAPI(void) cvRandGaussMixture( CvMat* means[], + CvMat* covs[], + float weights[], + int clsnum, + CvMat* sample, + CvMat* sampClasses CV_DEFAULT(0) ); + +#define CV_TS_CONCENTRIC_SPHERES 0 + +/* creates test set */ +CVAPI(void) cvCreateTestSet( int type, CvMat** samples, + int num_samples, + int num_features, + CvMat** responses, + int num_classes, ... 
); + +/****************************************************************************************\ +* Data * +\****************************************************************************************/ + +#define CV_COUNT 0 +#define CV_PORTION 1 + +struct CV_EXPORTS CvTrainTestSplit +{ + CvTrainTestSplit(); + CvTrainTestSplit( int train_sample_count, bool mix = true); + CvTrainTestSplit( float train_sample_portion, bool mix = true); + + union + { + int count; + float portion; + } train_sample_part; + int train_sample_part_mode; + + bool mix; +}; + +class CV_EXPORTS CvMLData +{ +public: + CvMLData(); + virtual ~CvMLData(); + + // returns: + // 0 - OK + // -1 - file can not be opened or is not correct + int read_csv( const char* filename ); + + const CvMat* get_values() const; + const CvMat* get_responses(); + const CvMat* get_missing() const; + + void set_response_idx( int idx ); // old response become predictors, new response_idx = idx + // if idx < 0 there will be no response + int get_response_idx() const; + + void set_train_test_split( const CvTrainTestSplit * spl ); + const CvMat* get_train_sample_idx() const; + const CvMat* get_test_sample_idx() const; + void mix_train_and_test_idx(); + + const CvMat* get_var_idx(); + void chahge_var_idx( int vi, bool state ); // misspelled (saved for back compitability), + // use change_var_idx + void change_var_idx( int vi, bool state ); // state == true to set vi-variable as predictor + + const CvMat* get_var_types(); + int get_var_type( int var_idx ) const; + // following 2 methods enable to change vars type + // use these methods to assign CV_VAR_CATEGORICAL type for categorical variable + // with numerical labels; in the other cases var types are correctly determined automatically + void set_var_types( const char* str ); // str examples: + // "ord[0-17],cat[18]", "ord[0,2,4,10-12], cat[1,3,5-9,13,14]", + // "cat", "ord" (all vars are categorical/ordered) + void change_var_type( int var_idx, int type); // type in { CV_VAR_ORDERED, CV_VAR_CATEGORICAL } + + void set_delimiter( char ch ); + char get_delimiter() const; + + void set_miss_ch( char ch ); + char get_miss_ch() const; + + const std::map& get_class_labels_map() const; + +protected: + virtual void clear(); + + void str_to_flt_elem( const char* token, float& flt_elem, int& type); + void free_train_test_idx(); + + char delimiter; + char miss_ch; + //char flt_separator; + + CvMat* values; + CvMat* missing; + CvMat* var_types; + CvMat* var_idx_mask; + + CvMat* response_out; // header + CvMat* var_idx_out; // mat + CvMat* var_types_out; // mat + + int response_idx; + + int train_sample_count; + bool mix; + + int total_class_count; + std::map class_map; + + CvMat* train_sample_idx; + CvMat* test_sample_idx; + int* sample_idx; // data of train_sample_idx and test_sample_idx + + cv::RNG* rng; +}; + + +namespace cv +{ + +typedef CvStatModel StatModel; +typedef CvParamGrid ParamGrid; +typedef CvNormalBayesClassifier NormalBayesClassifier; +typedef CvKNearest KNearest; +typedef CvSVMParams SVMParams; +typedef CvSVMKernel SVMKernel; +typedef CvSVMSolver SVMSolver; +typedef CvSVM SVM; +typedef CvDTreeParams DTreeParams; +typedef CvMLData TrainData; +typedef CvDTree DecisionTree; +typedef CvForestTree ForestTree; +typedef CvRTParams RandomTreeParams; +typedef CvRTrees RandomTrees; +typedef CvERTreeTrainData ERTreeTRainData; +typedef CvForestERTree ERTree; +typedef CvERTrees ERTrees; +typedef CvBoostParams BoostParams; +typedef CvBoostTree BoostTree; +typedef CvBoost Boost; +typedef CvANN_MLP_TrainParams 
ANN_MLP_TrainParams; +typedef CvANN_MLP NeuralNet_MLP; +typedef CvGBTreesParams GradientBoostingTreeParams; +typedef CvGBTrees GradientBoostingTrees; + +template<> CV_EXPORTS void Ptr::delete_obj(); + +CV_EXPORTS bool initModule_ml(void); + +} + +#endif // __cplusplus +#endif // __OPENCV_ML_HPP__ + +/* End of file. */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/nonfree/features2d.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/nonfree/features2d.hpp new file mode 100644 index 0000000..f23bec8 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/nonfree/features2d.hpp @@ -0,0 +1,155 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_NONFREE_FEATURES_2D_HPP__ +#define __OPENCV_NONFREE_FEATURES_2D_HPP__ + +#include "opencv2/features2d/features2d.hpp" + +#ifdef __cplusplus + +namespace cv +{ + +/*! + SIFT implementation. + + The class implements SIFT algorithm by D. Lowe. +*/ +class CV_EXPORTS_W SIFT : public Feature2D +{ +public: + CV_WRAP explicit SIFT( int nfeatures=0, int nOctaveLayers=3, + double contrastThreshold=0.04, double edgeThreshold=10, + double sigma=1.6); + + //! returns the descriptor size in floats (128) + CV_WRAP int descriptorSize() const; + + //! returns the descriptor type + CV_WRAP int descriptorType() const; + + //! 
finds the keypoints using SIFT algorithm + void operator()(InputArray img, InputArray mask, + vector& keypoints) const; + //! finds the keypoints and computes descriptors for them using SIFT algorithm. + //! Optionally it can compute descriptors for the user-provided keypoints + void operator()(InputArray img, InputArray mask, + vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints=false) const; + + AlgorithmInfo* info() const; + + void buildGaussianPyramid( const Mat& base, vector& pyr, int nOctaves ) const; + void buildDoGPyramid( const vector& pyr, vector& dogpyr ) const; + void findScaleSpaceExtrema( const vector& gauss_pyr, const vector& dog_pyr, + vector& keypoints ) const; + +protected: + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; + + CV_PROP_RW int nfeatures; + CV_PROP_RW int nOctaveLayers; + CV_PROP_RW double contrastThreshold; + CV_PROP_RW double edgeThreshold; + CV_PROP_RW double sigma; +}; + +typedef SIFT SiftFeatureDetector; +typedef SIFT SiftDescriptorExtractor; + +/*! + SURF implementation. + + The class implements SURF algorithm by H. Bay et al. + */ +class CV_EXPORTS_W SURF : public Feature2D +{ +public: + //! the default constructor + CV_WRAP SURF(); + //! the full constructor taking all the necessary parameters + explicit CV_WRAP SURF(double hessianThreshold, + int nOctaves=4, int nOctaveLayers=2, + bool extended=true, bool upright=false); + + //! returns the descriptor size in float's (64 or 128) + CV_WRAP int descriptorSize() const; + + //! returns the descriptor type + CV_WRAP int descriptorType() const; + + //! finds the keypoints using fast hessian detector used in SURF + void operator()(InputArray img, InputArray mask, + CV_OUT vector& keypoints) const; + //! finds the keypoints and computes their descriptors. Optionally it can compute descriptors for the user-provided keypoints + void operator()(InputArray img, InputArray mask, + CV_OUT vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints=false) const; + + AlgorithmInfo* info() const; + + CV_PROP_RW double hessianThreshold; + CV_PROP_RW int nOctaves; + CV_PROP_RW int nOctaveLayers; + CV_PROP_RW bool extended; + CV_PROP_RW bool upright; + +protected: + + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; + void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; +}; + +typedef SURF SurfFeatureDetector; +typedef SURF SurfDescriptorExtractor; + +} /* namespace cv */ + +#endif /* __cplusplus */ + +#endif + +/* End of file. */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/nonfree/gpu.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/nonfree/gpu.hpp new file mode 100644 index 0000000..722ef26 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/nonfree/gpu.hpp @@ -0,0 +1,128 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_NONFREE_GPU_HPP__ +#define __OPENCV_NONFREE_GPU_HPP__ + +#include "opencv2/core/gpumat.hpp" + +namespace cv { namespace gpu { + +class CV_EXPORTS SURF_GPU +{ +public: + enum KeypointLayout + { + X_ROW = 0, + Y_ROW, + LAPLACIAN_ROW, + OCTAVE_ROW, + SIZE_ROW, + ANGLE_ROW, + HESSIAN_ROW, + ROWS_COUNT + }; + + //! the default constructor + SURF_GPU(); + //! the full constructor taking all the necessary parameters + explicit SURF_GPU(double _hessianThreshold, int _nOctaves=4, + int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f, bool _upright = false); + + //! returns the descriptor size in float's (64 or 128) + int descriptorSize() const; + + //! upload host keypoints to device memory + void uploadKeypoints(const std::vector& keypoints, GpuMat& keypointsGPU); + //! download keypoints from device to host memory + void downloadKeypoints(const GpuMat& keypointsGPU, std::vector& keypoints); + + //! download descriptors from device to host memory + void downloadDescriptors(const GpuMat& descriptorsGPU, std::vector& descriptors); + + //! finds the keypoints using fast hessian detector used in SURF + //! supports CV_8UC1 images + //! keypoints will have nFeature cols and 6 rows + //! keypoints.ptr(X_ROW)[i] will contain x coordinate of i'th feature + //! keypoints.ptr(Y_ROW)[i] will contain y coordinate of i'th feature + //! keypoints.ptr(LAPLACIAN_ROW)[i] will contain laplacian sign of i'th feature + //! keypoints.ptr(OCTAVE_ROW)[i] will contain octave of i'th feature + //! keypoints.ptr(SIZE_ROW)[i] will contain size of i'th feature + //! keypoints.ptr(ANGLE_ROW)[i] will contain orientation of i'th feature + //! keypoints.ptr(HESSIAN_ROW)[i] will contain response of i'th feature + void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints); + //! finds the keypoints and computes their descriptors. + //! 
Optionally it can compute descriptors for the user-provided keypoints and recompute keypoints direction + void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors, + bool useProvidedKeypoints = false); + + void operator()(const GpuMat& img, const GpuMat& mask, std::vector& keypoints); + void operator()(const GpuMat& img, const GpuMat& mask, std::vector& keypoints, GpuMat& descriptors, + bool useProvidedKeypoints = false); + + void operator()(const GpuMat& img, const GpuMat& mask, std::vector& keypoints, std::vector& descriptors, + bool useProvidedKeypoints = false); + + void releaseMemory(); + + // SURF parameters + double hessianThreshold; + int nOctaves; + int nOctaveLayers; + bool extended; + bool upright; + + //! max keypoints = min(keypointsRatio * img.size().area(), 65535) + float keypointsRatio; + + GpuMat sum, mask1, maskSum, intBuffer; + + GpuMat det, trace; + + GpuMat maxPosBuffer; +}; + +} // namespace gpu + +} // namespace cv + +#endif // __OPENCV_NONFREE_GPU_HPP__ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/nonfree/nonfree.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/nonfree/nonfree.hpp new file mode 100644 index 0000000..c64c566 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/nonfree/nonfree.hpp @@ -0,0 +1,57 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2012, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_NONFREE_HPP__ +#define __OPENCV_NONFREE_HPP__ + +#include "opencv2/nonfree/features2d.hpp" + +namespace cv +{ + +CV_EXPORTS_W bool initModule_nonfree(); + +} + +#endif + +/* End of file. */ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/nonfree/ocl.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/nonfree/ocl.hpp new file mode 100644 index 0000000..ba84d24 --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/nonfree/ocl.hpp @@ -0,0 +1,140 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_NONFREE_OCL_HPP__ +#define __OPENCV_NONFREE_OCL_HPP__ + +#include "opencv2/ocl/ocl.hpp" + +namespace cv +{ + namespace ocl + { + //! Speeded up robust features, port from GPU module. + ////////////////////////////////// SURF ////////////////////////////////////////// + + class CV_EXPORTS SURF_OCL : public cv::Feature2D + { + public: + enum KeypointLayout + { + X_ROW = 0, + Y_ROW, + LAPLACIAN_ROW, + OCTAVE_ROW, + SIZE_ROW, + ANGLE_ROW, + HESSIAN_ROW, + ROWS_COUNT + }; + + //! the default constructor + SURF_OCL(); + //! 
the full constructor taking all the necessary parameters + explicit SURF_OCL(double _hessianThreshold, int _nOctaves = 4, + int _nOctaveLayers = 2, bool _extended = true, float _keypointsRatio = 0.01f, bool _upright = false); + + //! returns the descriptor size in float's (64 or 128) + int descriptorSize() const; + + int descriptorType() const; + + //! upload host keypoints to device memory + void uploadKeypoints(const vector &keypoints, oclMat &keypointsocl); + //! download keypoints from device to host memory + void downloadKeypoints(const oclMat &keypointsocl, vector &keypoints); + //! download descriptors from device to host memory + void downloadDescriptors(const oclMat &descriptorsocl, vector &descriptors); + //! finds the keypoints using fast hessian detector used in SURF + //! supports CV_8UC1 images + //! keypoints will have nFeature cols and 6 rows + //! keypoints.ptr(X_ROW)[i] will contain x coordinate of i'th feature + //! keypoints.ptr(Y_ROW)[i] will contain y coordinate of i'th feature + //! keypoints.ptr(LAPLACIAN_ROW)[i] will contain laplacian sign of i'th feature + //! keypoints.ptr(OCTAVE_ROW)[i] will contain octave of i'th feature + //! keypoints.ptr(SIZE_ROW)[i] will contain size of i'th feature + //! keypoints.ptr(ANGLE_ROW)[i] will contain orientation of i'th feature + //! keypoints.ptr(HESSIAN_ROW)[i] will contain response of i'th feature + void operator()(const oclMat &img, const oclMat &mask, oclMat &keypoints); + //! finds the keypoints and computes their descriptors. + //! Optionally it can compute descriptors for the user-provided keypoints and recompute keypoints direction + void operator()(const oclMat &img, const oclMat &mask, oclMat &keypoints, oclMat &descriptors, + bool useProvidedKeypoints = false); + void operator()(const oclMat &img, const oclMat &mask, std::vector &keypoints); + void operator()(const oclMat &img, const oclMat &mask, std::vector &keypoints, oclMat &descriptors, + bool useProvidedKeypoints = false); + void operator()(const oclMat &img, const oclMat &mask, std::vector &keypoints, std::vector &descriptors, + bool useProvidedKeypoints = false); + + //! finds the keypoints using fast hessian detector used in SURF + void operator()(InputArray img, InputArray mask, + CV_OUT vector& keypoints) const; + //! finds the keypoints and computes their descriptors. Optionally it can compute descriptors for the user-provided keypoints + void operator()(InputArray img, InputArray mask, + CV_OUT vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints=false) const; + + AlgorithmInfo* info() const; + + void releaseMemory(); + + // SURF parameters + float hessianThreshold; + int nOctaves; + int nOctaveLayers; + bool extended; + bool upright; + //! 
max keypoints = min(keypointsRatio * img.size().area(), 65535) + float keypointsRatio; + oclMat sum, mask1, maskSum, intBuffer; + oclMat det, trace; + oclMat maxPosBuffer; + protected: + void detectImpl( const Mat& image, vector& keypoints, const Mat& mask) const; + void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors) const; + }; + } +} + +#endif //__OPENCV_NONFREE_OCL_HPP__ diff --git a/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/objdetect/objdetect.hpp b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/objdetect/objdetect.hpp new file mode 100644 index 0000000..d5d6f0b --- /dev/null +++ b/swix_ios_app/swix_ios_app/swix/objc/opencv2.framework/Versions/A/Headers/objdetect/objdetect.hpp @@ -0,0 +1,1073 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_OBJDETECT_HPP__ +#define __OPENCV_OBJDETECT_HPP__ + +#include "opencv2/core/core.hpp" + +#ifdef __cplusplus +#include +#include + +extern "C" { +#endif + +/****************************************************************************************\ +* Haar-like Object Detection functions * +\****************************************************************************************/ + +#define CV_HAAR_MAGIC_VAL 0x42500000 +#define CV_TYPE_NAME_HAAR "opencv-haar-classifier" + +#define CV_IS_HAAR_CLASSIFIER( haar ) \ + ((haar) != NULL && \ + (((const CvHaarClassifierCascade*)(haar))->flags & CV_MAGIC_MASK)==CV_HAAR_MAGIC_VAL) + +#define CV_HAAR_FEATURE_MAX 3 + +typedef struct CvHaarFeature +{ + int tilted; + struct + { + CvRect r; + float weight; + } rect[CV_HAAR_FEATURE_MAX]; +} CvHaarFeature; + +typedef struct CvHaarClassifier +{ + int count; + CvHaarFeature* haar_feature; + float* threshold; + int* left; + int* right; + float* alpha; +} CvHaarClassifier; + +typedef struct CvHaarStageClassifier +{ + int count; + float threshold; + CvHaarClassifier* classifier; + + int next; + int child; + int parent; +} CvHaarStageClassifier; + +typedef struct CvHidHaarClassifierCascade CvHidHaarClassifierCascade; + +typedef struct CvHaarClassifierCascade +{ + int flags; + int count; + CvSize orig_window_size; + CvSize real_window_size; + double scale; + CvHaarStageClassifier* stage_classifier; + CvHidHaarClassifierCascade* hid_cascade; +} CvHaarClassifierCascade; + +typedef struct CvAvgComp +{ + CvRect rect; + int neighbors; +} CvAvgComp; + +/* Loads haar classifier cascade from a directory. + It is obsolete: convert your cascade to xml and use cvLoad instead */ +CVAPI(CvHaarClassifierCascade*) cvLoadHaarClassifierCascade( + const char* directory, CvSize orig_window_size); + +CVAPI(void) cvReleaseHaarClassifierCascade( CvHaarClassifierCascade** cascade ); + +#define CV_HAAR_DO_CANNY_PRUNING 1 +#define CV_HAAR_SCALE_IMAGE 2 +#define CV_HAAR_FIND_BIGGEST_OBJECT 4 +#define CV_HAAR_DO_ROUGH_SEARCH 8 + +//CVAPI(CvSeq*) cvHaarDetectObjectsForROC( const CvArr* image, +// CvHaarClassifierCascade* cascade, CvMemStorage* storage, +// CvSeq** rejectLevels, CvSeq** levelWeightds, +// double scale_factor CV_DEFAULT(1.1), +// int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0), +// CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0)), +// bool outputRejectLevels = false ); + + +CVAPI(CvSeq*) cvHaarDetectObjects( const CvArr* image, + CvHaarClassifierCascade* cascade, CvMemStorage* storage, + double scale_factor CV_DEFAULT(1.1), + int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0), + CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0))); + +/* sets images for haar classifier cascade */ +CVAPI(void) cvSetImagesForHaarClassifierCascade( CvHaarClassifierCascade* cascade, + const CvArr* sum, const CvArr* sqsum, + const CvArr* tilted_sum, double scale ); + +/* runs the cascade on the specified window */ +CVAPI(int) cvRunHaarClassifierCascade( const CvHaarClassifierCascade* cascade, + CvPoint pt, int start_stage CV_DEFAULT(0)); + + +/****************************************************************************************\ +* Latent SVM Object Detection functions * +\****************************************************************************************/ + +// DataType: STRUCT position +// Structure describes the position of the filter in the feature pyramid +// l - level in the feature pyramid +// (x, y) - coordinate in level l 
+typedef struct CvLSVMFilterPosition +{ + int x; + int y; + int l; +} CvLSVMFilterPosition; + +// DataType: STRUCT filterObject +// Description of the filter, which corresponds to the part of the object +// V - ideal (penalty = 0) position of the partial filter +// from the root filter position (V_i in the paper) +// penaltyFunction - vector describes penalty function (d_i in the paper) +// pf[0] * x + pf[1] * y + pf[2] * x^2 + pf[3] * y^2 +// FILTER DESCRIPTION +// Rectangular map (sizeX x sizeY), +// every cell stores feature vector (dimension = p) +// H - matrix of feature vectors +// to set and get feature vectors (i,j) +// used formula H[(j * sizeX + i) * p + k], where +// k - component of feature vector in cell (i, j) +// END OF FILTER DESCRIPTION +typedef struct CvLSVMFilterObject{ + CvLSVMFilterPosition V; + float fineFunction[4]; + int sizeX; + int sizeY; + int numFeatures; + float *H; +} CvLSVMFilterObject; + +// data type: STRUCT CvLatentSvmDetector +// structure contains internal representation of trained Latent SVM detector +// num_filters - total number of filters (root plus part) in model +// num_components - number of components in model +// num_part_filters - array containing number of part filters for each component +// filters - root and part filters for all model components +// b - biases for all model components +// score_threshold - confidence level threshold +typedef struct CvLatentSvmDetector +{ + int num_filters; + int num_components; + int* num_part_filters; + CvLSVMFilterObject** filters; + float* b; + float score_threshold; +} +CvLatentSvmDetector; + +// data type: STRUCT CvObjectDetection +// structure contains the bounding box and confidence level for detected object +// rect - bounding box for a detected object +// score - confidence level +typedef struct CvObjectDetection +{ + CvRect rect; + float score; +} CvObjectDetection; + +//////////////// Object Detection using Latent SVM ////////////// + + +/* +// load trained detector from a file +// +// API +// CvLatentSvmDetector* cvLoadLatentSvmDetector(const char* filename); +// INPUT +// filename - path to the file containing the parameters of + - trained Latent SVM detector +// OUTPUT +// trained Latent SVM detector in internal representation +*/ +CVAPI(CvLatentSvmDetector*) cvLoadLatentSvmDetector(const char* filename); + +/* +// release memory allocated for CvLatentSvmDetector structure +// +// API +// void cvReleaseLatentSvmDetector(CvLatentSvmDetector** detector); +// INPUT +// detector - CvLatentSvmDetector structure to be released +// OUTPUT +*/ +CVAPI(void) cvReleaseLatentSvmDetector(CvLatentSvmDetector** detector); + +/* +// find rectangular regions in the given image that are likely +// to contain objects and corresponding confidence levels +// +// API +// CvSeq* cvLatentSvmDetectObjects(const IplImage* image, +// CvLatentSvmDetector* detector, +// CvMemStorage* storage, +// float overlap_threshold = 0.5f, +// int numThreads = -1); +// INPUT +// image - image to detect objects in +// detector - Latent SVM detector in internal representation +// storage - memory storage to store the resultant sequence +// of the object candidate rectangles +// overlap_threshold - threshold for the non-maximum suppression algorithm + = 0.5f [here will be the reference to original paper] +// OUTPUT +// sequence of detected objects (bounding boxes and confidence levels stored in CvObjectDetection structures) +*/ +CVAPI(CvSeq*) cvLatentSvmDetectObjects(IplImage* image, + CvLatentSvmDetector* detector, + CvMemStorage* 
storage, + float overlap_threshold CV_DEFAULT(0.5f), + int numThreads CV_DEFAULT(-1)); + +#ifdef __cplusplus +} + +CV_EXPORTS CvSeq* cvHaarDetectObjectsForROC( const CvArr* image, + CvHaarClassifierCascade* cascade, CvMemStorage* storage, + std::vector& rejectLevels, std::vector& levelWeightds, + double scale_factor CV_DEFAULT(1.1), + int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0), + CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0)), + bool outputRejectLevels = false ); + +namespace cv +{ + +///////////////////////////// Object Detection //////////////////////////// + +/* + * This is a class wrapping up the structure CvLatentSvmDetector and functions working with it. + * The class goals are: + * 1) provide c++ interface; + * 2) make it possible to load and detect more than one class (model) unlike CvLatentSvmDetector. + */ +class CV_EXPORTS LatentSvmDetector +{ +public: + struct CV_EXPORTS ObjectDetection + { + ObjectDetection(); + ObjectDetection( const Rect& rect, float score, int classID=-1 ); + Rect rect; + float score; + int classID; + }; + + LatentSvmDetector(); + LatentSvmDetector( const vector& filenames, const vector& classNames=vector() ); + virtual ~LatentSvmDetector(); + + virtual void clear(); + virtual bool empty() const; + bool load( const vector& filenames, const vector& classNames=vector() ); + + virtual void detect( const Mat& image, + vector& objectDetections, + float overlapThreshold=0.5f, + int numThreads=-1 ); + + const vector& getClassNames() const; + size_t getClassCount() const; + +private: + vector detectors; + vector classNames; +}; + +// class for grouping object candidates, detected by Cascade Classifier, HOG etc. +// instance of the class is to be passed to cv::partition (see cxoperations.hpp) +class CV_EXPORTS SimilarRects +{ +public: + SimilarRects(double _eps) : eps(_eps) {} + inline bool operator()(const Rect& r1, const Rect& r2) const + { + double delta = eps*(std::min(r1.width, r2.width) + std::min(r1.height, r2.height))*0.5; + return std::abs(r1.x - r2.x) <= delta && + std::abs(r1.y - r2.y) <= delta && + std::abs(r1.x + r1.width - r2.x - r2.width) <= delta && + std::abs(r1.y + r1.height - r2.y - r2.height) <= delta; + } + double eps; +}; + +CV_EXPORTS void groupRectangles(CV_OUT CV_IN_OUT vector& rectList, int groupThreshold, double eps=0.2); +CV_EXPORTS_W void groupRectangles(CV_OUT CV_IN_OUT vector& rectList, CV_OUT vector& weights, int groupThreshold, double eps=0.2); +CV_EXPORTS void groupRectangles( vector& rectList, int groupThreshold, double eps, vector* weights, vector* levelWeights ); +CV_EXPORTS void groupRectangles(vector& rectList, vector& rejectLevels, + vector& levelWeights, int groupThreshold, double eps=0.2); +CV_EXPORTS void groupRectangles_meanshift(vector& rectList, vector& foundWeights, vector& foundScales, + double detectThreshold = 0.0, Size winDetSize = Size(64, 128)); + + +class CV_EXPORTS FeatureEvaluator +{ +public: + enum { HAAR = 0, LBP = 1, HOG = 2 }; + virtual ~FeatureEvaluator(); + + virtual bool read(const FileNode& node); + virtual Ptr clone() const; + virtual int getFeatureType() const; + + virtual bool setImage(const Mat& img, Size origWinSize); + virtual bool setWindow(Point p); + + virtual double calcOrd(int featureIdx) const; + virtual int calcCat(int featureIdx) const; + + static Ptr create(int type); +}; + +template<> CV_EXPORTS void Ptr::delete_obj(); + +enum +{ + CASCADE_DO_CANNY_PRUNING=1, + CASCADE_SCALE_IMAGE=2, + CASCADE_FIND_BIGGEST_OBJECT=4, + 
CASCADE_DO_ROUGH_SEARCH=8 +}; + +class CV_EXPORTS_W CascadeClassifier +{ +public: + CV_WRAP CascadeClassifier(); + CV_WRAP CascadeClassifier( const string& filename ); + virtual ~CascadeClassifier(); + + CV_WRAP virtual bool empty() const; + CV_WRAP bool load( const string& filename ); + virtual bool read( const FileNode& node ); + CV_WRAP virtual void detectMultiScale( const Mat& image, + CV_OUT vector& objects, + double scaleFactor=1.1, + int minNeighbors=3, int flags=0, + Size minSize=Size(), + Size maxSize=Size() ); + + CV_WRAP virtual void detectMultiScale( const Mat& image, + CV_OUT vector& objects, + vector& rejectLevels, + vector& levelWeights, + double scaleFactor=1.1, + int minNeighbors=3, int flags=0, + Size minSize=Size(), + Size maxSize=Size(), + bool outputRejectLevels=false ); + + + bool isOldFormatCascade() const; + virtual Size getOriginalWindowSize() const; + int getFeatureType() const; + bool setImage( const Mat& ); + +protected: + //virtual bool detectSingleScale( const Mat& image, int stripCount, Size processingRectSize, + // int stripSize, int yStep, double factor, vector& candidates ); + + virtual bool detectSingleScale( const Mat& image, int stripCount, Size processingRectSize, + int stripSize, int yStep, double factor, vector& candidates, + vector& rejectLevels, vector& levelWeights, bool outputRejectLevels=false); + +protected: + enum { BOOST = 0 }; + enum { DO_CANNY_PRUNING = 1, SCALE_IMAGE = 2, + FIND_BIGGEST_OBJECT = 4, DO_ROUGH_SEARCH = 8 }; + + friend class CascadeClassifierInvoker; + + template + friend int predictOrdered( CascadeClassifier& cascade, Ptr &featureEvaluator, double& weight); + + template + friend int predictCategorical( CascadeClassifier& cascade, Ptr &featureEvaluator, double& weight); + + template + friend int predictOrderedStump( CascadeClassifier& cascade, Ptr &featureEvaluator, double& weight); + + template + friend int predictCategoricalStump( CascadeClassifier& cascade, Ptr &featureEvaluator, double& weight); + + bool setImage( Ptr& feval, const Mat& image); + virtual int runAt( Ptr& feval, Point pt, double& weight ); + + class Data + { + public: + struct CV_EXPORTS DTreeNode + { + int featureIdx; + float threshold; // for ordered features only + int left; + int right; + }; + + struct CV_EXPORTS DTree + { + int nodeCount; + }; + + struct CV_EXPORTS Stage + { + int first; + int ntrees; + float threshold; + }; + + bool read(const FileNode &node); + + bool isStumpBased; + + int stageType; + int featureType; + int ncategories; + Size origWinSize; + + vector stages; + vector classifiers; + vector nodes; + vector leaves; + vector subsets; + }; + + Data data; + Ptr featureEvaluator; + Ptr oldCascade; + +public: + class CV_EXPORTS MaskGenerator + { + public: + virtual ~MaskGenerator() {} + virtual cv::Mat generateMask(const cv::Mat& src)=0; + virtual void initializeMask(const cv::Mat& /*src*/) {}; + }; + void setMaskGenerator(Ptr maskGenerator); + Ptr getMaskGenerator(); + + void setFaceDetectionMaskGenerator(); + +protected: + Ptr maskGenerator; +}; + + +//////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector ////////////// + +// struct for detection region of interest (ROI) +struct DetectionROI +{ + // scale(size) of the bounding box + double scale; + // set of requrested locations to be evaluated + vector locations; + // vector that will contain confidence values for each location + vector confidences; +}; + +struct CV_EXPORTS_W HOGDescriptor +{ +public: + enum { L2Hys=0 }; + enum { DEFAULT_NLEVELS=64 }; + + 
CV_WRAP HOGDescriptor() : winSize(64,128), blockSize(16,16), blockStride(8,8), + cellSize(8,8), nbins(9), derivAperture(1), winSigma(-1), + histogramNormType(HOGDescriptor::L2Hys), L2HysThreshold(0.2), gammaCorrection(true), + nlevels(HOGDescriptor::DEFAULT_NLEVELS) + {} + + CV_WRAP HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride, + Size _cellSize, int _nbins, int _derivAperture=1, double _winSigma=-1, + int _histogramNormType=HOGDescriptor::L2Hys, + double _L2HysThreshold=0.2, bool _gammaCorrection=false, + int _nlevels=HOGDescriptor::DEFAULT_NLEVELS) + : winSize(_winSize), blockSize(_blockSize), blockStride(_blockStride), cellSize(_cellSize), + nbins(_nbins), derivAperture(_derivAperture), winSigma(_winSigma), + histogramNormType(_histogramNormType), L2HysThreshold(_L2HysThreshold), + gammaCorrection(_gammaCorrection), nlevels(_nlevels) + {} + + CV_WRAP HOGDescriptor(const String& filename) + { + load(filename); + } + + HOGDescriptor(const HOGDescriptor& d) + { + d.copyTo(*this); + } + + virtual ~HOGDescriptor() {} + + CV_WRAP size_t getDescriptorSize() const; + CV_WRAP bool checkDetectorSize() const; + CV_WRAP double getWinSigma() const; + + CV_WRAP virtual void setSVMDetector(InputArray _svmdetector); + + virtual bool read(FileNode& fn); + virtual void write(FileStorage& fs, const String& objname) const; + + CV_WRAP virtual bool load(const String& filename, const String& objname=String()); + CV_WRAP virtual void save(const String& filename, const String& objname=String()) const; + virtual void copyTo(HOGDescriptor& c) const; + + CV_WRAP virtual void compute(const Mat& img, + CV_OUT vector& descriptors, + Size winStride=Size(), Size padding=Size(), + const vector& locations=vector()) const; + //with found weights output + CV_WRAP virtual void detect(const Mat& img, CV_OUT vector& foundLocations, + CV_OUT vector& weights, + double hitThreshold=0, Size winStride=Size(), + Size padding=Size(), + const vector& searchLocations=vector()) const; + //without found weights output + virtual void detect(const Mat& img, CV_OUT vector& foundLocations, + double hitThreshold=0, Size winStride=Size(), + Size padding=Size(), + const vector& searchLocations=vector()) const; + //with result weights output + CV_WRAP virtual void detectMultiScale(const Mat& img, CV_OUT vector& foundLocations, + CV_OUT vector& foundWeights, double hitThreshold=0, + Size winStride=Size(), Size padding=Size(), double scale=1.05, + double finalThreshold=2.0,bool useMeanshiftGrouping = false) const; + //without found weights output + virtual void detectMultiScale(const Mat& img, CV_OUT vector& foundLocations, + double hitThreshold=0, Size winStride=Size(), + Size padding=Size(), double scale=1.05, + double finalThreshold=2.0, bool useMeanshiftGrouping = false) const; + + CV_WRAP virtual void computeGradient(const Mat& img, CV_OUT Mat& grad, CV_OUT Mat& angleOfs, + Size paddingTL=Size(), Size paddingBR=Size()) const; + + CV_WRAP static vector getDefaultPeopleDetector(); + CV_WRAP static vector getDaimlerPeopleDetector(); + + CV_PROP Size winSize; + CV_PROP Size blockSize; + CV_PROP Size blockStride; + CV_PROP Size cellSize; + CV_PROP int nbins; + CV_PROP int derivAperture; + CV_PROP double winSigma; + CV_PROP int histogramNormType; + CV_PROP double L2HysThreshold; + CV_PROP bool gammaCorrection; + CV_PROP vector svmDetector; + CV_PROP int nlevels; + + + // evaluate specified ROI and return confidence value for each location + void detectROI(const cv::Mat& img, const vector &locations, + CV_OUT std::vector& 
foundLocations, CV_OUT std::vector& confidences, + double hitThreshold = 0, cv::Size winStride = Size(), + cv::Size padding = Size()) const; + + // evaluate specified ROI and return confidence value for each location in multiple scales + void detectMultiScaleROI(const cv::Mat& img, + CV_OUT std::vector& foundLocations, + std::vector& locations, + double hitThreshold = 0, + int groupThreshold = 0) const; + + // read/parse Dalal's alt model file + void readALTModel(std::string modelfile); + void groupRectangles(vector& rectList, vector& weights, int groupThreshold, double eps) const; +}; + + +CV_EXPORTS_W void findDataMatrix(InputArray image, + CV_OUT vector& codes, + OutputArray corners=noArray(), + OutputArrayOfArrays dmtx=noArray()); +CV_EXPORTS_W void drawDataMatrixCodes(InputOutputArray image, + const vector& codes, + InputArray corners); +} + +/****************************************************************************************\ +* Datamatrix * +\****************************************************************************************/ + +struct CV_EXPORTS CvDataMatrixCode { + char msg[4]; + CvMat *original; + CvMat *corners; +}; + +CV_EXPORTS std::deque cvFindDataMatrix(CvMat *im); + +/****************************************************************************************\ +* LINE-MOD * +\****************************************************************************************/ + +namespace cv { +namespace linemod { + +using cv::FileNode; +using cv::FileStorage; +using cv::Mat; +using cv::noArray; +using cv::OutputArrayOfArrays; +using cv::Point; +using cv::Ptr; +using cv::Rect; +using cv::Size; + +/// @todo Convert doxy comments to rst + +/** + * \brief Discriminant feature described by its location and label. + */ +struct CV_EXPORTS Feature +{ + int x; ///< x offset + int y; ///< y offset + int label; ///< Quantization + + Feature() : x(0), y(0), label(0) {} + Feature(int x, int y, int label); + + void read(const FileNode& fn); + void write(FileStorage& fs) const; +}; + +inline Feature::Feature(int _x, int _y, int _label) : x(_x), y(_y), label(_label) {} + +struct CV_EXPORTS Template +{ + int width; + int height; + int pyramid_level; + std::vector features; + + void read(const FileNode& fn); + void write(FileStorage& fs) const; +}; + +/** + * \brief Represents a modality operating over an image pyramid. + */ +class QuantizedPyramid +{ +public: + // Virtual destructor + virtual ~QuantizedPyramid() {} + + /** + * \brief Compute quantized image at current pyramid level for online detection. + * + * \param[out] dst The destination 8-bit image. For each pixel at most one bit is set, + * representing its classification. + */ + virtual void quantize(Mat& dst) const =0; + + /** + * \brief Extract most discriminant features at current pyramid level to form a new template. + * + * \param[out] templ The new template. + */ + virtual bool extractTemplate(Template& templ) const =0; + + /** + * \brief Go to the next pyramid level. + * + * \todo Allow pyramid scale factor other than 2 + */ + virtual void pyrDown() =0; + +protected: + /// Candidate feature with a score + struct Candidate + { + Candidate(int x, int y, int label, float score); + + /// Sort candidates with high score to the front + bool operator<(const Candidate& rhs) const + { + return score > rhs.score; + } + + Feature f; + float score; + }; + + /** + * \brief Choose candidate features so that they are not bunched together. + * + * \param[in] candidates Candidate features sorted by score. 
+ * \param[out] features Destination vector of selected features. + * \param[in] num_features Number of candidates to select. + * \param[in] distance Hint for desired distance between features. + */ + static void selectScatteredFeatures(const std::vector& candidates, + std::vector& features, + size_t num_features, float distance); +}; + +inline QuantizedPyramid::Candidate::Candidate(int x, int y, int label, float _score) : f(x, y, label), score(_score) {} + +/** + * \brief Interface for modalities that plug into the LINE template matching representation. + * + * \todo Max response, to allow optimization of summing (255/MAX) features as uint8 + */ +class CV_EXPORTS Modality +{ +public: + // Virtual destructor + virtual ~Modality() {} + + /** + * \brief Form a quantized image pyramid from a source image. + * + * \param[in] src The source image. Type depends on the modality. + * \param[in] mask Optional mask. If not empty, unmasked pixels are set to zero + * in quantized image and cannot be extracted as features. + */ + Ptr process(const Mat& src, + const Mat& mask = Mat()) const + { + return processImpl(src, mask); + } + + virtual std::string name() const =0; + + virtual void read(const FileNode& fn) =0; + virtual void write(FileStorage& fs) const =0; + + /** + * \brief Create modality by name. + * + * The following modality types are supported: + * - "ColorGradient" + * - "DepthNormal" + */ + static Ptr create(const std::string& modality_type); + + /** + * \brief Load a modality from file. + */ + static Ptr create(const FileNode& fn); + +protected: + // Indirection is because process() has a default parameter. + virtual Ptr processImpl(const Mat& src, + const Mat& mask) const =0; +}; + +/** + * \brief Modality that computes quantized gradient orientations from a color image. + */ +class CV_EXPORTS ColorGradient : public Modality +{ +public: + /** + * \brief Default constructor. Uses reasonable default parameter values. + */ + ColorGradient(); + + /** + * \brief Constructor. + * + * \param weak_threshold When quantizing, discard gradients with magnitude less than this. + * \param num_features How many features a template must contain. + * \param strong_threshold Consider as candidate features only gradients whose norms are + * larger than this. + */ + ColorGradient(float weak_threshold, size_t num_features, float strong_threshold); + + virtual std::string name() const; + + virtual void read(const FileNode& fn); + virtual void write(FileStorage& fs) const; + + float weak_threshold; + size_t num_features; + float strong_threshold; + +protected: + virtual Ptr processImpl(const Mat& src, + const Mat& mask) const; +}; + +/** + * \brief Modality that computes quantized surface normals from a dense depth map. + */ +class CV_EXPORTS DepthNormal : public Modality +{ +public: + /** + * \brief Default constructor. Uses reasonable default parameter values. + */ + DepthNormal(); + + /** + * \brief Constructor. + * + * \param distance_threshold Ignore pixels beyond this distance. + * \param difference_threshold When computing normals, ignore contributions of pixels whose + * depth difference with the central pixel is above this threshold. + * \param num_features How many features a template must contain. + * \param extract_threshold Consider as candidate feature only if there are no differing + * orientations within a distance of extract_threshold. 
+ */ + DepthNormal(int distance_threshold, int difference_threshold, size_t num_features, + int extract_threshold); + + virtual std::string name() const; + + virtual void read(const FileNode& fn); + virtual void write(FileStorage& fs) const; + + int distance_threshold; + int difference_threshold; + size_t num_features; + int extract_threshold; + +protected: + virtual Ptr processImpl(const Mat& src, + const Mat& mask) const; +}; + +/** + * \brief Debug function to colormap a quantized image for viewing. + */ +void colormap(const Mat& quantized, Mat& dst); + +/** + * \brief Represents a successful template match. + */ +struct CV_EXPORTS Match +{ + Match() + { + } + + Match(int x, int y, float similarity, const std::string& class_id, int template_id); + + /// Sort matches with high similarity to the front + bool operator<(const Match& rhs) const + { + // Secondarily sort on template_id for the sake of duplicate removal + if (similarity != rhs.similarity) + return similarity > rhs.similarity; + else + return template_id < rhs.template_id; + } + + bool operator==(const Match& rhs) const + { + return x == rhs.x && y == rhs.y && similarity == rhs.similarity && class_id == rhs.class_id; + } + + int x; + int y; + float similarity; + std::string class_id; + int template_id; +}; + +inline Match::Match(int _x, int _y, float _similarity, const std::string& _class_id, int _template_id) + : x(_x), y(_y), similarity(_similarity), class_id(_class_id), template_id(_template_id) + { + } + +/** + * \brief Object detector using the LINE template matching algorithm with any set of + * modalities. + */ +class CV_EXPORTS Detector +{ +public: + /** + * \brief Empty constructor, initialize with read(). + */ + Detector(); + + /** + * \brief Constructor. + * + * \param modalities Modalities to use (color gradients, depth normals, ...). + * \param T_pyramid Value of the sampling step T at each pyramid level. The + * number of pyramid levels is T_pyramid.size(). + */ + Detector(const std::vector< Ptr >& modalities, const std::vector& T_pyramid); + + /** + * \brief Detect objects by template matching. + * + * Matches globally at the lowest pyramid level, then refines locally stepping up the pyramid. + * + * \param sources Source images, one for each modality. + * \param threshold Similarity threshold, a percentage between 0 and 100. + * \param[out] matches Template matches, sorted by similarity score. + * \param class_ids If non-empty, only search for the desired object classes. + * \param[out] quantized_images Optionally return vector of quantized images. + * \param masks The masks for consideration during matching. The masks should be CV_8UC1 + * where 255 represents a valid pixel. If non-empty, the vector must be + * the same size as sources. Each element must be + * empty or the same size as its corresponding source. + */ + void match(const std::vector& sources, float threshold, std::vector& matches, + const std::vector& class_ids = std::vector(), + OutputArrayOfArrays quantized_images = noArray(), + const std::vector& masks = std::vector()) const; + + /** + * \brief Add new object template. + * + * \param sources Source images, one for each modality. + * \param class_id Object class ID. + * \param object_mask Mask separating object from background. + * \param[out] bounding_box Optionally return bounding box of the extracted features. + * + * \return Template ID, or -1 if failed to extract a valid template. 
+ */ + int addTemplate(const std::vector& sources, const std::string& class_id, + const Mat& object_mask, Rect* bounding_box = NULL); + + /** + * \brief Add a new object template computed by external means. + */ + int addSyntheticTemplate(const std::vector